ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
44
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 16
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 127
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 96
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
ad18d8db-ff85-4f70-86f2-d0a7b774dad7 | cpp | tensorflow/tensorflow | tf_op_wrapper | tensorflow/core/ir/tf_op_wrapper.cc | tensorflow/core/ir/tf_op_wrapper_test.cc | #include "tensorflow/core/ir/tf_op_wrapper.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
namespace mlir {
namespace tfg {
// Wraps `op` in a TFOp handle. A null op is permitted; a non-null op must
// belong to the TFGraph dialect (debug-checked via classof).
TFOp::TFOp(Operation *op) : op_(op) {
  assert(!op || classof(op) && "expected a TFG op");
}
// Returns the StringAttr holding the node name (looked up via the dialect's
// cached name-attribute identifier), or null if absent.
StringAttr TFOp::nameAttr() {
  return op_->getAttrOfType<StringAttr>(getDialect()->getNameAttrIdentifier());
}
StringRef TFOp::name() { return nameAttr().getValue(); }
void TFOp::setName(const Twine &name) {
setName(StringAttr::get(op_->getContext(), name.str()));
}
// Sets (or replaces) the node-name attribute on the wrapped op.
void TFOp::setName(StringAttr name) {
  op_->setAttr(getDialect()->getNameAttrIdentifier(), name);
}
// Returns the requested-device attribute, or null if absent.
StringAttr TFOp::requestedDeviceAttr() {
  return op_->getAttrOfType<StringAttr>(
      getDialect()->getDeviceAttrIdentifier());
}
StringRef TFOp::requestedDevice() { return requestedDeviceAttr().getValue(); }
void TFOp::setRequestedDevice(const Twine &device) {
setRequestedDevice(StringAttr::get(op_->getContext(), device.str()));
}
// Sets (or replaces) the requested-device attribute on the wrapped op.
void TFOp::setRequestedDevice(StringAttr device) {
  op_->setAttr(getDialect()->getDeviceAttrIdentifier(), device);
}
// Returns the assigned-device attribute (set by placement), or null.
StringAttr TFOp::assignedDeviceAttr() {
  return op_->getAttrOfType<StringAttr>(
      getDialect()->getAssignedDeviceAttrIdentifier());
}
StringRef TFOp::assignedDevice() { return assignedDeviceAttr().getValue(); }
void TFOp::setAssignedDevice(const Twine &device) {
setAssignedDevice(StringAttr::get(op_->getContext(), device.str()));
}
// Sets (or replaces) the assigned-device attribute on the wrapped op.
void TFOp::setAssignedDevice(StringAttr device) {
  op_->setAttr(getDialect()->getAssignedDeviceAttrIdentifier(), device);
}
// Returns the "_tpu_replicate" attribute, or null if absent. Note this one
// uses a literal key rather than a dialect-cached identifier.
StringAttr TFOp::tpuReplicate() {
  return op_->getAttrOfType<StringAttr>("_tpu_replicate");
}
// Sets (or replaces) the "_tpu_replicate" attribute on the wrapped op.
void TFOp::setTpuReplicate(StringAttr tpu_replicate) {
  op_->setAttr("_tpu_replicate", tpu_replicate);
}
}
} | #include "tensorflow/core/ir/tf_op_wrapper.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/ir/dialect.h"
#include "tensorflow/core/ir/ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
// Verifies TFOp participates in LLVM-style RTTI: isa/dyn_cast succeed only
// for TFGraph-dialect operations, and fail for the builtin ModuleOp.
TEST(TFOpWrapper, LLVMRTTI) {
  const char *const code = R"mlir(
tfg.func @test() -> (tensor<i32>) {
%A, %ctlA = A : () -> (tensor<i32>)
return(%A) : tensor<i32>
}
)mlir";
  MLIRContext context;
  context.getOrLoadDialect<TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  Operation *module_op = module.get();
  // The builtin module is not a TFG op.
  EXPECT_FALSE(isa<TFOp>(module_op));
  EXPECT_FALSE(dyn_cast<TFOp>(module_op));
  // Every op matched by the typed walk callback is a TFG op by construction.
  module->walk([&](TFOp op) {
    EXPECT_TRUE(isa<TFOp>(op.getOperation()));
    EXPECT_TRUE(dyn_cast<TFOp>(op.getOperation()));
  });
}
// Checks that getNonControlOperands()/getControlOperands() partition the
// operand list, with the control tokens trailing the data operands.
TEST(TFOpWrapper, ControlOperands) {
  const char *const code = R"mlir(
tfg.func @test(%a: tensor<i32> {tfg.name = "a"},
%b: tensor<i32> {tfg.name = "b"}) -> (tensor<i32>) {
%A, %ctlA = A(%a, %b) [%a.ctl, %b.ctl] : (tensor<i32>, tensor<i32>)
-> (tensor<i32>)
return(%A) : tensor<i32>
}
)mlir";
  MLIRContext context;
  context.getOrLoadDialect<TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  TFOp a_op;
  module->walk([&](TFOp op) {
    if (op->getName().getStringRef() == "tfg.A") a_op = op;
  });
  ASSERT_TRUE(a_op);
  // An op's control token is produced by the op itself.
  EXPECT_EQ(a_op.controlRet().getDefiningOp(), a_op.getOperation());
  OperandRange operands = a_op->getOperands();
  OperandRange data = a_op.getNonControlOperands();
  OperandRange ctls = a_op.getControlOperands();
  EXPECT_EQ(operands.size(), 4u);
  EXPECT_EQ(data.size(), 2u);
  EXPECT_EQ(ctls.size(), 2u);
  // The first control-typed operand marks where the data operands end.
  OperandRange::iterator ctl_it = llvm::find_if(operands, [](Value operand) {
    return mlir::isa<ControlType>(operand.getType());
  });
  EXPECT_NE(ctl_it, operands.end());
  EXPECT_EQ(data.end(), ctl_it);
  EXPECT_EQ(*ctls.begin(), *ctl_it);
}
// Exercises the name/device/tpu_replicate getters and setters, including
// deviceAttr()'s preference for the assigned device over the requested one.
TEST(TFOpWrapper, AttributeGetterSetters) {
  MLIRContext context;
  auto *tfg_dialect = context.getOrLoadDialect<TFGraphDialect>();
  OperationState state(UnknownLoc::get(&context), "tfg.A");
  state.addTypes(tfg_dialect->getControlType());
  TFOp op = Operation::create(state);
  // The op is never inserted into a block, so destroy it manually.
  auto cleanup = llvm::make_scope_exit([&] { op->destroy(); });
  {
    // Name: settable from both a Twine and a StringAttr.
    EXPECT_FALSE(op.nameAttr());
    StringRef a_name = "a_name";
    op.setName(a_name);
    EXPECT_EQ(op.name(), a_name);
    StringRef another_name = "another_name";
    op.setName(StringAttr::get(&context, another_name));
    EXPECT_EQ(op.name(), another_name);
  }
  {
    // Requested device round-trips through both setter overloads.
    StringRef a_device = "/some_device";
    EXPECT_FALSE(op.requestedDeviceAttr());
    op.setRequestedDevice(a_device);
    EXPECT_EQ(op.requestedDevice(), a_device);
    StringRef another_device = "/some_other_device";
    op.setRequestedDevice(StringAttr::get(&context, another_device));
    EXPECT_EQ(op.requestedDevice(), another_device);
  }
  {
    // Assigned device round-trips through both setter overloads.
    StringRef a_device = "/some_assigned_device";
    EXPECT_FALSE(op.assignedDeviceAttr());
    op.setAssignedDevice(a_device);
    EXPECT_EQ(op.assignedDevice(), a_device);
    StringRef another_device = "/some_other_assigned_device";
    op.setAssignedDevice(StringAttr::get(&context, another_device));
    EXPECT_EQ(op.assignedDevice(), another_device);
  }
  {
    // device()/deviceAttr() prefer the assigned device when present, fall
    // back to the requested device, then to the empty string.
    op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
    EXPECT_EQ(op.deviceAttr(), op.requestedDeviceAttr());
    StringRef device = "/an_assigned_device";
    op.setAssignedDevice(device);
    EXPECT_EQ(op.deviceAttr(), op.assignedDeviceAttr());
    EXPECT_EQ(op.device(), device);
    op->removeAttr(tfg_dialect->getAssignedDeviceAttrIdentifier());
    op->removeAttr(tfg_dialect->getDeviceAttrIdentifier());
    EXPECT_EQ(op.device(), "");
  }
  {
    // "_tpu_replicate" round-trips through its setter/getter pair.
    auto tpu_replicate = StringAttr::get(op->getContext(), "a_tpu");
    op.setTpuReplicate(tpu_replicate);
    EXPECT_EQ(op.tpuReplicate(), tpu_replicate);
  }
}
// Verifies OperandControlRetRange maps each operand to the control token of
// its producer (or, for block arguments, the paired control argument).
TEST(TFOpWrapper, ValueControlRet) {
  const char *const code = R"mlir(
tfg.func @test(%arg: tensor<i32> {tfg.name = "arg"}) -> (tensor<i32>) {
%Const, %ctl = Const {dtype = i32, value = dense<0> : tensor<i32>} : () -> (tensor<i32>)
%Add, %ctl_2 = Add(%Const, %arg) [%ctl] {T = i32} : (tensor<i32>, tensor<i32>) -> (tensor<i32>)
return(%Add) : tensor<i32>
}
)mlir";
  MLIRContext context;
  context.getOrLoadDialect<TFGraphDialect>();
  OwningOpRef<ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(code, &context);
  ASSERT_TRUE(module);
  GraphFuncOp func = module->lookupSymbol<GraphFuncOp>("test");
  ASSERT_TRUE(func);
  auto iterator = func.getBody().begin()->begin();
  TFOp const_op = &(*iterator++);
  TFOp add_op = &(*iterator);
  OperandControlRetRange ret_range(add_op->getOperands());
  // %Const -> %ctl; %arg -> its control block argument; [%ctl] -> %ctl.
  EXPECT_EQ(ret_range[0], const_op.controlRet());
  EXPECT_EQ(ret_range[1], func.getBody().begin()->getArguments()[1]);
  EXPECT_EQ(ret_range[2], const_op.controlRet());
  for (Value v : ret_range) EXPECT_TRUE(mlir::isa<ControlType>(v.getType()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/tf_op_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90432f51-de56-4e88-bba2-65270ed31614 | cpp | google/leveldb | env_windows | util/env_windows.cc | util/env_windows_test.cc | #ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <mutex>
#include <queue>
#include <sstream>
#include <string>
#include <vector>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "port/port.h"
#include "port/thread_annotations.h"
#include "util/env_windows_test_helper.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/windows_logger.h"
namespace leveldb {
namespace {
// Buffered writes are flushed to the OS in chunks of this size (64 KiB).
constexpr const size_t kWritableFileBufferSize = 65536;
// Up to 1000 mmap'd regions on 64-bit builds; disabled (0) on 32-bit builds
// where virtual address space is scarce.
constexpr int kDefaultMmapLimit = (sizeof(void*) >= 8) ? 1000 : 0;
// Mutable so tests can override it before the Env singleton is created.
int g_mmap_limit = kDefaultMmapLimit;
// Returns the system message text for a Win32 error code, or an empty
// string if FormatMessageA cannot produce one. The buffer allocated by
// FORMAT_MESSAGE_ALLOCATE_BUFFER is released with LocalFree.
std::string GetWindowsErrorMessage(DWORD error_code) {
  std::string message;
  char* error_text = nullptr;
  size_t error_text_size = ::FormatMessageA(
      FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER |
          FORMAT_MESSAGE_IGNORE_INSERTS,
      nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
      reinterpret_cast<char*>(&error_text), 0, nullptr);
  if (!error_text) {
    return message;
  }
  message.assign(error_text, error_text_size);
  ::LocalFree(error_text);
  return message;
}
// Maps a Win32 error code onto a leveldb Status, preserving the human
// readable system message. Missing-file/path errors become NotFound so
// callers can distinguish them; everything else is reported as IOError.
Status WindowsError(const std::string& context, DWORD error_code) {
  const std::string message = GetWindowsErrorMessage(error_code);
  const bool not_found = (error_code == ERROR_FILE_NOT_FOUND) ||
                         (error_code == ERROR_PATH_NOT_FOUND);
  if (not_found) {
    return Status::NotFound(context, message);
  }
  return Status::IOError(context, message);
}
// RAII wrapper around a Win32 HANDLE. Movable but not copyable; closes the
// owned handle (if any) on destruction.
class ScopedHandle {
 public:
  ScopedHandle(HANDLE handle) : handle_(handle) {}
  ScopedHandle(const ScopedHandle&) = delete;
  ScopedHandle(ScopedHandle&& other) noexcept : handle_(other.Release()) {}
  ~ScopedHandle() { Close(); }

  ScopedHandle& operator=(const ScopedHandle&) = delete;

  ScopedHandle& operator=(ScopedHandle&& rhs) noexcept {
    if (this != &rhs) {
      // Bug fix: close the handle we currently own before adopting rhs's;
      // the original overwrote handle_ directly and leaked any valid handle.
      Close();
      handle_ = rhs.Release();
    }
    return *this;
  }

  // Closes the owned handle. Returns true if no handle was owned, or if
  // CloseHandle succeeded.
  bool Close() {
    if (!is_valid()) {
      return true;
    }
    HANDLE h = handle_;
    handle_ = INVALID_HANDLE_VALUE;
    return ::CloseHandle(h);
  }

  // Different Win32 APIs use INVALID_HANDLE_VALUE or null as the "no
  // handle" sentinel, so both are treated as invalid here.
  bool is_valid() const {
    return handle_ != INVALID_HANDLE_VALUE && handle_ != nullptr;
  }

  HANDLE get() const { return handle_; }

  // Relinquishes ownership and returns the handle without closing it.
  HANDLE Release() {
    HANDLE h = handle_;
    handle_ = INVALID_HANDLE_VALUE;
    return h;
  }

 private:
  HANDLE handle_;
};
// Bounds the number of simultaneously-held acquisitions of a resource
// (e.g. mmap regions). Safe for concurrent use: a single atomic counter,
// no mutex.
class Limiter {
 public:
  // max_acquires must be non-negative; it is the maximum number of
  // outstanding Acquire() calls allowed at once.
  Limiter(int max_acquires)
      :
#if !defined(NDEBUG)
        max_acquires_(max_acquires),
#endif
        acquires_allowed_(max_acquires) {
    assert(max_acquires >= 0);
  }

  Limiter(const Limiter&) = delete;
  Limiter operator=(const Limiter&) = delete;

  // Attempts to take one slot; returns false (leaving the count unchanged)
  // when no slots are available.
  bool Acquire() {
    int previously_allowed =
        acquires_allowed_.fetch_sub(1, std::memory_order_relaxed);
    if (previously_allowed <= 0) {
      // Undo the optimistic decrement; no slot was actually taken.
      acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
      return false;
    }
    return true;
  }

  // Returns a slot obtained by a previous successful Acquire().
  void Release() {
    int previously_allowed =
        acquires_allowed_.fetch_add(1, std::memory_order_relaxed);
    (void)previously_allowed;
    // More releases than acquires indicates a caller bug.
    assert(previously_allowed < max_acquires_);
  }

 private:
#if !defined(NDEBUG)
  // Debug-only record of the cap, for the Release() sanity check.
  const int max_acquires_;
#endif
  // Number of slots still available; may transiently dip below zero while
  // a failing Acquire() repairs its decrement.
  std::atomic<int> acquires_allowed_;
};
// SequentialFile backed by a synchronous Win32 handle opened for read-only,
// forward-only access.
class WindowsSequentialFile : public SequentialFile {
 public:
  WindowsSequentialFile(std::string filename, ScopedHandle handle)
      : handle_(std::move(handle)), filename_(std::move(filename)) {}
  ~WindowsSequentialFile() override {}

  // Reads up to n bytes into scratch and points *result at what was read.
  // A short read (including 0 bytes at EOF) is not an error.
  Status Read(size_t n, Slice* result, char* scratch) override {
    DWORD bytes_read;
    // Debug-checked that n fits in the DWORD passed to ReadFile.
    assert(n <= std::numeric_limits<DWORD>::max());
    if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
                    nullptr)) {
      return WindowsError(filename_, ::GetLastError());
    }
    *result = Slice(scratch, bytes_read);
    return Status::OK();
  }

  // Advances the file pointer by n bytes without reading.
  Status Skip(uint64_t n) override {
    LARGE_INTEGER distance;
    distance.QuadPart = n;
    if (!::SetFilePointerEx(handle_.get(), distance, nullptr, FILE_CURRENT)) {
      return WindowsError(filename_, ::GetLastError());
    }
    return Status::OK();
  }

 private:
  const ScopedHandle handle_;   // Owns the underlying Win32 handle.
  const std::string filename_;  // Retained for error messages.
};
// RandomAccessFile that issues positioned (OVERLAPPED) reads on a Win32
// handle; used when the mmap limiter has no free slots.
class WindowsRandomAccessFile : public RandomAccessFile {
 public:
  WindowsRandomAccessFile(std::string filename, ScopedHandle handle)
      : handle_(std::move(handle)), filename_(std::move(filename)) {}
  ~WindowsRandomAccessFile() override = default;

  // Reads up to n bytes at the given absolute offset. Reading at or past
  // EOF yields a short (possibly empty) result rather than an error.
  Status Read(uint64_t offset, size_t n, Slice* result,
              char* scratch) const override {
    DWORD bytes_read = 0;
    OVERLAPPED overlapped = {0};
    // Split the 64-bit offset across the two OVERLAPPED offset fields.
    overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
    overlapped.Offset = static_cast<DWORD>(offset);
    if (!::ReadFile(handle_.get(), scratch, static_cast<DWORD>(n), &bytes_read,
                    &overlapped)) {
      DWORD error_code = ::GetLastError();
      // ERROR_HANDLE_EOF just means a short read; anything else is fatal.
      if (error_code != ERROR_HANDLE_EOF) {
        *result = Slice(scratch, 0);
        return Status::IOError(filename_, GetWindowsErrorMessage(error_code));
      }
    }
    *result = Slice(scratch, bytes_read);
    return Status::OK();
  }

 private:
  const ScopedHandle handle_;
  const std::string filename_;  // Retained for error messages.
};
class WindowsMmapReadableFile : public RandomAccessFile {
public:
WindowsMmapReadableFile(std::string filename, char* mmap_base, size_t length,
Limiter* mmap_limiter)
: mmap_base_(mmap_base),
length_(length),
mmap_limiter_(mmap_limiter),
filename_(std::move(filename)) {}
~WindowsMmapReadableFile() override {
::UnmapViewOfFile(mmap_base_);
mmap_limiter_->Release();
}
Status Read(uint64_t offset, size_t n, Slice* result,
char* scratch) const override {
if (offset + n > length_) {
*result = Slice();
return WindowsError(filename_, ERROR_INVALID_PARAMETER);
}
*result = Slice(mmap_base_ + offset, n);
return Status::OK();
}
private:
char* const mmap_base_;
const size_t length_;
Limiter* const mmap_limiter_;
const std::string filename_;
};
// WritableFile that buffers appends in a 64 KiB in-memory buffer and writes
// through a synchronous Win32 handle.
class WindowsWritableFile : public WritableFile {
 public:
  WindowsWritableFile(std::string filename, ScopedHandle handle)
      : pos_(0), handle_(std::move(handle)), filename_(std::move(filename)) {}
  ~WindowsWritableFile() override = default;

  // Appends data, filling the buffer first; the OS is only touched when the
  // buffer fills, and buffer-sized remainders bypass the buffer entirely.
  Status Append(const Slice& data) override {
    size_t write_size = data.size();
    const char* write_data = data.data();
    // Copy as much as fits into the tail of the buffer.
    size_t copy_size = std::min(write_size, kWritableFileBufferSize - pos_);
    std::memcpy(buf_ + pos_, write_data, copy_size);
    write_data += copy_size;
    write_size -= copy_size;
    pos_ += copy_size;
    if (write_size == 0) {
      return Status::OK();
    }
    // Buffer is full: flush it, then either re-buffer a small remainder or
    // write a large remainder directly.
    Status status = FlushBuffer();
    if (!status.ok()) {
      return status;
    }
    if (write_size < kWritableFileBufferSize) {
      std::memcpy(buf_, write_data, write_size);
      pos_ = write_size;
      return Status::OK();
    }
    return WriteUnbuffered(write_data, write_size);
  }

  // Flushes buffered bytes, then closes the handle; the first error wins.
  Status Close() override {
    Status status = FlushBuffer();
    if (!handle_.Close() && status.ok()) {
      status = WindowsError(filename_, ::GetLastError());
    }
    return status;
  }

  // Pushes buffered bytes to the OS (not necessarily to stable storage).
  Status Flush() override { return FlushBuffer(); }

  // Flushes the buffer, then asks the OS to commit cached data to disk.
  Status Sync() override {
    Status status = FlushBuffer();
    if (!status.ok()) {
      return status;
    }
    if (!::FlushFileBuffers(handle_.get())) {
      return Status::IOError(filename_,
                             GetWindowsErrorMessage(::GetLastError()));
    }
    return Status::OK();
  }

 private:
  // Writes the buffered bytes and resets the buffer even on failure.
  Status FlushBuffer() {
    Status status = WriteUnbuffered(buf_, pos_);
    pos_ = 0;
    return status;
  }

  // Writes `size` bytes from `data` directly through the file handle.
  Status WriteUnbuffered(const char* data, size_t size) {
    DWORD bytes_written;
    if (!::WriteFile(handle_.get(), data, static_cast<DWORD>(size),
                     &bytes_written, nullptr)) {
      return Status::IOError(filename_,
                             GetWindowsErrorMessage(::GetLastError()));
    }
    return Status::OK();
  }

  char buf_[kWritableFileBufferSize];  // Pending bytes not yet written.
  size_t pos_;                         // Number of valid bytes in buf_.
  ScopedHandle handle_;
  const std::string filename_;  // Retained for error messages.
};
// Acquires (lock == true) or releases (lock == false) a lock covering the
// maximal byte range of the file. Returns true on success.
bool LockOrUnlock(HANDLE handle, bool lock) {
  // Offset 0/0 with length MAXDWORD/MAXDWORD spans the whole file.
  return lock ? ::LockFile(handle, 0, 0, MAXDWORD, MAXDWORD)
              : ::UnlockFile(handle, 0, 0, MAXDWORD, MAXDWORD);
}
// FileLock implementation pairing the locked file's handle with its name;
// created by LockFile() and destroyed by UnlockFile().
class WindowsFileLock : public FileLock {
 public:
  WindowsFileLock(ScopedHandle handle, std::string filename)
      : handle_(std::move(handle)), filename_(std::move(filename)) {}

  const ScopedHandle& handle() const { return handle_; }
  const std::string& filename() const { return filename_; }

 private:
  const ScopedHandle handle_;   // Closed (and the lock dropped) on destroy.
  const std::string filename_;  // Retained for error messages.
};
// Env implementation for Windows. Instantiated exactly once via the
// SingletonEnv wrapper below and never destroyed; the destructor aborts
// because queued background work and the worker thread must outlive any
// caller.
class WindowsEnv : public Env {
 public:
  WindowsEnv();
  ~WindowsEnv() override {
    static const char msg[] =
        "WindowsEnv singleton destroyed. Unsupported behavior!\n";
    std::fwrite(msg, 1, sizeof(msg), stderr);
    std::abort();
  }

  // Opens filename for forward-only reads, shareable with other readers.
  Status NewSequentialFile(const std::string& filename,
                           SequentialFile** result) override {
    *result = nullptr;
    DWORD desired_access = GENERIC_READ;
    DWORD share_mode = FILE_SHARE_READ;
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), desired_access, share_mode,
        nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL,
        nullptr);
    if (!handle.is_valid()) {
      return WindowsError(filename, ::GetLastError());
    }
    *result = new WindowsSequentialFile(filename, std::move(handle));
    return Status::OK();
  }

  // Prefers a memory-mapped reader while the mmap limiter has free slots;
  // otherwise (or if mapping fails) falls back to positioned reads.
  Status NewRandomAccessFile(const std::string& filename,
                             RandomAccessFile** result) override {
    *result = nullptr;
    DWORD desired_access = GENERIC_READ;
    DWORD share_mode = FILE_SHARE_READ;
    ScopedHandle handle =
        ::CreateFileA(filename.c_str(), desired_access, share_mode,
                      nullptr, OPEN_EXISTING,
                      FILE_ATTRIBUTE_READONLY,
                      nullptr);
    if (!handle.is_valid()) {
      return WindowsError(filename, ::GetLastError());
    }
    if (!mmap_limiter_.Acquire()) {
      // No mmap slot available; use the handle-based reader.
      *result = new WindowsRandomAccessFile(filename, std::move(handle));
      return Status::OK();
    }
    LARGE_INTEGER file_size;
    Status status;
    if (!::GetFileSizeEx(handle.get(), &file_size)) {
      mmap_limiter_.Release();
      return WindowsError(filename, ::GetLastError());
    }
    ScopedHandle mapping =
        ::CreateFileMappingA(handle.get(),
                             nullptr, PAGE_READONLY,
                             0,
                             0,
                             nullptr);
    if (mapping.is_valid()) {
      void* mmap_base = ::MapViewOfFile(mapping.get(), FILE_MAP_READ,
                                        0,
                                        0,
                                        0);
      if (mmap_base) {
        *result = new WindowsMmapReadableFile(
            filename, reinterpret_cast<char*>(mmap_base),
            static_cast<size_t>(file_size.QuadPart), &mmap_limiter_);
        return Status::OK();
      }
    }
    // Mapping failed: return the limiter slot and report the error.
    mmap_limiter_.Release();
    return WindowsError(filename, ::GetLastError());
  }

  // Creates filename (truncating any existing file) for exclusive writes.
  Status NewWritableFile(const std::string& filename,
                         WritableFile** result) override {
    DWORD desired_access = GENERIC_WRITE;
    DWORD share_mode = 0;
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), desired_access, share_mode,
        nullptr, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,
        nullptr);
    if (!handle.is_valid()) {
      *result = nullptr;
      return WindowsError(filename, ::GetLastError());
    }
    *result = new WindowsWritableFile(filename, std::move(handle));
    return Status::OK();
  }

  // Like NewWritableFile, but appends to an existing file if present.
  Status NewAppendableFile(const std::string& filename,
                           WritableFile** result) override {
    DWORD desired_access = FILE_APPEND_DATA;
    DWORD share_mode = 0;
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), desired_access, share_mode,
        nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
        nullptr);
    if (!handle.is_valid()) {
      *result = nullptr;
      return WindowsError(filename, ::GetLastError());
    }
    *result = new WindowsWritableFile(filename, std::move(handle));
    return Status::OK();
  }

  bool FileExists(const std::string& filename) override {
    return GetFileAttributesA(filename.c_str()) != INVALID_FILE_ATTRIBUTES;
  }

  // Lists the base names (name + extension, no directory part) of all
  // entries in directory_path. A missing directory yields an empty,
  // successful result.
  Status GetChildren(const std::string& directory_path,
                     std::vector<std::string>* result) override {
    const std::string find_pattern = directory_path + "\\*";
    WIN32_FIND_DATAA find_data;
    HANDLE dir_handle = ::FindFirstFileA(find_pattern.c_str(), &find_data);
    if (dir_handle == INVALID_HANDLE_VALUE) {
      DWORD last_error = ::GetLastError();
      if (last_error == ERROR_FILE_NOT_FOUND) {
        return Status::OK();
      }
      return WindowsError(directory_path, last_error);
    }
    do {
      char base_name[_MAX_FNAME];
      char ext[_MAX_EXT];
      // _splitpath_s returns 0 on success.
      if (!_splitpath_s(find_data.cFileName, nullptr, 0, nullptr, 0, base_name,
                        ARRAYSIZE(base_name), ext, ARRAYSIZE(ext))) {
        result->emplace_back(std::string(base_name) + ext);
      }
    } while (::FindNextFileA(dir_handle, &find_data));
    DWORD last_error = ::GetLastError();
    ::FindClose(dir_handle);
    // Normal enumeration termination is signalled by ERROR_NO_MORE_FILES.
    if (last_error != ERROR_NO_MORE_FILES) {
      return WindowsError(directory_path, last_error);
    }
    return Status::OK();
  }

  Status RemoveFile(const std::string& filename) override {
    if (!::DeleteFileA(filename.c_str())) {
      return WindowsError(filename, ::GetLastError());
    }
    return Status::OK();
  }

  Status CreateDir(const std::string& dirname) override {
    if (!::CreateDirectoryA(dirname.c_str(), nullptr)) {
      return WindowsError(dirname, ::GetLastError());
    }
    return Status::OK();
  }

  Status RemoveDir(const std::string& dirname) override {
    if (!::RemoveDirectoryA(dirname.c_str())) {
      return WindowsError(dirname, ::GetLastError());
    }
    return Status::OK();
  }

  Status GetFileSize(const std::string& filename, uint64_t* size) override {
    WIN32_FILE_ATTRIBUTE_DATA file_attributes;
    if (!::GetFileAttributesExA(filename.c_str(), GetFileExInfoStandard,
                                &file_attributes)) {
      return WindowsError(filename, ::GetLastError());
    }
    // Reassemble the 64-bit size from its two 32-bit halves.
    ULARGE_INTEGER file_size;
    file_size.HighPart = file_attributes.nFileSizeHigh;
    file_size.LowPart = file_attributes.nFileSizeLow;
    *size = file_size.QuadPart;
    return Status::OK();
  }

  // Renames `from` to `to`. MoveFileA is tried first; if it fails (e.g.
  // because `to` exists), ReplaceFileA is tried as an overwrite.
  Status RenameFile(const std::string& from, const std::string& to) override {
    if (::MoveFileA(from.c_str(), to.c_str())) {
      return Status::OK();
    }
    DWORD move_error = ::GetLastError();
    if (::ReplaceFileA(to.c_str(), from.c_str(), nullptr,
                       REPLACEFILE_IGNORE_MERGE_ERRORS,
                       nullptr, nullptr)) {
      return Status::OK();
    }
    DWORD replace_error = ::GetLastError();
    // If ReplaceFileA failed because `to` does not exist, the original
    // MoveFileA error is the more informative one to report.
    if (replace_error == ERROR_FILE_NOT_FOUND ||
        replace_error == ERROR_PATH_NOT_FOUND) {
      return WindowsError(from, move_error);
    } else {
      return WindowsError(from, replace_error);
    }
  }

  // Creates filename if needed and takes a whole-file lock on it (used to
  // keep two processes from opening the same database).
  Status LockFile(const std::string& filename, FileLock** lock) override {
    *lock = nullptr;
    Status result;
    ScopedHandle handle = ::CreateFileA(
        filename.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ,
        nullptr, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL,
        nullptr);
    if (!handle.is_valid()) {
      result = WindowsError(filename, ::GetLastError());
    } else if (!LockOrUnlock(handle.get(), true)) {
      result = WindowsError("lock " + filename, ::GetLastError());
    } else {
      *lock = new WindowsFileLock(std::move(handle), filename);
    }
    return result;
  }

  // Releases a lock returned by LockFile() and deletes the lock object.
  Status UnlockFile(FileLock* lock) override {
    WindowsFileLock* windows_file_lock =
        reinterpret_cast<WindowsFileLock*>(lock);
    if (!LockOrUnlock(windows_file_lock->handle().get(), false)) {
      return WindowsError("unlock " + windows_file_lock->filename(),
                          ::GetLastError());
    }
    delete windows_file_lock;
    return Status::OK();
  }

  // Defined out of line below; queues work for the background thread.
  void Schedule(void (*background_work_function)(void* background_work_arg),
                void* background_work_arg) override;

  // Fire-and-forget thread; the Env never joins it.
  void StartThread(void (*thread_main)(void* thread_main_arg),
                   void* thread_main_arg) override {
    std::thread new_thread(thread_main, thread_main_arg);
    new_thread.detach();
  }

  // Returns TEST_TMPDIR if set, else a per-thread directory under the
  // system temp path. Directory-creation failure is deliberately ignored;
  // the path may already exist.
  Status GetTestDirectory(std::string* result) override {
    const char* env = getenv("TEST_TMPDIR");
    if (env && env[0] != '\0') {
      *result = env;
      return Status::OK();
    }
    char tmp_path[MAX_PATH];
    if (!GetTempPathA(ARRAYSIZE(tmp_path), tmp_path)) {
      return WindowsError("GetTempPath", ::GetLastError());
    }
    std::stringstream ss;
    ss << tmp_path << "leveldbtest-" << std::this_thread::get_id();
    *result = ss.str();
    CreateDir(*result);
    return Status::OK();
  }

  // "N" is an MSVC CRT mode flag (non-inheritable handle) — presumably
  // chosen so child processes don't keep the log open; verify against the
  // CRT fopen documentation.
  Status NewLogger(const std::string& filename, Logger** result) override {
    std::FILE* fp = std::fopen(filename.c_str(), "wN");
    if (fp == nullptr) {
      *result = nullptr;
      return WindowsError(filename, ::GetLastError());
    } else {
      *result = new WindowsLogger(fp);
      return Status::OK();
    }
  }

  // FILETIME ticks are 100 ns units, hence the divide-by-10 to get
  // microseconds. The FILETIME epoch differs from the Unix epoch, which is
  // fine for the relative measurements leveldb makes.
  uint64_t NowMicros() override {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    uint64_t num_ticks =
        (static_cast<uint64_t>(ft.dwHighDateTime) << 32) + ft.dwLowDateTime;
    return num_ticks / 10;
  }

  void SleepForMicroseconds(int micros) override {
    std::this_thread::sleep_for(std::chrono::microseconds(micros));
  }

 private:
  void BackgroundThreadMain();

  // Thin static trampoline so std::thread can invoke the member function.
  static void BackgroundThreadEntryPoint(WindowsEnv* env) {
    env->BackgroundThreadMain();
  }

  // One queued Schedule() call: a function pointer plus its argument.
  struct BackgroundWorkItem {
    explicit BackgroundWorkItem(void (*function)(void* arg), void* arg)
        : function(function), arg(arg) {}
    void (*const function)(void*);
    void* const arg;
  };

  port::Mutex background_work_mutex_;
  port::CondVar background_work_cv_ GUARDED_BY(background_work_mutex_);
  bool started_background_thread_ GUARDED_BY(background_work_mutex_);
  std::queue<BackgroundWorkItem> background_work_queue_
      GUARDED_BY(background_work_mutex_);
  // Bounds the number of simultaneously mmap'd read-only files.
  Limiter mmap_limiter_;
};
int MaxMmaps() { return g_mmap_limit; }
// The background thread is started lazily by Schedule(), not here.
WindowsEnv::WindowsEnv()
    : background_work_cv_(&background_work_mutex_),
      started_background_thread_(false),
      mmap_limiter_(MaxMmaps()) {}
// Queues background_work_function(background_work_arg) for execution on the
// single shared background thread, starting that thread on first use.
void WindowsEnv::Schedule(
    void (*background_work_function)(void* background_work_arg),
    void* background_work_arg) {
  background_work_mutex_.Lock();
  // Start the background thread on the first call to Schedule().
  if (!started_background_thread_) {
    started_background_thread_ = true;
    std::thread background_thread(WindowsEnv::BackgroundThreadEntryPoint, this);
    background_thread.detach();
  }
  // Only signal when transitioning from empty to non-empty: the worker
  // only sleeps while the queue is empty.
  if (background_work_queue_.empty()) {
    background_work_cv_.Signal();
  }
  background_work_queue_.emplace(background_work_function, background_work_arg);
  background_work_mutex_.Unlock();
}
// Runs forever on the detached background thread, draining queued work one
// item at a time. The work itself runs outside the lock, so it is free to
// call Schedule() again.
void WindowsEnv::BackgroundThreadMain() {
  while (true) {
    background_work_mutex_.Lock();
    // Wait until there is work to be done.
    while (background_work_queue_.empty()) {
      background_work_cv_.Wait();
    }
    assert(!background_work_queue_.empty());
    auto background_work_function = background_work_queue_.front().function;
    void* background_work_arg = background_work_queue_.front().arg;
    background_work_queue_.pop();
    background_work_mutex_.Unlock();
    background_work_function(background_work_arg);
  }
}
// Hosts an Env in raw static storage so the Env is constructed on first use
// and never destroyed (WindowsEnv aborts if its destructor ever runs).
template <typename EnvType>
class SingletonEnv {
 public:
  SingletonEnv() {
#if !defined(NDEBUG)
    env_initialized_.store(true, std::memory_order_relaxed);
#endif
    static_assert(sizeof(env_storage_) >= sizeof(EnvType),
                  "env_storage_ will not fit the Env");
    static_assert(alignof(decltype(env_storage_)) >= alignof(EnvType),
                  "env_storage_ does not meet the Env's alignment needs");
    // Placement-new into raw storage; intentionally no matching delete.
    new (&env_storage_) EnvType();
  }
  ~SingletonEnv() = default;

  SingletonEnv(const SingletonEnv&) = delete;
  SingletonEnv& operator=(const SingletonEnv&) = delete;

  Env* env() { return reinterpret_cast<Env*>(&env_storage_); }

  // Debug aid: lets test helpers verify they run before the Env singleton
  // exists (tweaking globals like g_mmap_limit afterwards is too late).
  static void AssertEnvNotInitialized() {
#if !defined(NDEBUG)
    assert(!env_initialized_.load(std::memory_order_relaxed));
#endif
  }

 private:
  // Suitably sized and aligned raw storage for the Env object.
  typename std::aligned_storage<sizeof(EnvType), alignof(EnvType)>::type
      env_storage_;
#if !defined(NDEBUG)
  static std::atomic<bool> env_initialized_;
#endif
};
#if !defined(NDEBUG)
// Definition of the debug-only flag; zero-initialized (false) before any
// SingletonEnv is constructed.
template <typename EnvType>
std::atomic<bool> SingletonEnv<EnvType>::env_initialized_;
#endif
using WindowsDefaultEnv = SingletonEnv<WindowsEnv>;
}
// Test-only hook; must run before Env::Default() first constructs the
// singleton, otherwise the new limit would never be read.
void EnvWindowsTestHelper::SetReadOnlyMMapLimit(int limit) {
  WindowsDefaultEnv::AssertEnvNotInitialized();
  g_mmap_limit = limit;
}
// Returns the process-wide Env singleton; constructed on first use (with
// thread-safe static initialization) and intentionally never destroyed.
Env* Env::Default() {
  static WindowsDefaultEnv env_container;
  return env_container.env();
}
} | #include "gtest/gtest.h"
#include "leveldb/env.h"
#include "port/port.h"
#include "util/env_windows_test_helper.h"
#include "util/testutil.h"
namespace leveldb {
// Keep the mmap limit small so TestOpenOnRead can exhaust it and exercise
// the non-mmap read path.
static const int kMMapLimit = 4;
class EnvWindowsTest : public testing::Test {
 public:
  // Must be called before Env::Default() constructs the singleton.
  static void SetFileLimits(int mmap_limit) {
    EnvWindowsTestHelper::SetReadOnlyMMapLimit(mmap_limit);
  }
  EnvWindowsTest() : env_(Env::Default()) {}
  Env* env_;  // Borrowed from the Env singleton; not owned.
};
// Opens more random-access readers than the mmap limit allows, verifying
// that reads still work once the limiter forces the handle-based fallback.
TEST_F(EnvWindowsTest, TestOpenOnRead) {
  std::string test_dir;
  ASSERT_LEVELDB_OK(env_->GetTestDirectory(&test_dir));
  std::string test_file = test_dir + "/open_on_read.txt";
  FILE* f = std::fopen(test_file.c_str(), "w");
  ASSERT_TRUE(f != nullptr);
  const char kFileData[] = "abcdefghijklmnopqrstuvwxyz";
  fputs(kFileData, f);
  std::fclose(f);
  // kMMapLimit + 5 readers: some mmap-backed, the rest plain handles.
  const int kNumFiles = kMMapLimit + 5;
  leveldb::RandomAccessFile* files[kNumFiles] = {0};
  for (int i = 0; i < kNumFiles; i++) {
    ASSERT_LEVELDB_OK(env_->NewRandomAccessFile(test_file, &files[i]));
  }
  char scratch;
  Slice read_result;
  // Each reader reads a distinct byte so every object is exercised.
  for (int i = 0; i < kNumFiles; i++) {
    ASSERT_LEVELDB_OK(files[i]->Read(i, 1, &read_result, &scratch));
    ASSERT_EQ(kFileData[i], read_result[0]);
  }
  for (int i = 0; i < kNumFiles; i++) {
    delete files[i];
  }
  ASSERT_LEVELDB_OK(env_->RemoveFile(test_file));
}
}
int main(int argc, char** argv) {
leveldb::EnvWindowsTest::SetFileLimits(leveldb::kMMapLimit);
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_windows.cc | https://github.com/google/leveldb/blob/23e35d792b9154f922b8b575b12596a4d8664c65/util/env_windows_test.cc | 23e35d792b9154f922b8b575b12596a4d8664c65 |
a03b19bf-a0bc-4b63-a4e3-76d768738a45 | cpp | abseil/abseil-cpp | low_level_hash | absl/hash/internal/low_level_hash.cc | absl/hash/internal/low_level_hash_test.cc | #include "absl/hash/internal/low_level_hash.h"
#include <cstddef>
#include <cstdint>
#include "absl/base/internal/unaligned_access.h"
#include "absl/base/prefetch.h"
#include "absl/numeric/int128.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
// Multiplies v0 and v1 into a 128-bit product and XOR-folds the two halves
// together, so every output bit depends on bits of the full product.
static uint64_t Mix(uint64_t v0, uint64_t v1) {
  absl::uint128 p = v0;
  p *= v1;
  return absl::Uint128Low64(p) ^ absl::Uint128High64(p);
}
// Hash path for inputs longer than 16 bytes. Consumes 64-byte chunks with
// four independent mix states (enabling instruction-level parallelism),
// then 32- and 16-byte tails, and always finishes by mixing the final 16
// bytes of the input — which may overlap bytes already consumed.
uint64_t LowLevelHashLenGt16(const void* data, size_t len, uint64_t seed,
                             const uint64_t salt[5]) {
  const uint8_t* ptr = static_cast<const uint8_t*>(data);
  uint64_t starting_length = static_cast<uint64_t>(len);
  // Valid because len > 16: these 16 bytes always exist.
  const uint8_t* last_16_ptr = ptr + starting_length - 16;
  uint64_t current_state = seed ^ salt[0];
  if (len > 64) {
    uint64_t duplicated_state0 = current_state;
    uint64_t duplicated_state1 = current_state;
    uint64_t duplicated_state2 = current_state;
    do {
      // Prefetch the next cache line while the current bytes are mixed.
      PrefetchToLocalCache(ptr + ABSL_CACHELINE_SIZE);
      uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
      uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
      uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
      uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
      uint64_t e = absl::base_internal::UnalignedLoad64(ptr + 32);
      uint64_t f = absl::base_internal::UnalignedLoad64(ptr + 40);
      uint64_t g = absl::base_internal::UnalignedLoad64(ptr + 48);
      uint64_t h = absl::base_internal::UnalignedLoad64(ptr + 56);
      // Each state mixes a distinct 16-byte lane with a distinct salt.
      current_state = Mix(a ^ salt[1], b ^ current_state);
      duplicated_state0 = Mix(c ^ salt[2], d ^ duplicated_state0);
      duplicated_state1 = Mix(e ^ salt[3], f ^ duplicated_state1);
      duplicated_state2 = Mix(g ^ salt[4], h ^ duplicated_state2);
      ptr += 64;
      len -= 64;
    } while (len > 64);
    // Fold the four parallel states back into one.
    current_state = (current_state ^ duplicated_state0) ^
                    (duplicated_state1 + duplicated_state2);
  }
  if (len > 32) {
    uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
    uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
    uint64_t c = absl::base_internal::UnalignedLoad64(ptr + 16);
    uint64_t d = absl::base_internal::UnalignedLoad64(ptr + 24);
    uint64_t cs0 = Mix(a ^ salt[1], b ^ current_state);
    uint64_t cs1 = Mix(c ^ salt[2], d ^ current_state);
    current_state = cs0 ^ cs1;
    ptr += 32;
    len -= 32;
  }
  if (len > 16) {
    uint64_t a = absl::base_internal::UnalignedLoad64(ptr);
    uint64_t b = absl::base_internal::UnalignedLoad64(ptr + 8);
    current_state = Mix(a ^ salt[1], b ^ current_state);
  }
  // Final mix always covers the last 16 bytes and folds in the length.
  uint64_t a = absl::base_internal::UnalignedLoad64(last_16_ptr);
  uint64_t b = absl::base_internal::UnalignedLoad64(last_16_ptr + 8);
  return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
}
// Entry point: dispatches long inputs to LowLevelHashLenGt16 and handles
// len <= 16 inline with overlapping word loads (8- or 4-byte), or, for
// 1-3 bytes, by packing the first, last, and middle bytes into two words.
uint64_t LowLevelHash(const void* data, size_t len, uint64_t seed,
                      const uint64_t salt[5]) {
  if (len > 16) return LowLevelHashLenGt16(data, len, seed, salt);
  PrefetchToLocalCache(data);
  const uint8_t* ptr = static_cast<const uint8_t*>(data);
  uint64_t starting_length = static_cast<uint64_t>(len);
  uint64_t current_state = seed ^ salt[0];
  if (len == 0) return current_state;
  uint64_t a = 0;
  uint64_t b = 0;
  if (len > 8) {
    // Two overlapping 8-byte reads cover the whole input.
    a = absl::base_internal::UnalignedLoad64(ptr);
    b = absl::base_internal::UnalignedLoad64(ptr + len - 8);
  } else if (len > 3) {
    // Two overlapping 4-byte reads cover the whole input.
    a = absl::base_internal::UnalignedLoad32(ptr);
    b = absl::base_internal::UnalignedLoad32(ptr + len - 4);
  } else {
    // len in [1, 3]: combine the first, last, and middle bytes.
    a = static_cast<uint64_t>((ptr[0] << 8) | ptr[len - 1]);
    b = static_cast<uint64_t>(ptr[len >> 1]);
  }
  return Mix(a ^ salt[1] ^ starting_length, b ^ current_state);
}
}
ABSL_NAMESPACE_END
} | #include "absl/hash/internal/low_level_hash.h"
#include <cinttypes>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/escaping.h"
#define UPDATE_GOLDEN 0
namespace {
// Salt constants matching those used when the golden outputs below were
// generated; changing any value invalidates every golden hash.
static const uint64_t kSalt[5] = {0xa0761d6478bd642f, 0xe7037ed1a0b428dbl,
                                  0x8ebc6af09c88c6e3, 0x589965cc75374cc3l,
                                  0x1d8e4e27c47d124f};
TEST(LowLevelHashTest, VerifyGolden) {
constexpr size_t kNumGoldenOutputs = 134;
static struct {
absl::string_view base64_data;
uint64_t seed;
} cases[] = {
{"", uint64_t{0xec42b7ab404b8acb}},
{"ICAg", uint64_t{0}},
{"YWFhYQ==", uint64_t{0}},
{"AQID", uint64_t{0}},
{"AQIDBA==", uint64_t{0}},
{"dGhpcmRfcGFydHl8d3loYXNofDY0", uint64_t{0}},
{"Zw==", uint64_t{0xeeee074043a3ee0f}},
{"xmk=", uint64_t{0x857902089c393de}},
{"c1H/", uint64_t{0x993df040024ca3af}},
{"SuwpzQ==", uint64_t{0xc4e4c2acea740e96}},
{"uqvy++M=", uint64_t{0x6a214b3db872d0cf}},
{"RnzCVPgb", uint64_t{0x44343db6a89dba4d}},
{"6OeNdlouYw==", uint64_t{0x77b5d6d1ae1dd483}},
{"M5/JmmYyDbc=", uint64_t{0x89ab8ecb44d221f1}},
{"MVijWiVdBRdY", uint64_t{0x60244b17577ca81b}},
{"6V7Uq7LNxpu0VA==", uint64_t{0x59a08dcee0717067}},
{"EQ6CdEEhPdyHcOk=", uint64_t{0xf5f20db3ade57396}},
{"PqFB4fxnPgF+l+rc", uint64_t{0xbf8dee0751ad3efb}},
{"a5aPOFwq7LA7+zKvPA==", uint64_t{0x6b7a06b268d63e30}},
{"VOwY21wCGv5D+/qqOvs=", uint64_t{0xb8c37f0ae0f54c82}},
{"KdHmBTx8lHXYvmGJ+Vy7", uint64_t{0x9fcbed0c38e50eef}},
{"qJkPlbHr8bMF7/cA6aE65Q==", uint64_t{0x2af4bade1d8e3a1d}},
{"ygvL0EhHZL0fIx6oHHtkxRQ=", uint64_t{0x714e3aa912da2f2c}},
{"c1rFXkt5YztwZCQRngncqtSs", uint64_t{0xf5ee75e3cbb82c1c}},
{"8hsQrzszzeNQSEcVXLtvIhm6mw==", uint64_t{0x620e7007321b93b9}},
{"ffUL4RocfyP4KfikGxO1yk7omDI=", uint64_t{0xc08528cac2e551fc}},
{"OOB5TT00vF9Od/rLbAWshiErqhpV", uint64_t{0x6a1debf9cc3ad39}},
{"or5wtXM7BFzTNpSzr+Lw5J5PMhVJ/Q==", uint64_t{0x7e0a3c88111fc226}},
{"gk6pCHDUsoopVEiaCrzVDhioRKxb844=", uint64_t{0x1301fef15df39edb}},
{"TNctmwlC5QbEM6/No4R/La3UdkfeMhzs", uint64_t{0x64e181f3d5817ab}},
{"SsQw9iAjhWz7sgcE9OwLuSC6hsM+BfHs2Q==", uint64_t{0xafafc44961078ecb}},
{"ZzO3mVCj4xTT2TT3XqDyEKj2BZQBvrS8RHg=", uint64_t{0x4f7bb45549250094}},
{"+klp5iPQGtppan5MflEls0iEUzqU+zGZkDJX", uint64_t{0xa30061abaa2818c}},
{"RO6bvOnlJc8I9eniXlNgqtKy0IX6VNg16NRmgg==",
uint64_t{0xd902ee3e44a5705f}},
{"ZJjZqId1ZXBaij9igClE3nyliU5XWdNRrayGlYA=", uint64_t{0x316d36da516f583}},
{"7BfkhfGMDGbxfMB8uyL85GbaYQtjr2K8g7RpLzr/",
uint64_t{0x402d83f9f834f616}},
{"rycWk6wHH7htETQtje9PidS2YzXBx+Qkg2fY7ZYS7A==",
uint64_t{0x9c604164c016b72c}},
{"RTkC2OUK+J13CdGllsH0H5WqgspsSa6QzRZouqx6pvI=",
uint64_t{0x3f4507e01f9e73ba}},
{"tKjKmbLCNyrLCM9hycOAXm4DKNpM12oZ7dLTmUx5iwAi",
uint64_t{0xc3fe0d5be8d2c7c7}},
{"VprUGNH+5NnNRaORxgH/ySrZFQFDL+4VAodhfBNinmn8cg==",
uint64_t{0x531858a40bfa7ea1}},
{"gc1xZaY+q0nPcUvOOnWnT3bqfmT/geth/f7Dm2e/DemMfk4=",
uint64_t{0x86689478a7a7e8fa}},
{"Mr35fIxqx1ukPAL0su1yFuzzAU3wABCLZ8+ZUFsXn47UmAph",
uint64_t{0x4ec948b8e7f27288}},
{"A9G8pw2+m7+rDtWYAdbl8tb2fT7FFo4hLi2vAsa5Y8mKH3CX3g==",
uint64_t{0xce46c7213c10032}},
{"DFaJGishGwEHDdj9ixbCoaTjz9KS0phLNWHVVdFsM93CvPft3hM=",
uint64_t{0xf63e96ee6f32a8b6}},
{"7+Ugx+Kr3aRNgYgcUxru62YkTDt5Hqis+2po81hGBkcrJg4N0uuy",
uint64_t{0x1cfe85e65fc5225}},
{"H2w6O8BUKqu6Tvj2xxaecxEI2wRgIgqnTTG1WwOgDSINR13Nm4d4Vg==",
uint64_t{0x45c474f1cee1d2e8}},
{"1XBMnIbqD5jy65xTDaf6WtiwtdtQwv1dCVoqpeKj+7cTR1SaMWMyI04=",
uint64_t{0x6e024e14015f329c}},
{"znZbdXG2TSFrKHEuJc83gPncYpzXGbAebUpP0XxzH0rpe8BaMQ17nDbt",
uint64_t{0x760c40502103ae1c}},
{"ylu8Atu13j1StlcC1MRMJJXIl7USgDDS22HgVv0WQ8hx/8pNtaiKB17hCQ==",
uint64_t{0x17fd05c3c560c320}},
{"M6ZVVzsd7vAvbiACSYHioH/440dp4xG2mLlBnxgiqEvI/aIEGpD0Sf4VS0g=",
uint64_t{0x8b34200a6f8e90d9}},
{"li3oFSXLXI+ubUVGJ4blP6mNinGKLHWkvGruun85AhVn6iuMtocbZPVhqxzn",
uint64_t{0x6be89e50818bdf69}},
{"kFuQHuUCqBF3Tc3hO4dgdIp223ShaCoog48d5Do5zMqUXOh5XpGK1t5XtxnfGA==",
uint64_t{0xfb389773315b47d8}},
{"jWmOad0v0QhXVJd1OdGuBZtDYYS8wBVHlvOeTQx9ZZnm8wLEItPMeihj72E0nWY=",
uint64_t{0x4f2512a23f61efee}},
{"z+DHU52HaOQdW4JrZwDQAebEA6rm13Zg/9lPYA3txt3NjTBqFZlOMvTRnVzRbl23",
uint64_t{0x59ccd92fc16c6fda}},
{"MmBiGDfYeTayyJa/tVycg+rN7f9mPDFaDc+23j0TlW9094er0ADigsl4QX7V3gG/qw==",
uint64_t{0x25c5a7f5bd330919}},
{"774RK+9rOL4iFvs1q2qpo/JVc/I39buvNjqEFDtDvyoB0FXxPI2vXqOrk08VPfIHkmU=",
uint64_t{0x51df4174d34c97d7}},
{"+slatXiQ7/2lK0BkVUI1qzNxOOLP3I1iK6OfHaoxgqT63FpzbElwEXSwdsryq3UlHK0I",
uint64_t{0x80ce6d76f89cb57}},
{"64mVTbQ47dHjHlOHGS/hjJwr/"
"K2frCNpn87exOqMzNUVYiPKmhCbfS7vBUce5tO6Ec9osQ==",
uint64_t{0x20961c911965f684}},
{"fIsaG1r530SFrBqaDj1kqE0AJnvvK8MNEZbII2Yw1OK77v0V59xabIh0B5axaz/"
"+a2V5WpA=",
uint64_t{0x4e5b926ec83868e7}},
{"PGih0zDEOWCYGxuHGDFu9Ivbff/"
"iE7BNUq65tycTR2R76TerrXALRosnzaNYO5fjFhTi+CiS",
uint64_t{0x3927b30b922eecef}},
{"RnpA/"
"zJnEnnLjmICORByRVb9bCOgxF44p3VMiW10G7PvW7IhwsWajlP9kIwNA9FjAD2GoQHk2Q="
"=",
uint64_t{0xbd0291284a49b61c}},
{"qFklMceaTHqJpy2qavJE+EVBiNFOi6OxjOA3LeIcBop1K7w8xQi3TrDk+"
"BrWPRIbfprszSaPfrI=",
uint64_t{0x73a77c575bcc956}},
{"cLbfUtLl3EcQmITWoTskUR8da/VafRDYF/ylPYwk7/"
"zazk6ssyrzxMN3mmSyvrXR2yDGNZ3WDrTT",
uint64_t{0x766a0e2ade6d09a6}},
{"s/"
"Jf1+"
"FbsbCpXWPTUSeWyMH6e4CvTFvPE5Fs6Z8hvFITGyr0dtukHzkI84oviVLxhM1xMxrMAy1db"
"w==",
uint64_t{0x2599f4f905115869}},
{"FvyQ00+j7nmYZVQ8hI1Edxd0AWplhTfWuFGiu34AK5X8u2hLX1bE97sZM0CmeLe+"
"7LgoUT1fJ/axybE=",
uint64_t{0xd8256e5444d21e53}},
{"L8ncxMaYLBH3g9buPu8hfpWZNlOF7nvWLNv9IozH07uQsIBWSKxoPy8+"
"LW4tTuzC6CIWbRGRRD1sQV/4",
uint64_t{0xf664a91333fb8dfd}},
{"CDK0meI07yrgV2kQlZZ+"
"wuVqhc2NmzqeLH7bmcA6kchsRWFPeVF5Wqjjaj556ABeUoUr3yBmfU3kWOakkg==",
uint64_t{0x9625b859be372cd1}},
{"d23/vc5ONh/"
"HkMiq+gYk4gaCNYyuFKwUkvn46t+dfVcKfBTYykr4kdvAPNXGYLjM4u1YkAEFpJP+"
"nX7eOvs=",
uint64_t{0x7b99940782e29898}},
{"NUR3SRxBkxTSbtQORJpu/GdR6b/h6sSGfsMj/KFd99ahbh+9r7LSgSGmkGVB/"
"mGoT0pnMTQst7Lv2q6QN6Vm",
uint64_t{0x4fe12fa5383b51a8}},
{"2BOFlcI3Z0RYDtS9T9Ie9yJoXlOdigpPeeT+CRujb/"
"O39Ih5LPC9hP6RQk1kYESGyaLZZi3jtabHs7DiVx/VDg==",
uint64_t{0xe2ccb09ac0f5b4b6}},
{"FF2HQE1FxEvWBpg6Z9zAMH+Zlqx8S1JD/"
"wIlViL6ZDZY63alMDrxB0GJQahmAtjlm26RGLnjW7jmgQ4Ie3I+014=",
uint64_t{0x7d0a37adbd7b753b}},
{"tHmO7mqVL/PX11nZrz50Hc+M17Poj5lpnqHkEN+4bpMx/"
"YGbkrGOaYjoQjgmt1X2QyypK7xClFrjeWrCMdlVYtbW",
uint64_t{0xd3ae96ef9f7185f2}},
{"/WiHi9IQcxRImsudkA/KOTqGe8/"
"gXkhKIHkjddv5S9hi02M049dIK3EUyAEjkjpdGLUs+BN0QzPtZqjIYPOgwsYE9g==",
uint64_t{0x4fb88ea63f79a0d8}},
{"qds+1ExSnU11L4fTSDz/QE90g4Jh6ioqSh3KDOTOAo2pQGL1k/"
"9CCC7J23YF27dUTzrWsCQA2m4epXoCc3yPHb3xElA=",
uint64_t{0xed564e259bb5ebe9}},
{"8FVYHx40lSQPTHheh08Oq0/"
"pGm2OlG8BEf8ezvAxHuGGdgCkqpXIueJBF2mQJhTfDy5NncO8ntS7vaKs7sCNdDaNGOEi",
uint64_t{0x3e3256b60c428000}},
{"4ZoEIrJtstiCkeew3oRzmyJHVt/pAs2pj0HgHFrBPztbQ10NsQ/"
"lM6DM439QVxpznnBSiHMgMQJhER+70l72LqFTO1JiIQ==",
uint64_t{0xfb05bad59ec8705}},
{"hQPtaYI+wJyxXgwD5n8jGIKFKaFA/"
"P83KqCKZfPthnjwdOFysqEOYwAaZuaaiv4cDyi9TyS8hk5cEbNP/jrI7q6pYGBLbsM=",
uint64_t{0xafdc251dbf97b5f8}},
{"S4gpMSKzMD7CWPsSfLeYyhSpfWOntyuVZdX1xSBjiGvsspwOZcxNKCRIOqAA0moUfOh3I5+"
"juQV4rsqYElMD/gWfDGpsWZKQ",
uint64_t{0x10ec9c92ddb5dcbc}},
{"oswxop+"
"bthuDLT4j0PcoSKby4LhF47ZKg8K17xxHf74UsGCzTBbOz0MM8hQEGlyqDT1iUiAYnaPaUp"
"L2mRK0rcIUYA4qLt5uOw==",
uint64_t{0x9a767d5822c7dac4}},
{"0II/"
"697p+"
"BtLSjxj5989OXI004TogEb94VUnDzOVSgMXie72cuYRvTFNIBgtXlKfkiUjeqVpd4a+"
"n5bxNOD1TGrjQtzKU5r7obo=",
uint64_t{0xee46254080d6e2db}},
{"E84YZW2qipAlMPmctrg7TKlwLZ68l4L+c0xRDUfyyFrA4MAti0q9sHq3TDFviH0Y+"
"Kq3tEE5srWFA8LM9oomtmvm5PYxoaarWPLc",
uint64_t{0xbbb669588d8bf398}},
{"x3pa4HIElyZG0Nj7Vdy9IdJIR4izLmypXw5PCmZB5y68QQ4uRaVVi3UthsoJROvbjDJkP2D"
"Q6L/eN8pFeLFzNPKBYzcmuMOb5Ull7w==",
uint64_t{0xdc2afaa529beef44}},
{"jVDKGYIuWOP/"
"QKLdd2wi8B2VJA8Wh0c8PwrXJVM8FOGM3voPDVPyDJOU6QsBDPseoR8uuKd19OZ/"
"zAvSCB+zlf6upAsBlheUKgCfKww=",
uint64_t{0xf1f67391d45013a8}},
{"mkquunhmYe1aR2wmUz4vcvLEcKBoe6H+kjUok9VUn2+eTSkWs4oDDtJvNCWtY5efJwg/"
"j4PgjRYWtqnrCkhaqJaEvkkOwVfgMIwF3e+d",
uint64_t{0x16fce2b8c65a3429}},
{"fRelvKYonTQ+s+rnnvQw+JzGfFoPixtna0vzcSjiDqX5s2Kg2"
"UGrK+AVCyMUhO98WoB1DDbrsOYSw2QzrcPe0+3ck9sePvb+Q/IRaHbw==",
uint64_t{0xf4b096699f49fe67}},
{"DUwXFJzagljo44QeJ7/"
"6ZKw4QXV18lhkYT2jglMr8WB3CHUU4vdsytvw6AKv42ZcG6fRkZkq9fpnmXy6xG0aO3WPT1"
"eHuyFirAlkW+zKtwg=",
uint64_t{0xca584c4bc8198682}},
{"cYmZCrOOBBongNTr7e4nYn52uQUy2mfe48s50JXx2AZ6cRAt/"
"xRHJ5QbEoEJOeOHsJyM4nbzwFm++SlT6gFZZHJpkXJ92JkR86uS/eV1hJUR",
uint64_t{0xed269fc3818b6aad}},
{"EXeHBDfhwzAKFhsMcH9+2RHwV+mJaN01+9oacF6vgm8mCXRd6jeN9U2oAb0of5c5cO4i+"
"Vb/LlHZSMI490SnHU0bejhSCC2gsC5d2K30ER3iNA==",
uint64_t{0x33f253cbb8fe66a8}},
{"FzkzRYoNjkxFhZDso94IHRZaJUP61nFYrh5MwDwv9FNoJ5jyNCY/"
"eazPZk+tbmzDyJIGw2h3GxaWZ9bSlsol/vK98SbkMKCQ/wbfrXRLcDzdd/8=",
uint64_t{0xd0b76b2c1523d99c}},
{"Re4aXISCMlYY/XsX7zkIFR04ta03u4zkL9dVbLXMa/q6hlY/CImVIIYRN3VKP4pnd0AUr/"
"ugkyt36JcstAInb4h9rpAGQ7GMVOgBniiMBZ/MGU7H",
uint64_t{0xfd28f0811a2a237f}},
{"ueLyMcqJXX+MhO4UApylCN9WlTQ+"
"ltJmItgG7vFUtqs2qNwBMjmAvr5u0sAKd8jpzV0dDPTwchbIeAW5zbtkA2NABJV6hFM48ib"
"4/J3A5mseA3cS8w==",
uint64_t{0x6261fb136482e84}},
{"6Si7Yi11L+jZMkwaN+GUuzXMrlvEqviEkGOilNq0h8TdQyYKuFXzkYc/"
"q74gP3pVCyiwz9KpVGMM9vfnq36riMHRknkmhQutxLZs5fbmOgEO69HglCU=",
uint64_t{0x458efc750bca7c3a}},
{"Q6AbOofGuTJOegPh9Clm/"
"9crtUMQqylKrTc1fhfJo1tqvpXxhU4k08kntL1RG7woRnFrVh2UoMrL1kjin+s9CanT+"
"y4hHwLqRranl9FjvxfVKm3yvg68",
uint64_t{0xa7e69ff84e5e7c27}},
{"ieQEbIPvqY2YfIjHnqfJiO1/MIVRk0RoaG/WWi3kFrfIGiNLCczYoklgaecHMm/"
"1sZ96AjO+a5stQfZbJQwS7Sc1ODABEdJKcTsxeW2hbh9A6CFzpowP1A==",
uint64_t{0x3c59bfd0c29efe9e}},
{"zQUv8hFB3zh2GGl3KTvCmnfzE+"
"SUgQPVaSVIELFX5H9cE3FuVFGmymkPQZJLAyzC90Cmi8GqYCvPqTuAAB"
"XTJxy4bCcVArgZG9zJXpjowpNBfr3ngWrSE=",
uint64_t{0x10befacc6afd298d}},
{"US4hcC1+op5JKGC7eIs8CUgInjKWKlvKQkapulxW262E/"
"B2ye79QxOexf188u2mFwwe3WTISJHRZzS61IwljqAWAWoBAqkUnW8SHmIDwHUP31J0p5sGd"
"P47L",
uint64_t{0x41d5320b0a38efa7}},
{"9bHUWFna2LNaGF6fQLlkx1Hkt24nrkLE2CmFdWgTQV3FFbUe747SSqYw6ebpTa07MWSpWRP"
"sHesVo2B9tqHbe7eQmqYebPDFnNqrhSdZwFm9arLQVs+7a3Ic6A==",
uint64_t{0x58db1c7450fe17f3}},
{"Kb3DpHRUPhtyqgs3RuXjzA08jGb59hjKTOeFt1qhoINfYyfTt2buKhD6YVffRCPsgK9SeqZ"
"qRPJSyaqsa0ovyq1WnWW8jI/NhvAkZTVHUrX2pC+cD3OPYT05Dag=",
uint64_t{0x6098c055a335b7a6}},
{"gzxyMJIPlU+bJBwhFUCHSofZ/"
"319LxqMoqnt3+L6h2U2+ZXJCSsYpE80xmR0Ta77Jq54o92SMH87HV8dGOaCTuAYF+"
"lDL42SY1P316Cl0sZTS2ow3ZqwGbcPNs/1",
uint64_t{0x1bbacec67845a801}},
{"uR7V0TW+FGVMpsifnaBAQ3IGlr1wx5sKd7TChuqRe6OvUXTlD4hKWy8S+"
"8yyOw8lQabism19vOQxfmocEOW/"
"vzY0pEa87qHrAZy4s9fH2Bltu8vaOIe+agYohhYORQ==",
uint64_t{0xc419cfc7442190}},
{"1UR5eoo2aCwhacjZHaCh9bkOsITp6QunUxHQ2SfeHv0imHetzt/"
"Z70mhyWZBalv6eAx+YfWKCUib2SHDtz/"
"A2dc3hqUWX5VfAV7FQsghPUAtu6IiRatq4YSLpDvKZBQ=",
uint64_t{0xc95e510d94ba270c}},
{"opubR7H63BH7OtY+Avd7QyQ25UZ8kLBdFDsBTwZlY6gA/"
"u+x+"
"czC9AaZMgmQrUy15DH7YMGsvdXnviTtI4eVI4aF1H9Rl3NXMKZgwFOsdTfdcZeeHVRzBBKX"
"8jUfh1il",
uint64_t{0xff1ae05c98089c3f}},
{"DC0kXcSXtfQ9FbSRwirIn5tgPri0sbzHSa78aDZVDUKCMaBGyFU6BmrulywYX8yzvwprdLs"
"oOwTWN2wMjHlPDqrvVHNEjnmufRDblW+nSS+xtKNs3N5xsxXdv6JXDrAB/Q==",
uint64_t{0x90c02b8dceced493}},
{"BXRBk+3wEP3Lpm1y75wjoz+PgB0AMzLe8tQ1AYU2/"
"oqrQB2YMC6W+9QDbcOfkGbeH+b7IBkt/"
"gwCMw2HaQsRFEsurXtcQ3YwRuPz5XNaw5NAvrNa67Fm7eRzdE1+hWLKtA8=",
uint64_t{0x9f8a76697ab1aa36}},
{"RRBSvEGYnzR9E45Aps/+WSnpCo/X7gJLO4DRnUqFrJCV/kzWlusLE/"
"6ZU6RoUf2ROwcgEvUiXTGjLs7ts3t9SXnJHxC1KiOzxHdYLMhVvgNd3hVSAXODpKFSkVXND"
"55G2L1W",
uint64_t{0x6ba1bf3d811a531d}},
{"jeh6Qazxmdi57pa9S3XSnnZFIRrnc6s8QLrah5OX3SB/V2ErSPoEAumavzQPkdKF1/"
"SfvmdL+qgF1C+Yawy562QaFqwVGq7+tW0yxP8FStb56ZRgNI4IOmI30s1Ei7iops9Uuw==",
uint64_t{0x6a418974109c67b4}},
{"6QO5nnDrY2/"
"wrUXpltlKy2dSBcmK15fOY092CR7KxAjNfaY+"
"aAmtWbbzQk3MjBg03x39afSUN1fkrWACdyQKRaGxgwq6MGNxI6W+8DLWJBHzIXrntrE/"
"ml6fnNXEpxplWJ1vEs4=",
uint64_t{0x8472f1c2b3d230a3}},
{"0oPxeEHhqhcFuwonNfLd5jF3RNATGZS6NPoS0WklnzyokbTqcl4BeBkMn07+fDQv83j/"
"BpGUwcWO05f3+DYzocfnizpFjLJemFGsls3gxcBYxcbqWYev51tG3lN9EvRE+X9+Pwww",
uint64_t{0x5e06068f884e73a7}},
{"naSBSjtOKgAOg8XVbR5cHAW3Y+QL4Pb/JO9/"
"oy6L08wvVRZqo0BrssMwhzBP401Um7A4ppAupbQeJFdMrysY34AuSSNvtNUy5VxjNECwiNt"
"gwYHw7yakDUv8WvonctmnoSPKENegQg==",
uint64_t{0x55290b1a8f170f59}},
{"vPyl8DxVeRe1OpilKb9KNwpGkQRtA94UpAHetNh+"
"95V7nIW38v7PpzhnTWIml5kw3So1Si0TXtIUPIbsu32BNhoH7QwFvLM+"
"JACgSpc5e3RjsL6Qwxxi11npwxRmRUqATDeMUfRAjxg=",
uint64_t{0x5501cfd83dfe706a}},
{"QC9i2GjdTMuNC1xQJ74ngKfrlA4w3o58FhvNCltdIpuMhHP1YsDA78scQPLbZ3OCUgeQguY"
"f/vw6zAaVKSgwtaykqg5ka/4vhz4hYqWU5ficdXqClHl+zkWEY26slCNYOM5nnDlly8Cj",
uint64_t{0xe43ed13d13a66990}},
{"7CNIgQhAHX27nxI0HeB5oUTnTdgKpRDYDKwRcXfSFGP1XeT9nQF6WKCMjL1tBV6x7KuJ91G"
"Zz11F4c+8s+MfqEAEpd4FHzamrMNjGcjCyrVtU6y+7HscMVzr7Q/"
"ODLcPEFztFnwjvCjmHw==",
uint64_t{0xdf43bc375cf5283f}},
{"Qa/hC2RPXhANSospe+gUaPfjdK/yhQvfm4cCV6/pdvCYWPv8p1kMtKOX3h5/"
"8oZ31fsmx4Axphu5qXJokuhZKkBUJueuMpxRyXpwSWz2wELx5glxF7CM0Fn+"
"OevnkhUn5jsPlG2r5jYlVn8=",
uint64_t{0x8112b806d288d7b5}},
{"kUw/0z4l3a89jTwN5jpG0SHY5km/"
"IVhTjgM5xCiPRLncg40aqWrJ5vcF891AOq5hEpSq0bUCJUMFXgct7kvnys905HjerV7Vs1G"
"y84tgVJ70/2+pAZTsB/PzNOE/G6sOj4+GbTzkQu819OLB",
uint64_t{0xd52a18abb001cb46}},
{"VDdfSDbO8Tdj3T5W0XM3EI7iHh5xpIutiM6dvcJ/fhe23V/srFEkDy5iZf/"
"VnA9kfi2C79ENnFnbOReeuZW1b3MUXB9lgC6U4pOTuC+"
"jHK3Qnpyiqzj7h3ISJSuo2pob7vY6VHZo6Fn7exEqHg==",
uint64_t{0xe12b76a2433a1236}},
{"Ldfvy3ORdquM/R2fIkhH/ONi69mcP1AEJ6n/"
"oropwecAsLJzQSgezSY8bEiEs0VnFTBBsW+RtZY6tDj03fnb3amNUOq1b7jbqyQkL9hpl+"
"2Z2J8IaVSeownWl+bQcsR5/xRktIMckC5AtF4YHfU=",
uint64_t{0x175bf7319cf1fa00}},
{"BrbNpb42+"
"VzZAjJw6QLirXzhweCVRfwlczzZ0VX2xluskwBqyfnGovz5EuX79JJ31VNXa5hTkAyQat3l"
"YKRADTdAdwE5PqM1N7YaMqqsqoAAAeuYVXuk5eWCykYmClNdSspegwgCuT+403JigBzi",
uint64_t{0xd63d57b3f67525ae}},
{"gB3NGHJJvVcuPyF0ZSvHwnWSIfmaI7La24VMPQVoIIWF7Z74NltPZZpx2f+cocESM+"
"ILzQW9p+BC8x5IWz7N4Str2WLGKMdgmaBfNkEhSHQDU0IJEOnpUt0HmjhFaBlx0/"
"LTmhua+rQ6Wup8ezLwfg==",
uint64_t{0x933faea858832b73}},
{"hTKHlRxx6Pl4gjG+6ksvvj0CWFicUg3WrPdSJypDpq91LUWRni2KF6+"
"81ZoHBFhEBrCdogKqeK+hy9bLDnx7g6rAFUjtn1+cWzQ2YjiOpz4+"
"ROBB7lnwjyTGWzJD1rXtlso1g2qVH8XJVigC5M9AIxM=",
uint64_t{0x53d061e5f8e7c04f}},
{"IWQBelSQnhrr0F3BhUpXUIDauhX6f95Qp+A0diFXiUK7irwPG1oqBiqHyK/SH/"
"9S+"
"rln9DlFROAmeFdH0OCJi2tFm4afxYzJTFR4HnR4cG4x12JqHaZLQx6iiu6CE3rtWBVz99oA"
"wCZUOEXIsLU24o2Y",
uint64_t{0xdb4124556dd515e0}},
{"TKo+l+"
"1dOXdLvIrFqeLaHdm0HZnbcdEgOoLVcGRiCbAMR0j5pIFw8D36tefckAS1RCFOH5IgP8yiF"
"T0Gd0a2hI3+"
"fTKA7iK96NekxWeoeqzJyctc6QsoiyBlkZerRxs5RplrxoeNg29kKDTM0K94mnhD9g==",
uint64_t{0x4fb31a0dd681ee71}},
{"YU4e7G6EfQYvxCFoCrrT0EFgVLHFfOWRTJQJ5gxM3G2b+"
"1kJf9YPrpsxF6Xr6nYtS8reEEbDoZJYqnlk9lXSkVArm88Cqn6d25VCx3+"
"49MqC0trIlXtb7SXUUhwpJK16T0hJUfPH7s5cMZXc6YmmbFuBNPE=",
uint64_t{0x27cc72eefa138e4c}},
{"/I/"
"eImMwPo1U6wekNFD1Jxjk9XQVi1D+"
"FPdqcHifYXQuP5aScNQfxMAmaPR2XhuOQhADV5tTVbBKwCDCX4E3jcDNHzCiPvViZF1W27t"
"xaf2BbFQdwKrNCmrtzcluBFYu0XZfc7RU1RmxK/RtnF1qHsq/O4pp",
uint64_t{0x44bc2dfba4bd3ced}},
{"CJTT9WGcY2XykTdo8KodRIA29qsqY0iHzWZRjKHb9alwyJ7RZAE3V5Juv4MY3MeYEr1EPCC"
"MxO7yFXqT8XA8YTjaMp3bafRt17Pw8JC4iKJ1zN+WWKOESrj+"
"3aluGQqn8z1EzqY4PH7rLG575PYeWsP98BugdA==",
uint64_t{0x242da1e3a439bed8}},
{"ZlhyQwLhXQyIUEnMH/"
"AEW27vh9xrbNKJxpWGtrEmKhd+nFqAfbeNBQjW0SfG1YI0xQkQMHXjuTt4P/"
"EpZRtA47ibZDVS8TtaxwyBjuIDwqcN09eCtpC+Ls+"
"vWDTLmBeDM3u4hmzz4DQAYsLiZYSJcldg9Q3wszw=",
uint64_t{0xdc559c746e35c139}},
{"v2KU8y0sCrBghmnm8lzGJlwo6D6ObccAxCf10heoDtYLosk4ztTpLlpSFEyu23MLA1tJkcg"
"Rko04h19QMG0mOw/"
"wc93EXAweriBqXfvdaP85sZABwiKO+6rtS9pacRVpYYhHJeVTQ5NzrvBvi1huxAr+"
"xswhVMfL",
uint64_t{0xd0b0350275b9989}},
{"QhKlnIS6BuVCTQsnoE67E/"
"yrgogE8EwO7xLaEGei26m0gEU4OksefJgppDh3X0x0Cs78Dr9IHK5b977CmZlrTRmwhlP8p"
"M+UzXPNRNIZuN3ntOum/QhUWP8SGpirheXENWsXMQ/"
"nxtxakyEtrNkKk471Oov9juP8oQ==",
uint64_t{0xb04489e41d17730c}},
{"/ZRMgnoRt+Uo6fUPr9FqQvKX7syhgVqWu+"
"WUSsiQ68UlN0efSP6Eced5gJZL6tg9gcYJIkhjuQNITU0Q3TjVAnAcobgbJikCn6qZ6pRxK"
"BY4MTiAlfGD3T7R7hwJwx554MAy++Zb/YUFlnCaCJiwQMnowF7aQzwYFCo=",
uint64_t{0x2217285eb4572156}},
{"NB7tU5fNE8nI+SXGfipc7sRkhnSkUF1krjeo6k+8FITaAtdyz+"
"o7mONgXmGLulBPH9bEwyYhKNVY0L+njNQrZ9YC2aXsFD3PdZsxAFaBT3VXEzh+"
"NGBTjDASNL3mXyS8Yv1iThGfHoY7T4aR0NYGJ+k+pR6f+KrPC96M",
uint64_t{0x12c2e8e68aede73b}},
{"8T6wrqCtEO6/rwxF6lvMeyuigVOLwPipX/FULvwyu+1wa5sQGav/"
"2FsLHUVn6cGSi0LlFwLewGHPFJDLR0u4t7ZUyM"
"x6da0sWgOa5hzDqjsVGmjxEHXiaXKW3i4iSZNuxoNbMQkIbVML+"
"DkYu9ND0O2swg4itGeVSzXA==",
uint64_t{0x4d612125bdc4fd00}},
{"Ntf1bMRdondtMv1CYr3G80iDJ4WSAlKy5H34XdGruQiCrnRGDBa+"
"eUi7vKp4gp3BBcVGl8eYSasVQQjn7MLvb3BjtXx6c/"
"bCL7JtpzQKaDnPr9GWRxpBXVxKREgMM7d8lm35EODv0w+"
"hQLfVSh8OGs7fsBb68nNWPLeeSOo=",
uint64_t{0x81826b553954464e}},
{"VsSAw72Ro6xks02kaiLuiTEIWBC5bgqr4WDnmP8vglXzAhixk7td926rm9jNimL+"
"kroPSygZ9gl63aF5DCPOACXmsbmhDrAQuUzoh9ZKhWgElLQsrqo1KIjWoZT5b5QfVUXY9lS"
"IBg3U75SqORoTPq7HalxxoIT5diWOcJQi",
uint64_t{0xc2e5d345dc0ddd2d}},
{"j+loZ+C87+"
"bJxNVebg94gU0mSLeDulcHs84tQT7BZM2rzDSLiCNxUedHr1ZWJ9ejTiBa0dqy2I2ABc++"
"xzOLcv+"
"O6xO+XOBhOWAQ+IHJVHf7wZnDxIXB8AUHsnjEISKj7823biqXjyP3g==",
uint64_t{0x3da6830a9e32631e}},
{"f3LlpcPElMkspNtDq5xXyWU62erEaKn7RWKlo540gR6mZsNpK1czV/"
"sOmqaq8XAQLEn68LKj6/"
"cFkJukxRzCa4OF1a7cCAXYFp9+wZDu0bw4y63qbpjhdCl8GO6Z2lkcXy7KOzbPE01ukg7+"
"gN+7uKpoohgAhIwpAKQXmX5xtd0=",
uint64_t{0xc9ae5c8759b4877a}},
};
#if defined(ABSL_IS_BIG_ENDIAN)
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0x4c34aacf38f6eee4, 0x88b1366815e50b88, 0x1a36bd0c6150fb9c,
0xa783aba8a67366c7, 0x5e4a92123ae874f2, 0x0cc9ecf27067ee9a,
0xbe77aa94940527f9, 0x7ea5c12f2669fe31, 0xa33eed8737d946b9,
0x310aec5b1340bb36, 0x354e400861c5d8ff, 0x15be98166adcf42f,
0xc51910b62a90ae51, 0x539d47fc7fdf6a1f, 0x3ebba9daa46eef93,
0xd96bcd3a9113c17f, 0xc78eaf6256ded15a, 0x98902ed321c2f0d9,
0x75a4ac96414b954a, 0x2cb90e00a39e307b, 0x46539574626c3637,
0x186ec89a2be3ff45, 0x972a3bf7531519d2, 0xa14df0d25922364b,
0xa351e19d22752109, 0x08bd311d8fed4f82, 0xea2b52ddc6af54f9,
0x5f20549941338336, 0xd43b07422dc2782e, 0x377c68e2acda4835,
0x1b31a0a663b1d7b3, 0x7388ba5d68058a1a, 0xe382794ea816f032,
0xd4c3fe7889276ee0, 0x2833030545582ea9, 0x554d32a55e55df32,
0x8d6d33d7e17b424d, 0xe51a193d03ae1e34, 0xabb6a80835bd66b3,
0x0e4ba5293f9ce9b7, 0x1ebd8642cb762cdf, 0xcb54b555850888ee,
0x1e4195e4717c701f, 0x6235a13937f6532a, 0xd460960741e845c0,
0x2a72168a2d6af7b1, 0x6be38fbbfc5b17de, 0x4ee97cffa0d0fb39,
0xfdf1119ad5e71a55, 0x0dff7f66b3070727, 0x812d791d6ed62744,
0x60962919074b70b8, 0x956fa5c7d6872547, 0xee892daa58aae597,
0xeeda546e998ee369, 0x454481f5eb9b1fa8, 0x1054394634c98b1b,
0x55bb425415f591fb, 0x9601fa97416232c4, 0xd7a18506519daad7,
0x90935cb5de039acf, 0xe64054c5146ed359, 0xe5b323fb1e866c09,
0x10a472555f5ba1bc, 0xe3c0cd57d26e0972, 0x7ca3db7c121da3e8,
0x7004a89c800bb466, 0x865f69c1a1ff7f39, 0xbe0edd48f0cf2b99,
0x10e5e4ba3cc400f5, 0xafc2b91a220eef50, 0x6f04a259289b24f1,
0x2179a8070e880ef0, 0xd6a9a3d023a740c2, 0x96e6d7954755d9b8,
0xc8e4bddecce5af9f, 0x93941f0fbc724c92, 0xbef5fb15bf76a479,
0x534dca8f5da86529, 0x70789790feec116b, 0x2a296e167eea1fe9,
0x54cb1efd2a3ec7ea, 0x357b43897dfeb9f7, 0xd1eda89bc7ff89d3,
0x434f2e10cbb83c98, 0xeec4cdac46ca69ce, 0xd46aafd52a303206,
0x4bf05968ff50a5c9, 0x71c533747a6292df, 0xa40bd0d16a36118c,
0x597b4ee310c395ab, 0xc5b3e3e386172583, 0x12ca0b32284e6c70,
0xb48995fadcf35630, 0x0646368454cd217d, 0xa21c168e40d765b5,
0x4260d3811337da30, 0xb72728a01cff78e4, 0x8586920947f4756f,
0xc21e5f853cae7dc1, 0xf08c9533be9de285, 0x72df06653b4256d6,
0xf7b7f937f8db1779, 0x976db27dd0418127, 0x9ce863b7bc3f9e00,
0xebb679854fcf3a0a, 0x2ccebabbcf1afa99, 0x44201d6be451dac5,
0xb4af71c0e9a537d1, 0xad8fe9bb33ed2681, 0xcb30128bb68df43b,
0x154d8328903e8d07, 0x5844276dabeabdff, 0xd99017d7d36d930b,
0xabb0b4774fb261ca, 0x0a43f075d62e67e0, 0x8df7b371355ada6b,
0xf4c7a40d06513dcf, 0x257a3615955a0372, 0x987ac410bba74c06,
0xa011a46f25a632a2, 0xa14384b963ddd995, 0xf51b6b8cf9d50ba7,
0x3acdb91ee3abf18d, 0x34e799be08920e8c, 0x8766748a31304b36,
0x0aa239d5d0092f2e, 0xadf473ed26628594, 0xc4094b798eb4b79b,
0xe04ee5f33cd130f4, 0x85045d098c341d46, 0xf936cdf115a890ec,
0x51d137b6d8d2eb4f, 0xd10738bb2fccc1ef,
};
#else
constexpr uint64_t kGolden[kNumGoldenOutputs] = {
0x4c34aacf38f6eee4, 0x88b1366815e50b88, 0x1a36bd0c6150fb9c,
0xa783aba8a67366c7, 0xbc89ebdc622314e4, 0x632bc3cfcc7544d8,
0xbe77aa94940527f9, 0x7ea5c12f2669fe31, 0xa33eed8737d946b9,
0x74d832ea11fd18ab, 0x49c0487486246cdc, 0x3fdd986c87ddb0a0,
0xac3fa52a64d7c09a, 0xbff0e330196e7ed2, 0x8c8138d3ad7d3cce,
0x968c7d4b48e93778, 0xa04c78d3a421f529, 0x8854bc9c3c3c0241,
0xcccfcdf5a41113fe, 0xe6fc63dc543d984d, 0x00a39ff89e903c05,
0xaf7e9da25f9a26f9, 0x6e269a13d01a43df, 0x846d2300ce2ecdf8,
0xe7ea8c8f08478260, 0x9a2db0d62f6232f3, 0x6f66c761d168c59f,
0x55f9feacaae82043, 0x518084043700f614, 0xb0c8cfc11bead99f,
0xe4a68fdab6359d80, 0x97b17caa8f92236e, 0x96edf5e8363643dc,
0x9b3fbcd8d5b254cd, 0x22a263621d9b3a8b, 0xde90bf6f81800a6d,
0x1b51cae38c2e9513, 0x689215b3c414ef21, 0x064dc85afae8f557,
0xa2f3a8b51f408378, 0x6907c197ec1f6a3b, 0xfe83a42ef5c1cf13,
0x9b8b1d8f7a20cc13, 0x1f1681d52ca895d0, 0xd7b1670bf28e0f96,
0xb32f20f82d8b038a, 0x6a61d030fb2f5253, 0x8eb2bb0bc29ebb39,
0x144f36f7a9eef95c, 0xe77aa47d29808d8c, 0xf14d34c1fc568bad,
0x9796dcd4383f3c73, 0xa2f685fc1be7225b, 0xf3791295b16068b1,
0xb6b8f63424618948, 0x8ac4fd587045db19, 0x7e2aec2c34feb72e,
0x72e135a6910ccbb1, 0x661ff16f3c904e6f, 0xdf92cf9d67ca092d,
0x98a9953d79722eef, 0xe0649ed2181d1707, 0xcd8b8478636a297b,
0x9516258709c8471b, 0xc703b675b51f4394, 0xdb740eae020139f3,
0x57d1499ac4212ff2, 0x355cc03713d43825, 0x0e71ac9b8b1e101e,
0x8029fa72258ff559, 0xa2159726b4c16a50, 0x04e61582fba43007,
0xdab25af835be8cce, 0x13510b1b184705ee, 0xabdbc9e53666fdeb,
0x94a788fcb8173cef, 0x750d5e031286e722, 0x02559e72f4f5b497,
0x7d6e0e5996a646fa, 0x66e871b73b014132, 0x2ec170083f8b784f,
0x34ac9540cfce3fd9, 0x75c5622c6aad1295, 0xf799a6bb2651acc1,
0x8f6bcd3145bdc452, 0xddd9d326eb584a04, 0x5411af1e3532f8dc,
0xeb34722f2ad0f509, 0x835bc952a82298cc, 0xeb3839ff60ea92ad,
0x70bddf1bcdc8a4bc, 0x4bfb3ee86fcde525, 0xc7b3b93b81dfa386,
0xe66db544d57997e8, 0xf68a1b83fd363187, 0xe9b99bec615b171b,
0x093fba04d04ad28a, 0xba6117ed4231a303, 0x594bef25f9d4e206,
0x0a8cba60578b8f67, 0x88f6c7ca10b06019, 0x32a74082aef17b08,
0xe758222f971e22df, 0x4af14ff4a593e51e, 0xdba651e16cb09044,
0x3f3ac837d181eaac, 0xa5589a3f89610c01, 0xd409a7c3a18d5643,
0x8a89444f82962f26, 0x22eb62a13b9771b9, 0xd3a617615256ddd8,
0x7089b990c4bba297, 0x7d752893783eac4f, 0x1f2fcbb79372c915,
0x67a4446b17eb9839, 0x70d11df5cae46788, 0x52621e1780b47d0f,
0xcf63b93a6e590ee6, 0xb6bc96b58ee064b8, 0x2587f8d635ca9c75,
0xc6bddd62ec5e5d01, 0x957398ad3009cdb7, 0x05b6890b20bcd0d3,
0xbe6e965ff837222e, 0x47383a87d2b04b1a, 0x7d42207e6d8d7950,
0x7e981ed12a7f4aa3, 0xdebb05b30769441a, 0xaac5d86f4ff76c49,
0x384f195ca3248331, 0xec4c4b855e909ca1, 0x6a7eeb5a657d73d5,
0x9efbebe2fa9c2791, 0x19e7fa0546900c4d,
};
#endif
#if UPDATE_GOLDEN
(void)kGolden;
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
std::string str;
ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str));
uint64_t h = absl::hash_internal::LowLevelHash(str.data(), str.size(),
cases[i].seed, kSalt);
printf("0x%016" PRIx64 ", ", h);
if (i % 3 == 2) {
printf("\n");
}
}
printf("\n\n\n");
EXPECT_FALSE(true);
#else
for (size_t i = 0; i < kNumGoldenOutputs; ++i) {
SCOPED_TRACE(::testing::Message()
<< "i = " << i << "; input = " << cases[i].base64_data);
std::string str;
ASSERT_TRUE(absl::Base64Unescape(cases[i].base64_data, &str));
EXPECT_EQ(absl::hash_internal::LowLevelHash(str.data(), str.size(),
cases[i].seed, kSalt),
kGolden[i]);
}
#endif
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/hash/internal/low_level_hash.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/hash/internal/low_level_hash_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
94052271-aa52-432f-9cbf-956e8d8a55fd | cpp | abseil/abseil-cpp | hash_function_defaults | absl/container/internal/hash_function_defaults.h | absl/container/internal/hash_function_defaults_test.cc | #ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/container/internal/common.h"
#include "absl/hash/hash.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
// Primary template: by default a key type T is hashed with absl::Hash<T>
// and compared with the non-transparent std::equal_to<T>. The
// specializations below opt particular key families (strings, pointers,
// opt-in containers) into transparent heterogeneous lookup.
template <class T, class E = void>
struct HashEq {
  using Hash = absl::Hash<T>;
  using Eq = std::equal_to<T>;
};
// Transparent hash functor shared by std::string, absl::string_view and
// absl::Cord keys. `is_transparent` opts into heterogeneous lookup, so any
// of these argument types can be hashed directly (std::string converts to
// string_view) without first constructing the container's key type.
struct StringHash {
  using is_transparent = void;
  size_t operator()(absl::string_view v) const {
    return absl::Hash<absl::string_view>{}(v);
  }
  size_t operator()(const absl::Cord& v) const {
    return absl::Hash<absl::Cord>{}(v);
  }
};
// Transparent equality companion to StringHash: every combination of
// absl::string_view (which std::string converts to) and absl::Cord is
// covered so mixed-type comparisons never materialize a temporary key.
struct StringEq {
  using is_transparent = void;
  bool operator()(absl::string_view lhs, absl::string_view rhs) const {
    return lhs == rhs;
  }
  bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
    return lhs == rhs;
  }
  bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
    return lhs == rhs;
  }
  bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
    return lhs == rhs;
  }
};
// Bundles the transparent string hash/eq pair so all string-like key types
// can share a single functor pair via the specializations that follow.
struct StringHashEq {
  using Hash = StringHash;
  using Eq = StringEq;
};
// All three narrow string-like key types get transparent heterogeneous
// lookup through the same pair.
template <>
struct HashEq<std::string> : StringHashEq {};
template <>
struct HashEq<absl::string_view> : StringHashEq {};
template <>
struct HashEq<absl::Cord> : StringHashEq {};
#ifdef ABSL_HAVE_STD_STRING_VIEW
// Transparent hash for an arbitrary character type, keyed on
// std::basic_string_view<TChar>. std::basic_string<TChar> arguments convert
// implicitly, so owning strings and views share one functor per char type.
template <typename TChar>
struct BasicStringHash {
  using is_transparent = void;
  size_t operator()(std::basic_string_view<TChar> v) const {
    return absl::Hash<std::basic_string_view<TChar>>{}(v);
  }
};
// Transparent equality for an arbitrary character type. Both operands are
// taken as std::basic_string_view<CharT>, so std::basic_string values,
// string literals and views all participate in heterogeneous lookup without
// converting to the container's key type first.
template <typename CharT>
struct BasicStringEq {
  using is_transparent = void;
  bool operator()(std::basic_string_view<CharT> a,
                  std::basic_string_view<CharT> b) const {
    // Equivalent to a == b: compare() returns 0 exactly when the views
    // hold identical character sequences.
    return a.compare(b) == 0;
  }
};
// Hash/eq pair for one character type.
template <typename TChar>
struct BasicStringHashEq {
  using Hash = BasicStringHash<TChar>;
  using Eq = BasicStringEq<TChar>;
};
// Wide and Unicode string types: the owning string and its matching view
// share one transparent functor pair per character type.
template <>
struct HashEq<std::wstring> : BasicStringHashEq<wchar_t> {};
template <>
struct HashEq<std::wstring_view> : BasicStringHashEq<wchar_t> {};
template <>
struct HashEq<std::u16string> : BasicStringHashEq<char16_t> {};
template <>
struct HashEq<std::u16string_view> : BasicStringHashEq<char16_t> {};
template <>
struct HashEq<std::u32string> : BasicStringHashEq<char32_t> {};
template <>
struct HashEq<std::u32string_view> : BasicStringHashEq<char32_t> {};
#endif
// Pointer keys: transparent hash/eq that treat T*, std::unique_ptr<T, D>
// and std::shared_ptr<T> interchangeably by normalizing every argument to
// the underlying raw pointer before hashing or comparing. Smart pointers
// are only observed via get(); no ownership is taken or shared.
template <class T>
struct HashEq<T*> {
  struct Hash {
    using is_transparent = void;
    template <class U>
    size_t operator()(const U& ptr) const {
      // Hash the normalized raw pointer so every owner/observer form of
      // the same address hashes identically.
      return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
    }
  };
  struct Eq {
    using is_transparent = void;
    template <class A, class B>
    bool operator()(const A& a, const B& b) const {
      // Address equality after normalization.
      return HashEq::ToPtr(a) == HashEq::ToPtr(b);
    }
  };

 private:
  // Overload set mapping raw and smart pointers to `const T*`.
  static const T* ToPtr(const T* ptr) { return ptr; }
  template <class U, class D>
  static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
    return ptr.get();
  }
  template <class U>
  static const T* ToPtr(const std::shared_ptr<U>& ptr) {
    return ptr.get();
  }
};
// Keys that *are* smart pointers reuse the transparent raw-pointer HashEq,
// so lookups may pass a raw T* without constructing a smart pointer.
template <class T, class D>
struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
template <class T>
struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
// Detection trait: true iff T declares a nested `absl_container_hash` type.
template <typename T, typename E = void>
struct HasAbslContainerHash : std::false_type {};
template <typename T>
struct HasAbslContainerHash<T, absl::void_t<typename T::absl_container_hash>>
    : std::true_type {};
// Detection trait: true iff T declares a nested `absl_container_eq` type.
template <typename T, typename E = void>
struct HasAbslContainerEq : std::false_type {};
template <typename T>
struct HasAbslContainerEq<T, absl::void_t<typename T::absl_container_eq>>
    : std::true_type {};
// Selects T::absl_container_eq when declared, otherwise the transparent
// std::equal_to<>.
template <typename T, typename E = void>
struct AbslContainerEq {
  using type = std::equal_to<>;
};
template <typename T>
struct AbslContainerEq<
    T, typename std::enable_if_t<HasAbslContainerEq<T>::value>> {
  using type = typename T::absl_container_eq;
};
// Selects T::absl_container_hash when declared; `void` otherwise (the
// `void` default is never used, since the HashEq specialization below is
// only enabled when the nested type exists).
template <typename T, typename E = void>
struct AbslContainerHash {
  using type = void;
};
template <typename T>
struct AbslContainerHash<
    T, typename std::enable_if_t<HasAbslContainerHash<T>::value>> {
  using type = typename T::absl_container_hash;
};
// Opt-in path: a type that declares `absl_container_hash` (and optionally
// `absl_container_eq`) supplies its own functors. Both must be transparent;
// that requirement is enforced at compile time here.
template <typename T>
struct HashEq<T, typename std::enable_if_t<HasAbslContainerHash<T>::value>> {
  using Hash = typename AbslContainerHash<T>::type;
  using Eq = typename AbslContainerEq<T>::type;
  static_assert(IsTransparent<Hash>::value,
                "absl_container_hash must be transparent. To achieve it add a "
                "`using is_transparent = void;` clause to this type.");
  static_assert(IsTransparent<Eq>::value,
                "absl_container_eq must be transparent. To achieve it add a "
                "`using is_transparent = void;` clause to this type.");
};
// Public entry points: the default hash and equality functors for key type
// T, as used by absl's hash containers.
template <class T>
using hash_default_hash = typename container_internal::HashEq<T>::Hash;
template <class T>
using hash_default_eq = typename container_internal::HashEq<T>::Eq;
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/container/internal/hash_function_defaults.h"
#include <cstddef>
#include <functional>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/hash/hash.h"
#include "absl/random/random.h"
#include "absl/strings/cord.h"
#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
#ifdef ABSL_HAVE_STD_STRING_VIEW
#include <string_view>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
using ::testing::Types;
// hash_default_eq<int32_t> is std::equal_to<int32_t>: each argument is
// converted to int32_t before comparison, so e.g. double{1.1} truncates to
// 1 and compares equal.
TEST(Eq, Int32) {
  hash_default_eq<int32_t> eq;
  EXPECT_TRUE(eq(1, 1u));
  EXPECT_TRUE(eq(1, char{1}));
  EXPECT_TRUE(eq(1, true));
  EXPECT_TRUE(eq(1, double{1.1}));
  EXPECT_FALSE(eq(1, char{2}));
  EXPECT_FALSE(eq(1, 2u));
  EXPECT_FALSE(eq(1, false));
  EXPECT_FALSE(eq(1, 2.));
}
// The default hash likewise converts to int32_t first, so every value that
// compared equal above must produce the same hash.
TEST(Hash, Int32) {
  hash_default_hash<int32_t> hash;
  auto h = hash(1);
  EXPECT_EQ(h, hash(1u));
  EXPECT_EQ(h, hash(char{1}));
  EXPECT_EQ(h, hash(true));
  EXPECT_EQ(h, hash(double{1.1}));
  EXPECT_NE(h, hash(2u));
  EXPECT_NE(h, hash(char{2}));
  EXPECT_NE(h, hash(false));
  EXPECT_NE(h, hash(2.));
}
// Enums fall through to the primary HashEq template: equality is the enum's
// own operator==, and the test pins that hashing an enum matches hashing
// its underlying integer value.
enum class MyEnum { A, B, C, D };
TEST(Eq, Enum) {
  hash_default_eq<MyEnum> eq;
  EXPECT_TRUE(eq(MyEnum::A, MyEnum::A));
  EXPECT_FALSE(eq(MyEnum::A, MyEnum::B));
}
TEST(Hash, Enum) {
  hash_default_hash<MyEnum> hash;
  for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) {
    auto h = hash(e);
    EXPECT_EQ(h, hash_default_hash<int>{}(static_cast<int>(e)));
    EXPECT_NE(h, hash(MyEnum::D));
  }
}
// Typed-test fixtures: the same transparent-lookup expectations are run for
// both the owning (std::string) and non-owning (absl::string_view) key
// types, which share StringHashEq.
using StringTypes = ::testing::Types<std::string, absl::string_view>;
template <class T>
struct EqString : ::testing::Test {
  hash_default_eq<T> key_eq;
};
TYPED_TEST_SUITE(EqString, StringTypes);
template <class T>
struct HashString : ::testing::Test {
  hash_default_hash<T> hasher;
};
TYPED_TEST_SUITE(HashString, StringTypes);
// Heterogeneous equality: literals, string_view and std::string arguments
// may be mixed freely without constructing the key type.
TYPED_TEST(EqString, Works) {
  auto eq = this->key_eq;
  EXPECT_TRUE(eq("a", "a"));
  EXPECT_TRUE(eq("a", absl::string_view("a")));
  EXPECT_TRUE(eq("a", std::string("a")));
  EXPECT_FALSE(eq("a", "b"));
  EXPECT_FALSE(eq("a", absl::string_view("b")));
  EXPECT_FALSE(eq("a", std::string("b")));
}
// Heterogeneous hashing: every representation of the same text hashes
// alike; different text hashes differently.
TYPED_TEST(HashString, Works) {
  auto hash = this->hasher;
  auto h = hash("a");
  EXPECT_EQ(h, hash(absl::string_view("a")));
  EXPECT_EQ(h, hash(std::string("a")));
  EXPECT_NE(h, hash(absl::string_view("b")));
  EXPECT_NE(h, hash(std::string("b")));
}
// Wide-string keys require std::wstring_view support; when the toolchain
// lacks it the test is skipped at runtime and the #else body is not even
// compiled.
TEST(BasicStringViewTest, WStringEqWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
  GTEST_SKIP();
#else
  hash_default_eq<std::wstring> eq;
  EXPECT_TRUE(eq(L"a", L"a"));
  EXPECT_TRUE(eq(L"a", std::wstring_view(L"a")));
  EXPECT_TRUE(eq(L"a", std::wstring(L"a")));
  EXPECT_FALSE(eq(L"a", L"b"));
  EXPECT_FALSE(eq(L"a", std::wstring_view(L"b")));
  EXPECT_FALSE(eq(L"a", std::wstring(L"b")));
#endif
}
TEST(BasicStringViewTest, WStringViewEqWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_eq<std::wstring_view> eq;
EXPECT_TRUE(eq(L"a", L"a"));
EXPECT_TRUE(eq(L"a", std::wstring_view(L"a")));
EXPECT_TRUE(eq(L"a", std::wstring(L"a")));
EXPECT_FALSE(eq(L"a", L"b"));
EXPECT_FALSE(eq(L"a", std::wstring_view(L"b")));
EXPECT_FALSE(eq(L"a", std::wstring(L"b")));
#endif
}
TEST(BasicStringViewTest, U16StringEqWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_eq<std::u16string> eq;
EXPECT_TRUE(eq(u"a", u"a"));
EXPECT_TRUE(eq(u"a", std::u16string_view(u"a")));
EXPECT_TRUE(eq(u"a", std::u16string(u"a")));
EXPECT_FALSE(eq(u"a", u"b"));
EXPECT_FALSE(eq(u"a", std::u16string_view(u"b")));
EXPECT_FALSE(eq(u"a", std::u16string(u"b")));
#endif
}
TEST(BasicStringViewTest, U16StringViewEqWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_eq<std::u16string_view> eq;
EXPECT_TRUE(eq(u"a", u"a"));
EXPECT_TRUE(eq(u"a", std::u16string_view(u"a")));
EXPECT_TRUE(eq(u"a", std::u16string(u"a")));
EXPECT_FALSE(eq(u"a", u"b"));
EXPECT_FALSE(eq(u"a", std::u16string_view(u"b")));
EXPECT_FALSE(eq(u"a", std::u16string(u"b")));
#endif
}
TEST(BasicStringViewTest, U32StringEqWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_eq<std::u32string> eq;
EXPECT_TRUE(eq(U"a", U"a"));
EXPECT_TRUE(eq(U"a", std::u32string_view(U"a")));
EXPECT_TRUE(eq(U"a", std::u32string(U"a")));
EXPECT_FALSE(eq(U"a", U"b"));
EXPECT_FALSE(eq(U"a", std::u32string_view(U"b")));
EXPECT_FALSE(eq(U"a", std::u32string(U"b")));
#endif
}
TEST(BasicStringViewTest, U32StringViewEqWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_eq<std::u32string_view> eq;
EXPECT_TRUE(eq(U"a", U"a"));
EXPECT_TRUE(eq(U"a", std::u32string_view(U"a")));
EXPECT_TRUE(eq(U"a", std::u32string(U"a")));
EXPECT_FALSE(eq(U"a", U"b"));
EXPECT_FALSE(eq(U"a", std::u32string_view(U"b")));
EXPECT_FALSE(eq(U"a", std::u32string(U"b")));
#endif
}
TEST(BasicStringViewTest, WStringHashWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_hash<std::wstring> hash;
auto h = hash(L"a");
EXPECT_EQ(h, hash(std::wstring_view(L"a")));
EXPECT_EQ(h, hash(std::wstring(L"a")));
EXPECT_NE(h, hash(std::wstring_view(L"b")));
EXPECT_NE(h, hash(std::wstring(L"b")));
#endif
}
TEST(BasicStringViewTest, WStringViewHashWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_hash<std::wstring_view> hash;
auto h = hash(L"a");
EXPECT_EQ(h, hash(std::wstring_view(L"a")));
EXPECT_EQ(h, hash(std::wstring(L"a")));
EXPECT_NE(h, hash(std::wstring_view(L"b")));
EXPECT_NE(h, hash(std::wstring(L"b")));
#endif
}
TEST(BasicStringViewTest, U16StringHashWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_hash<std::u16string> hash;
auto h = hash(u"a");
EXPECT_EQ(h, hash(std::u16string_view(u"a")));
EXPECT_EQ(h, hash(std::u16string(u"a")));
EXPECT_NE(h, hash(std::u16string_view(u"b")));
EXPECT_NE(h, hash(std::u16string(u"b")));
#endif
}
TEST(BasicStringViewTest, U16StringViewHashWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_hash<std::u16string_view> hash;
auto h = hash(u"a");
EXPECT_EQ(h, hash(std::u16string_view(u"a")));
EXPECT_EQ(h, hash(std::u16string(u"a")));
EXPECT_NE(h, hash(std::u16string_view(u"b")));
EXPECT_NE(h, hash(std::u16string(u"b")));
#endif
}
TEST(BasicStringViewTest, U32StringHashWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_hash<std::u32string> hash;
auto h = hash(U"a");
EXPECT_EQ(h, hash(std::u32string_view(U"a")));
EXPECT_EQ(h, hash(std::u32string(U"a")));
EXPECT_NE(h, hash(std::u32string_view(U"b")));
EXPECT_NE(h, hash(std::u32string(U"b")));
#endif
}
TEST(BasicStringViewTest, U32StringViewHashWorks) {
#ifndef ABSL_HAVE_STD_STRING_VIEW
GTEST_SKIP();
#else
hash_default_hash<std::u32string_view> hash;
auto h = hash(U"a");
EXPECT_EQ(h, hash(std::u32string_view(U"a")));
EXPECT_EQ(h, hash(std::u32string(U"a")));
EXPECT_NE(h, hash(std::u32string_view(U"b")));
EXPECT_NE(h, hash(std::u32string(U"b")));
#endif
}
// A deleter that intentionally does nothing. It lets a std::unique_ptr in
// these tests observe a pointer whose real owner is elsewhere (e.g. a
// shared_ptr to the same object) without causing a double free.
struct NoDeleter {
  template <class T>
  void operator()(const T* /*unowned*/) const {
    // Deliberately a no-op: ownership stays with the true owner.
  }
};
// The set of pointer-like key types that must interoperate under the default
// hash/eq functors: raw pointers (const and non-const), unique_ptr with both
// the default deleter and the non-owning NoDeleter, and shared_ptr. The
// NoDeleter variants let tests alias an object that a shared_ptr really owns.
using PointerTypes =
    ::testing::Types<const int*, int*, std::unique_ptr<const int>,
                     std::unique_ptr<const int, NoDeleter>,
                     std::unique_ptr<int>, std::unique_ptr<int, NoDeleter>,
                     std::shared_ptr<const int>, std::shared_ptr<int>>;
// Typed-test fixture exposing the default equality functor for each
// pointer-like key type in PointerTypes.
template <class T>
struct EqPointer : ::testing::Test {
  hash_default_eq<T> key_eq;
};
TYPED_TEST_SUITE(EqPointer, PointerTypes);
// Typed-test fixture exposing the default hasher for each pointer-like type.
template <class T>
struct HashPointer : ::testing::Test {
  hash_default_hash<T> hasher;
};
TYPED_TEST_SUITE(HashPointer, PointerTypes);
// Heterogeneous equality across raw, unique_ and shared_ pointers: every
// pointer flavor referring to the same address compares equal, and a
// distinct address compares unequal to each of them. The NoDeleter
// unique_ptrs alias the shared_ptr's object, so scope exit frees it once.
TYPED_TEST(EqPointer, Works) {
  int dummy;
  auto eq = this->key_eq;
  auto sptr = std::make_shared<int>();
  std::shared_ptr<const int> csptr = sptr;
  int* ptr = sptr.get();
  const int* cptr = ptr;
  std::unique_ptr<int, NoDeleter> uptr(ptr);
  std::unique_ptr<const int, NoDeleter> cuptr(ptr);
  // Same address under every pointer flavor compares equal.
  EXPECT_TRUE(eq(ptr, cptr));
  EXPECT_TRUE(eq(ptr, sptr));
  EXPECT_TRUE(eq(ptr, uptr));
  EXPECT_TRUE(eq(ptr, csptr));
  EXPECT_TRUE(eq(ptr, cuptr));
  // A different object never compares equal to any flavor.
  EXPECT_FALSE(eq(&dummy, cptr));
  EXPECT_FALSE(eq(&dummy, sptr));
  EXPECT_FALSE(eq(&dummy, uptr));
  EXPECT_FALSE(eq(&dummy, csptr));
  EXPECT_FALSE(eq(&dummy, cuptr));
}
// Pointer hashing is by address: distinct objects hash differently (as
// asserted for these two), and upcasting a Derived* to Base* must not
// change the hash — whether the pointer is raw or held in a shared_ptr.
TEST(Hash, DerivedAndBase) {
  struct Base {};
  struct Derived : Base {};
  hash_default_hash<Base*> hasher;
  Base base;
  Derived derived;
  EXPECT_NE(hasher(&base), hasher(&derived));
  EXPECT_EQ(hasher(static_cast<Base*>(&derived)), hasher(&derived));
  // The same address hashes identically via shared_ptr and raw pointer.
  auto dp = std::make_shared<Derived>();
  EXPECT_EQ(hasher(static_cast<Base*>(dp.get())), hasher(dp));
}
// Function pointers are hashable and comparable with the default functors:
// a pointer equals (and hashes like) itself, and two distinct functions are
// unequal with different hashes. Captureless lambdas convert to Func.
TEST(Hash, FunctionPointer) {
  using Func = int (*)();
  hash_default_hash<Func> hasher;
  hash_default_eq<Func> eq;
  Func p1 = [] { return 1; }, p2 = [] { return 2; };
  EXPECT_EQ(hasher(p1), hasher(p1));
  EXPECT_TRUE(eq(p1, p1));
  EXPECT_NE(hasher(p1), hasher(p2));
  EXPECT_FALSE(eq(p1, p2));
}
TYPED_TEST(HashPointer, Works) {
int dummy;
auto hash = this->hasher;
auto sptr = std::make_shared<int>();
std::shared_ptr<const int> csptr = sptr;
int* ptr = sptr.get();
const int* cptr = ptr;
std::unique_ptr<int, NoDeleter> uptr(ptr);
std::unique_ptr<const int, NoDeleter> cuptr(ptr);
EXPECT_EQ(hash(ptr), hash(cptr));
EXPECT_EQ(hash(ptr), hash(sptr));
EXPECT_EQ(hash(ptr), hash(uptr));
EXPECT_EQ(hash(ptr), hash(csptr));
EXPECT_EQ(hash(ptr), hash(cuptr));
EXPECT_NE(hash(&dummy), hash(cptr));
EXPECT_NE(hash(&dummy), hash(sptr));
EXPECT_NE(hash(&dummy), hash(uptr));
EXPECT_NE(hash(&dummy), hash(csptr));
EXPECT_NE(hash(&dummy), hash(cuptr));
}
// Heterogeneous equality for absl::Cord keys: a Cord compares equal to an
// equal-content string_view in either argument order, and unequal contents
// compare unequal in both orders.
TEST(EqCord, Works) {
  hash_default_eq<absl::Cord> eq;
  const absl::string_view a_string_view = "a";
  const absl::Cord a_cord(a_string_view);
  const absl::string_view b_string_view = "b";
  const absl::Cord b_cord(b_string_view);
  EXPECT_TRUE(eq(a_cord, a_cord));
  EXPECT_TRUE(eq(a_cord, a_string_view));
  EXPECT_TRUE(eq(a_string_view, a_cord));
  EXPECT_FALSE(eq(a_cord, b_cord));
  EXPECT_FALSE(eq(a_cord, b_string_view));
  EXPECT_FALSE(eq(b_string_view, a_cord));
}
TEST(HashCord, Works) {
hash_default_hash<absl::Cord> hash;
const absl::string_view a_string_view = "a";
const absl::Cord a_cord(a_string_view);
const absl::string_view b_string_view = "b";
const absl::Cord b_cord(b_string_view);
EXPECT_EQ(hash(a_cord), hash(a_cord));
EXPECT_EQ(hash(b_cord), hash(b_cord));
EXPECT_EQ(hash(a_string_view), hash(a_cord));
EXPECT_EQ(hash(b_string_view), hash(b_cord));
EXPECT_EQ(hash(absl::Cord("")), hash(""));
EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view()));
EXPECT_NE(hash(a_cord), hash(b_cord));
EXPECT_NE(hash(a_cord), hash(b_string_view));
EXPECT_NE(hash(a_string_view), hash(b_cord));
EXPECT_NE(hash(a_string_view), hash(b_string_view));
}
void NoOpReleaser(absl::string_view data, void* arg) {}
TEST(HashCord, FragmentedCordWorks) {
hash_default_hash<absl::Cord> hash;
absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"});
EXPECT_FALSE(c.TryFlat().has_value());
EXPECT_EQ(hash(c), hash("abc"));
}
TEST(HashCord, FragmentedLongCordWorks) {
hash_default_hash<absl::Cord> hash;
std::string a(65536, 'a');
std::string b(65536, 'b');
absl::Cord c = absl::MakeFragmentedCord({a, b});
EXPECT_FALSE(c.TryFlat().has_value());
EXPECT_EQ(hash(c), hash(a + b));
}
// Property test: for randomly fragmented Cords with random contents, the
// Cord hash must agree with hashing the equivalent flattened std::string.
TEST(HashCord, RandomCord) {
  hash_default_hash<absl::Cord> hash;
  auto bitgen = absl::BitGen();
  for (int i = 0; i < 1000; ++i) {
    const int number_of_segments = absl::Uniform(bitgen, 0, 10);
    std::vector<std::string> pieces;
    pieces.reserve(number_of_segments);
    // Loop index is int to match number_of_segments; the original compared
    // a size_t index against an int bound (signed/unsigned mismatch).
    for (int s = 0; s < number_of_segments; ++s) {
      std::string str;
      str.resize(absl::Uniform(bitgen, 0, 4096));
      std::generate(str.begin(), str.end(), [&]() -> char {
        return static_cast<char>(absl::Uniform<unsigned char>(bitgen));
      });
      pieces.push_back(std::move(str));
    }
    absl::Cord c = absl::MakeFragmentedCord(pieces);
    EXPECT_EQ(hash(c), hash(std::string(c)));
  }
}
using StringTypesCartesianProduct = Types<
std::pair<absl::Cord, std::string>,
std::pair<absl::Cord, absl::string_view>,
std::pair<absl::Cord, absl::Cord>,
std::pair<absl::Cord, const char*>,
std::pair<std::string, absl::Cord>,
std::pair<absl::string_view, absl::Cord>,
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
std::pair<absl::string_view, const char*>>;
constexpr char kFirstString[] = "abc123";
constexpr char kSecondString[] = "ijk456";
template <typename T>
struct StringLikeTest : public ::testing::Test {
typename T::first_type a1{kFirstString};
typename T::second_type b1{kFirstString};
typename T::first_type a2{kSecondString};
typename T::second_type b2{kSecondString};
hash_default_eq<typename T::first_type> eq;
hash_default_hash<typename T::first_type> hash;
};
TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
TYPED_TEST(StringLikeTest, Eq) {
EXPECT_TRUE(this->eq(this->a1, this->b1));
EXPECT_TRUE(this->eq(this->b1, this->a1));
}
TYPED_TEST(StringLikeTest, NotEq) {
EXPECT_FALSE(this->eq(this->a1, this->b2));
EXPECT_FALSE(this->eq(this->b2, this->a1));
}
TYPED_TEST(StringLikeTest, HashEq) {
EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
}
// A key type that opts into Abseil hash-table support by declaring a nested
// `absl_container_hash` functor. `is_transparent` enables heterogeneous
// lookup, so containers keyed on this type also accept plain ints. The
// `noise` field intentionally takes no part in hashing or equality, letting
// tests confirm that only `value` determines key identity.
struct TypeWithAbslContainerHash {
  struct absl_container_hash {
    using is_transparent = void;
    size_t operator()(const TypeWithAbslContainerHash& foo) const {
      return absl::HashOf(foo.value);
    }
    // Heterogeneous overload: a bare int hashes exactly like a full key
    // carrying the same value.
    size_t operator()(int value) const { return absl::HashOf(value); }
  };
  friend bool operator==(const TypeWithAbslContainerHash& lhs,
                         const TypeWithAbslContainerHash& rhs) {
    return lhs.value == rhs.value;
  }
  friend bool operator==(const TypeWithAbslContainerHash& lhs, int rhs) {
    return lhs.value == rhs;
  }
  int value;
  int noise;  // Ignored by hashing and equality.
};
struct TypeWithAbslContainerHashAndEq {
struct absl_container_hash {
using is_transparent = void;
size_t operator()(const TypeWithAbslContainerHashAndEq& foo) const {
return absl::HashOf(foo.value);
}
size_t operator()(int value) const { return absl::HashOf(value); }
};
struct absl_container_eq {
using is_transparent = void;
bool operator()(const TypeWithAbslContainerHashAndEq& lhs,
const TypeWithAbslContainerHashAndEq& rhs) const {
return lhs.value == rhs.value;
}
bool operator()(const TypeWithAbslContainerHashAndEq& lhs, int rhs) const {
return lhs.value == rhs;
}
};
template <typename T>
bool operator==(T&& other) const = delete;
int value;
int noise;
};
using AbslContainerHashTypes =
Types<TypeWithAbslContainerHash, TypeWithAbslContainerHashAndEq>;
template <typename T>
using AbslContainerHashTest = ::testing::Test;
TYPED_TEST_SUITE(AbslContainerHashTest, AbslContainerHashTypes);
TYPED_TEST(AbslContainerHashTest, HasherWorks) {
hash_default_hash<TypeParam> hasher;
TypeParam foo1{1, 100};
TypeParam foo1_copy{1, 20};
TypeParam foo2{2, 100};
EXPECT_EQ(hasher(foo1), absl::HashOf(1));
EXPECT_EQ(hasher(foo2), absl::HashOf(2));
EXPECT_EQ(hasher(foo1), hasher(foo1_copy));
EXPECT_EQ(hasher(foo1), hasher(1));
EXPECT_EQ(hasher(foo2), hasher(2));
}
TYPED_TEST(AbslContainerHashTest, EqWorks) {
hash_default_eq<TypeParam> eq;
TypeParam foo1{1, 100};
TypeParam foo1_copy{1, 20};
TypeParam foo2{2, 100};
EXPECT_TRUE(eq(foo1, foo1_copy));
EXPECT_FALSE(eq(foo1, foo2));
EXPECT_TRUE(eq(foo1, 1));
EXPECT_FALSE(eq(foo1, 2));
}
TYPED_TEST(AbslContainerHashTest, HeterogeneityInMapWorks) {
absl::flat_hash_map<TypeParam, int> map;
TypeParam foo1{1, 100};
TypeParam foo1_copy{1, 20};
TypeParam foo2{2, 100};
TypeParam foo3{3, 100};
map[foo1] = 1;
map[foo2] = 2;
EXPECT_TRUE(map.contains(foo1_copy));
EXPECT_EQ(map.at(foo1_copy), 1);
EXPECT_TRUE(map.contains(1));
EXPECT_EQ(map.at(1), 1);
EXPECT_TRUE(map.contains(2));
EXPECT_EQ(map.at(2), 2);
EXPECT_FALSE(map.contains(foo3));
EXPECT_FALSE(map.contains(3));
}
TYPED_TEST(AbslContainerHashTest, HeterogeneityInSetWorks) {
absl::flat_hash_set<TypeParam> set;
TypeParam foo1{1, 100};
TypeParam foo1_copy{1, 20};
TypeParam foo2{2, 100};
set.insert(foo1);
EXPECT_TRUE(set.contains(foo1_copy));
EXPECT_TRUE(set.contains(1));
EXPECT_FALSE(set.contains(foo2));
EXPECT_FALSE(set.contains(2));
}
}
}
ABSL_NAMESPACE_END
}
// Bit flags identifying which hasher mechanisms can hash a Hashable<H>.
// Used both as a bitmask (tested via Hashable::HashableBy) and as the value
// each corresponding hasher returns, so the dispatch test below can tell
// which mechanism actually ran.
enum Hash : size_t {
  kStd = 0x1,  // Hashable via a std::hash specialization.
#ifdef _MSC_VER
  // NOTE(review): on MSVC kExtension aliases kStd — presumably the two
  // dispatch paths are indistinguishable on that toolchain; confirm against
  // the hash-dispatch implementation.
  kExtension = kStd,
#else
  kExtension = 0x2,
#endif
};
template <int H>
struct Hashable {
static constexpr bool HashableBy(Hash h) { return h & H; }
};
namespace std {
// std::hash specialization for Hashable<H> that is only *callable* when the
// type declares itself std-hashable: the specialization always exists, but
// SFINAE on the call operator's defaulted template parameters removes
// operator() unless E::HashableBy(kStd) holds. Returns kStd so the
// dispatch test can detect which mechanism was chosen.
template <int H>
struct hash<Hashable<H>> {
  template <class E = Hashable<H>,
            class = typename std::enable_if<E::HashableBy(kStd)>::type>
  size_t operator()(E) const {
    return kStd;
  }
};
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
template <class T>
size_t Hash(const T& v) {
return hash_default_hash<T>()(v);
}
TEST(Delegate, HashDispatch) {
EXPECT_EQ(Hash(kStd), Hash(Hashable<kStd>()));
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/hash_function_defaults.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/container/internal/hash_function_defaults_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ed071fc0-b1a7-41b0-b171-402f275ef9fc | cpp | tensorflow/tensorflow | math_util | tensorflow/core/lib/math/math_util.h | third_party/xla/xla/tsl/lib/math/math_util_test.cc | #ifndef TENSORFLOW_CORE_LIB_MATH_MATH_UTIL_H_
#define TENSORFLOW_CORE_LIB_MATH_MATH_UTIL_H_
#include "xla/tsl/lib/math/math_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
using tsl::MathUtil;
}
#endif | #include "xla/tsl/lib/math/math_util.h"
#include <cmath>
#include <limits>
#include <vector>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
const int kNumTestArguments = 4;
template <typename IntegralType, typename TestDataType>
void TestCeilOfRatio(const TestDataType test_data[][kNumTestArguments],
int num_tests) {
for (int i = 0; i < num_tests; ++i) {
const IntegralType numerator = test_data[i][0];
const IntegralType denominator = test_data[i][1];
const IntegralType expected_floor = test_data[i][2];
const IntegralType expected_ceil = test_data[i][3];
IntegralType floor_1 = tsl::MathUtil::FloorOfRatio(numerator, denominator);
IntegralType floor_2 =
tsl::MathUtil::CeilOrFloorOfRatio<IntegralType, false>(numerator,
denominator);
EXPECT_EQ(floor_1, floor_2);
EXPECT_EQ(expected_floor, floor_1)
<< "FloorOfRatio fails with numerator = " << numerator
<< ", denominator = " << denominator << " "
<< (8 * sizeof(IntegralType)) << " bits";
IntegralType ceil_1 = tsl::MathUtil::CeilOfRatio(numerator, denominator);
IntegralType ceil_2 = tsl::MathUtil::CeilOrFloorOfRatio<IntegralType, true>(
numerator, denominator);
EXPECT_EQ(ceil_1, ceil_2);
EXPECT_EQ(expected_ceil, ceil_1)
<< "CeilOfRatio fails with numerator = " << numerator
<< ", denominator = " << denominator << " "
<< (8 * sizeof(IntegralType)) << " bits";
}
}
template <typename UnsignedIntegralType>
void TestCeilOfRatioUnsigned(uint64 kMax) {
const int kNumTests = 12;
const uint64 kTestData[kNumTests][kNumTestArguments] = {
{0, 1, 0, 0},
{0, 2, 0, 0},
{0, kMax, 0, 0},
{1, 1, 1, 1},
{5, 2, 2, 3},
{kMax, 1, kMax, kMax},
{kMax, 2, kMax / 2, kMax / 2 + ((kMax % 2 != 0) ? 1 : 0)},
{kMax, 3, kMax / 3, kMax / 3 + ((kMax % 3 != 0) ? 1 : 0)},
{1, kMax, 0, 1},
{2, kMax, 0, 1},
{3, kMax, 0, 1},
{kMax, kMax, 1, 1},
};
TestCeilOfRatio<UnsignedIntegralType, uint64>(kTestData, kNumTests);
}
template <typename SignedInteger>
void TestCeilOfRatioSigned(int64_t kMin, int64_t kMax) {
const int kNumTests = 30;
const int64_t kTestData[kNumTests][kNumTestArguments] = {
{0, 1, 0, 0},
{0, -1, 0, 0},
{0, 2, 0, 0},
{0, kMin, 0, 0},
{0, kMax, 0, 0},
{1, 1, 1, 1},
{-1, 1, -1, -1},
{1, -1, -1, -1},
{-1, -1, 1, 1},
{5, 2, 2, 3},
{-5, 2, -3, -2},
{5, -2, -3, -2},
{-5, -2, 2, 3},
{kMax, 1, kMax, kMax},
{kMax, -1, -kMax, -kMax},
{kMax, 2, kMax / 2, kMax / 2 + ((kMax % 2 != 0) ? 1 : 0)},
{kMax, 3, kMax / 3, kMax / 3 + ((kMax % 3 != 0) ? 1 : 0)},
{kMin, 1, kMin, kMin},
{kMin, 2, kMin / 2 - ((kMin % 2 != 0) ? 1 : 0), kMin / 2},
{kMin, 3, kMin / 3 - ((kMin % 3 != 0) ? 1 : 0), kMin / 3},
{1, kMax, 0, 1},
{2, kMax, 0, 1},
{3, kMax, 0, 1},
{1, kMin, -1, 0},
{2, kMin, -1, 0},
{3, kMin, -1, 0},
{kMin, kMin, 1, 1},
{kMin, kMax, -2, -1},
{kMax, kMin, -1, 0},
{kMax, kMax, 1, 1},
};
TestCeilOfRatio<SignedInteger, int64_t>(kTestData, kNumTests);
}
// Reference "textbook" ceiling-of-ratio: (n + d - 1) / d. Kept as a foil
// for MathUtil::CeilOfRatio — the addition can overflow and the formula is
// wrong for some negative operands, which the tests below demonstrate.
template <typename IntegralType>
static IntegralType CeilOfRatioDenomMinusOne(IntegralType numerator,
                                             IntegralType denominator) {
  const IntegralType adjusted = numerator + denominator - IntegralType(1);
  return adjusted / denominator;
}
// Reference floor via plain integer division. C++ '/' truncates toward
// zero, so for negative quotients this is not a mathematical floor — the
// comparison tests rely on exactly that behavior.
template <typename IntegralType>
static IntegralType FloorOfRatioByDivision(IntegralType numerator,
                                           IntegralType denominator) {
  const IntegralType quotient = numerator / denominator;
  return quotient;
}
// Dispatches to the naive reference implementations: the
// denominator-minus-one ceiling when ComputeCeil is true, otherwise
// truncating integer division.
template <typename Integer, bool ComputeCeil>
static Integer CeilOrFloorOfRatioArithmetic(Integer numerator,
                                            Integer denominator) {
  return ComputeCeil ? CeilOfRatioDenomMinusOne(numerator, denominator)
                     : FloorOfRatioByDivision(numerator, denominator);
}
// Asserts that the naive (n + d - 1) / d formula disagrees with
// MathUtil::CeilOfRatio by exactly `expected_error` for the given operands,
// documenting precisely where the naive reference breaks down.
void TestThatCeilOfRatioDenomMinusOneIsIncorrect(int64_t numerator,
                                                 int64_t denominator,
                                                 int64_t expected_error) {
  const int64_t correct_result =
      tsl::MathUtil::CeilOfRatio(numerator, denominator);
  const int64_t result_by_denom_minus_one =
      CeilOfRatioDenomMinusOne(numerator, denominator);
  EXPECT_EQ(result_by_denom_minus_one + expected_error, correct_result)
      << "numerator = " << numerator << " denominator = " << denominator
      << " expected error = " << expected_error
      << " Actual difference: " << (correct_result - result_by_denom_minus_one);
}
void TestThatCeilOfRatioDenomMinusOneIsIncorrect() {
TestThatCeilOfRatioDenomMinusOneIsIncorrect(-1LL, -2LL, -1LL);
}
TEST(MathUtil, CeilOfRatio) {
TestCeilOfRatioUnsigned<uint8>(kuint8max);
TestCeilOfRatioUnsigned<uint16>(kuint16max);
TestCeilOfRatioUnsigned<uint32>(kuint32max);
TestCeilOfRatioUnsigned<uint64>(kuint64max);
TestCeilOfRatioSigned<int8>(kint8min, kint8max);
TestCeilOfRatioSigned<int16>(kint16min, kint16max);
TestCeilOfRatioSigned<int32>(kint32min, kint32max);
TestCeilOfRatioSigned<int64_t>(kint64min, kint64max);
#if 0
TestThatCeilOfRatioDenomMinusOneIsIncorrect();
#endif
}
struct GCDTestCase {
unsigned int x;
unsigned int y;
unsigned int gcd;
};
TEST(MathUtil, GCD) {
std::vector<GCDTestCase> testcases({
{10, 20, 10},
{27, 8, 1},
{4, 3, 1},
{6, 8, 2},
{5, 0, 5},
{5, 5, 5},
{0, 0, 0}
});
for (const auto& tc : testcases) {
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint32>(tc.x, tc.y));
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint32>(tc.y, tc.x));
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint64>(tc.x, tc.y));
EXPECT_EQ(tc.gcd, tsl::MathUtil::GCD<uint64>(tc.y, tc.x));
}
const uint64 biggish_prime = 1666666667;
EXPECT_EQ(biggish_prime,
tsl::MathUtil::GCD<uint64>(biggish_prime * 3, biggish_prime * 4));
}
// 1 raised to any exponent in [0, 1024) must be exactly 1, for any numeric
// type T (integral or floating point).
template <typename T>
void TestOneIPowN() {
  const T one{1};
  for (int i = 0; i < 1024; ++i) {
    EXPECT_EQ(tsl::MathUtil::IPow(one, i), one);
  }
}
// 2^i must equal a left shift for every exponent that is representable
// without overflow: all value bits (numeric_limits digits) for integral T,
// and 63 bits for floating-point T, where such powers of two are exact.
template <typename T>
void TestTwoIPowN() {
  int limit = std::is_integral<T>::value ? std::numeric_limits<T>::digits : 63;
  for (int i = 0; i < limit; ++i) {
    EXPECT_EQ(tsl::MathUtil::IPow(T{2}, i), static_cast<T>(1ull << i));
  }
}
// Sweeps bases in [start, end) by `step` and exponents in [0, max_exponent),
// checking that IPow agrees with std::pow to float precision for
// floating-point T.
template <typename T>
void TestFloatIPow(const int max_exponent, const T start, const T end,
                   const T step) {
  for (T f = start; f < end; f += step) {
    for (int i = 0; i < max_exponent; ++i) {
      EXPECT_FLOAT_EQ(tsl::MathUtil::IPow(f, i), std::pow(f, i));
    }
  }
}
TEST(MathUtil, IPow) {
TestOneIPowN<double>();
TestOneIPowN<float>();
TestOneIPowN<int>();
TestOneIPowN<int64_t>();
TestTwoIPowN<double>();
TestTwoIPowN<float>();
TestTwoIPowN<int>();
TestTwoIPowN<int64_t>();
EXPECT_EQ(tsl::MathUtil::IPow(3, 0), 1);
EXPECT_EQ(tsl::MathUtil::IPow(3, 1), 3);
EXPECT_EQ(tsl::MathUtil::IPow(3, 2), 9);
EXPECT_EQ(tsl::MathUtil::IPow(3, 3), 27);
EXPECT_EQ(tsl::MathUtil::IPow(3, 4), 81);
EXPECT_EQ(tsl::MathUtil::IPow(3, 5), 243);
TestFloatIPow<float>(13, -16.0f, 16.0f, 1.0f / 8);
TestFloatIPow<double>(13, -16.0, 16.0, 1.0 / 8);
TestFloatIPow<float>(13, -1.0f / (1 << 12), -1.0f / (1 << 12),
1.0f / (1 << 16));
TestFloatIPow<double>(13, -1.0 / (1 << 12), -1.0 / (1 << 12),
1.0 / (1 << 16));
}
// IEEE-style edge cases for integral-exponent pow: overflow to signed
// infinities, signed-zero results, and the "anything to the power zero is
// one" rule (including infinities and NaN bases), mirroring std::pow.
TEST(MathUtil, IPowEdgeCases) {
  constexpr const double kInf = std::numeric_limits<double>::infinity();
  // Overflow: sign of the infinity follows the parity of the exponent.
  EXPECT_EQ(tsl::MathUtil::IPow(-12345.0, 79), -kInf);
  EXPECT_EQ(tsl::MathUtil::IPow(-12345.0, 80), +kInf);
  // Signed zeros: odd exponents preserve the zero's sign, even ones don't.
  EXPECT_EQ(tsl::MathUtil::IPow(+0.0, 3), +0.0);
  EXPECT_EQ(tsl::MathUtil::IPow(-0.0, 3), -0.0);
  EXPECT_EQ(tsl::MathUtil::IPow(+0.0, 42), +0.0);
  EXPECT_EQ(tsl::MathUtil::IPow(-0.0, 42), +0.0);
  // x^0 == 1 for every base, including infinities and NaN.
  EXPECT_EQ(tsl::MathUtil::IPow(-kInf, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(-2.0, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(-1.0, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(-0.0, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(+0.0, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(+1.0, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(+2.0, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(+kInf, 0.0), 1.0);
  EXPECT_EQ(tsl::MathUtil::IPow(std::numeric_limits<double>::quiet_NaN(), 0.0),
            1.0);
  // Infinite bases with positive exponents: sign follows exponent parity.
  EXPECT_EQ(tsl::MathUtil::IPow(-kInf, 43), -kInf);
  EXPECT_EQ(tsl::MathUtil::IPow(-kInf, 42), +kInf);
  EXPECT_EQ(tsl::MathUtil::IPow(+kInf, 42), +kInf);
  EXPECT_EQ(tsl::MathUtil::IPow(+kInf, 43), +kInf);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/math/math_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/math/math_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eb352e40-0206-4615-9ec1-0137e0066b13 | cpp | tensorflow/tensorflow | cache_dataset_ops | tensorflow/core/kernels/data/cache_dataset_ops.cc | tensorflow/core/kernels/data/cache_dataset_ops_test.cc | #include "tensorflow/core/kernels/data/cache_dataset_ops.h"
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/data/cache_ops.h"
#include "tensorflow/core/kernels/data/iterator_ops.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
namespace tensorflow {
namespace data {
constexpr const char* const CacheDatasetOp::kDatasetType;
constexpr const char* const CacheDatasetOp::kInputDataset;
constexpr const char* const CacheDatasetOp::kFileName;
constexpr const char* const CacheDatasetOp::kOutputTypes;
constexpr const char* const CacheDatasetOp::kOutputShapes;
namespace {
constexpr char kKeyStrFormat[] = "%%%zuzu_%%%zuzu";
constexpr char kPaddingSizeStrFormat[] = "%zu";
constexpr char kFileDatasetPrefix[] = "File";
constexpr char kMode[] = "Mode";
constexpr char kLockFileSuffix[] = ".lockfile";
constexpr char kIterationCompleted[] = "iteration_completed";
constexpr char kCurIndex[] = "cur_index";
constexpr char kShardId[] = "shard_id";
constexpr char kCreatedAt[] = "Created at";
constexpr char kMemoryDatasetPrefix[] = "Memory";
constexpr char kMemoryCache[] = "MemoryCache";
constexpr char kCacheCompleted[] = "cache_completed";
constexpr char kIndex[] = "index";
constexpr char kImpl[] = "Impl";
constexpr char kCacheDataset[] = "CacheDataset";
constexpr char kIncompleteCacheErrorMessage[] =
"The calling iterator did not fully read the dataset being cached. In "
"order to avoid unexpected truncation of the dataset, the partially cached "
"contents of the dataset will be discarded. This can happen if you have "
"an input pipeline similar to `dataset.cache().take(k).repeat()`. You "
"should use `dataset.take(k).cache().repeat()` instead.";
}
// Serves random-access reads over a sequential dataset by materializing
// elements into an in-memory cache: Get(i) advances a private iterator
// until element i has been produced, then answers from the cache. Not
// thread-safe; callers are expected to serialize access externally —
// TODO(review): confirm against call sites.
class DatasetRandomAccessCache {
 public:
  explicit DatasetRandomAccessCache(const DatasetBase* dataset)
      : input_(dataset) {}
  // Returns the `index`-th element in `out_tensors`. Lazily creates the
  // iterator resource on first call and extends the cache as needed.
  // Returns OutOfRange if the dataset ends before reaching `index`.
  Status Get(OpKernelContext* ctx, int64 index,
             std::vector<Tensor>* out_tensors) {
    if (!iter_resource_) {
      TF_ASSIGN_OR_RETURN(iter_resource_,
                          GetIteratorResourceFromDataset(ctx, input_));
      TF_RETURN_IF_ERROR(iter_resource_->SetIteratorFromDataset(ctx, input_));
    }
    if (index >= cache_.size()) {
      TF_RETURN_IF_ERROR(ExtendTempCacheToIndex(index, ctx));
    }
    *out_tensors = cache_.at(index);
    return absl::OkStatus();
  }
  // Returns a copy of every element materialized so far.
  std::vector<std::vector<Tensor>> GetCacheData() { return cache_; }

 private:
  // Pulls elements from the iterator until the cache covers `index`.
  Status ExtendTempCacheToIndex(int64 index, OpKernelContext* ctx) {
    bool end_of_sequence;
    while (cache_.size() <= index) {
      std::vector<Tensor> out_tensors;
      TF_RETURN_IF_ERROR(
          iter_resource_->GetNext(ctx, &out_tensors, &end_of_sequence));
      if (end_of_sequence) {
        return tensorflow::errors::OutOfRange("Index out of range [0, ",
                                              cache_.size(), "):", index);
      }
      cache_.push_back(out_tensors);
    }
    return absl::OkStatus();
  }
  // Builds an IteratorResource over `dataset`, cloning the kernel's function
  // library so the resource's lifetime is independent of the calling op.
  absl::StatusOr<core::RefCountPtr<IteratorResource>>
  GetIteratorResourceFromDataset(OpKernelContext* ctx,
                                 const DatasetBase* dataset) {
    FunctionLibraryRuntime* flr;
    std::unique_ptr<DeviceMgr> device_mgr(nullptr);
    std::unique_ptr<FunctionLibraryDefinition> flib_def(nullptr);
    std::unique_ptr<ProcessFunctionLibraryRuntime> plfr(nullptr);
    TF_RETURN_IF_ERROR(
        ctx->function_library()->Clone(&flib_def, &plfr, &flr, true));
    core::RefCountPtr<IteratorResource> iter_resource(new IteratorResource(
        ctx->env(), dataset->output_dtypes(), dataset->output_shapes(),
        std::move(device_mgr), std::move(flib_def), std::move(plfr), flr));
    return iter_resource;
  }
  const DatasetBase* input_;  // Not owned.
  core::RefCountPtr<IteratorResource> iter_resource_;
  std::vector<std::vector<Tensor>> cache_;  // cache_[i] = components of element i.
};
// Memoizes DatasetBase::Get(ctx, index) results so repeated random-access
// reads of the same element are served from memory. An empty tensor vector
// at a position marks "not yet cached"; an element that legitimately has
// zero components would be re-fetched on every call (benign).
class IteratorRandomAccessCache {
 public:
  explicit IteratorRandomAccessCache(const DatasetBase* input)
      : input_(input) {}

  // Returns element `element_position` in `out_tensors`, consulting the
  // cache first and populating it on a miss.
  absl::Status Get(AnyContext ctx, size_t element_position,
                   std::vector<Tensor>* out_tensors) {
    if (element_position < cache_.size() && !cache_[element_position].empty()) {
      *out_tensors = cache_[element_position];
      return absl::OkStatus();
    }
    TF_RETURN_IF_ERROR(input_->Get(ctx, element_position, out_tensors));
    if (element_position >= cache_.size()) {
      cache_.resize(element_position + 1);
    }
    cache_[element_position] = *out_tensors;
    return absl::OkStatus();
  }

 private:
  const DatasetBase* input_ = nullptr;  // Not owned.
  std::vector<std::vector<Tensor>> cache_;
};
class CacheDatasetOp::FileDatasetBase : public DatasetBase {
public:
FileDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
string filename, Env* env)
: DatasetBase(DatasetContext(ctx)),
input_(input),
filename_(std::move(filename)),
env_(env),
num_tensors_(input->output_dtypes().size()),
tensor_index_padding_size_(StringPaddingSize(num_tensors_)),
item_index_padding_size_(StringPaddingSize(kMaxItems)),
tensor_format_string_(strings::Printf(kKeyStrFormat,
item_index_padding_size_,
tensor_index_padding_size_)) {
input_->Ref();
DCHECK_EQ(item_index_padding_size_, 7);
}
~FileDatasetBase() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
name_utils::IteratorPrefixParams params;
params.dataset_prefix = kFileDatasetPrefix;
return std::make_unique<FileIterator>(FileIterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix, params)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
name_utils::DatasetDebugStringParams params;
params.dataset_prefix = kFileDatasetPrefix;
return name_utils::DatasetDebugString(kDatasetType, params);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return input_->Cardinality(options);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
protected:
const DatasetBase* const input_;
const tstring filename_;
private:
static size_t StringPaddingSize(size_t num_tensors) {
return strings::Printf(kPaddingSizeStrFormat, num_tensors - 1).size();
}
string FormatName(size_t item_index, size_t tensor_index) const {
return strings::Printf(tensor_format_string_.c_str(), item_index,
tensor_index);
}
  // Iterator over a file-backed cache. On construction it probes the file
  // system: if the cache's bundle metadata file already exists, the cache is
  // assumed complete and the iterator starts in `read` mode; otherwise it
  // starts in `write` mode. All real work is delegated to a nested
  // FileWriterIterator or FileReaderIterator chosen by `mode_`.
  class FileIterator : public DatasetIterator<FileDatasetBase> {
   public:
    explicit FileIterator(const Params& params)
        : DatasetIterator<FileDatasetBase>(params) {
      if (params.dataset->env_
              ->FileExists(MetaFilename(params.dataset->filename_))
              .ok()) {
        mode_ = Mode::read;
      } else {
        mode_ = Mode::write;
      }
    }
    // Instantiates the mode-appropriate inner iterator under the lock.
    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      return InitializeIterator(ctx);
    }
    // Forwards to the inner reader/writer iterator.
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      return iterator_->GetNext(ctx, out_tensors, end_of_sequence);
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      // One output element per input element (ratio 1).
      return model::MakeKnownRatioNode(std::move(args),
                                       1);
    }
    // Checkpoints the current mode, then the inner iterator's state.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kMode, mode_));
      return SaveInput(ctx, writer, iterator_);
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      {
        int64_t temp;
        TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kMode, &temp));
        mode_ = static_cast<Mode>(temp);
      }
      // If a complete cache appeared on disk after the checkpoint was taken,
      // switch to reading it rather than clobbering it with a new write.
      if (mode_ == Mode::write &&
          dataset()
              ->env_->FileExists(MetaFilename(dataset()->filename_))
              .ok()) {
        LOG(WARNING)
            << "It looks like the cache was already completely written("
            << MetaFilename(dataset()->filename_)
            << ") after the last checkpoint was saved. Attempting to read "
            << "the cache instead of continuing to write. If this is a "
            << "mistake, please remove the above file and try running again.";
        mode_ = Mode::read;
      }
      TF_RETURN_IF_ERROR(InitializeIterator(ctx));
      return RestoreInput(ctx, reader, iterator_);
    }
   private:
    // Writes every upstream element into a TensorBundle shard. A lockfile
    // guards against two concurrent writers on the same cache prefix. Each
    // checkpoint closes the current shard and opens a new one; Finish()
    // merges all shards into the final bundle and removes the lockfiles.
    class FileWriterIterator : public DatasetIterator<FileDatasetBase> {
     public:
      explicit FileWriterIterator(const Params& params)
          : DatasetIterator<FileDatasetBase>(params),
            cur_index_(0),
            shard_id_(0),
            filename_(
                strings::StrCat(params.dataset->filename_, "_", shard_id_)),
            lockfile_(strings::StrCat(filename_, kLockFileSuffix)),
            lockfile_created_(false),
            iteration_completed_(false) {}
      // If the cache never completed, delete all partial files so a later
      // run starts from scratch instead of reading a truncated cache.
      ~FileWriterIterator() override {
        if (!dataset()->env_->FileExists(MetaFilename(filename_)).ok()) {
          LOG(WARNING) << kIncompleteCacheErrorMessage;
          std::vector<string> cache_files;
          Status s = dataset()->env_->GetMatchingPaths(
              strings::StrCat(filename_, "*"), &cache_files);
          if (!s.ok()) {
            LOG(WARNING) << "Failed to get matching files on " << filename_
                         << "* : " << s.ToString();
          }
          for (const string& path : cache_files) {
            s = dataset()->env_->DeleteFile(path);
            if (!s.ok()) {
              LOG(WARNING) << "Failed to delete " << path << " : "
                           << s.ToString();
            }
          }
        }
      }
      Status Initialize(IteratorContext* ctx) override {
        return dataset()->input_->MakeIterator(ctx, this, prefix(),
                                               &input_impl_);
      }
      Status GetNextInternal(IteratorContext* ctx,
                             std::vector<Tensor>* out_tensors,
                             bool* end_of_sequence) override {
        mutex_lock l(mu_);
        *end_of_sequence = false;
        // Lazily acquire the lockfile / open the bundle writer; also reports
        // end-of-sequence if a restored checkpoint had already finished.
        TF_RETURN_IF_ERROR(EnsureLockFileExists(end_of_sequence));
        if (*end_of_sequence) {
          return absl::OkStatus();
        }
        TF_RETURN_IF_ERROR(writer_->status());
        if (cur_index_ >= kMaxItems) {
          // Close out what was written so far before reporting the error.
          Status s = Finish();
          if (!s.ok()) {
            LOG(ERROR) << s;
          }
          return errors::InvalidArgument(
              "Upstream iterator is producing more than ", kMaxItems,
              " items, which is more than the cache limit.");
        }
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
        if (*end_of_sequence && out_tensors->empty()) {
          TF_RETURN_IF_ERROR(Finish());
          cur_index_++;
          return absl::OkStatus();
        }
        if (out_tensors->size() != dataset()->num_tensors_) {
          return errors::Internal(
              "Upstream iterator returned invalid number of tensors. "
              "Expected ",
              dataset()->num_tensors_, " got: ", out_tensors->size());
        }
        // Store each component tensor under its "<item>:<tensor>" key.
        size_t tensor_index = 0;
        for (const Tensor& t : *out_tensors) {
          DCHECK_LT(tensor_index, dataset()->num_tensors_);
          string key = dataset()->FormatName(cur_index_, tensor_index++);
          TF_RETURN_IF_ERROR(writer_->Add(key, t));
        }
        if (*end_of_sequence) {
          TF_RETURN_IF_ERROR(Finish());
        }
        cur_index_++;
        return absl::OkStatus();
      }
     protected:
      std::shared_ptr<model::Node> CreateNode(
          IteratorContext* ctx, model::Node::Args args) const override {
        return model::MakeKnownRatioNode(std::move(args),
                                         1);
      }
      Status SaveInternal(SerializationContext* ctx,
                          IteratorStateWriter* writer) override {
        mutex_lock l(mu_);
        TF_RETURN_IF_ERROR(
            writer->WriteScalar(prefix(), kCurIndex, cur_index_));
        if (iteration_completed_) {
          TF_RETURN_IF_ERROR(
              writer->WriteScalar(prefix(), kIterationCompleted, ""));
          return absl::OkStatus();
        }
        // Finalize the current shard and roll over to a fresh one so state
        // written after this checkpoint lands in a new file.
        if (lockfile_created_) {
          TF_RETURN_IF_ERROR(writer_->Finish());
          shard_id_++;
          filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_);
          lockfile_ = strings::StrCat(filename_, kLockFileSuffix);
          lockfile_created_ = false;
        }
        TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
        TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kShardId, shard_id_));
        return absl::OkStatus();
      }
      Status RestoreInternal(IteratorContext* ctx,
                             IteratorStateReader* reader) override {
        mutex_lock l(mu_);
        int64_t temp;
        {
          TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIndex, &temp));
          cur_index_ = static_cast<size_t>(temp);
          // Guard against narrowing on platforms where size_t < int64.
          if (cur_index_ != temp) {
            return errors::Internal("Invalid value for cur_index ", temp);
          }
        }
        if (reader->Contains(prefix(), kIterationCompleted)) {
          iteration_completed_ = true;
          return absl::OkStatus();
        }
        TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
        {
          TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kShardId, &temp));
          shard_id_ = static_cast<size_t>(temp);
          if (shard_id_ != temp) {
            return errors::Internal("Invalid value for shard_id ", temp);
          }
        }
        filename_ = strings::StrCat(dataset()->filename_, "_", shard_id_);
        lockfile_ = strings::StrCat(filename_, kLockFileSuffix);
        writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_);
        return absl::OkStatus();
      }
     private:
      // Creates the lockfile and bundle writer for the current shard, unless
      // iteration already completed (then reports end-of-sequence) or another
      // writer appears to hold the lock (then errors out).
      Status EnsureLockFileExists(bool* end_of_sequence)
          TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
        if (iteration_completed_) {
          *end_of_sequence = true;
          return absl::OkStatus();
        }
        if (lockfile_created_) {
          return absl::OkStatus();
        }
        if (dataset()->env_->FileExists(MetaFilename(filename_)).ok()) {
          return errors::AlreadyExists("Existing cache files found: \n",
                                       MetaFilename(filename_), "\n",
                                       DataFilename(filename_, 0, 1), "\n",
                                       "To continue delete the above files.");
        }
        if (dataset()->env_->FileExists(lockfile_).ok()) {
          // Best-effort read of the lockfile contents for the error message.
          char contents_scratch[151] = {0};
          StringPiece contents;
          std::unique_ptr<RandomAccessFile> file;
          if (dataset()->env_->NewRandomAccessFile(lockfile_, &file).ok()) {
            file->Read(0, 150, &contents, contents_scratch).IgnoreError();
          }
          return errors::AlreadyExists(
              "There appears to be a concurrent caching iterator running - "
              "cache lockfile already exists ('",
              lockfile_,
              "'). If you are sure no other running TF computations are "
              "using this cache prefix, delete the lockfile and "
              "re-initialize the iterator. Lockfile contents: ",
              contents);
        }
        std::unique_ptr<WritableFile> lockfile;
        TF_RETURN_IF_ERROR(
            dataset()->env_->NewWritableFile(lockfile_, &lockfile));
        TF_RETURN_IF_ERROR(lockfile->Append(
            strings::StrCat(kCreatedAt, ": ", EnvTime::NowSeconds())));
        writer_ = std::make_unique<BundleWriter>(dataset()->env_, filename_);
        lockfile_created_ = true;
        return absl::OkStatus();
      }
      // Finalizes the cache: flushes the current shard, merges all shards
      // into the final bundle at `filename_`, then deletes the lockfiles.
      Status Finish() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
        iteration_completed_ = true;
        TF_RETURN_IF_ERROR(writer_->Finish());
        {
          std::vector<tstring> prefixes;
          prefixes.reserve(shard_id_ + 1);
          for (size_t i = 0; i <= shard_id_; ++i) {
            prefixes.emplace_back(
                strings::StrCat(dataset()->filename_, "_", i));
          }
          TF_RETURN_IF_ERROR(
              MergeBundles(dataset()->env_, prefixes, dataset()->filename_));
        }
        for (size_t i = 0; i <= shard_id_; ++i) {
          TF_RETURN_IF_ERROR(dataset()->env_->DeleteFile(
              strings::StrCat(dataset()->filename_, "_", i, kLockFileSuffix)));
        }
        return absl::OkStatus();
      }
      mutex mu_;
      size_t cur_index_ TF_GUARDED_BY(mu_);
      // Index of the shard (bundle file) currently being written.
      size_t shard_id_ TF_GUARDED_BY(mu_);
      std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
      // Prefix of the current shard's bundle files.
      string filename_;
      std::unique_ptr<BundleWriter> writer_ TF_GUARDED_BY(mu_);
      string lockfile_ TF_GUARDED_BY(mu_);
      bool lockfile_created_ TF_GUARDED_BY(mu_);
      bool iteration_completed_ TF_GUARDED_BY(mu_);
    };
    // Reads previously cached elements back out of the merged bundle, in
    // key order, reconstructing num_tensors_ components per element.
    class FileReaderIterator : public DatasetIterator<FileDatasetBase> {
     public:
      explicit FileReaderIterator(const Params& params)
          : DatasetIterator<FileDatasetBase>(params),
            cur_index_(0),
            reader_(dataset()->env_, dataset()->filename_),
            iterator_restored_(false) {}
      Status GetNextInternal(IteratorContext* ctx,
                             std::vector<Tensor>* out_tensors,
                             bool* end_of_sequence) override {
        mutex_lock l(mu_);
        *end_of_sequence = false;
        TF_RETURN_IF_ERROR(reader_.status());
        if (!reader_.Valid()) {
          *end_of_sequence = true;
          return absl::OkStatus();
        }
        out_tensors->clear();
        out_tensors->resize(dataset()->num_tensors_);
        for (size_t i = 0; i < dataset()->num_tensors_; ++i) {
          // After a restore, Seek() already positioned the reader on the
          // right key, so skip the first Next().
          if (!iterator_restored_) {
            reader_.Next();
          } else {
            iterator_restored_ = false;
          }
          if (!reader_.Valid()) {
            out_tensors->clear();
            *end_of_sequence = true;
            return absl::OkStatus();
          }
          StringPiece key = reader_.key();
          DCHECK_EQ(key, dataset()->FormatName(cur_index_, i));
          TF_RETURN_IF_ERROR(reader_.ReadCurrent(&(*out_tensors)[i]));
          TF_RETURN_IF_ERROR(reader_.status());
        }
        cur_index_++;
        return absl::OkStatus();
      }
     protected:
      std::shared_ptr<model::Node> CreateNode(
          IteratorContext* ctx, model::Node::Args args) const override {
        return model::MakeKnownRatioNode(std::move(args),
                                         1);
      }
      Status SaveInternal(SerializationContext* ctx,
                          IteratorStateWriter* writer) override {
        mutex_lock l(mu_);
        TF_RETURN_IF_ERROR(
            writer->WriteScalar(prefix(), kCurIndex, cur_index_));
        return absl::OkStatus();
      }
      Status RestoreInternal(
          IteratorContext* ctx,
          IteratorStateReader* iterator_state_reader) override {
        mutex_lock l(mu_);
        {
          int64_t temp;
          TF_RETURN_IF_ERROR(
              iterator_state_reader->ReadScalar(prefix(), kCurIndex, &temp));
          cur_index_ = static_cast<size_t>(temp);
          if (cur_index_ != temp) {
            return errors::Internal("Invalid value for cur_index ", temp);
          }
        }
        if (!reader_.Valid()) {
          return errors::Internal("Error initializing BundleReader.");
        }
        // Re-position the bundle reader at the first tensor of cur_index_.
        reader_.Seek(dataset()->FormatName(cur_index_, 0));
        iterator_restored_ = true;
        return absl::OkStatus();
      }
     private:
      mutex mu_;
      size_t cur_index_ TF_GUARDED_BY(mu_);
      BundleReader reader_ TF_GUARDED_BY(mu_);
      bool iterator_restored_ TF_GUARDED_BY(mu_);
    };
    // Builds the inner iterator matching mode_ and initializes it.
    Status InitializeIterator(IteratorContext* ctx)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      switch (mode_) {
        case Mode::read:
          iterator_ =
              std::make_unique<FileReaderIterator>(FileReaderIterator::Params{
                  dataset(), strings::StrCat(prefix(), kImpl)});
          break;
        case Mode::write:
          iterator_ =
              std::make_unique<FileWriterIterator>(FileWriterIterator::Params{
                  dataset(), strings::StrCat(prefix(), kImpl)});
      }
      TF_RETURN_IF_ERROR(iterator_->InitializeBase(ctx, this));
      return iterator_->Initialize(ctx);
    }
    mutex mu_;
    enum Mode { read, write };
    Mode mode_ TF_GUARDED_BY(mu_);
    std::unique_ptr<IteratorBase> iterator_ TF_GUARDED_BY(mu_);
  };
  Env* const env_;  // Not owned; used for all file-system access.
  const size_t num_tensors_;  // Components per dataset element.
  const size_t tensor_index_padding_size_;
  // Hard cap on the number of elements a file cache may hold.
  static constexpr size_t kMaxItems = 10000000;
  const size_t item_index_padding_size_;
  // printf-style format used to build "<item>:<tensor>" bundle keys.
  const string tensor_format_string_;
};
// V1 file-backed cache dataset. Serializes as (input_dataset, filename).
class CacheDatasetOp::FileDataset : public CacheDatasetOp::FileDatasetBase {
 public:
  using FileDatasetBase::FileDatasetBase;

 protected:
  // Emits this dataset's node into the serialized graph.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_node = nullptr;
    Node* filename_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
    TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename_node));
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_node, filename_node}, output));
    return absl::OkStatus();
  }
};
// V2 file-backed cache dataset. Identical to FileDataset except that it also
// carries (and re-serializes) the cache resource handle tensor.
class CacheDatasetOp::FileDatasetV2 : public CacheDatasetOp::FileDatasetBase {
 public:
  explicit FileDatasetV2(OpKernelContext* ctx, const DatasetBase* input,
                         string filename, Env* env,
                         const Tensor& resource_handle)
      : FileDatasetBase(ctx, input, filename, env),
        resource_handle_(resource_handle) {}

 protected:
  // Serializes as (input_dataset, filename, resource_handle).
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
    Node* filename_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(filename_, &filename_graph_node));
    Node* handle_graph_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddTensor(resource_handle_, &handle_graph_node));
    TF_RETURN_IF_ERROR(b->AddDataset(
        this, {input_graph_node, filename_graph_node, handle_graph_node},
        output));
    return absl::OkStatus();
  }

 private:
  const Tensor resource_handle_;
};
// In-memory cache dataset. The first full pass over the input populates a
// shared MemoryCache; subsequent iterators are served from that cache.
class CacheDatasetOp::MemoryDatasetBase : public DatasetBase {
 public:
  explicit MemoryDatasetBase(OpKernelContext* ctx, const DatasetBase* input,
                             std::shared_ptr<MemoryCache> cache)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        cache_(std::move(cache)) {
    input_->Ref();
    random_indexing_compatible_ = input_->RandomIndexingCompatible();
  }
  ~MemoryDatasetBase() override { input_->Unref(); }
  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    name_utils::IteratorPrefixParams params;
    params.dataset_prefix = kMemoryDatasetPrefix;
    return std::make_unique<MemoryIterator>(
        MemoryIterator::Params{
            this, name_utils::IteratorPrefix(kDatasetType, prefix, params)},
        cache_.get());
  }
  // Output signature is pass-through from the input dataset.
  const DataTypeVector& output_dtypes() const override {
    return input_->output_dtypes();
  }
  const std::vector<PartialTensorShape>& output_shapes() const override {
    return input_->output_shapes();
  }
  string DebugString() const override {
    name_utils::DatasetDebugStringParams params;
    params.dataset_prefix = kMemoryDatasetPrefix;
    return name_utils::DatasetDebugString(kDatasetType, params);
  }
  int64_t CardinalityInternal(CardinalityOptions options) const override {
    return input_->Cardinality(options);
  };
  // Random-access lookup of element `index`, backed by a lazily created
  // DatasetRandomAccessCache over the input.
  Status Get(OpKernelContext* ctx, int64 index,
             std::vector<Tensor>* out_tensors) const override {
    mutex_lock l(mu_);
    CardinalityOptions options;
    options.set_compute_level(CardinalityOptions::CARDINALITY_COMPUTE_LOW);
    int64_t cardinality = Cardinality(options);
    if (cardinality != kUnknownCardinality &&
        cardinality != kInfiniteCardinality && index >= cardinality) {
      return errors::OutOfRange("Index out of range [0, ", cardinality,
                                "):", index);
    }
    if (!dataset_random_access_cache_) {
      dataset_random_access_cache_ =
          std::make_unique<DatasetRandomAccessCache>(input_);
    }
    return dataset_random_access_cache_->Get(ctx, index, out_tensors);
  }
  // AnyContext variant used by global shuffling; no bounds check here.
  Status Get(AnyContext ctx, int64 index,
             std::vector<Tensor>* out_tensors) const override {
    mutex_lock l(mu_);
    if (!iterator_random_access_cache_) {
      iterator_random_access_cache_ =
          std::make_unique<IteratorRandomAccessCache>(input_);
    }
    return iterator_random_access_cache_->Get(ctx, index, out_tensors);
  }
  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }
  Status CheckExternalState() const override {
    return input_->CheckExternalState();
  }
  absl::Status RandomIndexingCompatible() const override {
    return random_indexing_compatible_;
  }

 protected:
  // Dispatching iterator: delegates to a MemoryWriterIterator until the
  // cache is complete, then to a MemoryReaderIterator.
  class MemoryIterator : public DatasetIterator<MemoryDatasetBase> {
   public:
    explicit MemoryIterator(const Params& params, MemoryCache* cache)
        : DatasetIterator<MemoryDatasetBase>(params),
          cache_(cache),
          global_shuffle_iterator_(dataset()) {}
    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      return InitializeIterator(ctx);
    }
    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      // Under global shuffling, elements are fetched by index instead.
      if (ctx->index_mapper() != nullptr) {
        return global_shuffle_iterator_.GetNext(ctx, out_tensors,
                                                end_of_sequence);
      }
      mutex_lock l(mu_);
      return iterator_->GetNext(ctx, out_tensors, end_of_sequence);
    }
   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeKnownRatioNode(std::move(args),
                                       1);
    }
    // If the cache completed, checkpoint its full contents so a restore
    // can skip re-reading the input.
    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      mutex_lock l(mu_);
      if (cache_->IsCompleted()) {
        TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCacheCompleted, ""));
        TF_RETURN_IF_ERROR(
            WriteElementsToCheckpoint(writer, prefix(), cache_->data()));
      }
      TF_RETURN_IF_ERROR(global_shuffle_iterator_.Save(prefix(), ctx, writer));
      return SaveInput(ctx, writer, iterator_);
    }
    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      if (ctx->restored_element_count().has_value()) {
        return global_shuffle_iterator_.Restore(prefix(), ctx, reader);
      }
      mutex_lock l(mu_);
      // Reset then repopulate the cache from the checkpoint if it had
      // completed; otherwise writing resumes from the restored input state.
      iterator_.reset();
      cache_->Reset();
      if (reader->Contains(prefix(), kCacheCompleted)) {
        std::vector<std::vector<Tensor>> temp_cache;
        TF_RETURN_IF_ERROR(
            ReadElementsFromCheckpoint(ctx, reader, prefix(), &temp_cache));
        cache_->Complete(std::move(temp_cache));
      }
      TF_RETURN_IF_ERROR(InitializeIterator(ctx));
      return RestoreInput(ctx, reader, iterator_);
    }
   private:
    // Fills temp_cache_ while passing elements through; promotes it to the
    // shared cache once the input is exhausted (or cardinality is reached).
    class MemoryWriterIterator : public DatasetIterator<MemoryDatasetBase> {
     public:
      explicit MemoryWriterIterator(const Params& params, MemoryCache* cache)
          : DatasetIterator<MemoryDatasetBase>(params), cache_(cache) {}
      ~MemoryWriterIterator() override {
        mutex_lock l(mu_);
        // A partially filled cache is unusable; discard it.
        if (!temp_cache_.empty() && !cache_->IsCompleted()) {
          LOG(WARNING) << kIncompleteCacheErrorMessage;
          cache_->Reset();
        }
      }
      Status Initialize(IteratorContext* ctx) override {
        return dataset()->input_->MakeIterator(ctx, this, prefix(),
                                               &input_impl_);
      }
      Status GetNextInternal(IteratorContext* ctx,
                             std::vector<Tensor>* out_tensors,
                             bool* end_of_sequence) override {
        mutex_lock l(mu_);
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
        if (*end_of_sequence) {
          if (!cache_->IsCompleted()) {
            VLOG(2) << "Finalizing the cache because EOF has been reached.";
            cache_->Complete(std::move(temp_cache_));
          }
          return absl::OkStatus();
        }
        RecordBufferEnqueue(ctx, *out_tensors);
        temp_cache_.emplace_back(*out_tensors);
        // Finalize early once we know we've seen every element.
        if (temp_cache_.size() == dataset()->input_->Cardinality()) {
          VLOG(2) << "Finalizing the cache because its size matches the "
                     "expected input cardinality.";
          cache_->Complete(std::move(temp_cache_));
        }
        return absl::OkStatus();
      }
     protected:
      std::shared_ptr<model::Node> CreateNode(
          IteratorContext* ctx, model::Node::Args args) const override {
        return model::MakeKnownRatioNode(std::move(args),
                                         1);
      }
      Status SaveInternal(SerializationContext* ctx,
                          IteratorStateWriter* writer) override {
        mutex_lock l(mu_);
        // Only the in-progress buffer needs saving; a completed cache is
        // checkpointed by the enclosing MemoryIterator.
        if (!cache_->IsCompleted()) {
          TF_RETURN_IF_ERROR(
              WriteElementsToCheckpoint(writer, prefix(), temp_cache_));
        }
        return SaveInput(ctx, writer, input_impl_);
      }
      Status RestoreInternal(IteratorContext* ctx,
                             IteratorStateReader* reader) override {
        mutex_lock l(mu_);
        if (!reader->Contains(prefix(), kCacheCompleted)) {
          TF_RETURN_IF_ERROR(
              ReadElementsFromCheckpoint(ctx, reader, prefix(), &temp_cache_));
        }
        return RestoreInput(ctx, reader, input_impl_);
      }
     private:
      mutex mu_;
      std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
      MemoryCache* const cache_ TF_GUARDED_BY(mu_);
      std::vector<std::vector<Tensor>> temp_cache_ TF_GUARDED_BY(mu_);
    };
    // Replays elements out of a completed in-memory cache.
    class MemoryReaderIterator : public DatasetIterator<MemoryDatasetBase> {
     public:
      explicit MemoryReaderIterator(const Params& params, MemoryCache* cache)
          : DatasetIterator<MemoryDatasetBase>(params),
            cache_(cache),
            index_(0) {}
      Status Initialize(IteratorContext* ctx) override {
        tf_shared_lock l(mu_);
        // Account for cached elements in the performance model.
        for (size_t i = 0; i < cache_->size(); ++i) {
          RecordBufferEnqueue(ctx, cache_->at(i));
        }
        return absl::OkStatus();
      }
      Status GetNextInternal(IteratorContext* ctx,
                             std::vector<Tensor>* out_tensors,
                             bool* end_of_sequence) override {
        mutex_lock l(mu_);
        if (index_ < cache_->size()) {
          const std::vector<Tensor>& cache_tensors = cache_->at(index_);
          out_tensors->insert(out_tensors->begin(), cache_tensors.begin(),
                              cache_tensors.end());
          index_++;
          *end_of_sequence = false;
          return absl::OkStatus();
        } else {
          *end_of_sequence = true;
          return absl::OkStatus();
        }
      }
     protected:
      std::shared_ptr<model::Node> CreateNode(
          IteratorContext* ctx, model::Node::Args args) const override {
        return model::MakeKnownRatioNode(std::move(args),
                                         1);
      }
      Status SaveInternal(SerializationContext* ctx,
                          IteratorStateWriter* writer) override {
        mutex_lock l(mu_);
        TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kIndex, index_));
        return absl::OkStatus();
      }
      Status RestoreInternal(IteratorContext* ctx,
                             IteratorStateReader* reader) override {
        mutex_lock l(mu_);
        {
          // Default to "fully consumed" if the index key is absent.
          int64_t temp = cache_->size();
          if (reader->Contains(prefix(), kIndex)) {
            TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kIndex, &temp));
          }
          index_ = static_cast<size_t>(temp);
        }
        return absl::OkStatus();
      }
     private:
      mutex mu_;
      MemoryCache* const cache_ TF_GUARDED_BY(mu_);
      size_t index_ TF_GUARDED_BY(mu_);
    };
    // Picks reader vs. writer based on cache completion and initializes it.
    Status InitializeIterator(IteratorContext* ctx)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (cache_->IsCompleted()) {
        iterator_ = std::make_unique<MemoryReaderIterator>(
            MemoryReaderIterator::Params{dataset(),
                                         strings::StrCat(prefix(), kImpl)},
            cache_);
      } else {
        iterator_ = std::make_unique<MemoryWriterIterator>(
            MemoryWriterIterator::Params{dataset(),
                                         strings::StrCat(prefix(), kImpl)},
            cache_);
      }
      TF_RETURN_IF_ERROR(iterator_->InitializeBase(ctx, this));
      return iterator_->Initialize(ctx);
    }
    mutex mu_;
    MemoryCache* cache_ TF_GUARDED_BY(mu_);  // Not owned.
    std::unique_ptr<IteratorBase> iterator_ TF_GUARDED_BY(mu_);
    GlobalShuffleIterator global_shuffle_iterator_;
  };
  mutable mutex mu_;
  const DatasetBase* const input_;  // Ref-counted; released in dtor.
  const std::shared_ptr<MemoryCache> cache_;
  mutable std::unique_ptr<DatasetRandomAccessCache> dataset_random_access_cache_
      TF_GUARDED_BY(mu_);
  mutable std::unique_ptr<IteratorRandomAccessCache>
      iterator_random_access_cache_;
  absl::Status random_indexing_compatible_ = absl::OkStatus();
};
// V1 in-memory cache dataset. Always owns its MemoryCacheManager resource
// and deletes it from the resource manager on destruction.
class CacheDatasetOp::MemoryDataset : public CacheDatasetOp::MemoryDatasetBase {
 public:
  MemoryDataset(OpKernelContext* ctx, const DatasetBase* input,
                MemoryCacheManager* manager, ResourceHandle&& resource_handle)
      : MemoryDatasetBase(ctx, input, manager->get()),
        manager_(manager),
        resource_handle_(std::move(resource_handle)),
        resource_mgr_(ctx->resource_manager()) {}
  ~MemoryDataset() override {
    // Drop our ref first, then remove the resource from the manager;
    // deletion failure is non-fatal (e.g. already removed).
    manager_->Unref();
    Status s = resource_mgr_->Delete<MemoryCacheManager>(
        resource_handle_.container(), resource_handle_.name());
    if (!s.ok()) {
      LOG(WARNING) << "Failed to delete cache resource: " << s.ToString();
    }
  }

 protected:
  // Serializes with an empty filename, which signals memory caching.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
    Node* filename_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(tstring(""), &filename_node));
    TF_RETURN_IF_ERROR(
        b->AddDataset(this, {input_node, filename_node}, output));
    return absl::OkStatus();
  }

 private:
  MemoryCacheManager* const manager_;  // Ref owned by this dataset.
  const ResourceHandle resource_handle_;
  ResourceMgr* const resource_mgr_;  // Not owned.
};
// V2 in-memory cache dataset. The cache resource may be caller-provided, in
// which case (owns_resource == false) it is not deleted on destruction.
class CacheDatasetOp::MemoryDatasetV2
    : public CacheDatasetOp::MemoryDatasetBase {
 public:
  MemoryDatasetV2(OpKernelContext* ctx, const DatasetBase* input,
                  MemoryCacheManager* manager, ResourceHandle&& resource_handle,
                  bool owns_resource)
      : MemoryDatasetBase(ctx, input, manager->get()),
        manager_(manager),
        owns_resource_(owns_resource),
        resource_handle_(std::move(resource_handle)),
        resource_mgr_(ctx->resource_manager()) {}
  ~MemoryDatasetV2() override {
    manager_->Unref();
    if (owns_resource_) {
      Status s = resource_mgr_->Delete<MemoryCacheManager>(
          resource_handle_.container(), resource_handle_.name());
      if (!s.ok()) {
        LOG(WARNING) << "Failed to delete cache resource: " << s.ToString();
      }
    }
  }

 protected:
  // Serializes as (input, "" filename, resource_handle tensor).
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
    Node* filename_node = nullptr;
    TF_RETURN_IF_ERROR(b->AddScalar(tstring(""), &filename_node));
    Node* resource_handle_node = nullptr;
    Tensor handle(DT_RESOURCE, TensorShape({}));
    handle.scalar<ResourceHandle>()() = resource_handle_;
    TF_RETURN_IF_ERROR(b->AddTensor(handle, &resource_handle_node));
    TF_RETURN_IF_ERROR(b->AddDataset(
        this, {input_node, filename_node, resource_handle_node}, output));
    return absl::OkStatus();
  }

 private:
  MemoryCacheManager* const manager_;  // Ref owned by this dataset.
  const bool owns_resource_;
  const ResourceHandle resource_handle_;
  ResourceMgr* const resource_mgr_;  // Not owned.
};
// op_version_ distinguishes "CacheDataset" (v1) from "CacheDatasetV2",
// which takes an extra cache-resource input.
CacheDatasetOp::CacheDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx),
      op_version_(ctx->def().op() == kCacheDataset ? 1 : 2) {}
void CacheDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
tstring filename;
OP_REQUIRES_OK(ctx, ParseScalarArgument<tstring>(ctx, kFileName, &filename));
if (filename.empty()) {
static std::atomic<int64_t> resource_id_counter(0);
const string& container = ctx->resource_manager()->default_container();
auto name = strings::StrCat(ctx->op_kernel().name(), "/", kMemoryCache, "_",
resource_id_counter.fetch_add(1));
if (op_version_ == 2) {
bool owns_resource = false;
MemoryCacheManager* manager = nullptr;
auto handle = HandleFromInput(ctx, 2);
Status s = ctx->resource_manager()->Lookup<MemoryCacheManager>(
handle.container(), handle.name(), &manager);
if (errors::IsNotFound(s)) {
owns_resource = true;
OP_REQUIRES_OK(
ctx,
ctx->resource_manager()->LookupOrCreate<MemoryCacheManager>(
container, name, &manager, [](MemoryCacheManager** manager) {
*manager = new MemoryCacheManager();
return absl::OkStatus();
}));
handle = MakeResourceHandle<MemoryCacheManager>(ctx, container, name);
} else {
OP_REQUIRES_OK(ctx, s);
}
*output = new MemoryDatasetV2(ctx, input, manager, std::move(handle),
owns_resource);
} else {
MemoryCacheManager* manager;
OP_REQUIRES_OK(
ctx, ctx->resource_manager()->LookupOrCreate<MemoryCacheManager>(
container, name, &manager, [](MemoryCacheManager** manager) {
*manager = new MemoryCacheManager();
return absl::OkStatus();
}));
auto handle =
MakeResourceHandle<MemoryCacheManager>(ctx, container, name);
*output = new MemoryDataset(ctx, input, manager, std::move(handle));
}
} else {
if (op_version_ == 2) {
*output =
new FileDatasetV2(ctx, input, filename, ctx->env(), ctx->input(2));
} else {
*output = new FileDataset(ctx, input, filename, ctx->env());
}
}
}
namespace {
// Both op versions are served by the same kernel; the constructor inspects
// the op name to pick v1 vs. v2 behavior.
REGISTER_KERNEL_BUILDER(Name("CacheDataset").Device(DEVICE_CPU),
                        CacheDatasetOp);
REGISTER_KERNEL_BUILDER(Name("CacheDatasetV2").Device(DEVICE_CPU),
                        CacheDatasetOp);
}
}
}
#include "tensorflow/core/kernels/data/cache_dataset_ops.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
#include "tensorflow/core/platform/path.h"
namespace tensorflow {
namespace data {
namespace {
// Node name used for the dataset under test.
constexpr char kNodeName[] = "cache_dataset";
// Iterator-prefix markers for the file-backed and in-memory variants.
constexpr char kFileDatasetPrefix[] = "File";
constexpr char kMemoryDatasetPrefix[] = "Memory";
// Test parameters for CacheDataset: an input dataset plus the cache
// filename ("" selects the in-memory cache).
class CacheDatasetParams : public DatasetParams {
 public:
  template <typename T>
  CacheDatasetParams(T input_dataset_params, string filename,
                     DataTypeVector output_dtypes,
                     std::vector<PartialTensorShape> output_shapes,
                     string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        filename_(filename) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }
  // The op's only non-dataset input: the scalar filename tensor.
  std::vector<Tensor> GetInputTensors() const override {
    Tensor filename_tensor =
        CreateTensor<tstring>(TensorShape({}), {filename_});
    return {filename_tensor};
  }
  Status GetInputNames(std::vector<string>* input_names) const override {
    *input_names = {CacheDatasetOp::kInputDataset, CacheDatasetOp::kFileName};
    return absl::OkStatus();
  }
  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{"output_types", output_dtypes_},
                    {"output_shapes", output_shapes_},
                    {"metadata", ""}};
    return absl::OkStatus();
  }
  string dataset_type() const override { return CacheDatasetOp::kDatasetType; }
  string filename() const { return filename_; }

 private:
  string filename_;
};
// Test fixture that remembers the cache filename so the destructor can
// clean up any cache files a test left on disk.
class CacheDatasetOpTest : public DatasetOpsTestBase {
 public:
  Status Initialize(const DatasetParams& dataset_params) {
    TF_RETURN_IF_ERROR(DatasetOpsTestBase::Initialize(dataset_params));
    auto params = static_cast<const CacheDatasetParams&>(dataset_params);
    cache_filename_ = params.filename();
    return absl::OkStatus();
  }
  ~CacheDatasetOpTest() override {
    // Best-effort removal of "<cache_filename_>*"; failures only warn.
    if (!cache_filename_.empty()) {
      std::vector<string> cache_files;
      Status s = device_->env()->GetMatchingPaths(
          strings::StrCat(cache_filename_, "*"), &cache_files);
      if (!s.ok()) {
        LOG(WARNING) << "Failed to get matching files on " << cache_filename_
                     << "* : " << s.ToString();
      }
      for (const string& path : cache_files) {
        s = device_->env()->DeleteFile(path);
        if (!s.ok()) {
          LOG(WARNING) << "Failed to delete " << path << " : " << s.ToString();
        }
      }
    }
  }

 protected:
  tstring cache_filename_;
};
// File-backed cache over a 3x3x1 tensor sliced into three 3x1 elements.
CacheDatasetParams CacheDatasetParams1() {
  auto source_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice");
  const string cache_path = io::JoinPath(testing::TmpDir(), "cache_data");
  return CacheDatasetParams(std::move(source_params), cache_path, {DT_INT64},
                            {PartialTensorShape({3, 1})}, kNodeName);
}
// File-backed cache over an empty input dataset.
CacheDatasetParams CacheDatasetParams2() {
  auto source_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{0}, {})},
      "tensor_slice");
  const string cache_path = io::JoinPath(testing::TmpDir(), "cache_data");
  return CacheDatasetParams(std::move(source_params), cache_path, {DT_INT64},
                            {PartialTensorShape({})}, kNodeName);
}
// In-memory cache (empty filename) over three 3x1 slices.
CacheDatasetParams CacheDatasetParams3() {
  auto source_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice");
  return CacheDatasetParams(std::move(source_params),
                            "",
                            {DT_INT64}, {PartialTensorShape({3, 1})},
                            kNodeName);
}
// In-memory cache (empty filename) over an empty input dataset.
CacheDatasetParams CacheDatasetParams4() {
  auto source_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{0}, {})},
      "tensor_slice");
  return CacheDatasetParams(std::move(source_params),
                            "",
                            {DT_INT64}, {PartialTensorShape({})},
                            kNodeName);
}
// Expected element sequences for each parameter set; the empty-input
// datasets (2 and 4) yield no elements.
std::vector<GetNextTestCase<CacheDatasetParams>> GetNextTestCases() {
  auto three_slices = CreateTensors<int64_t>(
      TensorShape({3, 1}), {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}});
  std::vector<GetNextTestCase<CacheDatasetParams>> cases;
  cases.push_back({CacheDatasetParams1(), three_slices});
  cases.push_back({CacheDatasetParams2(), {}});
  cases.push_back({CacheDatasetParams3(), three_slices});
  cases.push_back({CacheDatasetParams4(), {}});
  return cases;
}
// Parameterized over GetNextTestCases(); exercises two full passes so the
// second one is served from the (now-populated) cache.
class ParameterizedGetNextTest : public CacheDatasetOpTest,
                                 public ::testing::WithParamInterface<
                                     GetNextTestCase<CacheDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
  auto test_case = GetParam();
  TF_ASSERT_OK(Initialize(test_case.dataset_params));
  // First pass: drains the input and fills the cache.
  bool end_of_sequence = false;
  std::vector<Tensor> out_tensors;
  while (!end_of_sequence) {
    std::vector<Tensor> next;
    TF_EXPECT_OK(
        iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
    out_tensors.insert(out_tensors.end(), next.begin(), next.end());
  }
  TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           true));
  // Second pass with a fresh iterator: must produce identical output,
  // this time read back from the cache.
  TF_ASSERT_OK(dataset_->MakeIterator(
      iterator_ctx_.get(), nullptr,
      test_case.dataset_params.iterator_prefix(), &iterator_));
  end_of_sequence = false;
  out_tensors.clear();
  while (!end_of_sequence) {
    std::vector<Tensor> next;
    TF_EXPECT_OK(
        iterator_->GetNext(iterator_ctx_.get(), &next, &end_of_sequence));
    out_tensors.insert(out_tensors.end(), next.begin(), next.end());
  }
  TF_EXPECT_OK(ExpectEqual(out_tensors, test_case.expected_outputs,
                           true));
}
INSTANTIATE_TEST_SUITE_P(CacheDatasetOpTest, ParameterizedGetNextTest,
                         ::testing::ValuesIn(GetNextTestCases()));
// Basic dataset-property checks against the file-backed params.
TEST_F(CacheDatasetOpTest, DatasetNodeName) {
  auto dataset_params = CacheDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(CacheDatasetOpTest, DatasetTypeString) {
  auto dataset_params = CacheDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(
      CheckDatasetTypeString(name_utils::OpName(CacheDatasetOp::kDatasetType)));
}
TEST_F(CacheDatasetOpTest, DatasetOutputDtypes) {
  auto dataset_params = CacheDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
// Expected output shapes per parameter set.
std::vector<DatasetOutputShapesTestCase<CacheDatasetParams>>
DatasetOutputShapesTestCases() {
  return {{CacheDatasetParams1(),
           {PartialTensorShape({3, 1})}},
          {CacheDatasetParams2(),
           {PartialTensorShape({})}},
          {CacheDatasetParams3(),
           {PartialTensorShape({3, 1})}},
          {CacheDatasetParams4(),
           {PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
                             DatasetOutputShapesTestCases())
// Expected cardinalities: 3 elements for the sliced inputs, 0 for empty.
std::vector<CardinalityTestCase<CacheDatasetParams>> CardinalityTestCases() {
  return {{CacheDatasetParams1(),
           3},
          {CacheDatasetParams2(),
           0},
          {CacheDatasetParams3(),
           3},
          {CacheDatasetParams4(),
           0}};
}
DATASET_CARDINALITY_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
                           CardinalityTestCases())
TEST_F(CacheDatasetOpTest, IteratorOutputDtypes) {
  auto dataset_params = CacheDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
// Expected iterator output shapes per parameter set.
std::vector<IteratorOutputShapesTestCase<CacheDatasetParams>>
IteratorOutputShapesTestCases() {
  return {{CacheDatasetParams1(),
           {PartialTensorShape({3, 1})}},
          {CacheDatasetParams2(),
           {PartialTensorShape({})}},
          {CacheDatasetParams3(),
           {PartialTensorShape({3, 1})}},
          {CacheDatasetParams4(),
           {PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(CacheDatasetOpTest, CacheDatasetParams,
                              IteratorOutputShapesTestCases())
// The iterator prefix depends on which cache variant is active.
TEST_F(CacheDatasetOpTest, IteratorPrefix) {
  auto dataset_params = CacheDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  name_utils::IteratorPrefixParams iterator_prefix_params;
  iterator_prefix_params.dataset_prefix =
      cache_filename_.empty() ? kMemoryDatasetPrefix : kFileDatasetPrefix;
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      CacheDatasetOp::kDatasetType, dataset_params.iterator_prefix(),
      iterator_prefix_params)));
}
std::vector<IteratorSaveAndRestoreTestCase<CacheDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{CacheDatasetParams1(),
{0, 2, 4, 11},
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams2(),
{0, 2, 4, 11},
{}},
{CacheDatasetParams3(),
{0, 2, 4, 11},
CreateTensors<int64_t>(TensorShape({3, 1}),
{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}})},
{CacheDatasetParams4(),
{0, 2, 4, 11},
{}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public CacheDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<CacheDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, SaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
if (cache_filename_.empty()) {
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
}
end_of_sequence = false;
out_tensors.clear();
TF_ASSERT_OK(dataset_->MakeIterator(
iterator_ctx_.get(), nullptr,
test_case.dataset_params.iterator_prefix(), &iterator_));
}
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
int cur_iteration = 0;
auto expected_outputs_it = test_case.expected_outputs.begin();
for (int breakpoint : test_case.breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration <= breakpoint) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
EXPECT_LT(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(out_tensors.back(), *expected_outputs_it));
expected_outputs_it++;
}
cur_iteration++;
}
if (breakpoint >= dataset_->Cardinality()) {
EXPECT_TRUE(end_of_sequence);
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
} else {
EXPECT_FALSE(end_of_sequence);
}
}
}
INSTANTIATE_TEST_CASE_P(CacheDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/cache_dataset_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/cache_dataset_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21b1cf8f-e548-408a-b9f8-3c49c1a4d992 | cpp | tensorflow/tensorflow | graph_analyzer | tensorflow/core/grappler/graph_analyzer/graph_analyzer.cc | tensorflow/core/grappler/graph_analyzer/graph_analyzer_test.cc | #include <deque>
#include <iostream>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/grappler/graph_analyzer/gen_node.h"
#include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include "tensorflow/core/grappler/graph_analyzer/sig_node.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
GraphAnalyzer::GraphAnalyzer(const GraphDef& graph, int subgraph_size)
: graph_(graph), subgraph_size_(subgraph_size) {}
GraphAnalyzer::~GraphAnalyzer() {}
Status GraphAnalyzer::Run() {
if (subgraph_size_ > Signature::kMaxGraphSize) {
return Status(absl::StatusCode::kInvalidArgument,
absl::StrFormat("Subgraphs of %d nodes are not supported, "
"the maximal supported node count is %d.",
subgraph_size_, Signature::kMaxGraphSize));
}
Status st = BuildMap();
if (!st.ok()) {
return st;
}
FindSubgraphs();
DropInvalidSubgraphs();
st = CollateResult();
if (!st.ok()) {
return st;
}
return absl::OkStatus();
}
Status GraphAnalyzer::BuildMap() {
nodes_.clear();
return GenNode::BuildGraphInMap(graph_, &nodes_);
}
void GraphAnalyzer::FindSubgraphs() {
result_.clear();
if (subgraph_size_ < 1) {
return;
}
partial_.clear();
todo_.clear();
const Subgraph::Identity empty_parent;
for (const auto& node : nodes_) {
if (subgraph_size_ == 1) {
result_.ExtendParent(empty_parent, node.second.get());
} else {
todo_.push_back(partial_.ExtendParent(empty_parent, node.second.get()));
}
}
while (!todo_.empty()) {
ExtendSubgraph(todo_.front());
todo_.pop_front();
}
partial_.clear();
}
void GraphAnalyzer::ExtendSubgraph(Subgraph* parent) {
const int next_parent_id = parent->id().size() + 1;
bool will_complete = (next_parent_id == subgraph_size_);
SubgraphPtrSet& sg_set = will_complete ? result_ : partial_;
const GenNode* last_all_or_none_node = nullptr;
for (SubgraphIterator sit(parent); !sit.AtEnd(); sit.Next()) {
const GenNode* node = sit.GetNode();
GenNode::Port port = sit.GetPort();
const GenNode::LinkTarget& neighbor = sit.GetNeighbor();
if (node->AllInputsOrNone() && port.IsInbound() && !port.IsControl()) {
if (node != last_all_or_none_node) {
ExtendSubgraphAllOrNone(parent, node);
last_all_or_none_node = node;
}
sit.SkipPort();
} else if (neighbor.node->AllInputsOrNone() && !port.IsInbound() &&
!port.IsControl()) {
if (parent->id().find(neighbor.node) == parent->id().end()) {
ExtendSubgraphAllOrNone(parent, neighbor.node);
}
} else if (node->IsMultiInput(port)) {
ExtendSubgraphPortAllOrNone(parent, node, port);
sit.SkipPort();
} else if (neighbor.node->IsMultiInput(neighbor.port)) {
if (parent->id().find(neighbor.node) != parent->id().end()) {
continue;
}
ExtendSubgraphPortAllOrNone(parent, neighbor.node, neighbor.port);
} else {
Subgraph* sg = sg_set.ExtendParent(parent->id(), neighbor.node);
if (!will_complete && sg != nullptr) {
todo_.push_back(sg);
}
}
}
}
void GraphAnalyzer::ExtendSubgraphAllOrNone(Subgraph* parent,
const GenNode* node) {
Subgraph::Identity id = parent->id();
id.insert(node);
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
}
AddExtendedSubgraph(parent, id);
}
void GraphAnalyzer::ExtendSubgraphPortAllOrNone(Subgraph* parent,
const GenNode* node,
GenNode::Port port) {
auto nbit = node->links().find(port);
if (nbit == node->links().end()) {
return;
}
Subgraph::Identity id = parent->id();
id.insert(node);
for (const auto& link : nbit->second) {
id.insert(link.node);
const int id_size = id.size();
if (id_size > subgraph_size_) {
return;
}
}
AddExtendedSubgraph(parent, id);
}
void GraphAnalyzer::AddExtendedSubgraph(Subgraph* parent,
const Subgraph::Identity& id) {
if (id.size() == parent->id().size()) {
return;
}
auto sg = std::make_unique<Subgraph>(id);
SubgraphPtrSet& spec_sg_set =
(id.size() == subgraph_size_) ? result_ : partial_;
if (spec_sg_set.find(sg) != spec_sg_set.end()) {
return;
}
const int id_size = id.size();
if (id_size != subgraph_size_) {
todo_.push_back(sg.get());
}
spec_sg_set.insert(std::move(sg));
}
void GraphAnalyzer::DropInvalidSubgraphs() {
auto resit = result_.begin();
while (resit != result_.end()) {
if (HasInvalidMultiInputs(resit->get())) {
auto delit = resit;
++resit;
result_.erase(delit);
} else {
++resit;
}
}
}
bool GraphAnalyzer::HasInvalidMultiInputs(Subgraph* sg) {
for (auto const& node : sg->id()) {
if (!node->AllInputsOrNone()) {
continue;
}
bool anyIn = false;
bool anyOut = false;
auto range_end = node->links().end();
for (auto nbit = node->links().begin(); nbit != range_end; ++nbit) {
auto port = nbit->first;
if (!port.IsInbound() || port.IsControl()) {
continue;
}
for (const auto& link : nbit->second) {
if (sg->id().find(link.node) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
}
}
if (anyIn && anyOut) {
return true;
}
}
for (SubgraphIterator sit(sg); !sit.AtEnd(); sit.Next()) {
if (sit.GetNode()->IsMultiInput(sit.GetPort())) {
bool anyIn = false;
bool anyOut = false;
do {
GenNode* peer = sit.GetNeighbor().node;
if (sg->id().find(peer) == sg->id().end()) {
anyOut = true;
} else {
anyIn = true;
}
} while (sit.NextIfSamePort());
if (anyIn && anyOut) {
return true;
}
}
}
return false;
}
Status GraphAnalyzer::CollateResult() {
ordered_collation_.clear();
collation_map_.clear();
for (const auto& it : result_) {
auto sig = std::make_unique<Signature>();
it->ExtractForSignature(&sig->map);
Status status = sig->Compute();
if (!status.ok()) {
return status;
}
auto& coll_entry = collation_map_[sig.get()];
if (coll_entry.sig == nullptr) {
coll_entry.sig = std::move(sig);
}
++coll_entry.count;
}
for (auto& entry : collation_map_) {
ordered_collation_.insert(&entry.second);
}
result_.clear();
return absl::OkStatus();
}
std::vector<string> GraphAnalyzer::DumpRawSubgraphs() {
std::vector<string> result;
for (const auto& it : result_) {
result.emplace_back(it->Dump());
}
return result;
}
std::vector<string> GraphAnalyzer::DumpSubgraphs() {
std::vector<string> result;
for (auto ptr : ordered_collation_) {
result.emplace_back(
absl::StrFormat("%d %s", ptr->count, ptr->sig->ToString()));
}
return result;
}
Status GraphAnalyzer::OutputSubgraphs() {
size_t total = 0;
for (auto ptr : ordered_collation_) {
std::cout << ptr->count << ' ' << ptr->sig->ToString() << '\n';
total += ptr->count;
}
std::cout << "Total: " << total << '\n';
if (std::cout.fail()) {
return Status(absl::StatusCode::kDataLoss, "Failed to write to stdout");
} else {
return absl::OkStatus();
}
}
}
}
} | #include "tensorflow/core/grappler/graph_analyzer/graph_analyzer.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/core/grappler/graph_analyzer/test_tools.h"
namespace tensorflow {
namespace grappler {
namespace graph_analyzer {
namespace test {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::Ne;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class GraphAnalyzerTest : public ::testing::Test, protected TestGraphs {
protected:
Status BuildMap() { return gran_->BuildMap(); }
void FindSubgraphs() { gran_->FindSubgraphs(); }
void DropInvalidSubgraphs() { gran_->DropInvalidSubgraphs(); }
Status CollateResult() { return gran_->CollateResult(); }
void ExtendSubgraph(Subgraph* parent) { gran_->ExtendSubgraph(parent); }
void ExtendSubgraphPortAllOrNone(Subgraph* parent, GenNode* node,
GenNode::Port port) {
gran_->ExtendSubgraphPortAllOrNone(parent, node, port);
}
void ExtendSubgraphAllOrNone(Subgraph* parent, GenNode* node) {
gran_->ExtendSubgraphAllOrNone(parent, node);
}
std::vector<string> DumpRawSubgraphs() { return gran_->DumpRawSubgraphs(); }
std::vector<string> DumpPartials() {
std::vector<string> result;
for (const auto& it : gran_->partial_) {
result.emplace_back(it->Dump());
}
return result;
}
const GenNodeMap& GetNodes() { return gran_->nodes_; }
GenNode* GetNode(const string& name) { return gran_->nodes_.at(name).get(); }
SubgraphPtrSet& GetResult() { return gran_->result_; }
SubgraphPtrSet& GetPartial() { return gran_->partial_; }
std::deque<Subgraph*>& GetTodo() { return gran_->todo_; }
std::unique_ptr<GraphAnalyzer> gran_;
};
TEST_F(GraphAnalyzerTest, BuildMap) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
EXPECT_THAT(st, Eq(absl::OkStatus()));
auto& map = GetNodes();
EXPECT_THAT(map.find("node1"), Ne(map.end()));
EXPECT_THAT(map.find("node2"), Ne(map.end()));
EXPECT_THAT(map.find("node3"), Ne(map.end()));
}
TEST_F(GraphAnalyzerTest, BuildMapError) {
(*graph_3n_self_control_.add_node()) = MakeNodeConst("node1");
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(Status(absl::StatusCode::kInvalidArgument,
"Duplicate node name 'node1'.")));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs0) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 0);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(0));
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphs1) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 1);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
auto& subgraphs = GetResult();
EXPECT_THAT(subgraphs, SizeIs(3));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: BroadcastGradientArgs(node3)",
"1: Const(node1)",
"1: Sub(node2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, FindSubgraphsTooLarge) {
gran_ = std::make_unique<GraphAnalyzer>(graph_3n_self_control_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
FindSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), ElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseIn) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto parent = std::make_unique<Subgraph>(Subgraph::Identity());
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(parent.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsIncomplete) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("add2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("add2"),
GenNode::Port(true, 0));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("add2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: AddN(add2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, MultiInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add2), Const(const2_1), Const(const2_2), Const(const2_3)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsMulti) {
gran_ = std::make_unique<GraphAnalyzer>(graph_multi_input_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("add1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add1"),
GetNode("add2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("add1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("add2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: AddN(add1), AddN(add2), Sub(sub)",
"1: AddN(add1), Const(const1_1), Const(const1_2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsNoControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass1"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSeparateControl) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 5);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass1")}));
ExtendSubgraphPortAllOrNone(root.get(), GetNode("pass1"),
GenNode::Port(true, -1));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputTooLargeBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputNothingAddedBackwards) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root = std::make_unique<Subgraph>(
Subgraph::Identity({GetNode("pass2"), GetNode("const2_1"),
GetNode("const2_2"), GetNode("const2_3")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre());
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsBaseOut) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraphAllOrNone(root.get(), GetNode("pass2"));
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessBackwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("pass2")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre(
"1: IdentityN(pass2), Sub(sub)"
));
EXPECT_THAT(GetTodo(), SizeIs(1));
}
TEST_F(GraphAnalyzerTest, AllOrNoneInputSuccessForwardsFull) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 4);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
auto root =
std::make_unique<Subgraph>(Subgraph::Identity({GetNode("const2_1")}));
ExtendSubgraph(root.get());
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass2)",
"1: Const(const2_1), Const(const2_2), Const(const2_3), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
TEST_F(GraphAnalyzerTest, DropInvalidSubgraphsAllOrNone) {
gran_ = std::make_unique<GraphAnalyzer>(graph_all_or_none_, 3);
Status st = BuildMap();
ASSERT_THAT(st, Eq(absl::OkStatus()));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("const1_2"),
GetNode("pass1"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass1"),
GetNode("pass2"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("const1_1"),
GetNode("pass1"),
GetNode("sub"),
})));
GetResult().insert(std::make_unique<Subgraph>(Subgraph::Identity({
GetNode("pass2"),
GetNode("const2_1"),
GetNode("const2_2"),
})));
DropInvalidSubgraphs();
EXPECT_THAT(DumpRawSubgraphs(), UnorderedElementsAre(
"1: IdentityN(pass1), IdentityN(pass2), Sub(sub)",
"1: Const(const1_1), Const(const1_2), IdentityN(pass1)"
));
EXPECT_THAT(DumpPartials(), UnorderedElementsAre());
EXPECT_THAT(GetTodo(), SizeIs(0));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/graph_analyzer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/graph_analyzer/graph_analyzer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
60891ecd-aa49-424b-b793-09e6139c0d9f | cpp | tensorflow/tensorflow | hashtable_lookup | tensorflow/lite/kernels/hashtable_lookup.cc | tensorflow/lite/kernels/hashtable_lookup_test.cc | #include <stdint.h>
#include <cstdlib>
#include <cstring>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
int greater(const void* a, const void* b) {
return *static_cast<const int*>(a) - *static_cast<const int*>(b);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2);
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1);
TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32);
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));
TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1);
TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32);
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));
TF_LITE_ENSURE(context, NumDimensions(value) >= 1);
TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0),
SizeOfDimension(value, 0));
if (value->type == kTfLiteString) {
TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1);
}
TfLiteTensor* hits;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));
TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8);
TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1);
hitSize->data[0] = SizeOfDimension(lookup, 0);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TF_LITE_ENSURE_EQ(context, value->type, output->type);
TfLiteStatus status = kTfLiteOk;
if (output->type != kTfLiteString) {
TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value));
outputSize->data[0] = SizeOfDimension(lookup, 0);
for (int i = 1; i < NumDimensions(value); i++) {
outputSize->data[i] = SizeOfDimension(value, i);
}
status = context->ResizeTensor(context, output, outputSize);
}
if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) {
status = kTfLiteError;
}
return status;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
TfLiteTensor* hits;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));
const TfLiteTensor* lookup;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));
const TfLiteTensor* key;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));
const TfLiteTensor* value;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));
const int num_rows = SizeOfDimension(value, 0);
TF_LITE_ENSURE(context, num_rows != 0);
const int row_bytes = value->bytes / num_rows;
void* pointer = nullptr;
DynamicBuffer buf;
for (int i = 0; i < SizeOfDimension(lookup, 0); i++) {
int idx = -1;
pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows,
sizeof(int32_t), greater);
if (pointer != nullptr) {
idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) /
sizeof(int32_t);
}
if (idx >= num_rows || idx < 0) {
if (output->type == kTfLiteString) {
buf.AddString(nullptr, 0);
} else {
memset(output->data.raw + i * row_bytes, 0, row_bytes);
}
hits->data.uint8[i] = 0;
} else {
if (output->type == kTfLiteString) {
buf.AddString(GetString(value, idx));
} else {
memcpy(output->data.raw + i * row_bytes,
value->data.raw + idx * row_bytes, row_bytes);
}
hits->data.uint8[i] = 1;
}
}
if (output->type == kTfLiteString) {
buf.WriteToTensorAsVector(output);
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_HASHTABLE_LOOKUP() {
static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <functional>
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class HashtableLookupOpModel : public SingleOpModel {
public:
HashtableLookupOpModel(std::initializer_list<int> lookup_shape,
std::initializer_list<int> key_shape,
std::initializer_list<int> value_shape,
TensorType type) {
lookup_ = AddInput(TensorType_INT32);
key_ = AddInput(TensorType_INT32);
value_ = AddInput(type);
output_ = AddOutput(type);
hit_ = AddOutput(TensorType_UINT8);
SetBuiltinOp(BuiltinOperator_HASHTABLE_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreter({lookup_shape, key_shape, value_shape});
}
void SetLookup(std::initializer_list<int> data) {
PopulateTensor<int>(lookup_, data);
}
void SetHashtableKey(std::initializer_list<int> data) {
PopulateTensor<int>(key_, data);
}
void SetHashtableValue(const std::vector<string>& content) {
PopulateStringTensor(value_, content);
}
void SetHashtableValue(const std::function<float(int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
for (int i = 0; i < rows; i++) {
GetTensorData<float>(tensor)[i] = function(i);
}
}
void SetHashtableValue(const std::function<float(int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
int features = tensor->dims->data[1];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < features; j++) {
GetTensorData<float>(tensor)[i * features + j] = function(i, j);
}
}
}
std::vector<string> GetStringOutput() {
TfLiteTensor* output = interpreter_->tensor(output_);
int num = GetStringCount(output);
std::vector<string> result(num);
for (int i = 0; i < num; i++) {
auto ref = GetString(output, i);
result[i] = string(ref.str, ref.len);
}
return result;
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<uint8_t> GetHit() { return ExtractVector<uint8_t>(hit_); }
private:
int lookup_;
int key_;
int value_;
int output_;
int hit_;
};
TEST(HashtableLookupOpTest, Test2DInput) {
HashtableLookupOpModel m({4}, {3}, {3, 2}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i, int j) { return i + j / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
2.0, 2.1,
0, 0,
0.0, 0.1,
1.0, 1.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
TEST(HashtableLookupOpTest, Test1DInput) {
HashtableLookupOpModel m({4}, {3}, {3}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i) { return i * i / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
0.4,
0,
0.0,
0.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
TEST(HashtableLookupOpTest, TestString) {
HashtableLookupOpModel m({4}, {3}, {3}, TensorType_STRING);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue({"Hello", "", "Hi"});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetStringOutput(), ElementsAreArray({
"Hi",
"",
"Hello",
"",
}));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/hashtable_lookup.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/hashtable_lookup_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
375ef4d6-64ae-424f-ae82-cd2438b0dec3 | cpp | google/quiche | spdy_alt_svc_wire_format | quiche/http2/core/spdy_alt_svc_wire_format.cc | quiche/http2/core/spdy_alt_svc_wire_format_test.cc | #include "quiche/http2/core/spdy_alt_svc_wire_format.h"
#include <algorithm>
#include <cctype>
#include <cstdint>
#include <limits>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace spdy {
namespace {
// Parses [c, end) as an unsigned decimal integer of type T. Succeeds only
// if every character in the range is a digit, the value fits in T, and the
// result is strictly positive. On failure |*value| may hold a partial
// result.
template <class T>
bool ParsePositiveIntegerImpl(absl::string_view::const_iterator c,
                              absl::string_view::const_iterator end, T* value) {
  constexpr T kMaxValue = std::numeric_limits<T>::max();
  *value = 0;
  while (c != end && std::isdigit(*c)) {
    const T digit = static_cast<T>(*c - '0');
    // Reject the input if appending |digit| would overflow T.
    if (*value > kMaxValue / 10 ||
        static_cast<T>(*value * 10) > kMaxValue - digit) {
      return false;
    }
    *value = static_cast<T>(*value * 10 + digit);
    ++c;
  }
  // Valid only if the whole range was consumed and the value is positive.
  return c == end && *value > 0;
}
}
// Out-of-line special members of AlternativeService (kept out of the
// header so the header needs no definitions).
SpdyAltSvcWireFormat::AlternativeService::AlternativeService() = default;

SpdyAltSvcWireFormat::AlternativeService::AlternativeService(
    const std::string& protocol_id, const std::string& host, uint16_t port,
    uint32_t max_age_seconds, VersionVector version)
    : protocol_id(protocol_id),
      host(host),
      port(port),
      max_age_seconds(max_age_seconds),
      version(std::move(version)) {}

SpdyAltSvcWireFormat::AlternativeService::~AlternativeService() = default;

SpdyAltSvcWireFormat::AlternativeService::AlternativeService(
    const AlternativeService& other) = default;
// Parses an Alt-Svc header field value into |altsvc_vector|.
//
// The accepted format is a comma-separated list of entries of the form
//   <percent-encoded protocol id>=<quoted alt-authority>[; param=value]...
// The special value "clear" parses to an empty vector. Recognized
// parameters: "ma" (max age in seconds, default 86400), "v" (quoted
// comma-separated version list, used unless the protocol id is "hq"), and
// "quic" (hexadecimal version, used only when the protocol id is "hq").
// Unknown parameters are ignored. Returns false on malformed input, in
// which case |altsvc_vector| contents are unspecified.
bool SpdyAltSvcWireFormat::ParseHeaderFieldValue(
    absl::string_view value, AlternativeServiceVector* altsvc_vector) {
  if (value.empty()) {
    return false;
  }
  altsvc_vector->clear();
  if (value == absl::string_view("clear")) {
    return true;
  }
  absl::string_view::const_iterator c = value.begin();
  while (c != value.end()) {
    // The protocol id runs up to the first '=' and is percent-decoded.
    absl::string_view::const_iterator percent_encoded_protocol_id_end =
        std::find(c, value.end(), '=');
    std::string protocol_id;
    if (percent_encoded_protocol_id_end == c ||
        !PercentDecode(c, percent_encoded_protocol_id_end, &protocol_id)) {
      return false;
    }
    // "hq" selects the IETF-format QUIC parameter syntax ("quic=" below).
    const bool is_ietf_format_quic = (protocol_id == "hq");
    c = percent_encoded_protocol_id_end;
    if (c == value.end()) {
      return false;
    }
    QUICHE_DCHECK_EQ('=', *c);
    ++c;
    if (c == value.end() || *c != '"') {
      return false;
    }
    ++c;
    // Scan the quoted alt-authority, honoring backslash escapes so an
    // escaped '"' does not terminate the scan.
    absl::string_view::const_iterator alt_authority_begin = c;
    for (; c != value.end() && *c != '"'; ++c) {
      if (*c != '\\') {
        continue;
      }
      ++c;
      if (c == value.end()) {
        return false;
      }
    }
    // The alt-authority must be non-empty and properly closed.
    if (c == alt_authority_begin || c == value.end()) {
      return false;
    }
    QUICHE_DCHECK_EQ('"', *c);
    std::string host;
    uint16_t port;
    if (!ParseAltAuthority(alt_authority_begin, c, &host, &port)) {
      return false;
    }
    ++c;
    // Parameter defaults.
    uint32_t max_age_seconds = 86400;
    VersionVector version;
    // Parameters run up to the next entry separator.
    absl::string_view::const_iterator parameters_end =
        std::find(c, value.end(), ',');
    while (c != parameters_end) {
      SkipWhiteSpace(&c, parameters_end);
      if (c == parameters_end) {
        break;
      }
      if (*c != ';') {
        return false;
      }
      ++c;
      SkipWhiteSpace(&c, parameters_end);
      if (c == parameters_end) {
        break;
      }
      // Parameter names are matched case-insensitively.
      std::string parameter_name;
      for (; c != parameters_end && *c != '=' && *c != ' ' && *c != '\t'; ++c) {
        parameter_name.push_back(tolower(*c));
      }
      SkipWhiteSpace(&c, parameters_end);
      if (c == parameters_end || *c != '=') {
        return false;
      }
      ++c;
      SkipWhiteSpace(&c, parameters_end);
      absl::string_view::const_iterator parameter_value_begin = c;
      for (; c != parameters_end && *c != ';' && *c != ' ' && *c != '\t'; ++c) {
      }
      if (c == parameter_value_begin) {
        return false;
      }
      if (parameter_name == "ma") {
        if (!ParsePositiveInteger32(parameter_value_begin, c,
                                    &max_age_seconds)) {
          return false;
        }
      } else if (!is_ietf_format_quic && parameter_name == "v") {
        // "v" is a quoted list that may itself contain ',', so re-locate
        // |parameters_end| beyond its closing quote.
        if (*parameter_value_begin != '"') {
          return false;
        }
        c = std::find(parameter_value_begin + 1, value.end(), '"');
        if (c == value.end()) {
          return false;
        }
        ++c;
        parameters_end = std::find(c, value.end(), ',');
        // Parse each comma-separated version inside the quotes.
        absl::string_view::const_iterator v_begin = parameter_value_begin + 1;
        while (v_begin < c) {
          absl::string_view::const_iterator v_end = v_begin;
          while (v_end < c - 1 && *v_end != ',') {
            ++v_end;
          }
          uint16_t v;
          if (!ParsePositiveInteger16(v_begin, v_end, &v)) {
            return false;
          }
          version.push_back(v);
          v_begin = v_end + 1;
          // A trailing comma just before the closing quote is malformed.
          if (v_begin == c - 1) {
            return false;
          }
        }
      } else if (is_ietf_format_quic && parameter_name == "quic") {
        // Hexadecimal version: no leading zero and must decode to nonzero.
        if (*parameter_value_begin == '0') {
          return false;
        }
        uint32_t quic_version;
        if (!HexDecodeToUInt32(absl::string_view(&*parameter_value_begin,
                                                 c - parameter_value_begin),
                               &quic_version) ||
            quic_version == 0) {
          return false;
        }
        version.push_back(quic_version);
      }
    }
    altsvc_vector->emplace_back(protocol_id, host, port, max_age_seconds,
                                version);
    // Skip whitespace and separators before the next entry.
    for (; c != value.end() && (*c == ' ' || *c == '\t' || *c == ','); ++c) {
    }
  }
  return true;
}
// Serializes |altsvc_vector| into an Alt-Svc header field value — the
// inverse of ParseHeaderFieldValue(). An empty vector serializes to
// "clear". Non-token characters of the protocol id are percent-encoded;
// '"' and '\\' in the host are backslash-escaped; "ma" is emitted only
// when it differs from the 86400-second default; versions are written as
// hexadecimal "quic" parameters when the protocol id is "hq" and as a
// quoted decimal "v" list otherwise.
std::string SpdyAltSvcWireFormat::SerializeHeaderFieldValue(
    const AlternativeServiceVector& altsvc_vector) {
  if (altsvc_vector.empty()) {
    return std::string("clear");
  }
  const char kNibbleToHex[] = "0123456789ABCDEF";
  std::string value;
  for (const AlternativeService& altsvc : altsvc_vector) {
    if (!value.empty()) {
      value.push_back(',');
    }
    const bool is_ietf_format_quic = (altsvc.protocol_id == "hq");
    // Emit the protocol id, percent-encoding anything that is not an
    // alphanumeric or one of the token characters listed below.
    for (char c : altsvc.protocol_id) {
      if (isalnum(c)) {
        value.push_back(c);
        continue;
      }
      switch (c) {
        case '!':
        case '#':
        case '$':
        case '&':
        case '\'':
        case '*':
        case '+':
        case '-':
        case '.':
        case '^':
        case '_':
        case '`':
        case '|':
        case '~':
          value.push_back(c);
          break;
        default:
          // Percent-encode as '%' plus two uppercase hex nibbles.
          value.push_back('%');
          value.push_back(kNibbleToHex[c >> 4]);
          value.push_back(kNibbleToHex[c & 0x0f]);
          break;
      }
    }
    value.push_back('=');
    value.push_back('"');
    // Backslash-escape '"' and '\\' inside the quoted alt-authority.
    for (char c : altsvc.host) {
      if (c == '"' || c == '\\') {
        value.push_back('\\');
      }
      value.push_back(c);
    }
    absl::StrAppend(&value, ":", altsvc.port, "\"");
    // Omit "ma" when it equals the default the parser would assume anyway.
    if (altsvc.max_age_seconds != 86400) {
      absl::StrAppend(&value, "; ma=", altsvc.max_age_seconds);
    }
    if (!altsvc.version.empty()) {
      if (is_ietf_format_quic) {
        // One hexadecimal "quic" parameter per version.
        for (uint32_t quic_version : altsvc.version) {
          absl::StrAppend(&value, "; quic=", absl::Hex(quic_version));
        }
      } else {
        // Single quoted, comma-separated decimal "v" list.
        value.append("; v=\"");
        for (auto it = altsvc.version.begin(); it != altsvc.version.end();
             ++it) {
          if (it != altsvc.version.begin()) {
            value.append(",");
          }
          absl::StrAppend(&value, *it);
        }
        value.append("\"");
      }
    }
  }
  return value;
}
// Advances |*c| past any run of space or horizontal-tab characters,
// stopping at |end|.
void SpdyAltSvcWireFormat::SkipWhiteSpace(
    absl::string_view::const_iterator* c,
    absl::string_view::const_iterator end) {
  while (*c != end) {
    const char ch = **c;
    if (ch != ' ' && ch != '\t') {
      break;
    }
    ++*c;
  }
}
// Percent-decodes [c, end) into |output|. Each '%' must be followed by
// exactly two hexadecimal digits; any other character is copied through
// unchanged. Returns false on a malformed escape, leaving |output| with
// whatever was decoded so far.
bool SpdyAltSvcWireFormat::PercentDecode(absl::string_view::const_iterator c,
                                         absl::string_view::const_iterator end,
                                         std::string* output) {
  output->clear();
  while (c != end) {
    if (*c != '%') {
      output->push_back(*c);
      ++c;
      continue;
    }
    // Consume the '%' and require two hex digits after it.
    ++c;
    if (c == end || !std::isxdigit(*c)) {
      return false;
    }
    char decoded = HexDigitToInt(*c) << 4;
    ++c;
    if (c == end || !std::isxdigit(*c)) {
      return false;
    }
    decoded += HexDigitToInt(*c);
    output->push_back(decoded);
    ++c;
  }
  return true;
}
// Parses an alt-authority "[host]:port" from [c, end) into |host| and
// |port|. The host may be empty. An IPv6 literal is copied verbatim
// including its square brackets; otherwise backslash escapes in the host
// are unescaped (the backslash is dropped, the escaped character kept).
// An unescaped '"' anywhere in the host is malformed. The port must be a
// positive decimal uint16.
bool SpdyAltSvcWireFormat::ParseAltAuthority(
    absl::string_view::const_iterator c, absl::string_view::const_iterator end,
    std::string* host, uint16_t* port) {
  host->clear();
  if (c == end) {
    return false;
  }
  if (*c == '[') {
    // IPv6 literal: copy through the matching ']' unchanged.
    for (; c != end && *c != ']'; ++c) {
      if (*c == '"') {
        return false;
      }
      host->push_back(*c);
    }
    if (c == end) {
      return false;
    }
    QUICHE_DCHECK_EQ(']', *c);
    host->push_back(*c);
    ++c;
  } else {
    // Hostname: read up to the ':' separator, unescaping backslashes.
    for (; c != end && *c != ':'; ++c) {
      if (*c == '"') {
        return false;
      }
      if (*c == '\\') {
        // Skip the backslash; the escaped character is pushed below.
        ++c;
        if (c == end) {
          return false;
        }
      }
      host->push_back(*c);
    }
  }
  if (c == end || *c != ':') {
    return false;
  }
  QUICHE_DCHECK_EQ(':', *c);
  ++c;
  return ParsePositiveInteger16(c, end, port);
}
// Thin wrappers instantiating the shared overflow-checked decimal parser
// for the two integer widths used by the wire format.
bool SpdyAltSvcWireFormat::ParsePositiveInteger16(
    absl::string_view::const_iterator c, absl::string_view::const_iterator end,
    uint16_t* value) {
  return ParsePositiveIntegerImpl<uint16_t>(c, end, value);
}

bool SpdyAltSvcWireFormat::ParsePositiveInteger32(
    absl::string_view::const_iterator c, absl::string_view::const_iterator end,
    uint32_t* value) {
  return ParsePositiveIntegerImpl<uint32_t>(c, end, value);
}
// Converts a single hexadecimal digit (either case) to its numeric value
// in [0, 15]. The caller must pass a valid hex digit; anything else yields
// 0 in release builds.
char SpdyAltSvcWireFormat::HexDigitToInt(char c) {
  QUICHE_DCHECK(std::isxdigit(c));
  if (c >= '0' && c <= '9') {
    return c - '0';
  }
  if (c >= 'a' && c <= 'f') {
    return c - 'a' + 10;
  }
  if (c >= 'A' && c <= 'F') {
    return c - 'A' + 10;
  }
  return 0;
}
// Decodes |data| as a big-endian hexadecimal number into |*value|. The
// input must be 1 to 8 hex digits (so the result fits in 32 bits). On
// failure |*value| may hold a partial result.
bool SpdyAltSvcWireFormat::HexDecodeToUInt32(absl::string_view data,
                                             uint32_t* value) {
  if (data.empty() || data.length() > 8u) {
    return false;
  }
  *value = 0;
  for (const char digit : data) {
    if (!std::isxdigit(digit)) {
      return false;
    }
    *value = (*value << 4) + HexDigitToInt(digit);
  }
  return true;
}
} | #include "quiche/http2/core/spdy_alt_svc_wire_format.h"
#include <cstddef>
#include <cstdint>
#include <string>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
namespace test {
// Test peer exposing SpdyAltSvcWireFormat's private static helpers so the
// tests below can exercise them directly. Each method forwards unchanged.
class SpdyAltSvcWireFormatPeer {
 public:
  static void SkipWhiteSpace(absl::string_view::const_iterator* c,
                             absl::string_view::const_iterator end) {
    SpdyAltSvcWireFormat::SkipWhiteSpace(c, end);
  }
  static bool PercentDecode(absl::string_view::const_iterator c,
                            absl::string_view::const_iterator end,
                            std::string* output) {
    return SpdyAltSvcWireFormat::PercentDecode(c, end, output);
  }
  static bool ParseAltAuthority(absl::string_view::const_iterator c,
                                absl::string_view::const_iterator end,
                                std::string* host, uint16_t* port) {
    return SpdyAltSvcWireFormat::ParseAltAuthority(c, end, host, port);
  }
  static bool ParsePositiveInteger16(absl::string_view::const_iterator c,
                                     absl::string_view::const_iterator end,
                                     uint16_t* max_age_seconds) {
    return SpdyAltSvcWireFormat::ParsePositiveInteger16(c, end,
                                                        max_age_seconds);
  }
  static bool ParsePositiveInteger32(absl::string_view::const_iterator c,
                                     absl::string_view::const_iterator end,
                                     uint32_t* max_age_seconds) {
    return SpdyAltSvcWireFormat::ParsePositiveInteger32(c, end,
                                                        max_age_seconds);
  }
  static char HexDigitToInt(char c) {
    return SpdyAltSvcWireFormat::HexDigitToInt(c);
  }
  static bool HexDecodeToUInt32(absl::string_view data, uint32_t* value) {
    return SpdyAltSvcWireFormat::HexDecodeToUInt32(data, value);
  }
};
namespace {
// Appends one fuzzed Alt-Svc entry to |header_field_value| and fills
// |expected_altsvc| with the fields it is expected to parse into. Each bit
// of |i| toggles one syntactic variation (protocol id, escaped host,
// whitespace, "ma"/"v"/"quic" parameters, trailing separators), so
// iterating |i| over [0, 1 << 13) covers all combinations.
void FuzzHeaderFieldValue(
    int i, std::string* header_field_value,
    SpdyAltSvcWireFormat::AlternativeService* expected_altsvc) {
  if (!header_field_value->empty()) {
    header_field_value->push_back(',');
  }
  // Bit 0 selects the "hq" protocol id, which switches the parser to the
  // IETF-format QUIC "quic=" parameter syntax instead of the "v" list.
  bool is_ietf_format_quic = (i & 1 << 0) != 0;
  if (i & 1 << 0) {
    expected_altsvc->protocol_id = "hq";
    header_field_value->append("hq=\"");
  } else {
    expected_altsvc->protocol_id = "a=b%c";
    header_field_value->append("a%3Db%25c=\"");
  }
  if (i & 1 << 1) {
    expected_altsvc->host = "foo\"bar\\baz";
    header_field_value->append("foo\\\"bar\\\\baz");
  } else {
    expected_altsvc->host = "";
  }
  expected_altsvc->port = 42;
  header_field_value->append(":42\"");
  if (i & 1 << 2) {
    header_field_value->append(" ");
  }
  // Bits 3-4 control whitespace placement around an "mA" parameter.
  if (i & 3 << 3) {
    expected_altsvc->max_age_seconds = 1111;
    header_field_value->append(";");
    if (i & 1 << 3) {
      header_field_value->append(" ");
    }
    header_field_value->append("mA=1111");
    if (i & 2 << 3) {
      header_field_value->append(" ");
    }
  }
  if (i & 1 << 5) {
    // Unknown parameters must be ignored by the parser.
    header_field_value->append("; J=s");
  }
  if (i & 1 << 6) {
    if (is_ietf_format_quic) {
      if (i & 1 << 7) {
        expected_altsvc->version.push_back(0x923457e);
        header_field_value->append("; quic=923457E");
      } else {
        expected_altsvc->version.push_back(1);
        expected_altsvc->version.push_back(0xFFFFFFFF);
        header_field_value->append("; quic=1; quic=fFfFffFf");
      }
    } else {
      // Fixed: this previously read `i & i << 7`, masking with a
      // data-dependent pattern instead of testing bit 7 like every other
      // flag check in this function (cf. the quic branch above).
      if (i & 1 << 7) {
        expected_altsvc->version.push_back(24);
        header_field_value->append("; v=\"24\"");
      } else {
        expected_altsvc->version.push_back(1);
        expected_altsvc->version.push_back(65535);
        header_field_value->append("; v=\"1,65535\"");
      }
    }
  }
  if (i & 1 << 8) {
    // A later "Ma" overrides the earlier value.
    expected_altsvc->max_age_seconds = 999999999;
    header_field_value->append("; Ma=999999999");
  }
  if (i & 1 << 9) {
    header_field_value->append(";");
  }
  if (i & 1 << 10) {
    header_field_value->append(" ");
  }
  if (i & 1 << 11) {
    header_field_value->append(",");
  }
  if (i & 1 << 12) {
    header_field_value->append(" ");
  }
}
// Fills |altsvc| with fuzzed fields and appends the exact string it is
// expected to serialize to onto |expected_header_field_value|. Bits of |i|
// toggle the escaped host, a non-default max age, and a version list, so
// iterating |i| over [0, 1 << 3) covers all combinations.
void FuzzAlternativeService(int i,
                            SpdyAltSvcWireFormat::AlternativeService* altsvc,
                            std::string* expected_header_field_value) {
  if (!expected_header_field_value->empty()) {
    expected_header_field_value->push_back(',');
  }
  altsvc->protocol_id = "a=b%c";
  altsvc->port = 42;
  expected_header_field_value->append("a%3Db%25c=\"");
  if (i & 1 << 0) {
    altsvc->host = "foo\"bar\\baz";
    expected_header_field_value->append("foo\\\"bar\\\\baz");
  }
  expected_header_field_value->append(":42\"");
  if (i & 1 << 1) {
    altsvc->max_age_seconds = 1111;
    expected_header_field_value->append("; ma=1111");
  }
  if (i & 1 << 2) {
    altsvc->version.push_back(24);
    altsvc->version.push_back(25);
    expected_header_field_value->append("; v=\"24,25\"");
  }
}
// Sanity-checks the AlternativeService default member values.
TEST(SpdyAltSvcWireFormatTest, DefaultValues) {
  SpdyAltSvcWireFormat::AlternativeService altsvc;
  EXPECT_EQ("", altsvc.protocol_id);
  EXPECT_EQ("", altsvc.host);
  EXPECT_EQ(0u, altsvc.port);
  EXPECT_EQ(86400u, altsvc.max_age_seconds);
  EXPECT_TRUE(altsvc.version.empty());
}

// An empty header field value is malformed.
TEST(SpdyAltSvcWireFormatTest, ParseInvalidEmptyHeaderFieldValue) {
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  ASSERT_FALSE(SpdyAltSvcWireFormat::ParseHeaderFieldValue("", &altsvc_vector));
}

// "clear" parses successfully to an empty vector.
TEST(SpdyAltSvcWireFormatTest, ParseHeaderFieldValueClear) {
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  ASSERT_TRUE(
      SpdyAltSvcWireFormat::ParseHeaderFieldValue("clear", &altsvc_vector));
  EXPECT_EQ(0u, altsvc_vector.size());
}

// Fuzzes every single-entry syntax combination, verifies the parsed
// fields, then checks that serialize-then-parse round-trips the entry.
TEST(SpdyAltSvcWireFormatTest, ParseHeaderFieldValue) {
  for (int i = 0; i < 1 << 13; ++i) {
    std::string header_field_value;
    SpdyAltSvcWireFormat::AlternativeService expected_altsvc;
    FuzzHeaderFieldValue(i, &header_field_value, &expected_altsvc);
    SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
    ASSERT_TRUE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(header_field_value,
                                                            &altsvc_vector));
    ASSERT_EQ(1u, altsvc_vector.size());
    EXPECT_EQ(expected_altsvc.protocol_id, altsvc_vector[0].protocol_id);
    EXPECT_EQ(expected_altsvc.host, altsvc_vector[0].host);
    EXPECT_EQ(expected_altsvc.port, altsvc_vector[0].port);
    EXPECT_EQ(expected_altsvc.max_age_seconds,
              altsvc_vector[0].max_age_seconds);
    EXPECT_EQ(expected_altsvc.version, altsvc_vector[0].version);
    // Round-trip: serializing the parsed entry and parsing it again must
    // reproduce the same fields.
    std::string reserialized_header_field_value =
        SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector);
    SpdyAltSvcWireFormat::AlternativeServiceVector roundtrip_altsvc_vector;
    ASSERT_TRUE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(
        reserialized_header_field_value, &roundtrip_altsvc_vector));
    ASSERT_EQ(1u, roundtrip_altsvc_vector.size());
    EXPECT_EQ(expected_altsvc.protocol_id,
              roundtrip_altsvc_vector[0].protocol_id);
    EXPECT_EQ(expected_altsvc.host, roundtrip_altsvc_vector[0].host);
    EXPECT_EQ(expected_altsvc.port, roundtrip_altsvc_vector[0].port);
    EXPECT_EQ(expected_altsvc.max_age_seconds,
              roundtrip_altsvc_vector[0].max_age_seconds);
    EXPECT_EQ(expected_altsvc.version, roundtrip_altsvc_vector[0].version);
  }
}
// Same as ParseHeaderFieldValue above, but with a variable number of
// fuzzed entries concatenated into one header field value.
TEST(SpdyAltSvcWireFormatTest, ParseHeaderFieldValueMultiple) {
  for (int i = 0; i < 1 << 13;) {
    std::string header_field_value;
    SpdyAltSvcWireFormat::AlternativeServiceVector expected_altsvc_vector;
    // Vary the number of entries per header field value.
    do {
      SpdyAltSvcWireFormat::AlternativeService expected_altsvc;
      FuzzHeaderFieldValue(i, &header_field_value, &expected_altsvc);
      expected_altsvc_vector.push_back(expected_altsvc);
      ++i;
    } while (i % 6 < i % 7);
    SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
    ASSERT_TRUE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(header_field_value,
                                                            &altsvc_vector));
    ASSERT_EQ(expected_altsvc_vector.size(), altsvc_vector.size());
    for (unsigned int j = 0; j < altsvc_vector.size(); ++j) {
      EXPECT_EQ(expected_altsvc_vector[j].protocol_id,
                altsvc_vector[j].protocol_id);
      EXPECT_EQ(expected_altsvc_vector[j].host, altsvc_vector[j].host);
      EXPECT_EQ(expected_altsvc_vector[j].port, altsvc_vector[j].port);
      EXPECT_EQ(expected_altsvc_vector[j].max_age_seconds,
                altsvc_vector[j].max_age_seconds);
      EXPECT_EQ(expected_altsvc_vector[j].version, altsvc_vector[j].version);
    }
    // Round-trip the whole list through serialization.
    std::string reserialized_header_field_value =
        SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector);
    SpdyAltSvcWireFormat::AlternativeServiceVector roundtrip_altsvc_vector;
    ASSERT_TRUE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(
        reserialized_header_field_value, &roundtrip_altsvc_vector));
    ASSERT_EQ(expected_altsvc_vector.size(), roundtrip_altsvc_vector.size());
    for (unsigned int j = 0; j < roundtrip_altsvc_vector.size(); ++j) {
      EXPECT_EQ(expected_altsvc_vector[j].protocol_id,
                roundtrip_altsvc_vector[j].protocol_id);
      EXPECT_EQ(expected_altsvc_vector[j].host,
                roundtrip_altsvc_vector[j].host);
      EXPECT_EQ(expected_altsvc_vector[j].port,
                roundtrip_altsvc_vector[j].port);
      EXPECT_EQ(expected_altsvc_vector[j].max_age_seconds,
                roundtrip_altsvc_vector[j].max_age_seconds);
      EXPECT_EQ(expected_altsvc_vector[j].version,
                roundtrip_altsvc_vector[j].version);
    }
  }
}

// An empty vector serializes to "clear".
TEST(SpdyAltSvcWireFormatTest, SerializeEmptyHeaderFieldValue) {
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  EXPECT_EQ("clear",
            SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector));
}

// Fuzzes in the opposite direction: parse the expected serialization,
// compare fields, then serialize and compare the exact string.
TEST(SpdyAltSvcWireFormatTest, RoundTrip) {
  for (int i = 0; i < 1 << 3; ++i) {
    SpdyAltSvcWireFormat::AlternativeService altsvc;
    std::string expected_header_field_value;
    FuzzAlternativeService(i, &altsvc, &expected_header_field_value);
    SpdyAltSvcWireFormat::AlternativeServiceVector parsed_altsvc_vector;
    ASSERT_TRUE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(
        expected_header_field_value, &parsed_altsvc_vector));
    ASSERT_EQ(1u, parsed_altsvc_vector.size());
    EXPECT_EQ(altsvc.protocol_id, parsed_altsvc_vector[0].protocol_id);
    EXPECT_EQ(altsvc.host, parsed_altsvc_vector[0].host);
    EXPECT_EQ(altsvc.port, parsed_altsvc_vector[0].port);
    EXPECT_EQ(altsvc.max_age_seconds, parsed_altsvc_vector[0].max_age_seconds);
    EXPECT_EQ(altsvc.version, parsed_altsvc_vector[0].version);
    SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
    altsvc_vector.push_back(altsvc);
    EXPECT_EQ(expected_header_field_value,
              SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector));
  }
}

// Same as RoundTrip but with all fuzzed entries in one field value.
TEST(SpdyAltSvcWireFormatTest, RoundTripMultiple) {
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  std::string expected_header_field_value;
  for (int i = 0; i < 1 << 3; ++i) {
    SpdyAltSvcWireFormat::AlternativeService altsvc;
    FuzzAlternativeService(i, &altsvc, &expected_header_field_value);
    altsvc_vector.push_back(altsvc);
  }
  SpdyAltSvcWireFormat::AlternativeServiceVector parsed_altsvc_vector;
  ASSERT_TRUE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(
      expected_header_field_value, &parsed_altsvc_vector));
  ASSERT_EQ(altsvc_vector.size(), parsed_altsvc_vector.size());
  auto expected_it = altsvc_vector.begin();
  auto parsed_it = parsed_altsvc_vector.begin();
  for (; expected_it != altsvc_vector.end(); ++expected_it, ++parsed_it) {
    EXPECT_EQ(expected_it->protocol_id, parsed_it->protocol_id);
    EXPECT_EQ(expected_it->host, parsed_it->host);
    EXPECT_EQ(expected_it->port, parsed_it->port);
    EXPECT_EQ(expected_it->max_age_seconds, parsed_it->max_age_seconds);
    EXPECT_EQ(expected_it->version, parsed_it->version);
  }
  EXPECT_EQ(expected_header_field_value,
            SpdyAltSvcWireFormat::SerializeHeaderFieldValue(altsvc_vector));
}
// Malformed header field values — bad percent escapes, missing or
// malformed quoted alt-authorities, broken parameters — must be rejected.
TEST(SpdyAltSvcWireFormatTest, ParseHeaderFieldValueInvalid) {
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  const char* invalid_field_value_array[] = {"a%",
                                             "a%x",
                                             "a%b",
                                             "a%9z",
                                             "a=",
                                             "a=\"",
                                             "a=\"b\"",
                                             "a=\":\"",
                                             "a=\"c:\"",
                                             "a=\"c:foo\"",
                                             "a=\"c:42foo\"",
                                             "a=\"b:42\"bar",
                                             "a=\"b:42\" ; m",
                                             "a=\"b:42\" ; min-age",
                                             "a=\"b:42\" ; ma",
                                             "a=\"b:42\" ; ma=",
                                             "a=\"b:42\" ; v=\"..\"",
                                             "a=\"b:42\" ; ma=ma",
                                             "a=\"b:42\" ; ma=123bar",
                                             "a=\"b:42\" ; v=24",
                                             "a=\"b:42\" ; v=24,25",
                                             "a=\"b:42\" ; v=\"-3\"",
                                             "a=\"b:42\" ; v=\"1.2\"",
                                             "a=\"b:42\" ; v=\"24,\""};
  for (const char* invalid_field_value : invalid_field_value_array) {
    EXPECT_FALSE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(
        invalid_field_value, &altsvc_vector))
        << invalid_field_value;
  }
}

// Every strict prefix of a valid value must be rejected too.
TEST(SpdyAltSvcWireFormatTest, ParseTruncatedHeaderFieldValue) {
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  const char* field_value_array[] = {"a=\":137\"", "a=\"foo:137\"",
                                     "a%25=\"foo\\\"bar\\\\baz:137\""};
  for (const absl::string_view field_value : field_value_array) {
    for (size_t len = 1; len < field_value.size(); ++len) {
      EXPECT_FALSE(SpdyAltSvcWireFormat::ParseHeaderFieldValue(
          field_value.substr(0, len), &altsvc_vector))
          << len;
    }
  }
}

// SkipWhiteSpace stops on non-whitespace and advances over space/tab runs.
TEST(SpdyAltSvcWireFormatTest, SkipWhiteSpace) {
  absl::string_view input("a \tb  ");
  absl::string_view::const_iterator c = input.begin();
  SpdyAltSvcWireFormatPeer::SkipWhiteSpace(&c, input.end());
  ASSERT_EQ(input.begin(), c);
  ++c;
  SpdyAltSvcWireFormatPeer::SkipWhiteSpace(&c, input.end());
  ASSERT_EQ(input.begin() + 3, c);
  ++c;
  SpdyAltSvcWireFormatPeer::SkipWhiteSpace(&c, input.end());
  ASSERT_EQ(input.end(), c);
}

// PercentDecode passes plain text through and decodes %XX escapes.
TEST(SpdyAltSvcWireFormatTest, PercentDecodeValid) {
  absl::string_view input("");
  std::string output;
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::PercentDecode(input.begin(),
                                                      input.end(), &output));
  EXPECT_EQ("", output);
  input = absl::string_view("foo");
  output.clear();
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::PercentDecode(input.begin(),
                                                      input.end(), &output));
  EXPECT_EQ("foo", output);
  input = absl::string_view("%2ca%5Cb");
  output.clear();
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::PercentDecode(input.begin(),
                                                      input.end(), &output));
  EXPECT_EQ(",a\\b", output);
}

// '%' not followed by two hex digits must be rejected.
TEST(SpdyAltSvcWireFormatTest, PercentDecodeInvalid) {
  const char* invalid_input_array[] = {"a%", "a%x", "a%b", "%J22", "%9z"};
  for (const char* invalid_input : invalid_input_array) {
    absl::string_view input(invalid_input);
    std::string output;
    EXPECT_FALSE(SpdyAltSvcWireFormatPeer::PercentDecode(input.begin(),
                                                         input.end(), &output))
        << input;
  }
}
// Valid alt-authorities: empty host, plain hostname, and an IPv6 literal
// whose brackets are preserved.
TEST(SpdyAltSvcWireFormatTest, ParseAltAuthorityValid) {
  absl::string_view input(":42");
  std::string host;
  uint16_t port;
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParseAltAuthority(
      input.begin(), input.end(), &host, &port));
  EXPECT_TRUE(host.empty());
  EXPECT_EQ(42, port);
  input = absl::string_view("foo:137");
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParseAltAuthority(
      input.begin(), input.end(), &host, &port));
  EXPECT_EQ("foo", host);
  EXPECT_EQ(137, port);
  input = absl::string_view("[2003:8:0:16::509d:9615]:443");
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParseAltAuthority(
      input.begin(), input.end(), &host, &port));
  EXPECT_EQ("[2003:8:0:16::509d:9615]", host);
  EXPECT_EQ(443, port);
}

// Missing/zero/non-numeric ports and unbalanced brackets are rejected.
TEST(SpdyAltSvcWireFormatTest, ParseAltAuthorityInvalid) {
  const char* invalid_input_array[] = {"",
                                      ":",
                                      "foo:",
                                      ":bar",
                                      ":0",
                                      "foo:0",
                                      ":12bar",
                                      "foo:23bar",
                                      " ",
                                      ":12 ",
                                      "foo:12 ",
                                      "[2003:8:0:16::509d:9615]",
                                      "[2003:8:0:16::509d:9615]:",
                                      "[2003:8:0:16::509d:9615]foo:443",
                                      "[2003:8:0:16::509d:9615:443",
                                      "2003:8:0:16::509d:9615]:443"};
  for (const char* invalid_input : invalid_input_array) {
    absl::string_view input(invalid_input);
    std::string host;
    uint16_t port;
    EXPECT_FALSE(SpdyAltSvcWireFormatPeer::ParseAltAuthority(
        input.begin(), input.end(), &host, &port))
        << input;
  }
}

// Positive decimal integers parse to their value.
TEST(SpdyAltSvcWireFormatTest, ParseIntegerValid) {
  absl::string_view input("3");
  uint16_t value;
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger16(
      input.begin(), input.end(), &value));
  EXPECT_EQ(3, value);
  input = absl::string_view("1337");
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger16(
      input.begin(), input.end(), &value));
  EXPECT_EQ(1337, value);
}

// Empty input, non-digits, zero, and trailing garbage are rejected.
TEST(SpdyAltSvcWireFormatTest, ParseIntegerInvalid) {
  const char* invalid_input_array[] = {"", " ", "a", "0", "00", "1 ", "12b"};
  for (const char* invalid_input : invalid_input_array) {
    absl::string_view input(invalid_input);
    uint16_t value;
    EXPECT_FALSE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger16(
        input.begin(), input.end(), &value))
        << input;
  }
}

// The maximum representable value parses; one or two past it overflows.
TEST(SpdyAltSvcWireFormatTest, ParseIntegerOverflow) {
  absl::string_view input("65535");
  uint16_t value16;
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger16(
      input.begin(), input.end(), &value16));
  EXPECT_EQ(65535, value16);
  input = absl::string_view("65536");
  ASSERT_FALSE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger16(
      input.begin(), input.end(), &value16));
  input = absl::string_view("65537");
  ASSERT_FALSE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger16(
      input.begin(), input.end(), &value16));
  input = absl::string_view("4294967295");
  uint32_t value32;
  ASSERT_TRUE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger32(
      input.begin(), input.end(), &value32));
  EXPECT_EQ(4294967295, value32);
  input = absl::string_view("4294967296");
  ASSERT_FALSE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger32(
      input.begin(), input.end(), &value32));
  input = absl::string_view("4294967297");
  ASSERT_FALSE(SpdyAltSvcWireFormatPeer::ParsePositiveInteger32(
      input.begin(), input.end(), &value32));
}

// End-to-end parse of an entry whose host is an IPv6 literal.
TEST(SpdyAltSvcWireFormatTest, ParseIPLiteral) {
  const char* input =
      "quic=\"[2003:8:0:16::509d:9615]:443\"; v=\"36,35\"; ma=60";
  SpdyAltSvcWireFormat::AlternativeServiceVector altsvc_vector;
  ASSERT_TRUE(
      SpdyAltSvcWireFormat::ParseHeaderFieldValue(input, &altsvc_vector));
  EXPECT_EQ(1u, altsvc_vector.size());
  EXPECT_EQ("quic", altsvc_vector[0].protocol_id);
  EXPECT_EQ("[2003:8:0:16::509d:9615]", altsvc_vector[0].host);
  EXPECT_EQ(443u, altsvc_vector[0].port);
  EXPECT_EQ(60u, altsvc_vector[0].max_age_seconds);
  EXPECT_THAT(altsvc_vector[0].version, ::testing::ElementsAre(36, 35));
}

// Exhaustive check of every valid hex digit, both cases.
TEST(SpdyAltSvcWireFormatTest, HexDigitToInt) {
  EXPECT_EQ(0, SpdyAltSvcWireFormatPeer::HexDigitToInt('0'));
  EXPECT_EQ(1, SpdyAltSvcWireFormatPeer::HexDigitToInt('1'));
  EXPECT_EQ(2, SpdyAltSvcWireFormatPeer::HexDigitToInt('2'));
  EXPECT_EQ(3, SpdyAltSvcWireFormatPeer::HexDigitToInt('3'));
  EXPECT_EQ(4, SpdyAltSvcWireFormatPeer::HexDigitToInt('4'));
  EXPECT_EQ(5, SpdyAltSvcWireFormatPeer::HexDigitToInt('5'));
  EXPECT_EQ(6, SpdyAltSvcWireFormatPeer::HexDigitToInt('6'));
  EXPECT_EQ(7, SpdyAltSvcWireFormatPeer::HexDigitToInt('7'));
  EXPECT_EQ(8, SpdyAltSvcWireFormatPeer::HexDigitToInt('8'));
  EXPECT_EQ(9, SpdyAltSvcWireFormatPeer::HexDigitToInt('9'));
  EXPECT_EQ(10, SpdyAltSvcWireFormatPeer::HexDigitToInt('a'));
  EXPECT_EQ(11, SpdyAltSvcWireFormatPeer::HexDigitToInt('b'));
  EXPECT_EQ(12, SpdyAltSvcWireFormatPeer::HexDigitToInt('c'));
  EXPECT_EQ(13, SpdyAltSvcWireFormatPeer::HexDigitToInt('d'));
  EXPECT_EQ(14, SpdyAltSvcWireFormatPeer::HexDigitToInt('e'));
  EXPECT_EQ(15, SpdyAltSvcWireFormatPeer::HexDigitToInt('f'));
  EXPECT_EQ(10, SpdyAltSvcWireFormatPeer::HexDigitToInt('A'));
  EXPECT_EQ(11, SpdyAltSvcWireFormatPeer::HexDigitToInt('B'));
  EXPECT_EQ(12, SpdyAltSvcWireFormatPeer::HexDigitToInt('C'));
  EXPECT_EQ(13, SpdyAltSvcWireFormatPeer::HexDigitToInt('D'));
  EXPECT_EQ(14, SpdyAltSvcWireFormatPeer::HexDigitToInt('E'));
  EXPECT_EQ(15, SpdyAltSvcWireFormatPeer::HexDigitToInt('F'));
}

// 1-8 hex digits decode; empty, >8 digits, and non-hex input are rejected.
TEST(SpdyAltSvcWireFormatTest, HexDecodeToUInt32) {
  uint32_t out;
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("0", &out));
  EXPECT_EQ(0u, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("00", &out));
  EXPECT_EQ(0u, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("0000000", &out));
  EXPECT_EQ(0u, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("00000000", &out));
  EXPECT_EQ(0u, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("1", &out));
  EXPECT_EQ(1u, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("ffffFFF", &out));
  EXPECT_EQ(0xFFFFFFFu, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("fFfFffFf", &out));
  EXPECT_EQ(0xFFFFFFFFu, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("01AEF", &out));
  EXPECT_EQ(0x1AEFu, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("abcde", &out));
  EXPECT_EQ(0xABCDEu, out);
  EXPECT_TRUE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("1234abcd", &out));
  EXPECT_EQ(0x1234ABCDu, out);
  EXPECT_FALSE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("", &out));
  EXPECT_FALSE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("111111111", &out));
  EXPECT_FALSE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("1111111111", &out));
  EXPECT_FALSE(SpdyAltSvcWireFormatPeer::HexDecodeToUInt32("0x1111", &out));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_alt_svc_wire_format.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/spdy_alt_svc_wire_format_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
55108a1b-fd1f-4fb5-954f-c1e85a9793f1 | cpp | tensorflow/tensorflow | zip_dataset_op | tensorflow/core/kernels/data/zip_dataset_op.cc | tensorflow/core/kernels/data/zip_dataset_op_test.cc | #include "tensorflow/core/kernels/data/zip_dataset_op.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/dataset_options.pb.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
// Out-of-line definitions for the static constexpr members declared in the
// header (required for ODR-use prior to C++17 inline variables).
constexpr const char* const ZipDatasetOp::kDatasetType;
constexpr const char* const ZipDatasetOp::kInputDatasets;
constexpr const char* const ZipDatasetOp::kOutputTypes;
constexpr const char* const ZipDatasetOp::kOutputShapes;
constexpr const char* const ZipDatasetOp::kNumInputDatasets;

// Checkpoint key recording whether the iterator's inputs were exhausted.
constexpr char kInputImplsEmpty[] = "input_impls_empty";
class ZipDatasetOp::Dataset : public DatasetBase {
public:
// Takes a reference on every input dataset and concatenates their output
// dtypes and shapes in input order, so each zipped element is the tuple of
// one component from every input. Random indexing is supported only if
// every input supports it; the first failing status is recorded.
explicit Dataset(OpKernelContext* ctx, const std::vector<DatasetBase*>& inputs)
    : DatasetBase(DatasetContext(ctx)), inputs_(inputs) {
  for (const auto& input : inputs_) {
    input->Ref();
    for (DataType dt : input->output_dtypes()) {
      output_dtypes_.push_back(dt);
    }
    output_shapes_.insert(output_shapes_.end(), input->output_shapes().begin(),
                          input->output_shapes().end());
    // NOTE(review): |input| has already been dereferenced above, so this
    // null check is dead; presumably inputs are never null — confirm.
    if (input != nullptr && random_indexing_compatible_.ok() &&
        !input->RandomIndexingCompatible().ok()) {
      random_indexing_compatible_ = input->RandomIndexingCompatible();
    }
  }
}
// Releases the references taken on the inputs in the constructor.
~Dataset() override {
  for (const auto& input : inputs_) {
    input->Unref();
  }
}

std::unique_ptr<IteratorBase> MakeIteratorInternal(
    const string& prefix) const override {
  return std::make_unique<Iterator>(
      Iterator::Params{this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}

// Delegates split-provider creation to the shared helper.
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
                              split_providers) const override {
  TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
  return absl::OkStatus();
}

// Concatenation of all inputs' output dtypes, in input order.
const DataTypeVector& output_dtypes() const override { return output_dtypes_; }

// Concatenation of all inputs' output shapes, in input order.
const std::vector<PartialTensorShape>& output_shapes() const override {
  return output_shapes_;
}

string DebugString() const override {
  return name_utils::DatasetDebugString(kDatasetType);
}
// The zipped cardinality is the minimum of the finite input cardinalities:
// unknown anywhere makes the result unknown, and the result is infinite
// only if every input is infinite.
int64_t CardinalityInternal(CardinalityOptions options) const override {
  int64_t min_cardinality = kInfiniteCardinality;
  for (const auto& input : inputs_) {
    const int64_t cardinality = input->Cardinality(options);
    if (cardinality == kUnknownCardinality) {
      return kUnknownCardinality;
    }
    if (cardinality == kInfiniteCardinality) {
      continue;
    }
    if (min_cardinality == kInfiniteCardinality ||
        cardinality < min_cardinality) {
      min_cardinality = cardinality;
    }
  }
  return min_cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
  for (const auto& input : inputs_) {
    inputs->push_back(input);
  }
  return absl::OkStatus();
}

// External state is legal only if every input says so.
Status CheckExternalState() const override {
  for (const auto& input : inputs_) {
    TF_RETURN_IF_ERROR(input->CheckExternalState());
  }
  return absl::OkStatus();
}

// Random-access lookup: element |index| is the concatenation of element
// |index| from every input, in input order.
Status Get(OpKernelContext* ctx, int64 index,
           std::vector<Tensor>* out_tensors) const override {
  TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
  out_tensors->reserve(output_dtypes().size());
  for (int i = 0; i < inputs_.size(); ++i) {
    std::vector<Tensor> input_tensors;
    TF_RETURN_IF_ERROR(inputs_[i]->Get(ctx, index, &input_tensors));
    out_tensors->insert(out_tensors->end(), input_tensors.begin(),
                        input_tensors.end());
  }
  return absl::OkStatus();
}

absl::Status RandomIndexingCompatible() const override {
  return random_indexing_compatible_;
}
protected:
// Serializes this dataset to GraphDef: each input dataset becomes a node,
// and all of them are passed to the op as the single list input at index 0.
Status AsGraphDefInternal(SerializationContext* ctx, DatasetGraphDefBuilder* b,
                          Node** output) const override {
  std::vector<Node*> input_graph_nodes;
  input_graph_nodes.reserve(inputs_.size());
  for (const auto& input : inputs_) {
    Node* input_node;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input, &input_node));
    input_graph_nodes.emplace_back(input_node);
  }
  TF_RETURN_IF_ERROR(b->AddDataset(
      this, {}, {std::make_pair(0, input_graph_nodes)}, {}, output));
  return absl::OkStatus();
}
private:
// Iterator over the zipped dataset: each GetNext pulls one element from every
// component iterator and concatenates the component tensors in input order.
// Ends as soon as any component is exhausted (zip stops at the shortest
// input). All mutable state is guarded by mu_.
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
// Safe for symbolic checkpointing: state lives in the component iterators,
// whose checkpoints are merged into the surrounding context below.
bool SymbolicCheckpointCompatible() const override { return true; }
// Creates one nested IteratorContext and one iterator per component so that
// each component checkpoints independently under a distinct prefix.
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
input_impls_.resize(dataset()->inputs_.size());
for (size_t i = 0; i < input_impls_.size(); ++i) {
TF_RETURN_IF_ERROR(dataset()->inputs_[i]->MakeIterator(
&input_contexts_[i], this, strings::StrCat(prefix(), "[", i, "]"),
&input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
}
return absl::OkStatus();
}
// Produces the next zipped element, or sets *end_of_sequence once any
// component runs out. Never returns a partial tuple.
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
// input_impls_ is cleared after end-of-sequence is reached; from then on
// this iterator permanently reports end-of-sequence.
if (input_impls_.empty()) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->reserve(dataset()->output_dtypes().size());
Status status = absl::OkStatus();
*end_of_sequence = false;
// Lazily propagate a newly installed index mapper to the nested contexts.
// Checking only the last context suffices because all contexts are updated
// together in this loop.
if (TF_PREDICT_FALSE(ctx->index_mapper() && !input_contexts_.empty() &&
input_contexts_.back().index_mapper() == nullptr)) {
for (IteratorContext& input_context : input_contexts_) {
input_context.SetIndexMapper(ctx->index_mapper());
}
}
for (int i = 0; i < input_impls_.size(); ++i) {
const auto& input_impl = input_impls_[i];
std::vector<Tensor> input_tensors;
bool component_end_of_sequence = false;
status.Update(input_impl->GetNext(&input_contexts_[i], &input_tensors,
&component_end_of_sequence));
// Merge the component checkpoint even on failure, so saved state always
// reflects how far each component actually advanced.
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
*end_of_sequence |= component_end_of_sequence;
// On error, keep polling the remaining components (to learn their
// end-of-sequence state) but stop collecting output.
if (!status.ok()) {
continue;
}
if (*end_of_sequence) {
// Drain the remaining components one step so their checkpoints stay
// aligned with this position; their statuses and outputs are
// intentionally discarded.
for (int j = i + 1; j < input_impls_.size(); ++j) {
Status s =
input_impls_[j]->GetNext(&input_contexts_[j], &input_tensors,
&component_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[j].checkpoint());
}
break;
}
out_tensors->insert(out_tensors->end(), input_tensors.begin(),
input_tensors.end());
}
// Discard any partially assembled tuple.
if (*end_of_sequence || !status.ok()) {
out_tensors->clear();
}
if (*end_of_sequence) {
input_impls_.clear();
}
return status;
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
// One zipped output element consumes exactly one element from each input,
// hence a known ratio of 1 for the autotuning model.
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
// Record whether the iterator was already exhausted so RestoreInternal
// knows whether to expect per-component state.
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kInputImplsEmpty,
static_cast<int64_t>(input_impls_.empty())));
for (auto& input_impl : input_impls_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t inputs_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplsEmpty, &inputs_empty));
// Element-count restore path (used with an index mapper, e.g. global
// shuffling): replay the restored element count through the nested
// contexts instead of only reading raw iterator state.
if (ctx->restored_element_count()) {
if (input_impls_.size() != dataset()->inputs_.size()) {
return absl::FailedPreconditionError(
"`Initialize` should be called before restoring from the "
"checkpoint.");
}
if (ctx->index_mapper() == nullptr) {
return absl::FailedPreconditionError(
"ctx->index_mapper() should be provided along with "
"ctx->restored_element_count() when restoring.");
}
if (static_cast<bool>(inputs_empty)) {
input_impls_.clear();
} else {
for (int i = 0; i < input_impls_.size(); ++i) {
input_contexts_[i].set_restored_element_count(
ctx->restored_element_count().value());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[i], reader, input_impls_[i]));
ctx->MergeCheckpoint(input_contexts_[i].checkpoint());
}
}
return absl::OkStatus();
}
// Standard restore path: restore each component's saved state in order.
if (static_cast<bool>(inputs_empty)) {
input_impls_.clear();
} else {
DCHECK_EQ(input_impls_.size(), dataset()->inputs_.size());
for (auto& input_impl : input_impls_)
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl));
}
return absl::OkStatus();
}
private:
mutex mu_;
// Component iterators; emptied permanently once end-of-sequence is reached.
std::vector<std::unique_ptr<IteratorBase>> input_impls_ TF_GUARDED_BY(mu_);
// Nested contexts, parallel to input_impls_ (same index = same component).
std::vector<IteratorContext> input_contexts_ TF_GUARDED_BY(mu_);
};
// Component datasets being zipped, in input order.
// NOTE(review): presumably ref-counted by the constructor/destructor, which
// are outside this view — confirm before relying on ownership semantics.
const std::vector<DatasetBase*> inputs_;
// Concatenation of all components' dtypes/shapes, in input order.
DataTypeVector output_dtypes_;
std::vector<PartialTensorShape> output_shapes_;
// Non-OK when random access (Get) is unsupported; value is computed in the
// constructor (outside this view).
absl::Status random_indexing_compatible_ = absl::OkStatus();
};
ZipDatasetOp::ZipDatasetOp(OpKernelConstruction* ctx) : DatasetOpKernel(ctx) {}
// Builds the output Dataset from the op's inputs. Each input tensor is a
// variant carrying a DatasetBase*; they are collected in input order, which
// fixes the order of tensors within each zipped tuple.
void ZipDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase** output) {
  std::vector<DatasetBase*> inputs;
  // Reserve up front: the input count is known, so avoid repeated
  // reallocation while collecting the components.
  inputs.reserve(ctx->num_inputs());
  for (size_t i = 0; i < ctx->num_inputs(); ++i) {
    DatasetBase* input;
    OP_REQUIRES_OK(ctx, GetDatasetFromVariantTensor(ctx->input(i), &input));
    inputs.push_back(input);
  }
  *output = new Dataset(ctx, inputs);
}
namespace {
// Registers the CPU kernel implementing the "ZipDataset" op.
REGISTER_KERNEL_BUILDER(Name("ZipDataset").Device(DEVICE_CPU), ZipDatasetOp);
}
}
}
#include "tensorflow/core/kernels/data/zip_dataset_op.h"
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "zip_dataset";
// Parameter bundle describing one ZipDataset test configuration: the input
// dataset params, the expected output dtypes/shapes, and the op's N attr.
class ZipDatasetParams : public DatasetParams {
public:
// `input_dataset_params`: params for each component dataset (must be
// non-empty; the first element's type/prefix seeds iterator_prefix_).
// `num_input_datasets`: value for the op's "N" attribute; expected to match
// input_dataset_params.size().
template <typename T>
ZipDatasetParams(std::vector<T> input_dataset_params,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
int num_input_datasets, string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
num_input_datasets_(num_input_datasets) {
// Copy each component's params into the base-class storage.
for (auto& params : input_dataset_params) {
input_dataset_params_.push_back(std::make_unique<T>(params));
}
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params[0].dataset_type(),
input_dataset_params[0].iterator_prefix());
}
// ZipDataset takes no non-dataset input tensors.
std::vector<Tensor> GetInputTensors() const override { return {}; }
// Input names follow the op's variadic-input convention: "<type>_<i>".
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
for (int i = 0; i < num_input_datasets_; ++i) {
input_names->emplace_back(
absl::StrCat(ZipDatasetOp::kDatasetType, "_", i));
}
return absl::OkStatus();
}
// Attributes of the ZipDataset node under test.
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("N", num_input_datasets_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return ZipDatasetOp::kDatasetType; }
private:
// Number of component datasets (the op's "N" attribute).
int32 num_input_datasets_;
};
class ZipDatasetOpTest : public DatasetOpsTestBase {};
// Two equal-length ranges: [0,3) zipped with [10,13) — exactly 3 elements.
ZipDatasetParams ZipDatasetParams1() {
  std::vector<RangeDatasetParams> components{RangeDatasetParams(0, 3, 1),
                                             RangeDatasetParams(10, 13, 1)};
  return ZipDatasetParams(std::move(components),
                          {DT_INT64, DT_INT64},
                          {PartialTensorShape({}), PartialTensorShape({})},
                          2,
                          kNodeName);
}
// Unequal-length ranges: [0,3) zipped with [10,15) — zip stops at 3 elements.
ZipDatasetParams ZipDatasetParams2() {
  std::vector<RangeDatasetParams> components{RangeDatasetParams(0, 3, 1),
                                             RangeDatasetParams(10, 15, 1)};
  return ZipDatasetParams(std::move(components),
                          {DT_INT64, DT_INT64},
                          {PartialTensorShape({}), PartialTensorShape({})},
                          2,
                          kNodeName);
}
// Expected GetNext outputs: both params yield the same 3 zipped pairs, since
// zipping stops at the shorter input. Tensors are flattened in element order:
// (0,10), (1,11), (2,12).
std::vector<GetNextTestCase<ZipDatasetParams>> GetNextTestCases() {
return {{ZipDatasetParams1(),
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})},
{ZipDatasetParams2(),
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})}};
}
ITERATOR_GET_NEXT_TEST_P(ZipDatasetOpTest, ZipDatasetParams, GetNextTestCases())
// The dataset node must carry the node name supplied via the params.
TEST_F(ZipDatasetOpTest, DatasetNodeName) {
  auto params = ZipDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  TF_ASSERT_OK(CheckDatasetNodeName(params.node_name()));
}
// The dataset's type string must match the canonical op name.
TEST_F(ZipDatasetOpTest, DatasetTypeString) {
  auto params = ZipDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  const string expected_type = name_utils::OpName(ZipDatasetOp::kDatasetType);
  TF_ASSERT_OK(CheckDatasetTypeString(expected_type));
}
// Output dtypes: one DT_INT64 per zipped component, for both param sets.
std::vector<DatasetOutputDtypesTestCase<ZipDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{ZipDatasetParams1(),
{DT_INT64, DT_INT64}},
{ZipDatasetParams2(),
{DT_INT64, DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
DatasetOutputDtypesTestCases())
// Output shapes: one scalar shape per zipped component, for both param sets.
std::vector<DatasetOutputShapesTestCase<ZipDatasetParams>>
DatasetOutputShapesTestCases() {
return {{ZipDatasetParams1(),
{PartialTensorShape({}),
PartialTensorShape({})}},
{ZipDatasetParams2(),
{PartialTensorShape({}),
PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
DatasetOutputShapesTestCases())
// Cardinality is the minimum input length: 3 for both equal-length (3,3) and
// unequal-length (3,5) configurations.
std::vector<CardinalityTestCase<ZipDatasetParams>> CardinalityTestCases() {
return {{ZipDatasetParams1(),
3},
{ZipDatasetParams2(),
3}};
}
DATASET_CARDINALITY_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
CardinalityTestCases())
// Iterator-level dtypes mirror the dataset-level dtypes above.
std::vector<IteratorOutputDtypesTestCase<ZipDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{ZipDatasetParams1(),
{DT_INT64, DT_INT64}},
{ZipDatasetParams2(),
{DT_INT64, DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorOutputDtypesTestCases())
// Iterator-level shapes mirror the dataset-level shapes above.
std::vector<IteratorOutputShapesTestCase<ZipDatasetParams>>
IteratorOutputShapesTestCases() {
return {{ZipDatasetParams1(),
{PartialTensorShape({}),
PartialTensorShape({})}},
{ZipDatasetParams2(),
{PartialTensorShape({}),
PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorOutputShapesTestCases())
// The iterator prefix must follow the standard "<type>::<params prefix>"
// naming scheme produced by name_utils.
TEST_F(ZipDatasetOpTest, IteratorOutputPrefix) {
  auto params = ZipDatasetParams1();
  TF_ASSERT_OK(Initialize(params));
  const string expected_prefix = name_utils::IteratorPrefix(
      ZipDatasetOp::kDatasetType, params.iterator_prefix());
  TF_ASSERT_OK(CheckIteratorPrefix(expected_prefix));
}
// Save/restore checkpoints at breakpoints {0, 1, 4}; after each restore the
// iterator must still produce the same 3 zipped pairs in order.
std::vector<IteratorSaveAndRestoreTestCase<ZipDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{ZipDatasetParams1(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})},
{ZipDatasetParams2(),
{0, 1, 4},
CreateTensors<int64_t>(TensorShape{},
{{0}, {10}, {1}, {11}, {2}, {12}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ZipDatasetOpTest, ZipDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
}
// Source: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/zip_dataset_op.cc
// Test:   https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/zip_dataset_op_test.cc
e6ebaeca-4ac3-439a-8640-7976da5c568c | cpp | abseil/abseil-cpp | demangle_rust | absl/debugging/internal/demangle_rust.cc | absl/debugging/internal/demangle_rust_test.cc | #include "absl/debugging/internal/demangle_rust.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/debugging/internal/decode_rust_punycode.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
constexpr int kMaxReturns = 1 << 17;
bool IsDigit(char c) { return '0' <= c && c <= '9'; }
bool IsLower(char c) { return 'a' <= c && c <= 'z'; }
bool IsUpper(char c) { return 'A' <= c && c <= 'Z'; }
bool IsAlpha(char c) { return IsLower(c) || IsUpper(c); }
bool IsIdentifierChar(char c) { return IsAlpha(c) || IsDigit(c) || c == '_'; }
bool IsLowerHexDigit(char c) { return IsDigit(c) || ('a' <= c && c <= 'f'); }
const char* BasicTypeName(char c) {
switch (c) {
case 'a': return "i8";
case 'b': return "bool";
case 'c': return "char";
case 'd': return "f64";
case 'e': return "str";
case 'f': return "f32";
case 'h': return "u8";
case 'i': return "isize";
case 'j': return "usize";
case 'l': return "i32";
case 'm': return "u32";
case 'n': return "i128";
case 'o': return "u128";
case 'p': return "_";
case 's': return "i16";
case 't': return "u16";
case 'u': return "()";
case 'v': return "...";
case 'x': return "i64";
case 'y': return "u64";
case 'z': return "!";
}
return nullptr;
}
class RustSymbolParser {
public:
RustSymbolParser(const char* encoding, char* out, char* const out_end)
: encoding_(encoding), out_(out), out_end_(out_end) {
if (out_ != out_end_) *out_ = '\0';
}
ABSL_MUST_USE_RESULT bool Parse() && {
#define ABSL_DEMANGLER_RECURSE(callee, caller) \
do { \
if (recursion_depth_ == kStackSize) return false; \
\
recursion_stack_[recursion_depth_++] = caller; \
goto callee; \
\
case caller: {} \
} while (0)
int iter = 0;
goto whole_encoding;
for (; iter < kMaxReturns && recursion_depth_ > 0; ++iter) {
switch (recursion_stack_[--recursion_depth_]) {
whole_encoding:
if (!Eat('_') || !Eat('R')) return false;
ABSL_DEMANGLER_RECURSE(path, kInstantiatingCrate);
if (IsAlpha(Peek())) {
++silence_depth_;
ABSL_DEMANGLER_RECURSE(path, kVendorSpecificSuffix);
}
switch (Take()) {
case '.': case '$': case '\0': return true;
}
return false;
path:
switch (Take()) {
case 'C': goto crate_root;
case 'M': goto inherent_impl;
case 'X': goto trait_impl;
case 'Y': goto trait_definition;
case 'N': goto nested_path;
case 'I': goto generic_args;
case 'B': goto path_backref;
default: return false;
}
crate_root:
if (!ParseIdentifier()) return false;
continue;
inherent_impl:
if (!Emit("<")) return false;
ABSL_DEMANGLER_RECURSE(impl_path, kInherentImplType);
ABSL_DEMANGLER_RECURSE(type, kInherentImplEnding);
if (!Emit(">")) return false;
continue;
trait_impl:
if (!Emit("<")) return false;
ABSL_DEMANGLER_RECURSE(impl_path, kTraitImplType);
ABSL_DEMANGLER_RECURSE(type, kTraitImplInfix);
if (!Emit(" as ")) return false;
ABSL_DEMANGLER_RECURSE(path, kTraitImplEnding);
if (!Emit(">")) return false;
continue;
impl_path:
++silence_depth_;
{
int ignored_disambiguator;
if (!ParseDisambiguator(ignored_disambiguator)) return false;
}
ABSL_DEMANGLER_RECURSE(path, kImplPathEnding);
--silence_depth_;
continue;
trait_definition:
if (!Emit("<")) return false;
ABSL_DEMANGLER_RECURSE(type, kTraitDefinitionInfix);
if (!Emit(" as ")) return false;
ABSL_DEMANGLER_RECURSE(path, kTraitDefinitionEnding);
if (!Emit(">")) return false;
continue;
nested_path:
if (IsUpper(Peek())) {
if (!PushNamespace(Take())) return false;
ABSL_DEMANGLER_RECURSE(path, kIdentifierInUppercaseNamespace);
if (!Emit("::")) return false;
if (!ParseIdentifier(PopNamespace())) return false;
continue;
}
if (IsLower(Take())) {
ABSL_DEMANGLER_RECURSE(path, kIdentifierInLowercaseNamespace);
if (!Emit("::")) return false;
if (!ParseIdentifier()) return false;
continue;
}
return false;
type:
if (IsLower(Peek())) {
const char* type_name = BasicTypeName(Take());
if (type_name == nullptr || !Emit(type_name)) return false;
continue;
}
if (Eat('A')) {
if (!Emit("[")) return false;
ABSL_DEMANGLER_RECURSE(type, kArraySize);
if (!Emit("; ")) return false;
ABSL_DEMANGLER_RECURSE(constant, kFinishArray);
if (!Emit("]")) return false;
continue;
}
if (Eat('S')) {
if (!Emit("[")) return false;
ABSL_DEMANGLER_RECURSE(type, kSliceEnding);
if (!Emit("]")) return false;
continue;
}
if (Eat('T')) goto tuple_type;
if (Eat('R')) {
if (!Emit("&")) return false;
if (!ParseOptionalLifetime()) return false;
goto type;
}
if (Eat('Q')) {
if (!Emit("&mut ")) return false;
if (!ParseOptionalLifetime()) return false;
goto type;
}
if (Eat('P')) {
if (!Emit("*const ")) return false;
goto type;
}
if (Eat('O')) {
if (!Emit("*mut ")) return false;
goto type;
}
if (Eat('F')) goto fn_type;
if (Eat('D')) goto dyn_trait_type;
if (Eat('B')) goto type_backref;
goto path;
tuple_type:
if (!Emit("(")) return false;
if (Eat('E')) {
if (!Emit(")")) return false;
continue;
}
ABSL_DEMANGLER_RECURSE(type, kAfterFirstTupleElement);
if (Eat('E')) {
if (!Emit(",)")) return false;
continue;
}
if (!Emit(", ")) return false;
ABSL_DEMANGLER_RECURSE(type, kAfterSecondTupleElement);
if (Eat('E')) {
if (!Emit(")")) return false;
continue;
}
if (!Emit(", ")) return false;
ABSL_DEMANGLER_RECURSE(type, kAfterThirdTupleElement);
if (Eat('E')) {
if (!Emit(")")) return false;
continue;
}
if (!Emit(", ...)")) return false;
++silence_depth_;
while (!Eat('E')) {
ABSL_DEMANGLER_RECURSE(type, kAfterSubsequentTupleElement);
}
--silence_depth_;
continue;
fn_type:
if (!Emit("fn...")) return false;
++silence_depth_;
if (!ParseOptionalBinder()) return false;
(void)Eat('U');
if (Eat('K')) {
if (!Eat('C') && !ParseUndisambiguatedIdentifier()) return false;
}
while (!Eat('E')) {
ABSL_DEMANGLER_RECURSE(type, kContinueParameterList);
}
ABSL_DEMANGLER_RECURSE(type, kFinishFn);
--silence_depth_;
continue;
dyn_trait_type:
if (!Emit("dyn ")) return false;
if (!ParseOptionalBinder()) return false;
if (!Eat('E')) {
ABSL_DEMANGLER_RECURSE(dyn_trait, kBeginAutoTraits);
while (!Eat('E')) {
if (!Emit(" + ")) return false;
ABSL_DEMANGLER_RECURSE(dyn_trait, kContinueAutoTraits);
}
}
if (!ParseRequiredLifetime()) return false;
continue;
dyn_trait:
ABSL_DEMANGLER_RECURSE(path, kContinueDynTrait);
if (Peek() == 'p') {
if (!Emit("<>")) return false;
++silence_depth_;
while (Eat('p')) {
if (!ParseUndisambiguatedIdentifier()) return false;
ABSL_DEMANGLER_RECURSE(type, kContinueAssocBinding);
}
--silence_depth_;
}
continue;
constant:
if (Eat('B')) goto const_backref;
if (Eat('p')) {
if (!Emit("_")) return false;
continue;
}
++silence_depth_;
ABSL_DEMANGLER_RECURSE(type, kConstData);
--silence_depth_;
if (Eat('n') && !EmitChar('-')) return false;
if (!Emit("0x")) return false;
if (Eat('0')) {
if (!EmitChar('0')) return false;
if (!Eat('_')) return false;
continue;
}
while (IsLowerHexDigit(Peek())) {
if (!EmitChar(Take())) return false;
}
if (!Eat('_')) return false;
continue;
generic_args:
ABSL_DEMANGLER_RECURSE(path, kBeginGenericArgList);
if (!Emit("::<>")) return false;
++silence_depth_;
while (!Eat('E')) {
ABSL_DEMANGLER_RECURSE(generic_arg, kContinueGenericArgList);
}
--silence_depth_;
continue;
generic_arg:
if (Peek() == 'L') {
if (!ParseOptionalLifetime()) return false;
continue;
}
if (Eat('K')) goto constant;
goto type;
path_backref:
if (!BeginBackref()) return false;
if (silence_depth_ == 0) {
ABSL_DEMANGLER_RECURSE(path, kPathBackrefEnding);
}
EndBackref();
continue;
type_backref:
if (!BeginBackref()) return false;
if (silence_depth_ == 0) {
ABSL_DEMANGLER_RECURSE(type, kTypeBackrefEnding);
}
EndBackref();
continue;
const_backref:
if (!BeginBackref()) return false;
if (silence_depth_ == 0) {
ABSL_DEMANGLER_RECURSE(constant, kConstantBackrefEnding);
}
EndBackref();
continue;
}
}
return false;
}
private:
enum ReturnAddress : uint8_t {
kInstantiatingCrate,
kVendorSpecificSuffix,
kIdentifierInUppercaseNamespace,
kIdentifierInLowercaseNamespace,
kInherentImplType,
kInherentImplEnding,
kTraitImplType,
kTraitImplInfix,
kTraitImplEnding,
kImplPathEnding,
kTraitDefinitionInfix,
kTraitDefinitionEnding,
kArraySize,
kFinishArray,
kSliceEnding,
kAfterFirstTupleElement,
kAfterSecondTupleElement,
kAfterThirdTupleElement,
kAfterSubsequentTupleElement,
kContinueParameterList,
kFinishFn,
kBeginAutoTraits,
kContinueAutoTraits,
kContinueDynTrait,
kContinueAssocBinding,
kConstData,
kBeginGenericArgList,
kContinueGenericArgList,
kPathBackrefEnding,
kTypeBackrefEnding,
kConstantBackrefEnding,
};
enum {
kStackSize = 256,
kNamespaceStackSize = 64,
kPositionStackSize = 16,
};
char Peek() const { return encoding_[pos_]; }
char Take() { return encoding_[pos_++]; }
ABSL_MUST_USE_RESULT bool Eat(char want) {
if (encoding_[pos_] != want) return false;
++pos_;
return true;
}
ABSL_MUST_USE_RESULT bool EmitChar(char c) {
if (silence_depth_ > 0) return true;
if (out_end_ - out_ < 2) return false;
*out_++ = c;
*out_ = '\0';
return true;
}
ABSL_MUST_USE_RESULT bool Emit(const char* token) {
if (silence_depth_ > 0) return true;
const size_t token_length = std::strlen(token);
const size_t bytes_to_copy = token_length + 1;
if (static_cast<size_t>(out_end_ - out_) < bytes_to_copy) return false;
std::memcpy(out_, token, bytes_to_copy);
out_ += token_length;
return true;
}
ABSL_MUST_USE_RESULT bool EmitDisambiguator(int disambiguator) {
if (disambiguator < 0) return EmitChar('?');
if (disambiguator == 0) return EmitChar('0');
char digits[3 * sizeof(disambiguator)] = {};
size_t leading_digit_index = sizeof(digits) - 1;
for (; disambiguator > 0; disambiguator /= 10) {
digits[--leading_digit_index] =
static_cast<char>('0' + disambiguator % 10);
}
return Emit(digits + leading_digit_index);
}
ABSL_MUST_USE_RESULT bool ParseDisambiguator(int& value) {
value = -1;
if (!Eat('s')) {
value = 0;
return true;
}
int base_62_value = 0;
if (!ParseBase62Number(base_62_value)) return false;
value = base_62_value < 0 ? -1 : base_62_value + 1;
return true;
}
ABSL_MUST_USE_RESULT bool ParseBase62Number(int& value) {
value = -1;
if (Eat('_')) {
value = 0;
return true;
}
int encoded_number = 0;
bool overflowed = false;
while (IsAlpha(Peek()) || IsDigit(Peek())) {
const char c = Take();
if (encoded_number >= std::numeric_limits<int>::max()/62) {
overflowed = true;
} else {
int digit;
if (IsDigit(c)) {
digit = c - '0';
} else if (IsLower(c)) {
digit = c - 'a' + 10;
} else {
digit = c - 'A' + 36;
}
encoded_number = 62 * encoded_number + digit;
}
}
if (!Eat('_')) return false;
if (!overflowed) value = encoded_number + 1;
return true;
}
ABSL_MUST_USE_RESULT bool ParseIdentifier(char uppercase_namespace = '\0') {
int disambiguator = 0;
if (!ParseDisambiguator(disambiguator)) return false;
return ParseUndisambiguatedIdentifier(uppercase_namespace, disambiguator);
}
ABSL_MUST_USE_RESULT bool ParseUndisambiguatedIdentifier(
char uppercase_namespace = '\0', int disambiguator = 0) {
const bool is_punycoded = Eat('u');
if (!IsDigit(Peek())) return false;
int num_bytes = 0;
if (!ParseDecimalNumber(num_bytes)) return false;
(void)Eat('_');
if (is_punycoded) {
DecodeRustPunycodeOptions options;
options.punycode_begin = &encoding_[pos_];
options.punycode_end = &encoding_[pos_] + num_bytes;
options.out_begin = out_;
options.out_end = out_end_;
out_ = DecodeRustPunycode(options);
if (out_ == nullptr) return false;
pos_ += static_cast<size_t>(num_bytes);
}
if (uppercase_namespace != '\0') {
switch (uppercase_namespace) {
case 'C':
if (!Emit("{closure")) return false;
break;
case 'S':
if (!Emit("{shim")) return false;
break;
default:
if (!EmitChar('{') || !EmitChar(uppercase_namespace)) return false;
break;
}
if (num_bytes > 0 && !Emit(":")) return false;
}
if (!is_punycoded) {
for (int i = 0; i < num_bytes; ++i) {
const char c = Take();
if (!IsIdentifierChar(c) &&
(c & 0x80) == 0) {
return false;
}
if (!EmitChar(c)) return false;
}
}
if (uppercase_namespace != '\0') {
if (!EmitChar('#')) return false;
if (!EmitDisambiguator(disambiguator)) return false;
if (!EmitChar('}')) return false;
}
return true;
}
ABSL_MUST_USE_RESULT bool ParseDecimalNumber(int& value) {
value = -1;
if (!IsDigit(Peek())) return false;
int encoded_number = Take() - '0';
if (encoded_number == 0) {
value = 0;
return true;
}
while (IsDigit(Peek()) &&
encoded_number < std::numeric_limits<int>::max()/10) {
encoded_number = 10 * encoded_number + (Take() - '0');
}
if (IsDigit(Peek())) return false;
value = encoded_number;
return true;
}
ABSL_MUST_USE_RESULT bool ParseOptionalBinder() {
if (!Eat('G')) return true;
int ignored_binding_count;
return ParseBase62Number(ignored_binding_count);
}
ABSL_MUST_USE_RESULT bool ParseOptionalLifetime() {
if (!Eat('L')) return true;
int ignored_de_bruijn_index;
return ParseBase62Number(ignored_de_bruijn_index);
}
ABSL_MUST_USE_RESULT bool ParseRequiredLifetime() {
if (Peek() != 'L') return false;
return ParseOptionalLifetime();
}
ABSL_MUST_USE_RESULT bool PushNamespace(char ns) {
if (namespace_depth_ == kNamespaceStackSize) return false;
namespace_stack_[namespace_depth_++] = ns;
return true;
}
char PopNamespace() { return namespace_stack_[--namespace_depth_]; }
ABSL_MUST_USE_RESULT bool PushPosition(int position) {
if (position_depth_ == kPositionStackSize) return false;
position_stack_[position_depth_++] = position;
return true;
}
int PopPosition() { return position_stack_[--position_depth_]; }
ABSL_MUST_USE_RESULT bool BeginBackref() {
int offset = 0;
const int offset_of_this_backref =
pos_ - 2 - 1 ;
if (!ParseBase62Number(offset) || offset < 0 ||
offset >= offset_of_this_backref) {
return false;
}
offset += 2;
if (!PushPosition(pos_)) return false;
pos_ = offset;
return true;
}
void EndBackref() { pos_ = PopPosition(); }
ReturnAddress recursion_stack_[kStackSize] = {};
int recursion_depth_ = 0;
char namespace_stack_[kNamespaceStackSize] = {};
int namespace_depth_ = 0;
int position_stack_[kPositionStackSize] = {};
int position_depth_ = 0;
int silence_depth_ = 0;
int pos_ = 0;
const char* encoding_ = nullptr;
char* out_ = nullptr;
char* out_end_ = nullptr;
};
}
bool DemangleRustSymbolEncoding(const char* mangled, char* out,
size_t out_size) {
return RustSymbolParser(mangled, out, out + out_size).Parse();
}
}
ABSL_NAMESPACE_END
} | #include "absl/debugging/internal/demangle_rust.h"
#include <cstddef>
#include <string>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
std::string ResultOfDemangling(const char* mangled, size_t buffer_size) {
std::string buffer(buffer_size + 1, '~');
constexpr char kCanaryCharacter = 0x7f;
buffer[buffer_size] = kCanaryCharacter;
if (!DemangleRustSymbolEncoding(mangled, &buffer[0], buffer_size)) {
return "Failed parse";
}
if (buffer[buffer_size] != kCanaryCharacter) {
return "Buffer overrun by output: " + buffer.substr(0, buffer_size + 1)
+ "...";
}
return buffer.data();
}
#define EXPECT_DEMANGLING(mangled, plaintext) \
do { \
[] { \
constexpr size_t plenty_of_space = sizeof(plaintext) + 128; \
constexpr size_t just_enough_space = sizeof(plaintext); \
constexpr size_t one_byte_too_few = sizeof(plaintext) - 1; \
const char* expected_plaintext = plaintext; \
const char* expected_error = "Failed parse"; \
ASSERT_EQ(ResultOfDemangling(mangled, plenty_of_space), \
expected_plaintext); \
ASSERT_EQ(ResultOfDemangling(mangled, just_enough_space), \
expected_plaintext); \
ASSERT_EQ(ResultOfDemangling(mangled, one_byte_too_few), \
expected_error); \
}(); \
} while (0)
#define EXPECT_DEMANGLING_FAILS(mangled) \
do { \
constexpr size_t plenty_of_space = 1024; \
const char* expected_error = "Failed parse"; \
EXPECT_EQ(ResultOfDemangling(mangled, plenty_of_space), expected_error); \
} while (0)
TEST(DemangleRust, EmptyDemangling) {
EXPECT_TRUE(DemangleRustSymbolEncoding("_RC0", nullptr, 0));
}
TEST(DemangleRust, FunctionAtCrateLevel) {
EXPECT_DEMANGLING("_RNvC10crate_name9func_name", "crate_name::func_name");
EXPECT_DEMANGLING(
"_RNvCs09azAZ_10crate_name9func_name", "crate_name::func_name");
}
TEST(DemangleRust, TruncationsOfFunctionAtCrateLevel) {
EXPECT_DEMANGLING_FAILS("_R");
EXPECT_DEMANGLING_FAILS("_RN");
EXPECT_DEMANGLING_FAILS("_RNvC");
EXPECT_DEMANGLING_FAILS("_RNvC10");
EXPECT_DEMANGLING_FAILS("_RNvC10crate_nam");
EXPECT_DEMANGLING_FAILS("_RNvC10crate_name");
EXPECT_DEMANGLING_FAILS("_RNvC10crate_name9");
EXPECT_DEMANGLING_FAILS("_RNvC10crate_name9func_nam");
EXPECT_DEMANGLING_FAILS("_RNvCs");
EXPECT_DEMANGLING_FAILS("_RNvCs09azAZ");
EXPECT_DEMANGLING_FAILS("_RNvCs09azAZ_");
}
TEST(DemangleRust, VendorSuffixes) {
EXPECT_DEMANGLING("_RNvC10crate_name9func_name.!@#", "crate_name::func_name");
EXPECT_DEMANGLING("_RNvC10crate_name9func_name$!@#", "crate_name::func_name");
}
TEST(DemangleRust, UnicodeIdentifiers) {
EXPECT_DEMANGLING("_RNvC7ice_cap17Eyjafjallajökull",
"ice_cap::Eyjafjallajökull");
EXPECT_DEMANGLING("_RNvC7ice_caps_u19Eyjafjallajkull_jtb",
"ice_cap::Eyjafjallajökull");
}
TEST(DemangleRust, FunctionInModule) {
EXPECT_DEMANGLING("_RNvNtCs09azAZ_10crate_name11module_name9func_name",
"crate_name::module_name::func_name");
}
TEST(DemangleRust, FunctionInFunction) {
EXPECT_DEMANGLING(
"_RNvNvCs09azAZ_10crate_name15outer_func_name15inner_func_name",
"crate_name::outer_func_name::inner_func_name");
}
TEST(DemangleRust, ClosureInFunction) {
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_name0",
"crate_name::func_name::{closure#0}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_name0Cs123_12client_crate",
"crate_name::func_name::{closure#0}");
}
TEST(DemangleRust, ClosureNumbering) {
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_names_0Cs123_12client_crate",
"crate_name::func_name::{closure#1}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_names0_0Cs123_12client_crate",
"crate_name::func_name::{closure#2}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_names9_0Cs123_12client_crate",
"crate_name::func_name::{closure#11}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_namesa_0Cs123_12client_crate",
"crate_name::func_name::{closure#12}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_namesz_0Cs123_12client_crate",
"crate_name::func_name::{closure#37}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_namesA_0Cs123_12client_crate",
"crate_name::func_name::{closure#38}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_namesZ_0Cs123_12client_crate",
"crate_name::func_name::{closure#63}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_names10_0Cs123_12client_crate",
"crate_name::func_name::{closure#64}");
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_namesg6_0Cs123_12client_crate",
"crate_name::func_name::{closure#1000}");
}
TEST(DemangleRust, ClosureNumberOverflowingInt) {
EXPECT_DEMANGLING(
"_RNCNvCs09azAZ_10crate_name9func_names1234567_0Cs123_12client_crate",
"crate_name::func_name::{closure#?}");
}
TEST(DemangleRust, UnexpectedlyNamedClosure) {
EXPECT_DEMANGLING(
"_RNCNvCs123_10crate_name9func_name12closure_nameCs456_12client_crate",
"crate_name::func_name::{closure:closure_name#0}");
EXPECT_DEMANGLING(
"_RNCNvCs123_10crate_name9func_names2_12closure_nameCs456_12client_crate",
"crate_name::func_name::{closure:closure_name#4}");
}
TEST(DemangleRust, ItemNestedInsideClosure) {
EXPECT_DEMANGLING(
"_RNvNCNvCs123_10crate_name9func_name015inner_func_nameCs_12client_crate",
"crate_name::func_name::{closure#0}::inner_func_name");
}
TEST(DemangleRust, Shim) {
EXPECT_DEMANGLING(
"_RNSNvCs123_10crate_name9func_name6vtableCs456_12client_crate",
"crate_name::func_name::{shim:vtable#0}");
}
TEST(DemangleRust, UnknownUppercaseNamespace) {
EXPECT_DEMANGLING(
"_RNXNvCs123_10crate_name9func_name14mystery_objectCs456_12client_crate",
"crate_name::func_name::{X:mystery_object#0}");
}
TEST(DemangleRust, NestedUppercaseNamespaces) {
EXPECT_DEMANGLING(
"_RNCNXNYCs123_10crate_names0_1ys1_1xs2_0Cs456_12client_crate",
"crate_name::{Y:y#2}::{X:x#3}::{closure#4}");
}
TEST(DemangleRust, TraitDefinition) {
EXPECT_DEMANGLING(
"_RNvYNtC7crate_a9my_structNtC7crate_b8my_trait1f",
"<crate_a::my_struct as crate_b::my_trait>::f");
}
TEST(DemangleRust, BasicTypeNames) {
EXPECT_DEMANGLING("_RNvYaNtC1c1t1f", "<i8 as c::t>::f");
EXPECT_DEMANGLING("_RNvYbNtC1c1t1f", "<bool as c::t>::f");
EXPECT_DEMANGLING("_RNvYcNtC1c1t1f", "<char as c::t>::f");
EXPECT_DEMANGLING("_RNvYdNtC1c1t1f", "<f64 as c::t>::f");
EXPECT_DEMANGLING("_RNvYeNtC1c1t1f", "<str as c::t>::f");
EXPECT_DEMANGLING("_RNvYfNtC1c1t1f", "<f32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYhNtC1c1t1f", "<u8 as c::t>::f");
EXPECT_DEMANGLING("_RNvYiNtC1c1t1f", "<isize as c::t>::f");
EXPECT_DEMANGLING("_RNvYjNtC1c1t1f", "<usize as c::t>::f");
EXPECT_DEMANGLING("_RNvYlNtC1c1t1f", "<i32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYmNtC1c1t1f", "<u32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYnNtC1c1t1f", "<i128 as c::t>::f");
EXPECT_DEMANGLING("_RNvYoNtC1c1t1f", "<u128 as c::t>::f");
EXPECT_DEMANGLING("_RNvYpNtC1c1t1f", "<_ as c::t>::f");
EXPECT_DEMANGLING("_RNvYsNtC1c1t1f", "<i16 as c::t>::f");
EXPECT_DEMANGLING("_RNvYtNtC1c1t1f", "<u16 as c::t>::f");
EXPECT_DEMANGLING("_RNvYuNtC1c1t1f", "<() as c::t>::f");
EXPECT_DEMANGLING("_RNvYvNtC1c1t1f", "<... as c::t>::f");
EXPECT_DEMANGLING("_RNvYxNtC1c1t1f", "<i64 as c::t>::f");
EXPECT_DEMANGLING("_RNvYyNtC1c1t1f", "<u64 as c::t>::f");
EXPECT_DEMANGLING("_RNvYzNtC1c1t1f", "<! as c::t>::f");
EXPECT_DEMANGLING_FAILS("_RNvYkNtC1c1t1f");
}
TEST(DemangleRust, SliceTypes) {
EXPECT_DEMANGLING("_RNvYSlNtC1c1t1f", "<[i32] as c::t>::f");
EXPECT_DEMANGLING("_RNvYSNtC1d1sNtC1c1t1f", "<[d::s] as c::t>::f");
}
TEST(DemangleRust, ImmutableReferenceTypes) {
EXPECT_DEMANGLING("_RNvYRlNtC1c1t1f", "<&i32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYRNtC1d1sNtC1c1t1f", "<&d::s as c::t>::f");
}
TEST(DemangleRust, MutableReferenceTypes) {
EXPECT_DEMANGLING("_RNvYQlNtC1c1t1f", "<&mut i32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYQNtC1d1sNtC1c1t1f", "<&mut d::s as c::t>::f");
}
TEST(DemangleRust, ConstantRawPointerTypes) {
EXPECT_DEMANGLING("_RNvYPlNtC1c1t1f", "<*const i32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYPNtC1d1sNtC1c1t1f", "<*const d::s as c::t>::f");
}
TEST(DemangleRust, MutableRawPointerTypes) {
EXPECT_DEMANGLING("_RNvYOlNtC1c1t1f", "<*mut i32 as c::t>::f");
EXPECT_DEMANGLING("_RNvYONtC1d1sNtC1c1t1f", "<*mut d::s as c::t>::f");
}
TEST(DemangleRust, TupleLength0) {
EXPECT_DEMANGLING("_RNvYTENtC1c1t1f", "<() as c::t>::f");
}
TEST(DemangleRust, TupleLength1) {
EXPECT_DEMANGLING("_RNvYTlENtC1c1t1f", "<(i32,) as c::t>::f");
EXPECT_DEMANGLING("_RNvYTNtC1d1sENtC1c1t1f", "<(d::s,) as c::t>::f");
}
TEST(DemangleRust, TupleLength2) {
EXPECT_DEMANGLING("_RNvYTlmENtC1c1t1f", "<(i32, u32) as c::t>::f");
EXPECT_DEMANGLING("_RNvYTNtC1d1xNtC1e1yENtC1c1t1f",
"<(d::x, e::y) as c::t>::f");
}
TEST(DemangleRust, TupleLength3) {
EXPECT_DEMANGLING("_RNvYTlmnENtC1c1t1f", "<(i32, u32, i128) as c::t>::f");
EXPECT_DEMANGLING("_RNvYTNtC1d1xNtC1e1yNtC1f1zENtC1c1t1f",
"<(d::x, e::y, f::z) as c::t>::f");
}
TEST(DemangleRust, LongerTuplesAbbreviated) {
EXPECT_DEMANGLING("_RNvYTlmnoENtC1c1t1f",
"<(i32, u32, i128, ...) as c::t>::f");
EXPECT_DEMANGLING("_RNvYTlmnNtC1d1xNtC1e1yENtC1c1t1f",
"<(i32, u32, i128, ...) as c::t>::f");
}
TEST(DemangleRust, PathBackrefToCrate) {
EXPECT_DEMANGLING("_RNvYNtC8my_crate9my_structNtB4_8my_trait1f",
"<my_crate::my_struct as my_crate::my_trait>::f");
}
TEST(DemangleRust, PathBackrefToNestedPath) {
EXPECT_DEMANGLING("_RNvYNtNtC1c1m1sNtB4_1t1f", "<c::m::s as c::m::t>::f");
}
TEST(DemangleRust, PathBackrefAsInstantiatingCrate) {
EXPECT_DEMANGLING("_RNCNvC8my_crate7my_func0B3_",
"my_crate::my_func::{closure#0}");
}
TEST(DemangleRust, TypeBackrefsNestedInTuple) {
EXPECT_DEMANGLING("_RNvYTTRlB4_ERB3_ENtC1c1t1f",
"<((&i32, &i32), &(&i32, &i32)) as c::t>::f");
}
TEST(DemangleRust, NoInfiniteLoopOnBackrefToTheWhole) {
EXPECT_DEMANGLING_FAILS("_RB_");
EXPECT_DEMANGLING_FAILS("_RNvB_1sNtC1c1t1f");
}
TEST(DemangleRust, NoCrashOnForwardBackref) {
EXPECT_DEMANGLING_FAILS("_RB0_");
EXPECT_DEMANGLING_FAILS("_RB1_");
EXPECT_DEMANGLING_FAILS("_RB2_");
EXPECT_DEMANGLING_FAILS("_RB3_");
EXPECT_DEMANGLING_FAILS("_RB4_");
}
TEST(DemangleRust, PathBackrefsDoNotRecurseDuringSilence) {
EXPECT_DEMANGLING("_RNvYTlmnNtB_1sENtC1c1t1f",
"<(i32, u32, i128, ...) as c::t>::f");
}
TEST(DemangleRust, TypeBackrefsDoNotRecurseDuringSilence) {
EXPECT_DEMANGLING("_RNvYTlmnB2_ENtC1c1t1f",
"<(i32, u32, i128, ...) as c::t>::f");
}
TEST(DemangleRust, ConstBackrefsDoNotRecurseDuringSilence) {
EXPECT_DEMANGLING("_RINvC1c1fAlB_E", "c::f::<>");
}
TEST(DemangleRust, ReturnFromBackrefToInputPosition256) {
EXPECT_DEMANGLING("_RNvYNtC1c238very_long_type_"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABC"
"NtB4_1t1f",
"<c::very_long_type_"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABCDEFGHIJabcdefghij"
"ABCDEFGHIJabcdefghijABC"
" as c::t>::f");
}
TEST(DemangleRust, EmptyGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fE", "c::f::<>");
}
TEST(DemangleRust, OneSimpleTypeInGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1flE",
"c::f::<>");
}
TEST(DemangleRust, OneTupleInGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fTlmEE",
"c::f::<>");
}
TEST(DemangleRust, OnePathInGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fNtC1d1sE",
"c::f::<>");
}
TEST(DemangleRust, LongerGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1flmRNtC1d1sE",
"c::f::<>");
}
TEST(DemangleRust, BackrefInGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fRlB7_NtB2_1sE",
"c::f::<>");
}
TEST(DemangleRust, NestedGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fINtB2_1slEmE",
"c::f::<>");
}
TEST(DemangleRust, MonomorphicEntityNestedInsideGeneric) {
EXPECT_DEMANGLING("_RNvINvC1c1fppE1g",
"c::f::<>::g");
}
TEST(DemangleRust, ArrayTypeWithSimpleElementType) {
EXPECT_DEMANGLING("_RNvYAlj1f_NtC1c1t1f", "<[i32; 0x1f] as c::t>::f");
}
TEST(DemangleRust, ArrayTypeWithComplexElementType) {
EXPECT_DEMANGLING("_RNvYAINtC1c1slEj1f_NtB6_1t1f",
"<[c::s::<>; 0x1f] as c::t>::f");
}
TEST(DemangleRust, NestedArrayType) {
EXPECT_DEMANGLING("_RNvYAAlj1f_j2e_NtC1c1t1f",
"<[[i32; 0x1f]; 0x2e] as c::t>::f");
}
TEST(DemangleRust, BackrefArraySize) {
EXPECT_DEMANGLING("_RNvYAAlj1f_B5_NtC1c1t1f",
"<[[i32; 0x1f]; 0x1f] as c::t>::f");
}
TEST(DemangleRust, ZeroArraySize) {
EXPECT_DEMANGLING("_RNvYAlj0_NtC1c1t1f", "<[i32; 0x0] as c::t>::f");
}
TEST(DemangleRust, SurprisingMinusesInArraySize) {
EXPECT_DEMANGLING("_RNvYAljn0_NtC1c1t1f", "<[i32; -0x0] as c::t>::f");
EXPECT_DEMANGLING("_RNvYAljn42_NtC1c1t1f", "<[i32; -0x42] as c::t>::f");
}
TEST(DemangleRust, NumberAsGenericArg) {
EXPECT_DEMANGLING("_RINvC1c1fKl8_E",
"c::f::<>");
}
TEST(DemangleRust, NumberAsFirstOfTwoGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fKl8_mE",
"c::f::<>");
}
TEST(DemangleRust, NumberAsSecondOfTwoGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fmKl8_E",
"c::f::<>");
}
TEST(DemangleRust, NumberPlaceholder) {
EXPECT_DEMANGLING("_RNvINvC1c1fKpE1g",
"c::f::<>::g");
}
TEST(DemangleRust, InherentImplWithoutDisambiguator) {
EXPECT_DEMANGLING("_RNvMNtC8my_crate6my_modNtB2_9my_struct7my_func",
"<my_crate::my_mod::my_struct>::my_func");
}
TEST(DemangleRust, InherentImplWithDisambiguator) {
EXPECT_DEMANGLING("_RNvMs_NtC8my_crate6my_modNtB4_9my_struct7my_func",
"<my_crate::my_mod::my_struct>::my_func");
}
TEST(DemangleRust, TraitImplWithoutDisambiguator) {
EXPECT_DEMANGLING("_RNvXC8my_crateNtB2_9my_structNtB2_8my_trait7my_func",
"<my_crate::my_struct as my_crate::my_trait>::my_func");
}
TEST(DemangleRust, TraitImplWithDisambiguator) {
EXPECT_DEMANGLING("_RNvXs_C8my_crateNtB4_9my_structNtB4_8my_trait7my_func",
"<my_crate::my_struct as my_crate::my_trait>::my_func");
}
TEST(DemangleRust, TraitImplWithNonpathSelfType) {
EXPECT_DEMANGLING("_RNvXC8my_crateRlNtB2_8my_trait7my_func",
"<&i32 as my_crate::my_trait>::my_func");
}
TEST(DemangleRust, ThunkType) {
EXPECT_DEMANGLING("_RNvYFEuNtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, NontrivialFunctionReturnType) {
EXPECT_DEMANGLING(
"_RNvYFERTlmENtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, OneParameterType) {
EXPECT_DEMANGLING("_RNvYFlEuNtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, TwoParameterTypes) {
EXPECT_DEMANGLING("_RNvYFlmEuNtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, ExternC) {
EXPECT_DEMANGLING("_RNvYFKCEuNtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, ExternOther) {
EXPECT_DEMANGLING(
"_RNvYFK5not_CEuNtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, Unsafe) {
EXPECT_DEMANGLING("_RNvYFUEuNtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, Binder) {
EXPECT_DEMANGLING(
"_RNvYFG_RL0_lEB5_NtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, AllFnSigFeaturesInOrder) {
EXPECT_DEMANGLING(
"_RNvYFG_UKCRL0_lEB8_NtC1c1t1f",
"<fn... as c::t>::f");
}
TEST(DemangleRust, LifetimeInGenericArgs) {
EXPECT_DEMANGLING("_RINvC1c1fINtB2_1sL_EE",
"c::f::<>");
}
TEST(DemangleRust, EmptyDynTrait) {
EXPECT_DEMANGLING("_RNvYDEL_NtC1c1t1f",
"<dyn as c::t>::f");
}
TEST(DemangleRust, SimpleDynTrait) {
EXPECT_DEMANGLING("_RNvYDNtC1c1tEL_NtC1d1u1f",
"<dyn c::t as d::u>::f");
}
TEST(DemangleRust, DynTraitWithOneAssociatedType) {
EXPECT_DEMANGLING(
"_RNvYDNtC1c1tp1xlEL_NtC1d1u1f",
"<dyn c::t<> as d::u>::f");
}
TEST(DemangleRust, DynTraitWithTwoAssociatedTypes) {
EXPECT_DEMANGLING(
"_RNvYDNtC1c1tp1xlp1ymEL_NtC1d1u1f",
"<dyn c::t<> as d::u>::f");
}
TEST(DemangleRust, DynTraitPlusAutoTrait) {
EXPECT_DEMANGLING(
"_RNvYDNtC1c1tNtNtC3std6marker4SendEL_NtC1d1u1f",
"<dyn c::t + std::marker::Send as d::u>::f");
}
TEST(DemangleRust, DynTraitPlusTwoAutoTraits) {
EXPECT_DEMANGLING(
"_RNvYDNtC1c1tNtNtC3std6marker4CopyNtBc_4SyncEL_NtC1d1u1f",
"<dyn c::t + std::marker::Copy + std::marker::Sync as d::u>::f");
}
TEST(DemangleRust, HigherRankedDynTrait) {
EXPECT_DEMANGLING(
"_RNvYDG_INtC1c1tRL0_lEEL_NtC1d1u1f",
"<dyn c::t::<> as d::u>::f");
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/demangle_rust.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/demangle_rust_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
2b04e360-d1b6-4011-8aeb-4c51b14e12ca | cpp | tensorflow/tensorflow | segment | tensorflow/compiler/tf2tensorrt/segment/segment.cc | tensorflow/compiler/tf2tensorrt/segment/segment_test.cc | #include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include <algorithm>
#include <fstream>
#include <map>
#include <numeric>
#include <queue>
#include <tuple>
#include <unordered_map>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#include "tensorflow/core/util/env_var.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace {
using absl::StrAppend;
using absl::StrAppendFormat;
using absl::StrCat;
using absl::StrJoin;
class SimpleNode;
class SimpleGraph;
class SimpleEdge {
public:
SimpleEdge(int id, SimpleNode* src, int src_port, SimpleNode* dst,
int dst_port, bool is_control = false)
: id_(id),
src_(src),
src_port_(src_port),
dst_(dst),
dst_port_(dst_port),
control_(is_control) {}
~SimpleEdge() {}
SimpleNode* src() const { return src_; }
SimpleNode* dst() const { return dst_; }
int src_output() const { return src_port_; }
int dst_input() const { return dst_port_; }
int id() const { return id_; }
bool IsControlEdge() const { return control_; }
private:
int id_;
SimpleNode* src_;
int src_port_;
SimpleNode* dst_;
int dst_port_;
bool control_;
};
class SimpleNode {
public:
SimpleNode(const Node* node, const int id);
const std::vector<SimpleEdge*>& in_edges() const { return in_edges_; }
const std::vector<SimpleEdge*>& out_edges() const { return out_edges_; }
std::vector<SimpleNode*> in_nodes() const {
std::vector<SimpleNode*> res;
res.reserve(in_edges_.size());
for (const auto e : in_edges_) {
if (e) res.push_back(e->src());
}
return res;
}
std::vector<SimpleNode*> out_nodes() const {
std::vector<SimpleNode*> res;
res.reserve(out_edges_.size());
for (const auto e : out_edges_) {
if (e) res.push_back(e->dst());
}
return res;
}
const string& name() const { return node_->name(); }
const Node* tf_node() const { return node_; }
int id() const { return id_; }
private:
const Node* node_;
std::vector<SimpleEdge*> in_edges_;
std::vector<SimpleEdge*> out_edges_;
int id_;
friend class SimpleGraph;
};
class SimpleGraph {
public:
explicit SimpleGraph(const Graph* g);
~SimpleGraph();
void AddControlEdge(SimpleNode* src, SimpleNode* dst);
void AddEdge(SimpleNode* src, int out_port, SimpleNode* dst, int in_port);
void RemoveEdge(const SimpleEdge*);
SimpleNode* FindNodeId(int node_id) {
if (node_id < 0 || node_id > static_cast<int>(nodes_.size())) {
return nullptr;
}
return nodes_[node_id];
}
int num_node_ids() const { return nodes_.size(); }
const SimpleNode* source_node() const { return nodes_[Graph::kSourceId]; }
const SimpleNode* sink_node() const { return nodes_[Graph::kSinkId]; }
private:
const Graph* g_;
std::vector<SimpleNode*> nodes_;
std::vector<SimpleEdge*> edges_;
std::set<int> free_edge_ids_;
std::set<int> free_node_ids_;
};
SimpleNode::SimpleNode(const Node* node, const int id) : node_(node), id_(id) {
if (node_) {
in_edges_.reserve(node_->in_edges().size());
out_edges_.reserve(node_->out_edges().size());
}
}
SimpleGraph::SimpleGraph(const Graph* g) : g_(g) {
int n_nodes = g_->num_node_ids();
nodes_.resize(n_nodes, nullptr);
nodes_[g->kSourceId] = new SimpleNode(g->source_node(), g->kSourceId);
nodes_[g->kSinkId] = new SimpleNode(g->sink_node(), g->kSinkId);
int n_edges = g->num_edge_ids();
edges_.resize(n_edges, nullptr);
for (int i = 2; i < n_nodes; i++) {
const auto n = g->FindNodeId(i);
if (n) {
nodes_[i] = new SimpleNode(n, i);
} else {
free_node_ids_.insert(i);
}
}
for (int i = 0; i < n_edges; i++) {
const auto e = g->FindEdgeId(i);
if (e) {
const auto tfsrc = e->src();
const auto tfdst = e->dst();
bool is_control = e->IsControlEdge();
auto src = nodes_[tfsrc->id()];
auto dst = nodes_[tfdst->id()];
auto edge = new SimpleEdge(i, src, e->src_output(), dst, e->dst_input(),
is_control);
edges_[i] = edge;
src->out_edges_.push_back(edge);
dst->in_edges_.push_back(edge);
} else {
free_edge_ids_.insert(i);
}
}
}
void SimpleGraph::AddEdge(SimpleNode* src, int out_port, SimpleNode* dst,
int in_port) {
int i = edges_.size();
if (!free_edge_ids_.empty()) {
auto it = free_edge_ids_.begin();
i = *it;
free_edge_ids_.erase(it);
} else {
edges_.push_back(nullptr);
}
bool is_control = (out_port == Graph::kControlSlot);
is_control |= (in_port == Graph::kControlSlot);
auto edge = new SimpleEdge(i, src, out_port, dst, in_port, is_control);
edges_[i] = edge;
src->out_edges_.push_back(edge);
dst->in_edges_.push_back(edge);
}
void SimpleGraph::AddControlEdge(SimpleNode* src, SimpleNode* dst) {
AddEdge(src, Graph::kControlSlot, dst, Graph::kControlSlot);
}
void SimpleGraph::RemoveEdge(const SimpleEdge* edge) {
auto src = edge->src();
auto dst = edge->dst();
for (auto it = src->out_edges_.begin(); it != src->out_edges_.end(); ++it) {
if (*it == edge) {
src->out_edges_.erase(it);
break;
}
}
for (auto it = dst->in_edges_.begin(); it != dst->in_edges_.end(); ++it) {
if (*it == edge) {
dst->in_edges_.erase(it);
break;
}
}
}
SimpleGraph::~SimpleGraph() {
for (auto x : nodes_) delete x;
for (auto x : edges_) delete x;
}
struct SimpleEdgePtrCompare {
bool operator()(const SimpleEdge* lhs, const SimpleEdge* rhs) const {
return lhs->id() < rhs->id();
}
};
void StableDFS(const SimpleGraph& g, bool reverse,
const std::vector<const SimpleNode*>& start,
const std::function<bool(const SimpleNode*)>& enter,
const std::function<bool(const SimpleNode*)>& leave) {
struct Work {
const SimpleNode* node;
bool leave;
};
std::vector<Work> stack(start.size());
for (int i = 0; i < start.size(); ++i) {
stack[i] = Work{start[i], false};
}
auto get_nodes = [reverse](const SimpleNode* n) {
return reverse ? n->in_nodes() : n->out_nodes();
};
std::vector<bool> visited(g.num_node_ids(), false);
while (!stack.empty()) {
Work w = stack.back();
stack.pop_back();
auto n = w.node;
if (w.leave) {
if (leave && !leave(n)) return;
continue;
}
if (visited[n->id()]) continue;
visited[n->id()] = true;
if (enter && !enter(n)) return;
if (leave) stack.push_back(Work{n, true});
auto nodes = get_nodes(n);
std::vector<const SimpleNode*> nodes_sorted(nodes.begin(), nodes.end());
std::sort(nodes_sorted.begin(), nodes_sorted.end(),
[](const SimpleNode* lhs, const SimpleNode* rhs) {
return lhs->name() < rhs->name();
});
for (const SimpleNode* node : nodes_sorted) {
if (!visited[node->id()]) {
stack.push_back(Work{node, false});
}
}
}
}
bool CanContractEdge(const SimpleEdge* edge,
const std::unique_ptr<SimpleGraph>& graph) {
const auto src = edge->src();
const auto dst = edge->dst();
std::vector<const SimpleNode*> dfs_start_nodes;
for (const SimpleNode* node : dst->in_nodes()) {
if (node != src) {
dfs_start_nodes.push_back(node);
}
}
bool has_cycle = false;
StableDFS(*graph, true, dfs_start_nodes, nullptr,
[&has_cycle, src](const SimpleNode* n) {
if (n == src) {
has_cycle = true;
return false;
}
return true;
});
return !has_cycle;
}
string TensorPropertiesToString(const OpInfo::TensorProperties& prop) {
string s = StrCat(DataTypeString(prop.dtype()), ": ");
StrAppend(&s, "[");
if (prop.shape().unknown_rank()) {
StrAppend(&s, "?");
} else {
StrAppend(&s, StrJoin(prop.shape().dim(), ",",
[](string* out, const TensorShapeProto_Dim& d) {
StrAppendFormat(out, "%d", d.size());
}));
}
StrAppend(&s, "]");
return s;
}
string TensorPropertiesToString(
const std::vector<OpInfo::TensorProperties>& properties) {
return StrJoin(properties, "; ",
[](string* out, const OpInfo::TensorProperties& prop) {
StrAppend(out, TensorPropertiesToString(prop));
});
}
std::optional<const TensorShapeProto*> FindLeadingShape(
absl::Span<const OpInfo::TensorProperties> properties) {
DCHECK(!properties.empty());
const TensorShapeProto* result;
int max_batch_dim_value;
auto choose_shape_with_higher_rank = [&](const TensorShapeProto* s) {
result = s;
max_batch_dim_value = s->dim_size() < 1 ? 1 : s->dim(0).size();
};
DCHECK(!properties[0].shape().unknown_rank());
choose_shape_with_higher_rank(&properties[0].shape());
for (const OpInfo::TensorProperties& p : properties.subspan(1)) {
DCHECK(!p.shape().unknown_rank());
if (p.shape().dim_size() < result->dim_size()) continue;
if (p.shape().dim_size() > result->dim_size()) {
choose_shape_with_higher_rank(&p.shape());
continue;
}
if (result->dim_size() < 1) continue;
if (p.shape().dim(0).size() < 0 || result->dim(0).size() < 0) {
if (p.shape().dim(0).size() < 0 && result->dim(0).size() >= 0) {
result = &p.shape();
} else {
max_batch_dim_value =
std::max<int>(max_batch_dim_value, p.shape().dim(0).size());
}
continue;
}
if (p.shape().dim(0).size() > result->dim(0).size()) {
result = &p.shape();
max_batch_dim_value = result->dim(0).size();
}
}
if (result->dim_size() > 0 && result->dim(0).size() < 0) {
if (max_batch_dim_value <= 1) {
return result;
} else {
return std::nullopt;
}
}
return result;
}
absl::Span<const OpInfo::TensorProperties> GetInputsToDeterminateBatchSize(
const Node* node, const std::vector<OpInfo::TensorProperties>& all_inputs) {
static std::set<string> broadcast_supporting_ops = {
"Add",
"AddV2",
"Mul",
"Sub",
"Div",
"FloorDiv",
"RealDiv",
"Minimum",
"Maximum",
"Pow",
"BiasAdd",
"SquaredDifference",
"BatchMatMul",
"BatchMatMulV2",
};
const string& op = node->def().op();
if (op == "Conv2DBackpropInput" || op == "Conv3DBackpropInputV2") {
DCHECK_EQ(all_inputs.size(), 3);
return absl::MakeSpan(all_inputs).subspan(2, 1);
}
if (broadcast_supporting_ops.count(op)) {
return absl::MakeSpan(all_inputs);
}
return absl::MakeSpan(all_inputs).subspan(0, 1);
}
bool OperationCanBeTranslatedToImplicitBatch(
const grappler::GraphProperties* graph_properties, const Node* node) {
VLOG(3) << "process node " << node->name();
if (node->num_inputs() == 0) return true;
if (!graph_properties || !graph_properties->HasInputProperties(node->name()))
return false;
VLOG(3) << "input shapes "
<< TensorPropertiesToString(
graph_properties->GetInputProperties(node->name()));
const std::vector<OpInfo::TensorProperties>& all_input_properties =
graph_properties->GetInputProperties(node->name());
absl::Span<const OpInfo::TensorProperties> input_properties =
GetInputsToDeterminateBatchSize(node, all_input_properties);
if (absl::c_any_of(input_properties, [](const OpInfo::TensorProperties& p) {
return p.shape().unknown_rank();
})) {
return false;
}
std::optional<const TensorShapeProto*> leading_shape =
FindLeadingShape(input_properties);
return leading_shape.has_value() && leading_shape.value()->dim_size() >= 2;
}
bool HasDynamicNonBatchDimension(const OpInfo::TensorProperties& prop) {
const TensorShapeProto& shape = prop.shape();
if (shape.unknown_rank()) return true;
if (shape.dim_size() == 0) return false;
for (int i = 1; i < shape.dim_size(); ++i) {
if (shape.dim(i).size() <= -1) {
return true;
}
}
return false;
}
bool OperationHasDynamicNonBatchDimension(
const grappler::GraphProperties* graph_properties, const Node* node) {
VLOG(3) << "process node " << node->name();
if (node->num_inputs() == 0 || node->num_outputs() == 0) return false;
if (!graph_properties->HasOutputProperties(node->name())) return true;
VLOG(3) << "output shapes "
<< TensorPropertiesToString(
graph_properties->GetOutputProperties(node->name()));
return HasDynamicNonBatchDimension(
graph_properties->GetOutputProperties(node->name()).at(0));
}
void ContractEdge(SimpleEdge* edge, SimpleGraph* graph,
std::vector<const SimpleEdge*>* remove_edges) {
auto src = edge->src();
auto dst = edge->dst();
std::vector<const SimpleEdge*> in_edges(dst->in_edges().begin(),
dst->in_edges().end());
for (const SimpleEdge* in_edge : in_edges) {
if (in_edge->IsControlEdge()) {
if (in_edge->src() != src) {
SimpleEdge* e = const_cast<SimpleEdge*>(in_edge);
graph->AddControlEdge(e->src(), src);
}
} else {
if (in_edge->src() != src) {
SimpleEdge* e = const_cast<SimpleEdge*>(in_edge);
if (e->src() == graph->source_node()) {
graph->AddEdge(e->src(), e->src_output(), src, Graph::kControlSlot);
} else {
graph->AddEdge(e->src(), e->src_output(), src, 0 );
}
}
}
}
std::vector<const SimpleEdge*> out_edges(dst->out_edges().begin(),
dst->out_edges().end());
for (const SimpleEdge* out_edge : out_edges) {
if (out_edge->IsControlEdge()) {
SimpleEdge* e = const_cast<SimpleEdge*>(out_edge);
graph->AddControlEdge(src, e->dst());
} else {
SimpleEdge* e = const_cast<SimpleEdge*>(out_edge);
if (e->dst() == graph->sink_node()) {
VLOG(1) << " edge to sink node " << src->name() << " -> "
<< e->dst()->name();
graph->AddEdge(src, Graph::kControlSlot, e->dst(), e->dst_input());
} else {
graph->AddEdge(src, 0 , e->dst(), e->dst_input());
}
}
}
for (const auto& in_edge : dst->in_edges()) {
remove_edges->push_back(in_edge);
}
for (const auto& out_edge : dst->out_edges()) {
remove_edges->push_back(out_edge);
}
}
ClusterBatchSize GetClusterBatchSizeForNode(
const grappler::GraphProperties* graph_properties, const Node* node,
bool use_implicit_batch) {
ClusterBatchSize cluster_batch_size;
if (!use_implicit_batch || !node || node->num_inputs() == 0) {
return cluster_batch_size;
}
const NodeDef& node_def = node->def();
if (node_def.attr().count(kTftrtOpMaxBatchSizeAttr)) {
cluster_batch_size.SetMaxBatchSize(
node_def.attr().at(kTftrtOpMaxBatchSizeAttr).i());
}
if (!graph_properties ||
!graph_properties->HasInputProperties(node->name())) {
VLOG(3) << "doesn't have input property";
return cluster_batch_size;
}
const std::vector<OpInfo::TensorProperties>& input_properties =
graph_properties->GetInputProperties(node->name());
std::optional<const TensorShapeProto*> optional_leading_shape =
FindLeadingShape(GetInputsToDeterminateBatchSize(node, input_properties));
DCHECK(optional_leading_shape.has_value());
const TensorShapeProto* leading_shape = optional_leading_shape.value();
DCHECK(!leading_shape->unknown_rank() && leading_shape->dim_size() >= 2);
VLOG(3) << "set batch size as " << leading_shape->dim(0).size();
return cluster_batch_size.SetBatchSize(leading_shape->dim(0).size());
}
void AddSegmentForNode(const grappler::GraphProperties* graph_properties,
std::vector<UnionFind<SimpleNode*>>* segments,
SimpleNode* node,
const DeviceNameUtils::ParsedName& device_name,
bool use_implicit_batch) {
tensorflow::profiler::TraceMe activity(
"AddSegmentForNode", tensorflow::profiler::TraceMeLevel::kInfo);
ClusterProperty property(
GetClusterBatchSizeForNode(graph_properties,
node == nullptr ? nullptr : node->tf_node(),
use_implicit_batch),
device_name);
segments->emplace_back(node, std::move(property));
}
}
Status ExportNonConversionReportToCSV(
string filename,
std::map<string, std::map<string, int>>& nonconverted_ops_map,
string sep = "|") {
tensorflow::profiler::TraceMe activity(
"ExportNonConversionReportToCSV",
tensorflow::profiler::TraceMeLevel::kInfo);
std::unique_ptr<WritableFile> csv_file;
auto open_status = Env::Default()->NewWritableFile(filename, &csv_file);
if (!open_status.ok()) {
return errors::Internal("Failed to open output file: `", filename, "`");
}
LOG(WARNING) << "TF-TRT Non-Conversion Report saved at: `" << filename << "`";
std::ostringstream sstream;
sstream << "OP Name" << sep << "Reason" << sep << "Count" << std::endl;
for (auto& op_details : nonconverted_ops_map) {
auto op_name = op_details.first;
auto op_data = op_details.second;
for (auto& reject_data : op_data) {
auto reason = reject_data.first;
auto count = reject_data.second;
sstream << op_name << sep << reason << sep << count << std::endl;
}
}
auto append_status = csv_file->Append(sstream.str());
if (!append_status.ok()) {
return errors::Internal("Error writing to output file `", filename, "`.");
}
auto close_status = csv_file->Close();
if (!close_status.ok()) {
return errors::Internal("Error closing the file `", filename,
"`. The file might be corrupted.");
}
return OkStatus();
}
// Builds a human-readable report describing which graph nodes were NOT
// converted to TensorRT, and why.
//
// `nonconverted_ops_map` maps op type -> (rejection reason -> count).
//
// The environment variable TF_TRT_SHOW_DETAILED_REPORT controls verbosity:
//   - unset / empty: summary only (per-op totals);
//   - a numeric value >= 1: additionally list every rejection reason per op;
//   - any non-numeric string: treated as a file path — the detailed report is
//     also exported there in CSV format.
string GenerateNonConversionReport(
    std::map<string, std::map<string, int>>& nonconverted_ops_map) {
  tensorflow::profiler::TraceMe activity(
      "GenerateNonConversionReport", tensorflow::profiler::TraceMeLevel::kInfo);
  string detailed_report_var;
  TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_SHOW_DETAILED_REPORT",
                                   "", &detailed_report_var));
  bool show_detailed_conversion_report = false;
  if (!detailed_report_var.empty()) {
    if (detailed_report_var.find_first_not_of("-0123456789") != string::npos) {
      // Non-numeric value: interpret it as an output path for a CSV export.
      const Status status = ExportNonConversionReportToCSV(
          detailed_report_var, nonconverted_ops_map);
      if (!status.ok()) {
        LOG(ERROR) << "Problem encountered while generating the TF-TRT "
                   << "Non-Conversion Report in CSV Format:\n"
                   << status.message();
      }
      show_detailed_conversion_report = true;
    } else {
      // Purely digits and '-' characters: any value >= 1 enables the detailed
      // report. Parse by hand instead of std::stoi, which throws — and would
      // crash the process — on malformed values such as "-" or "--1", and on
      // values that overflow int (e.g. "99999999999999999999").
      const bool negative = detailed_report_var[0] == '-';
      const size_t digits_begin = negative ? 1 : 0;
      const bool well_formed =
          detailed_report_var.size() > digits_begin &&
          detailed_report_var.find('-', digits_begin) == string::npos;
      // A well-formed, non-negative value is >= 1 iff it contains a nonzero
      // digit; this also handles arbitrarily long values without overflow.
      show_detailed_conversion_report =
          well_formed && !negative &&
          detailed_report_var.find_first_not_of('0', digits_begin) !=
              string::npos;
    }
  }
  string unsupported_op_report =
      StrCat("\n\n", string(80, '#'), "\n",
             "TensorRT unsupported/non-converted OP Report:");
  int total_nonconverted_ops{0};
  // Per-op summary: (op name, total rejection count, per-reason counts
  // sorted by decreasing frequency).
  using ReasonCounterVector = std::vector<std::pair<string, int>>;
  using NotConvertedOPTuple = std::tuple<string, int, ReasonCounterVector>;
  std::vector<NotConvertedOPTuple> nonconverted_ops_vec;
  // Accumulate totals and order each op's rejection reasons by frequency.
  for (auto& nonconverted_op_data : nonconverted_ops_map) {
    int total_nonconverted_op{0};
    ReasonCounterVector reason_occurrences;
    auto op_name = nonconverted_op_data.first;
    auto op_data = nonconverted_op_data.second;
    for (auto& notconversion_reason_data : op_data) {
      auto reason_count = notconversion_reason_data.second;
      total_nonconverted_op += reason_count;
      reason_occurrences.push_back(notconversion_reason_data);
    }
    std::sort(reason_occurrences.begin(), reason_occurrences.end(),
              [](const std::pair<string, int>& a,
                 const std::pair<string, int>& b) -> bool {
                return a.second > b.second;
              });
    nonconverted_ops_vec.push_back(std::make_tuple(
        op_name, total_nonconverted_op, reason_occurrences));
  }
  // Order ops by decreasing total rejection count so the most impactful ops
  // appear first in the report.
  std::sort(nonconverted_ops_vec.begin(), nonconverted_ops_vec.end(),
            [](const NotConvertedOPTuple& a, const NotConvertedOPTuple& b) {
              return std::get<1>(a) > std::get<1>(b);
            });
  for (auto& notconverted_op_detail : nonconverted_ops_vec) {
    auto& op_name = std::get<0>(notconverted_op_detail);
    auto& op_total_nonconverted = std::get<1>(notconverted_op_detail);
    total_nonconverted_ops += op_total_nonconverted;
    unsupported_op_report = StrCat(unsupported_op_report, "\n\t- ", op_name,
                                   " -> ", op_total_nonconverted, "x");
    if (show_detailed_conversion_report) {
      auto& nonconverted_ops_details = std::get<2>(notconverted_op_detail);
      for (auto& nonconversion_details : nonconverted_ops_details) {
        auto& reason = nonconversion_details.first;
        auto& reason_count = nonconversion_details.second;
        if (reason_count == 0) {
          continue;
        }
        unsupported_op_report = StrCat(unsupported_op_report, "\n\t\t- ",
                                       "[Count: ", reason_count, "x] ", reason);
      }
      unsupported_op_report = StrCat(unsupported_op_report, "\n");
    }
  }
  unsupported_op_report =
      StrCat(unsupported_op_report, "\n", string(80, '-'),
             "\n\t- Total nonconverted OPs: ", total_nonconverted_ops,
             "\n\t- Total nonconverted OP Types: ", nonconverted_ops_map.size(),
             "\nFor more information see https://docs.nvidia.com/deeplearning"
             "/frameworks/tf-trt-user-guide/index.html#supported-ops.", "\n",
             string(80, '#'), "\n");
  return unsupported_op_report;
}
// Partitions `tf_graph` into clusters ("segments") of TensorRT-convertible
// nodes and returns them in `segments`.
//
// Algorithm outline:
//   1. Build a mutable copy of the graph (SimpleGraph) and create a
//      single-node UnionFind cluster for every node accepted by
//      `candidate_fn` and the mode-specific checks (device placement,
//      implicit-batch shape constraints, env-var denylist).
//   2. Repeatedly contract edges between neighboring clusters whose batch
//      sizes and device names are compatible and whose merge would not
//      introduce a cycle, until a fixed point is reached.
//   3. Shrink each cluster by iteratively removing nodes whose cross-cluster
//      input/output edges are rejected by `input_candidate_fn` /
//      `output_candidate_fn` (and the nodes that become dangling as a
//      result).
//   4. Drop clusters with fewer than `options.minimum_segment_size`
//      effective (non-trivial) nodes, and cap the number of returned
//      segments at TF_TRT_MAX_ALLOWED_ENGINES, keeping the largest ones.
Status SegmentGraph(const Graph* tf_graph,
                    const grappler::GraphProperties* graph_properties,
                    const std::function<Status(const Node*)>& candidate_fn,
                    const std::function<bool(const Edge*)>& input_candidate_fn,
                    const std::function<bool(const Edge*)>& output_candidate_fn,
                    const SegmentOptions& options, SegmentVector* segments) {
  tensorflow::profiler::TraceMe activity(
      "SegmentGraph", tensorflow::profiler::TraceMeLevel::kInfo);
  // Validate option combinations up front.
  if (!options.use_implicit_batch && !options.allow_dynamic_non_batch_dim) {
    return errors::Internal(
        "Explicit batch mode should allow dynamic non-batch dimensions");
  }
  if (options.use_implicit_batch && !options.maximum_batch_size.has_value()) {
    return errors::Internal("Implicit batch mode requires maximum_batch_size");
  }
  if (!options.allow_dynamic_non_batch_dim && !graph_properties) {
    return errors::Internal(
        "Need graph propertities to disallow dynamic non-batch dimensions");
  }
  // Mutable copy of the graph used for edge contraction.
  auto graph = std::unique_ptr<SimpleGraph>(new SimpleGraph(tf_graph));
  // Op types the user explicitly excluded via TF_TRT_OP_DENYLIST
  // (comma-separated list).
  const absl::flat_hash_set<string> tftrt_op_denylist = [] {
    string tftrt_op_denylist_str;
    TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_OP_DENYLIST", "",
                                     &tftrt_op_denylist_str));
    absl::flat_hash_set<string> tftrt_op_denylist{};
    for (const auto& x : str_util::Split(tftrt_op_denylist_str, ",")) {
      tftrt_op_denylist.insert(x);
    }
    tftrt_op_denylist.rehash(0);
    return tftrt_op_denylist;
  }();
  // op type -> (rejection reason -> count); feeds the non-conversion report.
  std::map<string, std::map<string, int>> nonconverted_ops_map = {};
  // Step 1: one UnionFind cluster per node. A null Value() marks a node that
  // is not a TRT candidate.
  std::vector<UnionFind<SimpleNode*>> node_segments;
  for (int i = 0; i < graph->num_node_ids(); ++i) {
    SimpleNode* node = graph->FindNodeId(i);
    if (!node) {
      VLOG(3) << "Node " << i << " doesn't exist in the graph";
      continue;
    }
    const string node_op_type{node->tf_node()->type_string()};
    // Records the rejection and clears `node` so AddSegmentForNode below
    // creates an empty (non-candidate) cluster slot.
    auto exclude_node = [&](absl::string_view reason) {
      VLOG(1) << "Not a TF-TRT candidate, " << "(Op type: " << node_op_type
              << "), " << "(Op name: " << node->name() << "), "
              << "(Reason: " << reason << ")";
      nonconverted_ops_map[node_op_type][string(reason)]++;
      node = nullptr;
    };
    std::optional<DeviceNameUtils::ParsedName> device_name =
        GetDeviceParsedName(node->tf_node());
    if (!device_name.has_value() ||
        (device_name->has_type && device_name->type != "GPU")) {
      exclude_node("node can't be placed on GPU");
    } else if (options.exclude_node_list.count(node->name()) != 0) {
      exclude_node(
          "excluded by segmenter option. Most likely an input or "
          "output node.");
    } else if (options.use_implicit_batch &&
               !OperationCanBeTranslatedToImplicitBatch(graph_properties,
                                                        node->tf_node())) {
      exclude_node(
          "implicit batch mode requires input shape with at least two "
          "dimensions");
    } else if (!options.allow_dynamic_non_batch_dim &&
               OperationHasDynamicNonBatchDimension(graph_properties,
                                                    node->tf_node())) {
      exclude_node("dynamic non-batch dimensions not allowed");
    } else {
      const Status status = candidate_fn(node->tf_node());
      if (!status.ok()) {
        exclude_node(status.message());
      } else if (tftrt_op_denylist.contains(node->tf_node()->type_string())) {
        // Denylisted ops are logged loudly since the user opted out manually.
        LOG_WARNING_WITH_PREFIX
            << "Denylisted as TF-TRT candidate, "
            << "(Op type: " << node->tf_node()->type_string() << "), "
            << "(Op name: " << node->name() << ")";
        exclude_node("Denylisted with the env var TF_TRT_OP_DENYLIST");
      } else {
        VLOG(2) << "Accepted as a TF-TRT candidate, "
                << "(Op type: " << node->tf_node()->type_string() << "), "
                << "(Op name: " << node->name();
      }
    }
    // NOTE(review): `*device_name` is dereferenced here even when
    // `device_name` has no value (in which case `node` was set to nullptr by
    // exclude_node). This relies on AddSegmentForNode not reading the device
    // name for a null node — verify against its implementation.
    AddSegmentForNode(graph_properties, &node_segments, node, *device_name,
                      options.use_implicit_batch);
  }
  LOG(WARNING) << GenerateNonConversionReport(nonconverted_ops_map);
  // Step 2: visit nodes in a deterministic (stable DFS) order and greedily
  // contract edges into larger clusters.
  std::vector<const SimpleNode*> order;
  order.reserve(graph->num_node_ids());
  StableDFS(*graph, false, {graph->source_node()},
            nullptr, [&order](const SimpleNode* n) {
              order.push_back(n);
              return true;
            });
  for (const SimpleNode* node : order) {
    VLOG(3) << "Trying node " << node->name() << " id=" << node->id();
    if (node_segments[node->id()].Value() == nullptr) {
      VLOG(3) << "... not a TRT candidate";
      continue;
    }
    ClusterBatchSize expected_batch_size =
        node_segments[node->id()].Property().BatchSize();
    DeviceNameUtils::ParsedName expected_device_name =
        node_segments[node->id()].Property().DeviceName();
    VLOG(3) << "batch size " << expected_batch_size;
    // Keep contracting this node's outgoing edges until no more can be
    // merged (fixed point for this node).
    while (true) {
      std::set<const SimpleEdge*, SimpleEdgePtrCompare> contract_edges;
      for (const SimpleEdge* out_edge : node->out_edges()) {
        VLOG(3) << "... out node " << out_edge->dst()->name() << " ( "
                << out_edge->dst()->id() << " <- " << node->id() << " )";
        if (out_edge->IsControlEdge()) {
          VLOG(3) << "... ... Control Edge, Skipping";
          continue;
        }
        UnionFind<SimpleNode*>* out_cluster =
            &node_segments[out_edge->dst()->id()];
        if (out_cluster->Value() == nullptr) {
          VLOG(3) << "... ... not a TRT candidate";
          continue;
        }
        // Merging clusters requires compatible batch sizes...
        ClusterBatchSize out_batch_size = out_cluster->Property().BatchSize();
        ClusterBatchSize merged_batch_size = expected_batch_size;
        if (!merged_batch_size.MergeIfCompatible(out_batch_size)) {
          VLOG(3) << "... ... incompatible batch sizes "
                  << expected_batch_size.ToString() << " "
                  << out_batch_size.ToString();
          continue;
        }
        // ...and compatible device assignments.
        const DeviceNameUtils::ParsedName& out_device_name =
            out_cluster->Property().DeviceName();
        std::optional<DeviceNameUtils::ParsedName> merged_device_name =
            MergeIfCompatible(expected_device_name, out_device_name);
        if (!merged_device_name.has_value()) {
          VLOG(3) << "... ... incompatible device names "
                  << expected_device_name << " " << out_device_name;
          continue;
        }
        // Finally, the contraction must not introduce a cycle in the graph.
        if (CanContractEdge(out_edge, graph)) {
          VLOG(3) << "... ... can contract. new batch size "
                  << merged_batch_size.ToString();
          contract_edges.insert(out_edge);
          expected_batch_size = merged_batch_size;
          expected_device_name = *merged_device_name;
        } else {
          VLOG(3) << "... ... cannot contract, would form cycle";
        }
      }
      if (contract_edges.empty()) {
        break;
      }
      // Apply the contractions; ContractEdge may make other queued edges
      // redundant, which are then erased from `contract_edges`.
      while (!contract_edges.empty()) {
        const SimpleEdge* contract_edge = *contract_edges.begin();
        const SimpleNode* src = contract_edge->src();
        const SimpleNode* dst = contract_edge->dst();
        VLOG(3) << "Merge " << src->name() << " <- " << dst->name() << " ("
                << src->id() << " <- " << dst->id();
        TF_RETURN_IF_ERROR(
            node_segments[src->id()].Merge(&node_segments[dst->id()]));
        SimpleEdge* e = const_cast<SimpleEdge*>(contract_edge);
        std::vector<const SimpleEdge*> remove_edges;
        ContractEdge(e, graph.get(), &remove_edges);
        for (const SimpleEdge* r : remove_edges) {
          contract_edges.erase(r);
          graph->RemoveEdge(r);
        }
      }
      // Sanity checks: the locally-tracked merged properties must agree with
      // what the union-find structure recorded.
      if (expected_batch_size !=
          node_segments[node->id()].Property().BatchSize()) {
        return errors::Internal(
            "expected batch size is not the same as the actual batch size");
      }
      if (expected_device_name !=
          node_segments[node->id()].Property().DeviceName()) {
        return errors::Internal(
            "expected device name is not the same as the actual device name");
      }
    }
  }
  // Collect the resulting clusters, keyed by the name of each cluster's
  // union-find root node.
  std::map<string, Segment> sg_map;
  for (auto& u : node_segments) {
    if ((u.Value() != nullptr) && (u.ParentValue() != nullptr)) {
      sg_map[u.ParentValue()->name()].nodes.insert(u.Value()->tf_node());
    }
    if ((u.Value() != nullptr) && (u.ParentValue() == u.Value())) {
      sg_map[u.Value()->name()].property = u.Property();
    }
  }
  // Step 3: shrink each segment by removing nodes whose boundary edges are
  // rejected by the input/output candidate functions; removal may cascade.
  for (auto& itr : sg_map) {
    std::set<const Node*, NodePtrCompare>& segment_nodes = itr.second.nodes;
    VLOG(1) << "Segment original size: " << segment_nodes.size();
    while (true) {
      std::deque<const Node*> in_nodes_que, out_nodes_que;
      for (auto node : segment_nodes) {
        bool added = false;
        for (const Edge* edge : node->in_edges()) {
          if (!edge->IsControlEdge() && !edge->src()->IsSource() &&
              !segment_nodes.count(edge->src())) {
            // Boundary input edge rejected -> node must leave the segment.
            if (!input_candidate_fn(edge)) {
              in_nodes_que.push_back(node);
              added = true;
              break;
            }
          }
        }
        if (added) continue;
        for (const Edge* edge : node->out_edges()) {
          if (!edge->dst()->IsSink() && !edge->IsControlEdge() &&
              !segment_nodes.count(edge->dst())) {
            // Boundary output edge rejected -> node must leave the segment.
            if (!output_candidate_fn(edge)) {
              out_nodes_que.push_back(node);
              break;
            }
          }
        }
      }
      if (in_nodes_que.empty() && out_nodes_que.empty()) {
        break;
      }
      // BFS removal: dropping a node also drops its in-segment neighbors in
      // the relevant direction (upstream for input removals, downstream for
      // output removals; Const nodes always follow their inputs).
      auto remove_nodes = [&segment_nodes](bool is_input_nodes,
                                           std::deque<const Node*>* que) {
        std::set<const Node*, NodePtrCompare> visited;
        std::set<const Node*, NodePtrCompare> logged(que->begin(), que->end());
        while (!que->empty()) {
          auto node = que->front();
          que->pop_front();
          if (!visited.insert(node).second) continue;
          segment_nodes.erase(node);
          for (auto in : (is_input_nodes || node->type_string() == "Const")
                             ? node->in_nodes()
                             : node->out_nodes()) {
            if (segment_nodes.count(in)) {
              que->push_back(in);
              if (VLOG_IS_ON(2)) {
                if (!logged.count(in)) {
                  VLOG(2) << "----> Need to remove node " << in->name()
                          << " because one of its "
                          << (is_input_nodes ? "output" : "input")
                          << " nodes in the graph was removed: "
                          << node->name();
                  logged.insert(in);
                }
              }
            }
          }
        }
      };
      remove_nodes(true, &in_nodes_que);
      remove_nodes(false, &out_nodes_que);
    }
    VLOG(1) << "Segment new size: " << segment_nodes.size();
  }
  // Step 4: drop segments that are too small. "Effective" nodes exclude
  // trivial pass-through ops that add no TRT value on their own.
  std::vector<int> effective_nodes_counts;
  for (const auto& itr : sg_map) {
    const string& segment_root = itr.first;
    std::set<const Node*, NodePtrCompare> segment_nodes(
        itr.second.nodes.begin(), itr.second.nodes.end());
    if (VLOG_IS_ON(1) && !segment_nodes.empty()) {
      string s;
      for (auto node : segment_nodes) {
        StrAppend(&s, "\n[Op type: ", node->type_string(), "] ", node->name());
      }
      VLOG(1) << "Nodes in segment " << segments->size()
              << " with parent=" << segment_root << ":" << s;
    }
    const int num_effective_nodes = std::count_if(
        segment_nodes.begin(), segment_nodes.end(), [](const Node* node) {
          static auto noops =
              new std::set<string>{"Identity", "Snapshot", "StopGradient"};
          return noops->count(node->type_string()) == 0;
        });
    if (num_effective_nodes == 0 ||
        num_effective_nodes < options.minimum_segment_size) {
      VLOG(1) << "Segment " << segments->size() << " has only "
              << num_effective_nodes << " effective nodes, dropping";
      continue;
    }
    segments->emplace_back(itr.second.property, segment_nodes);
    effective_nodes_counts.push_back(num_effective_nodes);
  }
  // Cap the number of engines at TF_TRT_MAX_ALLOWED_ENGINES (default 20),
  // keeping the segments with the most effective nodes.
  int64_t max_trt_engine_ops;
  TF_CHECK_OK(ReadInt64FromEnvVar("TF_TRT_MAX_ALLOWED_ENGINES",
                                  20, &max_trt_engine_ops));
  if (max_trt_engine_ops <= 0) {
    LOG(WARNING) << "The environment variable TF_TRT_MAX_ALLOWED_ENGINES is "
                 << "<= 0. TF-TRT did not limit the number of TensorRT engines "
                 << "created.";
  } else {
    if (segments->size() > max_trt_engine_ops) {
      LOG(WARNING) << "A total of " << segments->size() << " segments with at "
                   << "least minimum_segment_size="
                   << options.minimum_segment_size << " nodes have been found. "
                   << "TF-TRT will only convert the " << max_trt_engine_ops
                   << " largest segments. You can change this behavior by "
                   << "modifying the environment variable "
                   << "TF_TRT_MAX_ALLOWED_ENGINES=" << max_trt_engine_ops;
      // Rank segments by effective node count (stable to keep a
      // deterministic order among ties).
      std::vector<int> indices(segments->size());
      std::iota(indices.begin(), indices.end(), 0);
      std::stable_sort(indices.begin(), indices.end(),
                       [&effective_nodes_counts](int i1, int i2) {
                         return effective_nodes_counts[i1] >
                                effective_nodes_counts[i2];
                       });
      std::vector<bool> mask = std::vector<bool>(segments->size(), false);
      for (int i = 0; i < max_trt_engine_ops; i++) {
        mask[indices[i]] = true;
      }
      // Compact the accepted segments to the front (preserving their
      // original relative order), then truncate.
      int j = 0;
      VLOG(1) << "The following segments have been accepted by TF-TRT:";
      for (int i = 0; i < segments->size(); i++) {
        if (mask[i]) {
          VLOG(1) << "[*] Segment " << i
                  << " [node count: " << effective_nodes_counts[i]
                  << "] accepted. Re-assigned " << "segment id=" << j;
          segments->at(j) = segments->at(i);
          j++;
        }
      }
      VLOG(1) << "The following segments have been rejected by TF-TRT:";
      for (int i = 0; i < segments->size(); i++) {
        if (!mask[i]) {
          VLOG(1) << "[*] Segment " << i
                  << " [node count: " << effective_nodes_counts[i]
                  << "] rejected.";
        }
      }
      segments->resize(max_trt_engine_ops);
    } else {
      LOG(WARNING) << "The environment variable TF_TRT_MAX_ALLOWED_ENGINES="
                   << max_trt_engine_ops << " has no effect since there are "
                   << "only " << segments->size() << " TRT Engines with at "
                   << "least minimum_segment_size="
                   << options.minimum_segment_size << " nodes.";
    }
  }
  return OkStatus();
}
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/segment/segment.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace segment {
namespace test {
class SegmentTest : public ::testing::Test {
protected:
std::function<Status(const Node*)> MakeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Node* node) -> Status {
if (node_names.find(node->name()) != node_names.end()) {
return OkStatus();
}
return errors::NotFound("Not a user specified candidate");
};
}
std::function<bool(const Edge*)> MakeInputEdgeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Edge* in_edge) -> bool {
return node_names.find(in_edge->dst()->name()) != node_names.end();
};
}
std::function<bool(const Edge*)> MakeOutputEdgeCandidateFn(
const std::set<string>& node_names) {
return [node_names](const Edge* out_edge) -> bool {
return node_names.find(out_edge->src()->name()) != node_names.end();
};
}
void RunTest(const Graph* graph,
const grappler::GraphProperties* graph_properties,
const std::set<string>& candidates,
const std::set<string>& input_candidates,
const std::set<string>& output_candidates,
const std::vector<std::set<string>>& expected_segments) {
SegmentVector segments;
TF_EXPECT_OK(SegmentGraph(graph, graph_properties,
MakeCandidateFn(candidates),
MakeInputEdgeCandidateFn(input_candidates),
MakeOutputEdgeCandidateFn(output_candidates),
segment_options_, &segments));
ValidateSegment(segments, expected_segments);
}
void RunTest(const Graph* graph, const std::set<string>& candidates,
const std::set<string>& input_candidates,
const std::set<string>& output_candidates,
const std::vector<std::set<string>>& expected_segments) {
RunTest(graph, nullptr, candidates, input_candidates, output_candidates,
expected_segments);
}
void ValidateSegment(const SegmentVector& segments,
const std::vector<std::set<string>>& expected_segments) {
EXPECT_EQ(expected_segments.size(), segments.size());
for (int i = 0; i < segments.size(); ++i) {
std::set<string> segment_node_names;
for (const Node* node : segments[i].nodes) {
segment_node_names.insert(node->name());
}
const auto& expected = expected_segments[i];
for (const auto& name : expected) {
EXPECT_TRUE(segment_node_names.count(name))
<< "Segment " << i << " is missing expected node: " << name;
}
if (segment_node_names.size() == expected.size()) continue;
for (const auto& name : segment_node_names) {
EXPECT_TRUE(expected.count(name))
<< "Unexpected node found in segment " << i << ": " << name;
}
}
}
void DisableImplicitBatchMode() {
segment_options_.use_implicit_batch = false;
segment_options_.allow_dynamic_non_batch_dim = true;
}
void EnableImplicitBatchModeForStaticEngine(int maximum_batch_size = 1000) {
segment_options_.use_implicit_batch = true;
segment_options_.maximum_batch_size = maximum_batch_size;
segment_options_.allow_dynamic_non_batch_dim = false;
}
SegmentOptions segment_options_;
};
// Returns a copy of `lhs` with `rhs` removed. CHECK-fails if `rhs` was not
// present, which catches typos in test expectations.
std::set<string> operator-(const std::set<string>& lhs, const string& rhs) {
  std::set<string> remaining(lhs);
  const size_t erased = remaining.erase(rhs);
  CHECK(erased);
  return remaining;
}
// An empty graph must yield no segments.
TEST_F(SegmentTest, Empty) {
  Scope s = Scope::NewRootScope();
  Graph g(OpRegistry::Global());
  TF_EXPECT_OK(s.ToGraph(&g));
  DisableImplicitBatchMode();
  // Expect no segments.
  RunTest(&g, {}, {}, {}, {});
}
// A small diamond of Add ops: all candidates form one segment; removing
// candidates or restricting input/output edges shrinks or splits it.
// Graph: feed -> {add0, add1}; add2(add0, add1); add3(add0, add2);
//        add4(add2, add2).
TEST_F(SegmentTest, Simple) {
  Scope s = Scope::NewRootScope();
  auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
  auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
  auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
  auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
  auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
  auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
  Graph g(OpRegistry::Global());
  TF_EXPECT_OK(s.ToGraph(&g));
  const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
  DisableImplicitBatchMode();
  // All Adds are candidates -> one segment containing all of them.
  RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
  // Dropping one candidate shrinks the segment accordingly.
  auto without_add1 = all_adds - "add1";
  RunTest(&g, without_add1, without_add1, without_add1, {without_add1});
  // Restricting input/output edge candidates trims boundary nodes.
  auto without_add2 = all_adds - "add2";
  RunTest(&g, without_add1, without_add2, without_add1, {{"add3", "add4"}});
  RunTest(&g, all_adds, without_add2, all_adds, {all_adds});
  RunTest(&g, all_adds, without_add1, all_adds, {without_add1});
  auto without_add3 = all_adds - "add3";
  RunTest(&g, all_adds, all_adds, without_add3, {all_adds});
}
// Device assignments constrain clustering: CPU-assigned nodes are excluded,
// nodes on different GPUs cannot merge, and a wildcard GPU device is
// compatible with a specific one.
TEST_F(SegmentTest, WithDeviceAssignments) {
  Scope s = Scope::NewRootScope();
  auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
  auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
  auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
  auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
  auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
  auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
  const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4"};
  DisableImplicitBatchMode();
  {
    // No device assignments -> everything merges into one segment.
    Graph g(OpRegistry::Global());
    TF_EXPECT_OK(s.ToGraph(&g));
    RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
  }
  {
    // A CPU-assigned node is excluded from the segment.
    add1.node()->set_assigned_device_name("/device:CPU:0");
    Graph g(OpRegistry::Global());
    TF_EXPECT_OK(s.ToGraph(&g));
    RunTest(&g, all_adds, all_adds, all_adds, {all_adds - "add1"});
    add1.node()->set_assigned_device_name("");
  }
  {
    // Nodes pinned to different GPUs cannot be merged into one segment.
    constexpr char kGpu0[] = "/device:GPU:0";
    add0.node()->set_assigned_device_name(kGpu0);
    add1.node()->set_assigned_device_name(kGpu0);
    add2.node()->set_assigned_device_name(kGpu0);
    constexpr char kGpu1[] = "/device:GPU:1";
    add3.node()->set_assigned_device_name(kGpu1);
    add4.node()->set_assigned_device_name(kGpu1);
    Graph g(OpRegistry::Global());
    TF_EXPECT_OK(s.ToGraph(&g));
    RunTest(&g, all_adds, all_adds, all_adds, {{"add0", "add1", "add2"}});
  }
  {
    // A wildcard GPU id is compatible with a specific GPU id.
    constexpr char kGpuAny[] = "/device:GPU:*";
    add3.node()->set_assigned_device_name(kGpuAny);
    add4.node()->set_assigned_device_name(kGpuAny);
    Graph g(OpRegistry::Global());
    TF_EXPECT_OK(s.ToGraph(&g));
    RunTest(&g, all_adds, all_adds, all_adds, {all_adds});
  }
}
// Excluding add2 means merging add0 with add3 would create a cycle through
// the non-candidate add2 (add0 -> add2 -> add3 and add0 -> add3). The
// remaining mergeable clusters are too small, so no segments are produced.
TEST_F(SegmentTest, AvoidCycle) {
  Scope s = Scope::NewRootScope();
  auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
  auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
  auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
  auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
  auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
  auto add4 = ops::Add(s.WithOpName("add4"), add2, add2);
  Graph g(OpRegistry::Global());
  TF_EXPECT_OK(s.ToGraph(&g));
  const std::set<string> without_add2 = {"add0", "add1", "add3", "add4"};
  DisableImplicitBatchMode();
  RunTest(&g, without_add2, without_add2, without_add2, {});
}
// A larger DAG where removing different candidates splits the graph into
// multiple disjoint segments.
TEST_F(SegmentTest, Multiple) {
  Scope s = Scope::NewRootScope();
  auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
  auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
  auto add1 = ops::Add(s.WithOpName("add1"), feed, feed);
  auto add7 = ops::Add(s.WithOpName("add7"), feed, feed);
  auto add2 = ops::Add(s.WithOpName("add2"), add0, add1);
  auto add5 = ops::Add(s.WithOpName("add5"), add2, add7);
  auto add8 = ops::Add(s.WithOpName("add8"), add7, add7);
  auto add3 = ops::Add(s.WithOpName("add3"), add0, add2);
  auto add4 = ops::Add(s.WithOpName("add4"), add2, add5);
  auto add6 = ops::Add(s.WithOpName("add6"), add5, add8);
  Graph g(OpRegistry::Global());
  TF_EXPECT_OK(s.ToGraph(&g));
  const std::set<string> all_adds = {"add0", "add1", "add2", "add3", "add4",
                                     "add5", "add6", "add7", "add8"};
  // Excluding the central add5 splits the candidates into two segments.
  auto without_add5 = all_adds - "add5";
  DisableImplicitBatchMode();
  RunTest(&g, without_add5, without_add5, without_add5,
          {{"add0", "add1", "add2", "add3"}, {"add6", "add8"}});
  // Edge-candidate restrictions further trim the surviving segment(s).
  auto without_add8 = all_adds - "add8";
  auto without_add6 = all_adds - "add6";
  RunTest(&g, without_add8, without_add6, all_adds, {{"add3", "add4"}});
  auto without_add3 = all_adds - "add3";
  auto without_add0 = all_adds - "add0";
  RunTest(&g, without_add3, all_adds, without_add0, {{"add1", "add7", "add8"}});
}
// Two parallel chains joined at add7; excluding add2 cuts one chain in half,
// producing two separate segments.
TEST_F(SegmentTest, BigIfElse) {
  Scope s = Scope::NewRootScope();
  auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
  auto add0 = ops::Add(s.WithOpName("add0"), feed, feed);
  auto add1 = ops::Add(s.WithOpName("add1"), add0, add0);
  auto add2 = ops::Add(s.WithOpName("add2"), add1, add1);
  auto add3 = ops::Add(s.WithOpName("add3"), add2, add2);
  auto add4 = ops::Add(s.WithOpName("add4"), add0, add0);
  auto add5 = ops::Add(s.WithOpName("add5"), add4, add4);
  auto add6 = ops::Add(s.WithOpName("add6"), add5, add5);
  auto add7 = ops::Add(s.WithOpName("add7"), add3, add6);
  Graph g(OpRegistry::Global());
  TF_EXPECT_OK(s.ToGraph(&g));
  const std::set<string> all_adds = {"add0", "add1", "add2", "add3",
                                     "add4", "add5", "add6", "add7"};
  DisableImplicitBatchMode();
  RunTest(&g, all_adds - "add2", all_adds, all_adds,
          {{"add0", "add1"}, {"add3", "add4", "add5", "add6", "add7"}});
}
// Identity ops are not "effective" nodes, so a chain of only Identities
// produces no segments even though every node is a candidate.
TEST_F(SegmentTest, IdentityOps) {
  Scope s = Scope::NewRootScope();
  auto feed = ops::Placeholder(s.WithOpName("feed"), DT_FLOAT);
  auto identity0 = ops::Identity(s.WithOpName("identity0"), feed);
  auto identity1 = ops::Identity(s.WithOpName("identity1"), identity0);
  auto identity2 = ops::Identity(s.WithOpName("identity2"), identity1);
  auto identity3 = ops::Identity(s.WithOpName("identity3"), identity2);
  Graph g(OpRegistry::Global());
  TF_EXPECT_OK(s.ToGraph(&g));
  const std::set<string> all_identities = {"identity0", "identity1",
                                           "identity2", "identity3"};
  DisableImplicitBatchMode();
  RunTest(&g, all_identities, all_identities, all_identities, {});
}
// In implicit batch mode, an op whose non-batch dimension is dynamic (feed-2
// has shape [-1, -1, 3]) must be excluded; ops with only a dynamic batch
// dimension remain eligible.
TEST_F(SegmentTest, ExcludeAddWithDynamicNonBatchDimension) {
  Scope s = Scope::NewRootScope();
  auto feed_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2, 3}));
  auto feed_1_shape = ops::Placeholder::Shape(PartialTensorShape({-1, -1, 3}));
  auto const_val = ops::Const<float>(s, {1.0}, {});
  auto feed_0 =
      ops::Placeholder(s.WithOpName("feed-1"), DT_FLOAT, feed_0_shape);
  auto feed_1 =
      ops::Placeholder(s.WithOpName("feed-2"), DT_FLOAT, feed_1_shape);
  auto add_0 = ops::Add(s.WithOpName("add-0"), feed_0, const_val);
  auto add_1 = ops::Add(s.WithOpName("add-1"), add_0, feed_0);
  auto add_2 = ops::Add(s.WithOpName("add-2"), const_val, feed_1);
  grappler::GrapplerItem item;
  item.fetch.push_back("add-2");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"add-0", "add-1", "add-2"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
          {all_nodes - "add-2"});
}
// A Reshape whose OUTPUT has a dynamic non-batch dimension poisons the
// cluster in implicit batch mode, leaving no viable segment.
TEST_F(SegmentTest, ExcludeReshapeWithDynamicNonBatchDimensionInOutput) {
  Scope s = Scope::NewRootScope();
  auto feed_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2, 3}));
  auto const_val = ops::Const<float>(s, {1.0}, {});
  auto feed_0 =
      ops::Placeholder(s.WithOpName("feed-1"), DT_FLOAT, feed_0_shape);
  auto add_0 = ops::Add(s.WithOpName("add-0"), feed_0, const_val);
  auto reshape = ops::Reshape(s.WithOpName("reshape"), add_0, Input({6, -1}));
  auto add_1 = ops::Add(s.WithOpName("add-1"), reshape, const_val);
  grappler::GrapplerItem item;
  item.fetch.push_back("add-1");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"add-0", "reshape", "add-1"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
// Implicit batch mode requires inputs of rank >= 2; rank-1 tensors make all
// nodes ineligible, so no segments are produced.
TEST_F(SegmentTest, RankOneCannotUseImplicitBatch) {
  Scope s = Scope::NewRootScope();
  auto input_0_shape = ops::Placeholder::Shape(TensorShape({3}));
  auto input_1_shape = ops::Placeholder::Shape(TensorShape({3}));
  auto input_0 =
      ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
  auto input_1 =
      ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
  auto const_val = ops::Const(s.WithOpName("const-scalar"), 1.0f, {});
  auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, const_val);
  auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, const_val);
  grappler::GrapplerItem item;
  item.fetch.push_back("output-0");
  item.fetch.push_back("output-1");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"const-scalar", "output-0", "output-1"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
// Two chains with different static batch sizes (2 vs 5) cannot share a
// segment in implicit batch mode; only one chain is segmented.
TEST_F(SegmentTest, TwoChainsDiffBatchSizes) {
  Scope s = Scope::NewRootScope();
  auto input_0_shape = ops::Placeholder::Shape(TensorShape({2, 3}));
  auto input_1_shape = ops::Placeholder::Shape(TensorShape({5, 3}));
  auto input_0 =
      ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
  auto input_1 =
      ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
  auto const_val = ops::Const(s.WithOpName("const-scalar"), 1.0f, {});
  auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, const_val);
  auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, const_val);
  grappler::GrapplerItem item;
  item.fetch.push_back("output-0");
  item.fetch.push_back("output-1");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"const-scalar", "output-0", "output-1"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
          {{"output-0", "const-scalar"}});
  // The same result holds with a maximum batch size of 1.
  EnableImplicitBatchModeForStaticEngine(1);
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
          {{"output-0", "const-scalar"}});
}
// Same-rank inputs with broadcast-compatible static batch dimensions (2, 1,
// 2) are all compatible, so everything merges into a single segment.
TEST_F(SegmentTest, SameRankImplicitBroadcastingStaticBatchSize) {
  Scope s = Scope::NewRootScope();
  auto input_0_shape = ops::Placeholder::Shape(TensorShape({2, 3, 1}));
  auto input_1_shape = ops::Placeholder::Shape(TensorShape({1, 3, 4}));
  auto input_2_shape = ops::Placeholder::Shape(TensorShape({2, 3, 4}));
  auto input_0 =
      ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
  auto input_1 =
      ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
  auto input_2 =
      ops::Placeholder(s.WithOpName("input-2"), DT_FLOAT, input_2_shape);
  auto multiple = ops::Mul(s.WithOpName("multiple"), input_2, input_2);
  auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, multiple);
  auto output_1 = ops::Add(s.WithOpName("output-1"), input_1, multiple);
  grappler::GrapplerItem item;
  item.fetch.push_back("output-0");
  item.fetch.push_back("output-1");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"multiple", "output-0", "output-1"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
          {all_nodes});
}
// A dynamic batch dimension (-1) is compatible with a broadcastable constant
// batch of 1, so the whole chain forms one segment.
TEST_F(SegmentTest, SameRankImplicitBroadcastingDynamicBatchSize) {
  Scope s = Scope::NewRootScope();
  auto input_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2}));
  auto input_1_shape = ops::Placeholder::Shape(TensorShape({1, 2}));
  auto input_0 =
      ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
  auto input_1 =
      ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
  auto const_val = ops::Const(s.WithOpName("const-val"), 1.0f, {1, 1});
  auto add_0 = ops::Add(s.WithOpName("add-0"), input_0, const_val);
  auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, add_0);
  grappler::GrapplerItem item;
  item.fetch.push_back("output-0");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"const-val", "add-0", "output-0"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes,
          {{"const-val", "add-0", "output-0"}});
}
// A dynamic batch dimension (-1) mixed with a non-broadcastable static batch
// of 2 is incompatible in implicit batch mode; no segments are produced.
TEST_F(SegmentTest, IncompatibleBatchSizes) {
  Scope s = Scope::NewRootScope();
  auto input_0_shape = ops::Placeholder::Shape(PartialTensorShape({-1, 2}));
  auto input_1_shape = ops::Placeholder::Shape(TensorShape({2, 2}));
  auto input_0 =
      ops::Placeholder(s.WithOpName("input-0"), DT_FLOAT, input_0_shape);
  auto input_1 =
      ops::Placeholder(s.WithOpName("input-1"), DT_FLOAT, input_1_shape);
  auto const_val = ops::Const(s.WithOpName("const-val"), 1.0f, {2, 2});
  auto add_0 = ops::Add(s.WithOpName("add-0"), input_0, const_val);
  auto output_0 = ops::Add(s.WithOpName("output-0"), input_0, add_0);
  grappler::GrapplerItem item;
  item.fetch.push_back("output-0");
  TF_EXPECT_OK(s.ToGraphDef(&item.graph));
  grappler::GraphProperties static_graph_properties(item);
  TF_EXPECT_OK(static_graph_properties.InferStatically(true));
  Graph g(OpRegistry::Global());
  TF_CHECK_OK(
      ConvertGraphDefToGraph(GraphConstructorOptions(), item.graph, &g));
  const std::set<string> all_nodes = {"const-val", "add-0", "output-0"};
  EnableImplicitBatchModeForStaticEngine();
  RunTest(&g, &static_graph_properties, all_nodes, all_nodes, all_nodes, {});
}
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/segment/segment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/segment/segment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5596e95d-05d0-4549-a9d7-d039d691e49b | cpp | google/cel-cpp | ast_converters | extensions/protobuf/ast_converters.cc | extensions/protobuf/ast_converters_test.cc | #include "extensions/protobuf/ast_converters.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/functional/overload.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "absl/types/variant.h"
#include "base/ast.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "common/constant.h"
#include "extensions/protobuf/internal/ast.h"
#include "internal/proto_time_encoding.h"
#include "internal/status_macros.h"
namespace cel::extensions {
namespace internal {
namespace {
using ::cel::ast_internal::AbstractType;
using ::cel::ast_internal::Bytes;
using ::cel::ast_internal::Call;
using ::cel::ast_internal::Comprehension;
using ::cel::ast_internal::Constant;
using ::cel::ast_internal::CreateList;
using ::cel::ast_internal::CreateStruct;
using ::cel::ast_internal::DynamicType;
using ::cel::ast_internal::ErrorType;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::Extension;
using ::cel::ast_internal::FunctionType;
using ::cel::ast_internal::Ident;
using ::cel::ast_internal::ListType;
using ::cel::ast_internal::MapType;
using ::cel::ast_internal::MessageType;
using ::cel::ast_internal::NullValue;
using ::cel::ast_internal::ParamType;
using ::cel::ast_internal::PrimitiveType;
using ::cel::ast_internal::PrimitiveTypeWrapper;
using ::cel::ast_internal::Reference;
using ::cel::ast_internal::Select;
using ::cel::ast_internal::SourceInfo;
using ::cel::ast_internal::Type;
using ::cel::ast_internal::WellKnownType;
using ExprPb = google::api::expr::v1alpha1::Expr;
using ParsedExprPb = google::api::expr::v1alpha1::ParsedExpr;
using CheckedExprPb = google::api::expr::v1alpha1::CheckedExpr;
using ExtensionPb = google::api::expr::v1alpha1::SourceInfo::Extension;
}
// Converts a protobuf Constant into the native ast_internal::Constant.
// An unset constant_kind maps to a default-constructed (empty) Constant;
// duration/timestamp values are converted to absl::Duration / absl::Time.
// Returns InvalidArgumentError for unrecognized kinds.
absl::StatusOr<Constant> ConvertConstant(
    const google::api::expr::v1alpha1::Constant& constant) {
  switch (constant.constant_kind_case()) {
    case google::api::expr::v1alpha1::Constant::CONSTANT_KIND_NOT_SET:
      return Constant();
    case google::api::expr::v1alpha1::Constant::kNullValue:
      // nullptr selects the NullValue alternative of the Constant variant.
      return Constant(nullptr);
    case google::api::expr::v1alpha1::Constant::kBoolValue:
      return Constant(constant.bool_value());
    case google::api::expr::v1alpha1::Constant::kInt64Value:
      return Constant(constant.int64_value());
    case google::api::expr::v1alpha1::Constant::kUint64Value:
      return Constant(constant.uint64_value());
    case google::api::expr::v1alpha1::Constant::kDoubleValue:
      return Constant(constant.double_value());
    case google::api::expr::v1alpha1::Constant::kStringValue:
      return Constant(StringConstant{constant.string_value()});
    case google::api::expr::v1alpha1::Constant::kBytesValue:
      return Constant(BytesConstant{constant.bytes_value()});
    case google::api::expr::v1alpha1::Constant::kDurationValue:
      // seconds + nanos are combined into a single absl::Duration.
      return Constant(absl::Seconds(constant.duration_value().seconds()) +
                      absl::Nanoseconds(constant.duration_value().nanos()));
    case google::api::expr::v1alpha1::Constant::kTimestampValue:
      return Constant(
          absl::FromUnixSeconds(constant.timestamp_value().seconds()) +
          absl::Nanoseconds(constant.timestamp_value().nanos()));
    default:
      return absl::InvalidArgumentError("Unsupported constant type");
  }
}
// Converts a protobuf Expr into the native ast_internal::Expr, delegating to
// the protobuf-internal conversion helper and surfacing any error unchanged.
absl::StatusOr<Expr> ConvertProtoExprToNative(
    const google::api::expr::v1alpha1::Expr& expr) {
  Expr result;
  if (absl::Status status = protobuf_internal::ExprFromProto(expr, result);
      !status.ok()) {
    return status;
  }
  return result;
}
// Converts protobuf SourceInfo into the native ast_internal::SourceInfo,
// including macro calls, extensions, line offsets and positions.
// Returns the first conversion error encountered.
absl::StatusOr<SourceInfo> ConvertProtoSourceInfoToNative(
    const google::api::expr::v1alpha1::SourceInfo& source_info) {
  absl::flat_hash_map<int64_t, Expr> macro_calls;
  macro_calls.reserve(source_info.macro_calls().size());
  for (const auto& pair : source_info.macro_calls()) {
    // Use the status macro for error propagation, consistent with the rest of
    // this file (e.g. ConvertProtoExprToNative).
    CEL_ASSIGN_OR_RETURN(Expr native_expr,
                         ConvertProtoExprToNative(pair.second));
    macro_calls.emplace(pair.first, std::move(native_expr));
  }
  std::vector<Extension> extensions;
  extensions.reserve(source_info.extensions_size());
  for (const auto& extension : source_info.extensions()) {
    std::vector<Extension::Component> components;
    components.reserve(extension.affected_components().size());
    for (const auto& component : extension.affected_components()) {
      // Unknown proto enum values degrade to kUnspecified rather than error.
      switch (component) {
        case ExtensionPb::COMPONENT_PARSER:
          components.push_back(Extension::Component::kParser);
          break;
        case ExtensionPb::COMPONENT_TYPE_CHECKER:
          components.push_back(Extension::Component::kTypeChecker);
          break;
        case ExtensionPb::COMPONENT_RUNTIME:
          components.push_back(Extension::Component::kRuntime);
          break;
        default:
          components.push_back(Extension::Component::kUnspecified);
          break;
      }
    }
    extensions.push_back(
        Extension(extension.id(),
                  std::make_unique<Extension::Version>(
                      extension.version().major(), extension.version().minor()),
                  std::move(components)));
  }
  return SourceInfo(
      source_info.syntax_version(), source_info.location(),
      std::vector<int32_t>(source_info.line_offsets().begin(),
                           source_info.line_offsets().end()),
      absl::flat_hash_map<int64_t, int32_t>(source_info.positions().begin(),
                                            source_info.positions().end()),
      std::move(macro_calls), std::move(extensions));
}
// Maps the protobuf PrimitiveType enum onto the native PrimitiveType enum.
// Returns InvalidArgumentError for values outside the known set.
absl::StatusOr<PrimitiveType> ToNative(
    google::api::expr::v1alpha1::Type::PrimitiveType primitive_type) {
  switch (primitive_type) {
    case google::api::expr::v1alpha1::Type::PRIMITIVE_TYPE_UNSPECIFIED:
      return PrimitiveType::kPrimitiveTypeUnspecified;
    case google::api::expr::v1alpha1::Type::BOOL:
      return PrimitiveType::kBool;
    case google::api::expr::v1alpha1::Type::INT64:
      return PrimitiveType::kInt64;
    case google::api::expr::v1alpha1::Type::UINT64:
      return PrimitiveType::kUint64;
    case google::api::expr::v1alpha1::Type::DOUBLE:
      return PrimitiveType::kDouble;
    case google::api::expr::v1alpha1::Type::STRING:
      return PrimitiveType::kString;
    case google::api::expr::v1alpha1::Type::BYTES:
      return PrimitiveType::kBytes;
    default:
      return absl::InvalidArgumentError(
          "Illegal type specified for "
          "google::api::expr::v1alpha1::Type::PrimitiveType.");
  }
}
// Maps the protobuf WellKnownType enum onto the native WellKnownType enum.
// Returns InvalidArgumentError for values outside the known set.
absl::StatusOr<WellKnownType> ToNative(
    google::api::expr::v1alpha1::Type::WellKnownType well_known_type) {
  switch (well_known_type) {
    case google::api::expr::v1alpha1::Type::WELL_KNOWN_TYPE_UNSPECIFIED:
      return WellKnownType::kWellKnownTypeUnspecified;
    case google::api::expr::v1alpha1::Type::ANY:
      return WellKnownType::kAny;
    case google::api::expr::v1alpha1::Type::TIMESTAMP:
      return WellKnownType::kTimestamp;
    case google::api::expr::v1alpha1::Type::DURATION:
      return WellKnownType::kDuration;
    default:
      return absl::InvalidArgumentError(
          "Illegal type specified for "
          "google::api::expr::v1alpha1::Type::WellKnownType.");
  }
}
// Converts a protobuf ListType by recursively converting its element type.
// Propagates any element-type conversion error.
absl::StatusOr<ListType> ToNative(
    const google::api::expr::v1alpha1::Type::ListType& list_type) {
  // Prefer the status macro over manual .ok() checks, consistent with the
  // rest of this file.
  CEL_ASSIGN_OR_RETURN(Type native_elem_type,
                       ConvertProtoTypeToNative(list_type.elem_type()));
  return ListType(std::make_unique<Type>(std::move(native_elem_type)));
}
// Converts a protobuf MapType by recursively converting its key and value
// types. Propagates the first conversion error.
absl::StatusOr<MapType> ToNative(
    const google::api::expr::v1alpha1::Type::MapType& map_type) {
  CEL_ASSIGN_OR_RETURN(Type native_key_type,
                       ConvertProtoTypeToNative(map_type.key_type()));
  CEL_ASSIGN_OR_RETURN(Type native_value_type,
                       ConvertProtoTypeToNative(map_type.value_type()));
  return MapType(std::make_unique<Type>(std::move(native_key_type)),
                 std::make_unique<Type>(std::move(native_value_type)));
}
// Converts a protobuf FunctionType: all argument types plus the result type.
// Propagates the first conversion error.
absl::StatusOr<FunctionType> ToNative(
    const google::api::expr::v1alpha1::Type::FunctionType& function_type) {
  std::vector<Type> arg_types;
  arg_types.reserve(function_type.arg_types_size());
  for (const auto& arg_type : function_type.arg_types()) {
    CEL_ASSIGN_OR_RETURN(Type native_arg, ConvertProtoTypeToNative(arg_type));
    arg_types.emplace_back(std::move(native_arg));
  }
  CEL_ASSIGN_OR_RETURN(Type native_result,
                       ConvertProtoTypeToNative(function_type.result_type()));
  return FunctionType(std::make_unique<Type>(std::move(native_result)),
                      std::move(arg_types));
}
// Converts a protobuf AbstractType: the name plus all parameter types.
// Propagates the first conversion error.
absl::StatusOr<AbstractType> ToNative(
    const google::api::expr::v1alpha1::Type::AbstractType& abstract_type) {
  std::vector<Type> parameter_types;
  // Reserve up front, matching the FunctionType overload above (the original
  // omitted the reserve here).
  parameter_types.reserve(abstract_type.parameter_types_size());
  for (const auto& parameter_type : abstract_type.parameter_types()) {
    CEL_ASSIGN_OR_RETURN(Type native_parameter_type,
                         ConvertProtoTypeToNative(parameter_type));
    parameter_types.emplace_back(std::move(native_parameter_type));
  }
  return AbstractType(abstract_type.name(), std::move(parameter_types));
}
// Converts a protobuf Type message into the native ast_internal::Type,
// dispatching on the type_kind oneof. Composite kinds (list, map, function,
// abstract, type(T)) recurse via the ToNative overloads / this function.
// Returns InvalidArgumentError for unrecognized kinds.
absl::StatusOr<Type> ConvertProtoTypeToNative(
    const google::api::expr::v1alpha1::Type& type) {
  switch (type.type_kind_case()) {
    case google::api::expr::v1alpha1::Type::kDyn:
      return Type(DynamicType());
    case google::api::expr::v1alpha1::Type::kNull:
      // nullptr selects the null-type alternative of the Type variant.
      return Type(nullptr);
    case google::api::expr::v1alpha1::Type::kPrimitive: {
      CEL_ASSIGN_OR_RETURN(PrimitiveType native_primitive,
                           ToNative(type.primitive()));
      return Type(native_primitive);
    }
    case google::api::expr::v1alpha1::Type::kWrapper: {
      // Wrapper types reuse the primitive enum mapping.
      CEL_ASSIGN_OR_RETURN(PrimitiveType native_wrapper,
                           ToNative(type.wrapper()));
      return Type(PrimitiveTypeWrapper(native_wrapper));
    }
    case google::api::expr::v1alpha1::Type::kWellKnown: {
      CEL_ASSIGN_OR_RETURN(WellKnownType native_well_known,
                           ToNative(type.well_known()));
      return Type(native_well_known);
    }
    case google::api::expr::v1alpha1::Type::kListType: {
      CEL_ASSIGN_OR_RETURN(ListType native_list_type,
                           ToNative(type.list_type()));
      return Type(std::move(native_list_type));
    }
    case google::api::expr::v1alpha1::Type::kMapType: {
      CEL_ASSIGN_OR_RETURN(MapType native_map_type, ToNative(type.map_type()));
      return Type(std::move(native_map_type));
    }
    case google::api::expr::v1alpha1::Type::kFunction: {
      CEL_ASSIGN_OR_RETURN(FunctionType native_function,
                           ToNative(type.function()));
      return Type(std::move(native_function));
    }
    case google::api::expr::v1alpha1::Type::kMessageType:
      return Type(MessageType(type.message_type()));
    case google::api::expr::v1alpha1::Type::kTypeParam:
      return Type(ParamType(type.type_param()));
    case google::api::expr::v1alpha1::Type::kType: {
      // type(T): the converted inner type is heap-allocated and owned.
      CEL_ASSIGN_OR_RETURN(Type native_type,
                           ConvertProtoTypeToNative(type.type()));
      return Type(std::make_unique<Type>(std::move(native_type)));
    }
    case google::api::expr::v1alpha1::Type::kError:
      return Type(ErrorType::kErrorTypeValue);
    case google::api::expr::v1alpha1::Type::kAbstractType: {
      CEL_ASSIGN_OR_RETURN(AbstractType native_abstract,
                           ToNative(type.abstract_type()));
      return Type(std::move(native_abstract));
    }
    default:
      return absl::InvalidArgumentError(
          "Illegal type specified for google::api::expr::v1alpha1::Type.");
  }
}
// Converts a protobuf Reference into the native ast_internal::Reference,
// copying the name and overload ids and, if present, the bound constant value.
absl::StatusOr<Reference> ConvertProtoReferenceToNative(
    const google::api::expr::v1alpha1::Reference& reference) {
  Reference ret_val;
  ret_val.set_name(reference.name());
  ret_val.mutable_overload_id().reserve(reference.overload_id_size());
  for (const auto& elem : reference.overload_id()) {
    ret_val.mutable_overload_id().emplace_back(elem);
  }
  if (reference.has_value()) {
    // Propagate constant conversion errors via the status macro, consistent
    // with the rest of this file.
    CEL_ASSIGN_OR_RETURN(Constant native_value,
                         ConvertConstant(reference.value()));
    ret_val.set_value(std::move(native_value));
  }
  return ret_val;
}
}
namespace {
using ::cel::ast_internal::AbstractType;
using ::cel::ast_internal::AstImpl;
using ::cel::ast_internal::Bytes;
using ::cel::ast_internal::Call;
using ::cel::ast_internal::Comprehension;
using ::cel::ast_internal::Constant;
using ::cel::ast_internal::CreateList;
using ::cel::ast_internal::CreateStruct;
using ::cel::ast_internal::DynamicType;
using ::cel::ast_internal::ErrorType;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::Extension;
using ::cel::ast_internal::FunctionType;
using ::cel::ast_internal::Ident;
using ::cel::ast_internal::ListType;
using ::cel::ast_internal::MapType;
using ::cel::ast_internal::MessageType;
using ::cel::ast_internal::NullValue;
using ::cel::ast_internal::ParamType;
using ::cel::ast_internal::PrimitiveType;
using ::cel::ast_internal::PrimitiveTypeWrapper;
using ::cel::ast_internal::Reference;
using ::cel::ast_internal::Select;
using ::cel::ast_internal::SourceInfo;
using ::cel::ast_internal::Type;
using ::cel::ast_internal::WellKnownType;
using ExprPb = google::api::expr::v1alpha1::Expr;
using ParsedExprPb = google::api::expr::v1alpha1::ParsedExpr;
using CheckedExprPb = google::api::expr::v1alpha1::CheckedExpr;
using SourceInfoPb = google::api::expr::v1alpha1::SourceInfo;
using ExtensionPb = google::api::expr::v1alpha1::SourceInfo::Extension;
using ReferencePb = google::api::expr::v1alpha1::Reference;
using TypePb = google::api::expr::v1alpha1::Type;
// Pairs a native Expr node with the proto node it should be serialized into.
// NOTE(review): not referenced anywhere in the visible portion of this file —
// possibly a leftover from an earlier iterative ExprToProto implementation;
// confirm before removing.
struct ToProtoStackEntry {
  absl::Nonnull<const Expr*> source;  // node to serialize (non-owning)
  absl::Nonnull<ExprPb*> dest;        // destination proto (non-owning)
};
// Serializes a native Constant into a protobuf Constant by visiting the
// variant alternatives. The monostate alternative clears the oneof; time and
// duration values go through the proto-time encoding helpers, which are the
// only alternatives that can fail.
absl::Status ConstantToProto(const ast_internal::Constant& source,
                             google::api::expr::v1alpha1::Constant& dest) {
  return absl::visit(absl::Overload(
                         [&](absl::monostate) -> absl::Status {
                           dest.clear_constant_kind();
                           return absl::OkStatus();
                         },
                         [&](NullValue) -> absl::Status {
                           dest.set_null_value(google::protobuf::NULL_VALUE);
                           return absl::OkStatus();
                         },
                         [&](bool value) {
                           dest.set_bool_value(value);
                           return absl::OkStatus();
                         },
                         [&](int64_t value) {
                           dest.set_int64_value(value);
                           return absl::OkStatus();
                         },
                         [&](uint64_t value) {
                           dest.set_uint64_value(value);
                           return absl::OkStatus();
                         },
                         [&](double value) {
                           dest.set_double_value(value);
                           return absl::OkStatus();
                         },
                         [&](const StringConstant& value) {
                           dest.set_string_value(value);
                           return absl::OkStatus();
                         },
                         [&](const BytesConstant& value) {
                           dest.set_bytes_value(value);
                           return absl::OkStatus();
                         },
                         [&](absl::Time time) {
                           return cel::internal::EncodeTime(
                               time, dest.mutable_timestamp_value());
                         },
                         [&](absl::Duration duration) {
                           return cel::internal::EncodeDuration(
                               duration, dest.mutable_duration_value());
                         }),
                     source.constant_kind());
}
// Serializes a native Expr into the protobuf Expr representation, delegating
// to the protobuf-internal helper and surfacing any error unchanged.
absl::StatusOr<ExprPb> ExprToProto(const Expr& expr) {
  ExprPb result;
  if (absl::Status status = protobuf_internal::ExprToProto(expr, &result);
      !status.ok()) {
    return status;
  }
  return result;
}
// Serializes native SourceInfo into the protobuf SourceInfo, including line
// offsets, positions, macro calls (which may fail during expr serialization)
// and extensions.
absl::StatusOr<SourceInfoPb> SourceInfoToProto(const SourceInfo& source_info) {
  SourceInfoPb result;
  result.set_syntax_version(source_info.syntax_version());
  result.set_location(source_info.location());
  for (int32_t line_offset : source_info.line_offsets()) {
    result.add_line_offsets(line_offset);
  }
  for (auto pos_iter = source_info.positions().begin();
       pos_iter != source_info.positions().end(); ++pos_iter) {
    (*result.mutable_positions())[pos_iter->first] = pos_iter->second;
  }
  for (auto macro_iter = source_info.macro_calls().begin();
       macro_iter != source_info.macro_calls().end(); ++macro_iter) {
    // ExprToProto can fail; assign directly into the map slot on success.
    ExprPb& dest_macro = (*result.mutable_macro_calls())[macro_iter->first];
    CEL_ASSIGN_OR_RETURN(dest_macro, ExprToProto(macro_iter->second));
  }
  for (const auto& extension : source_info.extensions()) {
    auto* extension_pb = result.add_extensions();
    extension_pb->set_id(extension.id());
    auto* version_pb = extension_pb->mutable_version();
    version_pb->set_major(extension.version().major());
    version_pb->set_minor(extension.version().minor());
    // Unknown native components degrade to COMPONENT_UNSPECIFIED.
    for (auto component : extension.affected_components()) {
      switch (component) {
        case Extension::Component::kParser:
          extension_pb->add_affected_components(ExtensionPb::COMPONENT_PARSER);
          break;
        case Extension::Component::kTypeChecker:
          extension_pb->add_affected_components(
              ExtensionPb::COMPONENT_TYPE_CHECKER);
          break;
        case Extension::Component::kRuntime:
          extension_pb->add_affected_components(ExtensionPb::COMPONENT_RUNTIME);
          break;
        default:
          extension_pb->add_affected_components(
              ExtensionPb::COMPONENT_UNSPECIFIED);
          break;
      }
    }
  }
  return result;
}
// Serializes a native Reference into the protobuf Reference: name, overload
// ids, and the optional bound constant value (whose encoding may fail).
absl::StatusOr<ReferencePb> ReferenceToProto(const Reference& reference) {
  ReferencePb proto_ref;
  proto_ref.set_name(reference.name());
  for (const auto& overload : reference.overload_id()) {
    *proto_ref.add_overload_id() = overload;
  }
  if (!reference.has_value()) {
    return proto_ref;
  }
  CEL_RETURN_IF_ERROR(
      ConstantToProto(reference.value(), *proto_ref.mutable_value()));
  return proto_ref;
}
// Forward declaration: the visitor below recurses for composite types.
absl::Status TypeToProto(const Type& type, TypePb* result);
// Variant visitor that serializes each native Type alternative into the
// protobuf Type oneof pointed to by `result`. Composite alternatives
// (list, map, function, abstract, type(T)) recurse through TypeToProto.
struct TypeKindToProtoVisitor {
  // Primitive enum mapping; unknown values yield InvalidArgumentError.
  absl::Status operator()(PrimitiveType primitive) {
    switch (primitive) {
      case PrimitiveType::kPrimitiveTypeUnspecified:
        result->set_primitive(TypePb::PRIMITIVE_TYPE_UNSPECIFIED);
        return absl::OkStatus();
      case PrimitiveType::kBool:
        result->set_primitive(TypePb::BOOL);
        return absl::OkStatus();
      case PrimitiveType::kInt64:
        result->set_primitive(TypePb::INT64);
        return absl::OkStatus();
      case PrimitiveType::kUint64:
        result->set_primitive(TypePb::UINT64);
        return absl::OkStatus();
      case PrimitiveType::kDouble:
        result->set_primitive(TypePb::DOUBLE);
        return absl::OkStatus();
      case PrimitiveType::kString:
        result->set_primitive(TypePb::STRING);
        return absl::OkStatus();
      case PrimitiveType::kBytes:
        result->set_primitive(TypePb::BYTES);
        return absl::OkStatus();
      default:
        break;
    }
    return absl::InvalidArgumentError("Unsupported primitive type");
  }
  // Wrapper types: map the underlying primitive first, then move the enum
  // value from the primitive field into the wrapper field of the oneof.
  absl::Status operator()(PrimitiveTypeWrapper wrapper) {
    CEL_RETURN_IF_ERROR(this->operator()(wrapper.type()));
    auto wrapped = result->primitive();
    result->set_wrapper(wrapped);
    return absl::OkStatus();
  }
  absl::Status operator()(DynamicType) {
    result->mutable_dyn();
    return absl::OkStatus();
  }
  absl::Status operator()(ErrorType) {
    result->mutable_error();
    return absl::OkStatus();
  }
  absl::Status operator()(NullValue) {
    result->set_null(google::protobuf::NULL_VALUE);
    return absl::OkStatus();
  }
  absl::Status operator()(const ListType& list_type) {
    return TypeToProto(list_type.elem_type(),
                       result->mutable_list_type()->mutable_elem_type());
  }
  absl::Status operator()(const MapType& map_type) {
    CEL_RETURN_IF_ERROR(TypeToProto(
        map_type.key_type(), result->mutable_map_type()->mutable_key_type()));
    return TypeToProto(map_type.value_type(),
                       result->mutable_map_type()->mutable_value_type());
  }
  absl::Status operator()(const MessageType& message_type) {
    result->set_message_type(message_type.type());
    return absl::OkStatus();
  }
  // Well-known enum mapping; unknown values yield InvalidArgumentError.
  absl::Status operator()(const WellKnownType& well_known_type) {
    switch (well_known_type) {
      case WellKnownType::kWellKnownTypeUnspecified:
        result->set_well_known(TypePb::WELL_KNOWN_TYPE_UNSPECIFIED);
        return absl::OkStatus();
      case WellKnownType::kAny:
        result->set_well_known(TypePb::ANY);
        return absl::OkStatus();
      case WellKnownType::kDuration:
        result->set_well_known(TypePb::DURATION);
        return absl::OkStatus();
      case WellKnownType::kTimestamp:
        result->set_well_known(TypePb::TIMESTAMP);
        return absl::OkStatus();
      default:
        break;
    }
    return absl::InvalidArgumentError("Unsupported well-known type");
  }
  absl::Status operator()(const FunctionType& function_type) {
    CEL_RETURN_IF_ERROR(
        TypeToProto(function_type.result_type(),
                    result->mutable_function()->mutable_result_type()));
    for (const Type& arg_type : function_type.arg_types()) {
      CEL_RETURN_IF_ERROR(
          TypeToProto(arg_type, result->mutable_function()->add_arg_types()));
    }
    return absl::OkStatus();
  }
  absl::Status operator()(const AbstractType& type) {
    auto* abstract_type_pb = result->mutable_abstract_type();
    abstract_type_pb->set_name(type.name());
    for (const Type& type_param : type.parameter_types()) {
      CEL_RETURN_IF_ERROR(
          TypeToProto(type_param, abstract_type_pb->add_parameter_types()));
    }
    return absl::OkStatus();
  }
  // type(T): the nested type is owned via unique_ptr in the native variant.
  absl::Status operator()(const std::unique_ptr<Type>& type_type) {
    return TypeToProto(*type_type, result->mutable_type());
  }
  absl::Status operator()(const ParamType& param_type) {
    result->set_type_param(param_type.type());
    return absl::OkStatus();
  }
  TypePb* result;  // destination proto (non-owning)
};
// Serializes a native Type into the protobuf Type by visiting its kind.
absl::Status TypeToProto(const Type& type, TypePb* result) {
  TypeKindToProtoVisitor visitor{result};
  return absl::visit(visitor, type.type_kind());
}
}
// Builds an (unchecked) Ast from a protobuf Expr plus optional SourceInfo.
// When source_info is null, a default-constructed SourceInfo is used.
// Returns the first proto-to-native conversion error.
absl::StatusOr<std::unique_ptr<Ast>> CreateAstFromParsedExpr(
    const google::api::expr::v1alpha1::Expr& expr,
    const google::api::expr::v1alpha1::SourceInfo* source_info) {
  CEL_ASSIGN_OR_RETURN(auto runtime_expr,
                       internal::ConvertProtoExprToNative(expr));
  cel::ast_internal::SourceInfo runtime_source_info;
  if (source_info != nullptr) {
    CEL_ASSIGN_OR_RETURN(
        runtime_source_info,
        internal::ConvertProtoSourceInfoToNative(*source_info));
  }
  return std::make_unique<cel::ast_internal::AstImpl>(
      std::move(runtime_expr), std::move(runtime_source_info));
}
// Convenience overload: builds an Ast from a full ParsedExpr message by
// forwarding its expr and source_info to the two-argument overload.
absl::StatusOr<std::unique_ptr<Ast>> CreateAstFromParsedExpr(
    const ParsedExprPb& parsed_expr) {
  const auto& source_info = parsed_expr.source_info();
  return CreateAstFromParsedExpr(parsed_expr.expr(), &source_info);
}
// Serializes an Ast back into a protobuf ParsedExpr (expr + source info).
// Returns the first serialization error.
absl::StatusOr<ParsedExprPb> CreateParsedExprFromAst(const Ast& ast) {
  const auto& impl = ast_internal::AstImpl::CastFromPublicAst(ast);
  ParsedExprPb result;
  CEL_ASSIGN_OR_RETURN(*result.mutable_expr(), ExprToProto(impl.root_expr()));
  CEL_ASSIGN_OR_RETURN(*result.mutable_source_info(),
                       SourceInfoToProto(impl.source_info()));
  return result;
}
// Builds a type-checked Ast from a protobuf CheckedExpr, converting the expr,
// source info, reference map and type map. Returns the first conversion
// error encountered.
absl::StatusOr<std::unique_ptr<Ast>> CreateAstFromCheckedExpr(
    const CheckedExprPb& checked_expr) {
  CEL_ASSIGN_OR_RETURN(Expr expr,
                       internal::ConvertProtoExprToNative(checked_expr.expr()));
  CEL_ASSIGN_OR_RETURN(
      SourceInfo source_info,
      internal::ConvertProtoSourceInfoToNative(checked_expr.source_info()));
  AstImpl::ReferenceMap reference_map;
  for (const auto& pair : checked_expr.reference_map()) {
    // Use the status macro for error propagation, consistent with the expr
    // and source-info conversions above.
    CEL_ASSIGN_OR_RETURN(Reference native_reference,
                         internal::ConvertProtoReferenceToNative(pair.second));
    reference_map.emplace(pair.first, std::move(native_reference));
  }
  AstImpl::TypeMap type_map;
  for (const auto& pair : checked_expr.type_map()) {
    CEL_ASSIGN_OR_RETURN(Type native_type,
                         internal::ConvertProtoTypeToNative(pair.second));
    type_map.emplace(pair.first, std::move(native_type));
  }
  return std::make_unique<AstImpl>(
      std::move(expr), std::move(source_info), std::move(reference_map),
      std::move(type_map), checked_expr.expr_version());
}
// Serializes a type-checked Ast into a protobuf CheckedExpr. Fails with
// InvalidArgumentError when the Ast has not been type-checked, otherwise
// returns the first serialization error, if any.
absl::StatusOr<google::api::expr::v1alpha1::CheckedExpr> CreateCheckedExprFromAst(
    const Ast& ast) {
  if (!ast.IsChecked()) {
    return absl::InvalidArgumentError("AST is not type-checked");
  }
  const auto& ast_impl = ast_internal::AstImpl::CastFromPublicAst(ast);
  CheckedExprPb checked_expr;
  checked_expr.set_expr_version(ast_impl.expr_version());
  CEL_ASSIGN_OR_RETURN(*checked_expr.mutable_expr(),
                       ExprToProto(ast_impl.root_expr()));
  CEL_ASSIGN_OR_RETURN(*checked_expr.mutable_source_info(),
                       SourceInfoToProto(ast_impl.source_info()));
  for (const auto& [expr_id, reference] : ast_impl.reference_map()) {
    ReferencePb& dest_reference =
        (*checked_expr.mutable_reference_map())[expr_id];
    CEL_ASSIGN_OR_RETURN(dest_reference, ReferenceToProto(reference));
  }
  for (const auto& [expr_id, type] : ast_impl.type_map()) {
    TypePb& dest_type = (*checked_expr.mutable_type_map())[expr_id];
    CEL_RETURN_IF_ERROR(TypeToProto(type, &dest_type));
  }
  return checked_expr;
}
} | #include "extensions/protobuf/ast_converters.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "base/ast_internal/ast_impl.h"
#include "base/ast_internal/expr.h"
#include "internal/proto_matchers.h"
#include "internal/testing.h"
#include "parser/options.h"
#include "parser/parser.h"
#include "google/protobuf/text_format.h"
namespace cel::extensions {
namespace internal {
namespace {
using ::absl_testing::StatusIs;
using ::cel::ast_internal::NullValue;
using ::cel::ast_internal::PrimitiveType;
using ::cel::ast_internal::WellKnownType;
// Verifies that every SourceInfo field (syntax version, location, line
// offsets, positions, macro calls) survives the proto-to-native conversion.
TEST(AstConvertersTest, SourceInfoToNative) {
  google::api::expr::v1alpha1::SourceInfo source_info;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        syntax_version: "version"
        location: "location"
        line_offsets: 1
        line_offsets: 2
        positions { key: 1 value: 2 }
        positions { key: 3 value: 4 }
        macro_calls {
          key: 1
          value { ident_expr { name: "name" } }
        }
      )pb",
      &source_info));
  auto native_source_info = ConvertProtoSourceInfoToNative(source_info);
  EXPECT_EQ(native_source_info->syntax_version(), "version");
  EXPECT_EQ(native_source_info->location(), "location");
  EXPECT_EQ(native_source_info->line_offsets(), std::vector<int32_t>({1, 2}));
  EXPECT_EQ(native_source_info->positions().at(1), 2);
  EXPECT_EQ(native_source_info->positions().at(3), 4);
  // The macro call expr (an ident named "name") must be converted intact.
  ASSERT_TRUE(native_source_info->macro_calls().at(1).has_ident_expr());
  ASSERT_EQ(native_source_info->macro_calls().at(1).ident_expr().name(),
            "name");
}
// One test per primitive enum value: the proto PrimitiveType must map onto
// the corresponding native PrimitiveType.
TEST(AstConvertersTest, PrimitiveTypeUnspecifiedToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::PRIMITIVE_TYPE_UNSPECIFIED);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kPrimitiveTypeUnspecified);
}
TEST(AstConvertersTest, PrimitiveTypeBoolToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::BOOL);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kBool);
}
TEST(AstConvertersTest, PrimitiveTypeInt64ToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::INT64);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kInt64);
}
TEST(AstConvertersTest, PrimitiveTypeUint64ToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::UINT64);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kUint64);
}
TEST(AstConvertersTest, PrimitiveTypeDoubleToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::DOUBLE);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kDouble);
}
TEST(AstConvertersTest, PrimitiveTypeStringToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::STRING);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kString);
}
TEST(AstConvertersTest, PrimitiveTypeBytesToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(google::api::expr::v1alpha1::Type::BYTES);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_primitive());
  EXPECT_EQ(native_type->primitive(), PrimitiveType::kBytes);
}
// An out-of-range enum value (7) must produce InvalidArgumentError.
TEST(AstConvertersTest, PrimitiveTypeError) {
  google::api::expr::v1alpha1::Type type;
  type.set_primitive(::google::api::expr::v1alpha1::Type_PrimitiveType(7));
  auto native_type = ConvertProtoTypeToNative(type);
  EXPECT_EQ(native_type.status().code(), absl::StatusCode::kInvalidArgument);
  EXPECT_THAT(native_type.status().message(),
              ::testing::HasSubstr("Illegal type specified for "
                                   "google::api::expr::v1alpha1::Type::PrimitiveType."));
}
// One test per well-known enum value: the proto WellKnownType must map onto
// the corresponding native WellKnownType.
TEST(AstConvertersTest, WellKnownTypeUnspecifiedToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_well_known(google::api::expr::v1alpha1::Type::WELL_KNOWN_TYPE_UNSPECIFIED);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_well_known());
  EXPECT_EQ(native_type->well_known(),
            WellKnownType::kWellKnownTypeUnspecified);
}
TEST(AstConvertersTest, WellKnownTypeAnyToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_well_known(google::api::expr::v1alpha1::Type::ANY);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_well_known());
  EXPECT_EQ(native_type->well_known(), WellKnownType::kAny);
}
TEST(AstConvertersTest, WellKnownTypeTimestampToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_well_known(google::api::expr::v1alpha1::Type::TIMESTAMP);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_well_known());
  EXPECT_EQ(native_type->well_known(), WellKnownType::kTimestamp);
}
TEST(AstConvertersTest, WellKnownTypeDuraionToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_well_known(google::api::expr::v1alpha1::Type::DURATION);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_well_known());
  EXPECT_EQ(native_type->well_known(), WellKnownType::kDuration);
}
// An out-of-range enum value (4) must produce InvalidArgumentError.
TEST(AstConvertersTest, WellKnownTypeError) {
  google::api::expr::v1alpha1::Type type;
  type.set_well_known(::google::api::expr::v1alpha1::Type_WellKnownType(4));
  auto native_type = ConvertProtoTypeToNative(type);
  EXPECT_EQ(native_type.status().code(), absl::StatusCode::kInvalidArgument);
  EXPECT_THAT(native_type.status().message(),
              ::testing::HasSubstr("Illegal type specified for "
                                   "google::api::expr::v1alpha1::Type::WellKnownType."));
}
// Composite-type conversions: list, map and function types must be converted
// recursively, preserving their nested element/key/value/arg/result types.
TEST(AstConvertersTest, ListTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  type.mutable_list_type()->mutable_elem_type()->set_primitive(
      google::api::expr::v1alpha1::Type::BOOL);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_list_type());
  auto& native_list_type = native_type->list_type();
  ASSERT_TRUE(native_list_type.elem_type().has_primitive());
  EXPECT_EQ(native_list_type.elem_type().primitive(), PrimitiveType::kBool);
}
TEST(AstConvertersTest, MapTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        map_type {
          key_type { primitive: BOOL }
          value_type { primitive: DOUBLE }
        }
      )pb",
      &type));
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_map_type());
  auto& native_map_type = native_type->map_type();
  ASSERT_TRUE(native_map_type.key_type().has_primitive());
  EXPECT_EQ(native_map_type.key_type().primitive(), PrimitiveType::kBool);
  ASSERT_TRUE(native_map_type.value_type().has_primitive());
  EXPECT_EQ(native_map_type.value_type().primitive(), PrimitiveType::kDouble);
}
TEST(AstConvertersTest, FunctionTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        function {
          result_type { primitive: BOOL }
          arg_types { primitive: DOUBLE }
          arg_types { primitive: STRING }
        }
      )pb",
      &type));
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_function());
  auto& native_function_type = native_type->function();
  ASSERT_TRUE(native_function_type.result_type().has_primitive());
  EXPECT_EQ(native_function_type.result_type().primitive(),
            PrimitiveType::kBool);
  // Argument order must be preserved.
  ASSERT_TRUE(native_function_type.arg_types().at(0).has_primitive());
  EXPECT_EQ(native_function_type.arg_types().at(0).primitive(),
            PrimitiveType::kDouble);
  ASSERT_TRUE(native_function_type.arg_types().at(1).has_primitive());
  EXPECT_EQ(native_function_type.arg_types().at(1).primitive(),
            PrimitiveType::kString);
}
TEST(AstConvertersTest, AbstractTypeToNative) {
google::api::expr::v1alpha1::Type type;
ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
R"pb(
abstract_type {
name: "name"
parameter_types { primitive: DOUBLE }
parameter_types { primitive: STRING }
}
)pb",
&type));
auto native_type = ConvertProtoTypeToNative(type);
ASSERT_TRUE(native_type->has_abstract_type());
auto& native_abstract_type = native_type->abstract_type();
EXPECT_EQ(native_abstract_type.name(), "name");
ASSERT_TRUE(native_abstract_type.parameter_types().at(0).has_primitive());
EXPECT_EQ(native_abstract_type.parameter_types().at(0).primitive(),
PrimitiveType::kDouble);
ASSERT_TRUE(native_abstract_type.parameter_types().at(1).has_primitive());
EXPECT_EQ(native_abstract_type.parameter_types().at(1).primitive(),
PrimitiveType::kString);
}
// The `dyn` type converts to the native dynamic type marker.
TEST(AstConvertersTest, DynamicTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  type.mutable_dyn();
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_dyn());
}
// The null type converts; the native null value is represented as nullptr.
TEST(AstConvertersTest, NullTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_null(google::protobuf::NULL_VALUE);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_null());
  EXPECT_EQ(native_type->null(), nullptr);
}
// A primitive wrapper type (e.g. google.protobuf.BoolValue) converts to the
// native wrapper representation.
TEST(AstConvertersTest, PrimitiveTypeWrapperToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_wrapper(google::api::expr::v1alpha1::Type::BOOL);
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_wrapper());
  EXPECT_EQ(native_type->wrapper(), PrimitiveType::kBool);
}
// A message type converts with its fully-qualified type name preserved.
TEST(AstConvertersTest, MessageTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_message_type("message");
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_message_type());
  EXPECT_EQ(native_type->message_type().type(), "message");
}
// A type parameter (generic placeholder) converts with its name preserved.
TEST(AstConvertersTest, ParamTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  type.set_type_param("param");
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_type_param());
  EXPECT_EQ(native_type->type_param().type(), "param");
}
// A nested type (type-of-type) converts with the inner type preserved.
TEST(AstConvertersTest, NestedTypeToNative) {
  google::api::expr::v1alpha1::Type type;
  type.mutable_type()->mutable_dyn();
  auto native_type = ConvertProtoTypeToNative(type);
  ASSERT_TRUE(native_type->has_type());
  EXPECT_TRUE(native_type->type().has_dyn());
}
// A Type message with no kind set must be rejected with InvalidArgument.
TEST(AstConvertersTest, TypeError) {
  auto native_type = ConvertProtoTypeToNative(google::api::expr::v1alpha1::Type());
  EXPECT_EQ(native_type.status().code(), absl::StatusCode::kInvalidArgument);
  EXPECT_THAT(native_type.status().message(),
              ::testing::HasSubstr(
                  "Illegal type specified for google::api::expr::v1alpha1::Type."));
}
// A Reference converts with its name, overload id list (order preserved),
// and constant value intact.
TEST(AstConvertersTest, ReferenceToNative) {
  google::api::expr::v1alpha1::Reference reference;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        name: "name"
        overload_id: "id1"
        overload_id: "id2"
        value { bool_value: true }
      )pb",
      &reference));
  auto native_reference = ConvertProtoReferenceToNative(reference);
  EXPECT_EQ(native_reference->name(), "name");
  EXPECT_EQ(native_reference->overload_id(),
            std::vector<std::string>({"id1", "id2"}));
  EXPECT_TRUE(native_reference->value().bool_value());
}
}
}
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::internal::test::EqualsProto;
using ::google::api::expr::parser::Parse;
using ::testing::HasSubstr;
using ParsedExprPb = google::api::expr::v1alpha1::ParsedExpr;
using CheckedExprPb = google::api::expr::v1alpha1::CheckedExpr;
using TypePb = google::api::expr::v1alpha1::Type;
// A fully-populated CheckedExpr (reference map, type map, source info, expr)
// converts to an Ast that reports itself as type-checked.
TEST(AstConvertersTest, CheckedExprToAst) {
  CheckedExprPb checked_expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        reference_map {
          key: 1
          value {
            name: "name"
            overload_id: "id1"
            overload_id: "id2"
            value { bool_value: true }
          }
        }
        type_map {
          key: 1
          value { dyn {} }
        }
        source_info {
          syntax_version: "version"
          location: "location"
          line_offsets: 1
          line_offsets: 2
          positions { key: 1 value: 2 }
          positions { key: 3 value: 4 }
          macro_calls {
            key: 1
            value { ident_expr { name: "name" } }
          }
        }
        expr_version: "version"
        expr { ident_expr { name: "expr" } }
      )pb",
      &checked_expr));
  ASSERT_OK_AND_ASSIGN(auto ast, CreateAstFromCheckedExpr(checked_expr));
  ASSERT_TRUE(ast->IsChecked());
}
// Builds a native AST field-by-field and verifies that converting it back to
// a CheckedExpr proto reproduces every field exactly.
TEST(AstConvertersTest, AstToCheckedExprBasic) {
  ast_internal::AstImpl ast;
  ast.root_expr().set_id(1);
  ast.root_expr().mutable_ident_expr().set_name("expr");
  ast.source_info().set_syntax_version("version");
  ast.source_info().set_location("location");
  ast.source_info().mutable_line_offsets().push_back(1);
  ast.source_info().mutable_line_offsets().push_back(2);
  ast.source_info().mutable_positions().insert({1, 2});
  ast.source_info().mutable_positions().insert({3, 4});
  ast_internal::Expr macro;
  macro.mutable_ident_expr().set_name("name");
  ast.source_info().mutable_macro_calls().insert({1, std::move(macro)});
  ast_internal::AstImpl::TypeMap type_map;
  ast_internal::AstImpl::ReferenceMap reference_map;
  ast_internal::Reference reference;
  reference.set_name("name");
  reference.mutable_overload_id().push_back("id1");
  reference.mutable_overload_id().push_back("id2");
  reference.mutable_value().set_bool_value(true);
  ast_internal::Type type;
  type.set_type_kind(ast_internal::DynamicType());
  ast.reference_map().insert({1, std::move(reference)});
  ast.type_map().insert({1, std::move(type)});
  ast.set_expr_version("version");
  // Marking the AST checked is required for CreateCheckedExprFromAst to
  // succeed.
  ast.set_is_checked(true);
  ASSERT_OK_AND_ASSIGN(auto checked_pb, CreateCheckedExprFromAst(ast));
  EXPECT_THAT(checked_pb, EqualsProto(R"pb(
                reference_map {
                  key: 1
                  value {
                    name: "name"
                    overload_id: "id1"
                    overload_id: "id2"
                    value { bool_value: true }
                  }
                }
                type_map {
                  key: 1
                  value { dyn {} }
                }
                source_info {
                  syntax_version: "version"
                  location: "location"
                  line_offsets: 1
                  line_offsets: 2
                  positions { key: 1 value: 2 }
                  positions { key: 3 value: 4 }
                  macro_calls {
                    key: 1
                    value { ident_expr { name: "name" } }
                  }
                }
                expr_version: "version"
                expr {
                  id: 1
                  ident_expr { name: "expr" }
                }
              )pb"));
}
// Minimal checked expression used as the base fixture for the per-type
// round-trip tests; each test case overwrites the entry in type_map.
constexpr absl::string_view kTypesTestCheckedExpr =
    R"pb(reference_map: {
           key: 1
           value: { name: "x" }
         }
         type_map: {
           key: 1
           value: { primitive: INT64 }
         }
         source_info: {
           location: "<input>"
           line_offsets: 2
           positions: { key: 1 value: 0 }
         }
         expr: {
           id: 1
           ident_expr: { name: "x" }
         })pb";
// Parameter holding a textproto for a single Type kind under test.
struct CheckedExprToAstTypesTestCase {
  absl::string_view type;
};
// Fixture: parses the base checked expression once per test.
class CheckedExprToAstTypesTest
    : public testing::TestWithParam<CheckedExprToAstTypesTestCase> {
 public:
  void SetUp() override {
    ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(kTypesTestCheckedExpr,
                                                    &checked_expr_));
  }

 protected:
  CheckedExprPb checked_expr_;
};
// Installs the parameterized type into the checked expression's type map and
// verifies that the proto -> AST -> proto conversion is lossless for it.
TEST_P(CheckedExprToAstTypesTest, CheckedExprToAstTypes) {
  TypePb parameterized_type;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(GetParam().type,
                                                  &parameterized_type));
  (*checked_expr_.mutable_type_map())[1] = parameterized_type;
  ASSERT_OK_AND_ASSIGN(auto ast, CreateAstFromCheckedExpr(checked_expr_));
  EXPECT_THAT(CreateCheckedExprFromAst(*ast),
              IsOkAndHolds(EqualsProto(checked_expr_)));
}
// One round-trip case per representable Type kind (containers, primitives,
// wrappers, well-known types, abstract, nested, param, and function types).
INSTANTIATE_TEST_SUITE_P(
    Types, CheckedExprToAstTypesTest,
    testing::ValuesIn<CheckedExprToAstTypesTestCase>({
        {R"pb(list_type { elem_type { primitive: INT64 } })pb"},
        {R"pb(map_type {
                key_type { primitive: STRING }
                value_type { primitive: INT64 }
              })pb"},
        {R"pb(message_type: "com.example.TestType")pb"},
        {R"pb(primitive: BOOL)pb"},
        {R"pb(primitive: INT64)pb"},
        {R"pb(primitive: UINT64)pb"},
        {R"pb(primitive: DOUBLE)pb"},
        {R"pb(primitive: STRING)pb"},
        {R"pb(primitive: BYTES)pb"},
        {R"pb(wrapper: BOOL)pb"},
        {R"pb(wrapper: INT64)pb"},
        {R"pb(wrapper: UINT64)pb"},
        {R"pb(wrapper: DOUBLE)pb"},
        {R"pb(wrapper: STRING)pb"},
        {R"pb(wrapper: BYTES)pb"},
        {R"pb(well_known: TIMESTAMP)pb"},
        {R"pb(well_known: DURATION)pb"},
        {R"pb(well_known: ANY)pb"},
        {R"pb(dyn {})pb"},
        {R"pb(error {})pb"},
        {R"pb(null: NULL_VALUE)pb"},
        {R"pb(
           abstract_type {
             name: "MyType"
             parameter_types { primitive: INT64 }
           }
         )pb"},
        {R"pb(
           type { primitive: INT64 }
         )pb"},
        {R"pb(type_param: "T")pb"},
        {R"pb(
           function {
             result_type { primitive: INT64 }
             arg_types { primitive: INT64 }
           }
         )pb"},
    }));
// A ParsedExpr with source info converts to an AST without error.
TEST(AstConvertersTest, ParsedExprToAst) {
  ParsedExprPb parsed_expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        source_info {
          syntax_version: "version"
          location: "location"
          line_offsets: 1
          line_offsets: 2
          positions { key: 1 value: 2 }
          positions { key: 3 value: 4 }
          macro_calls {
            key: 1
            value { ident_expr { name: "name" } }
          }
        }
        expr { ident_expr { name: "expr" } }
      )pb",
      &parsed_expr));
  ASSERT_OK_AND_ASSIGN(auto ast,
                       cel::extensions::CreateAstFromParsedExpr(parsed_expr));
}
// Builds a native (unchecked) AST field-by-field and verifies that converting
// it back to a ParsedExpr proto reproduces every field exactly.
TEST(AstConvertersTest, AstToParsedExprBasic) {
  ast_internal::Expr expr;
  expr.set_id(1);
  expr.mutable_ident_expr().set_name("expr");
  ast_internal::SourceInfo source_info;
  source_info.set_syntax_version("version");
  source_info.set_location("location");
  source_info.mutable_line_offsets().push_back(1);
  source_info.mutable_line_offsets().push_back(2);
  source_info.mutable_positions().insert({1, 2});
  source_info.mutable_positions().insert({3, 4});
  ast_internal::Expr macro;
  macro.mutable_ident_expr().set_name("name");
  source_info.mutable_macro_calls().insert({1, std::move(macro)});
  ast_internal::AstImpl ast(std::move(expr), std::move(source_info));
  ASSERT_OK_AND_ASSIGN(auto checked_pb, CreateParsedExprFromAst(ast));
  EXPECT_THAT(checked_pb, EqualsProto(R"pb(
                source_info {
                  syntax_version: "version"
                  location: "location"
                  line_offsets: 1
                  line_offsets: 2
                  positions { key: 1 value: 2 }
                  positions { key: 3 value: 4 }
                  macro_calls {
                    key: 1
                    value { ident_expr { name: "name" } }
                  }
                }
                expr {
                  id: 1
                  ident_expr { name: "expr" }
                }
              )pb"));
}
// A bare Expr (no source info) converts to an AST via the convenience
// overload.
TEST(AstConvertersTest, ExprToAst) {
  google::api::expr::v1alpha1::Expr expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        ident_expr { name: "expr" }
      )pb",
      &expr));
  ASSERT_OK_AND_ASSIGN(auto ast,
                       cel::extensions::CreateAstFromParsedExpr(expr));
}
// An Expr plus a separately-supplied SourceInfo converts via the two-argument
// overload.
TEST(AstConvertersTest, ExprAndSourceInfoToAst) {
  google::api::expr::v1alpha1::Expr expr;
  google::api::expr::v1alpha1::SourceInfo source_info;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        syntax_version: "version"
        location: "location"
        line_offsets: 1
        line_offsets: 2
        positions { key: 1 value: 2 }
        positions { key: 3 value: 4 }
        macro_calls {
          key: 1
          value { ident_expr { name: "name" } }
        }
      )pb",
      &source_info));
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        ident_expr { name: "expr" }
      )pb",
      &expr));
  ASSERT_OK_AND_ASSIGN(
      auto ast, cel::extensions::CreateAstFromParsedExpr(expr, &source_info));
}
// An expression node with no kind set must survive a proto -> AST -> proto
// round trip unchanged.
TEST(AstConvertersTest, EmptyNodeRoundTrip) {
  ParsedExprPb parsed_expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        expr {
          id: 1
          select_expr {
            operand {
              id: 2
              # no kind set.
            }
            field: "field"
          }
        }
        source_info {}
      )pb",
      &parsed_expr));
  ASSERT_OK_AND_ASSIGN(auto ast, CreateAstFromParsedExpr(parsed_expr));
  ASSERT_OK_AND_ASSIGN(ParsedExprPb copy, CreateParsedExprFromAst(*ast));
  EXPECT_THAT(copy, EqualsProto(parsed_expr));
}
// Deprecated duration constants must still round-trip for backwards
// compatibility with existing ASTs.
TEST(AstConvertersTest, DurationConstantRoundTrip) {
  ParsedExprPb parsed_expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        expr {
          id: 1
          const_expr {
            # deprecated, but support existing ASTs.
            duration_value { seconds: 10 }
          }
        }
        source_info {}
      )pb",
      &parsed_expr));
  ASSERT_OK_AND_ASSIGN(auto ast, CreateAstFromParsedExpr(parsed_expr));
  ASSERT_OK_AND_ASSIGN(ParsedExprPb copy, CreateParsedExprFromAst(*ast));
  EXPECT_THAT(copy, EqualsProto(parsed_expr));
}
// Deprecated timestamp constants must likewise still round-trip.
TEST(AstConvertersTest, TimestampConstantRoundTrip) {
  ParsedExprPb parsed_expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        expr {
          id: 1
          const_expr {
            # deprecated, but support existing ASTs.
            timestamp_value { seconds: 10 }
          }
        }
        source_info {}
      )pb",
      &parsed_expr));
  ASSERT_OK_AND_ASSIGN(auto ast, CreateAstFromParsedExpr(parsed_expr));
  ASSERT_OK_AND_ASSIGN(ParsedExprPb copy, CreateParsedExprFromAst(*ast));
  EXPECT_THAT(copy, EqualsProto(parsed_expr));
}
// Parameter holding a CEL expression to be parsed and round-tripped.
struct ConversionRoundTripCase {
  absl::string_view expr;
};
// Fixture enabling macro-call recording and optional syntax so that all
// AST features appear in the round-tripped protos.
class ConversionRoundTripTest
    : public testing::TestWithParam<ConversionRoundTripCase> {
 public:
  ConversionRoundTripTest() {
    options_.add_macro_calls = true;
    options_.enable_optional_syntax = true;
  }

 protected:
  ParserOptions options_;
};
// A parsed (unchecked) AST converts back to an identical ParsedExpr, and
// refuses to convert to a CheckedExpr.
TEST_P(ConversionRoundTripTest, ParsedExprCopyable) {
  ASSERT_OK_AND_ASSIGN(ParsedExprPb parsed_expr,
                       Parse(GetParam().expr, "<input>", options_));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> ast,
                       CreateAstFromParsedExpr(parsed_expr));
  const auto& impl = ast_internal::AstImpl::CastFromPublicAst(*ast);
  EXPECT_THAT(CreateCheckedExprFromAst(impl),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("AST is not type-checked")));
  EXPECT_THAT(CreateParsedExprFromAst(impl),
              IsOkAndHolds(EqualsProto(parsed_expr)));
}
// A synthetic checked AST (reference + type for the root node) round-trips
// to an identical CheckedExpr.
TEST_P(ConversionRoundTripTest, CheckedExprCopyable) {
  ASSERT_OK_AND_ASSIGN(ParsedExprPb parsed_expr,
                       Parse(GetParam().expr, "<input>", options_));
  CheckedExprPb checked_expr;
  *checked_expr.mutable_expr() = parsed_expr.expr();
  *checked_expr.mutable_source_info() = parsed_expr.source_info();
  int64_t root_id = checked_expr.expr().id();
  (*checked_expr.mutable_reference_map())[root_id].add_overload_id("_==_");
  (*checked_expr.mutable_type_map())[root_id].set_primitive(TypePb::BOOL);
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> ast,
                       CreateAstFromCheckedExpr(checked_expr));
  const auto& impl = ast_internal::AstImpl::CastFromPublicAst(*ast);
  EXPECT_THAT(CreateCheckedExprFromAst(impl),
              IsOkAndHolds(EqualsProto(checked_expr)));
}
// Round-trip cases covering literals of every CEL kind, containers, message
// construction, comprehensions, field selection, and optional syntax.
INSTANTIATE_TEST_SUITE_P(
    ExpressionCases, ConversionRoundTripTest,
    testing::ValuesIn<ConversionRoundTripCase>(
        {{R"cel(null == null)cel"},
         {R"cel(1 == 2)cel"},
         {R"cel(1u == 2u)cel"},
         {R"cel(1.1 == 2.1)cel"},
         {R"cel(b"1" == b"2")cel"},
         {R"cel("42" == "42")cel"},
         {R"cel("s".startsWith("s") == true)cel"},
         {R"cel([1, 2, 3] == [1, 2, 3])cel"},
         {R"cel(TestAllTypes{single_int64: 42}.single_int64 == 42)cel"},
         {R"cel([1, 2, 3].map(x, x + 2).size() == 3)cel"},
         {R"cel({"a": 1, "b": 2}["a"] == 1)cel"},
         {R"cel(ident == 42)cel"},
         {R"cel(ident.field == 42)cel"},
         {R"cel({?"abc": {}[?1]}.?abc.orValue(42) == 42)cel"},
         {R"cel([1, 2, ?optional.none()].size() == 2)cel"}}));
// SourceInfo extension records (id, version, affected components) must
// round-trip through the AST unchanged.
TEST(ExtensionConversionRoundTripTest, RoundTrip) {
  ParsedExprPb parsed_expr;
  ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(
      R"pb(
        expr {
          id: 1
          ident_expr { name: "unused" }
        }
        source_info {
          extensions {
            id: "extension"
            version { major: 1 minor: 2 }
            affected_components: COMPONENT_UNSPECIFIED
            affected_components: COMPONENT_PARSER
            affected_components: COMPONENT_TYPE_CHECKER
            affected_components: COMPONENT_RUNTIME
          }
        }
      )pb",
      &parsed_expr));
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<Ast> ast,
                       CreateAstFromParsedExpr(parsed_expr));
  const auto& impl = ast_internal::AstImpl::CastFromPublicAst(*ast);
  EXPECT_THAT(CreateCheckedExprFromAst(impl),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("AST is not type-checked")));
  EXPECT_THAT(CreateParsedExprFromAst(impl),
              IsOkAndHolds(EqualsProto(parsed_expr)));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/ast_converters.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/ast_converters_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
23f03aa2-bf49-43f8-b085-00c345b87bac | cpp | google/quiche | quiche_data_writer | quiche/common/quiche_data_writer.cc | quiche/common/quiche_data_writer_test.cc | #include "quiche/common/quiche_data_writer.h"
#include <algorithm>
#include <limits>
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/quiche_endian.h"
namespace quiche {
// Default construction writes in network (big-endian) byte order.
QuicheDataWriter::QuicheDataWriter(size_t size, char* buffer)
    : QuicheDataWriter(size, buffer, quiche::NETWORK_BYTE_ORDER) {}
// Wraps a caller-owned buffer of `size` bytes; the writer does not take
// ownership and never allocates.
QuicheDataWriter::QuicheDataWriter(size_t size, char* buffer,
                                   quiche::Endianness endianness)
    : buffer_(buffer), capacity_(size), length_(0), endianness_(endianness) {}
QuicheDataWriter::~QuicheDataWriter() {}
// Returns the underlying (caller-owned) buffer.
char* QuicheDataWriter::data() { return buffer_; }
// Appends a single byte. Returns false (leaving state unchanged) on overflow.
bool QuicheDataWriter::WriteUInt8(uint8_t value) {
  return WriteBytes(&value, sizeof(value));
}
// Appends a 16-bit integer, byte-swapped to network order when the writer was
// constructed with NETWORK_BYTE_ORDER.
bool QuicheDataWriter::WriteUInt16(uint16_t value) {
  if (endianness_ == quiche::NETWORK_BYTE_ORDER) {
    value = quiche::QuicheEndian::HostToNet16(value);
  }
  return WriteBytes(&value, sizeof(value));
}
// Appends a 32-bit integer, byte-swapped to network order when configured.
bool QuicheDataWriter::WriteUInt32(uint32_t value) {
  if (endianness_ == quiche::NETWORK_BYTE_ORDER) {
    value = quiche::QuicheEndian::HostToNet32(value);
  }
  return WriteBytes(&value, sizeof(value));
}
// Appends a 64-bit integer, byte-swapped to network order when configured.
bool QuicheDataWriter::WriteUInt64(uint64_t value) {
  if (endianness_ == quiche::NETWORK_BYTE_ORDER) {
    value = quiche::QuicheEndian::HostToNet64(value);
  }
  return WriteBytes(&value, sizeof(value));
}
// Writes the low-order `num_bytes` bytes of `value` (at most 8).
// In network order the bytes come from the tail of the big-endian
// representation, i.e. the most-significant of the retained bytes is written
// first; in host order the first `num_bytes` bytes of the in-memory value are
// written as-is.
bool QuicheDataWriter::WriteBytesToUInt64(size_t num_bytes, uint64_t value) {
  if (num_bytes > sizeof(value)) {
    return false;
  }
  if (endianness_ == quiche::HOST_BYTE_ORDER) {
    return WriteBytes(&value, num_bytes);
  }
  value = quiche::QuicheEndian::HostToNet64(value);
  // Skip the leading (most-significant) bytes of the big-endian value that
  // fall outside the requested width.
  return WriteBytes(reinterpret_cast<char*>(&value) + sizeof(value) - num_bytes,
                    num_bytes);
}
// Writes `val` preceded by its length as a 16-bit integer. Fails (without a
// partial length write) when the string does not fit in 16 bits; may leave a
// dangling length prefix if the payload itself does not fit in the buffer.
bool QuicheDataWriter::WriteStringPiece16(absl::string_view val) {
  const size_t size = val.size();
  if (size > std::numeric_limits<uint16_t>::max()) {
    return false;
  }
  return WriteUInt16(static_cast<uint16_t>(size)) &&
         WriteBytes(val.data(), size);
}
// Writes the raw bytes of `val` with no length prefix.
bool QuicheDataWriter::WriteStringPiece(absl::string_view val) {
  return WriteBytes(val.data(), val.size());
}
// Returns a pointer to the next writable position if `length` more bytes fit
// in the buffer, or nullptr otherwise. Does not advance length_.
char* QuicheDataWriter::BeginWrite(size_t length) {
  if (length_ > capacity_) {
    return nullptr;
  }
  // Subtract rather than add to avoid overflow in `length_ + length`.
  if (capacity_ - length_ < length) {
    return nullptr;
  }

#ifdef ARCH_CPU_64_BITS
  QUICHE_DCHECK_LE(length, std::numeric_limits<uint32_t>::max());
#endif

  return buffer_ + length_;
}
// Appends `data_len` raw bytes. Returns false, leaving the writer untouched,
// when the bytes do not fit.
bool QuicheDataWriter::WriteBytes(const void* data, size_t data_len) {
  char* out = BeginWrite(data_len);
  if (out == nullptr) {
    return false;
  }
  const char* src = static_cast<const char*>(data);
  std::copy(src, src + data_len, out);
  length_ += data_len;
  return true;
}
// Appends `count` copies of `byte`. Returns false on overflow.
bool QuicheDataWriter::WriteRepeatedByte(uint8_t byte, size_t count) {
  char* dest = BeginWrite(count);
  if (!dest) {
    return false;
  }
  std::fill(dest, dest + count, byte);
  length_ += count;
  return true;
}
// Zero-fills the remainder of the buffer and marks it fully written.
void QuicheDataWriter::WritePadding() {
  QUICHE_DCHECK_LE(length_, capacity_);
  if (length_ > capacity_) {
    return;
  }
  std::fill(buffer_ + length_, buffer_ + capacity_, 0x00);
  length_ = capacity_;
}
// Appends exactly `count` zero bytes.
bool QuicheDataWriter::WritePaddingBytes(size_t count) {
  return WriteRepeatedByte(0x00, count);
}
// Writes the tag in host memory order with no endianness conversion —
// presumably tags are defined as in-memory byte sequences rather than
// integers; confirm against the tag definitions before changing.
bool QuicheDataWriter::WriteTag(uint32_t tag) {
  return WriteBytes(&tag, sizeof(tag));
}
// Writes `value` as an RFC 9000 variable-length integer (1, 2, 4, or 8
// bytes). The two most-significant bits of the first byte encode the length:
// 00 -> 1 byte, 01 -> 2, 10 -> 4, 11 -> 8. The shortest encoding is always
// chosen. Returns false if the value needs more than 62 bits or the buffer
// lacks room; on failure nothing is written.
bool QuicheDataWriter::WriteVarInt62(uint64_t value) {
  QUICHE_DCHECK_EQ(endianness(), quiche::NETWORK_BYTE_ORDER);

  size_t remaining_bytes = remaining();
  char* next = buffer() + length();

  // Values using the top two bits cannot be represented in 62 bits.
  if ((value & kVarInt62ErrorMask) == 0) {
    // 8-byte encoding: any bit above the 30-bit range is set.
    if ((value & kVarInt62Mask8Bytes) != 0) {
      if (remaining_bytes >= 8) {
        // First byte carries the 0b11 length prefix plus the top 6 value bits.
        *(next + 0) = ((value >> 56) & 0x3f) + 0xc0;
        *(next + 1) = (value >> 48) & 0xff;
        *(next + 2) = (value >> 40) & 0xff;
        *(next + 3) = (value >> 32) & 0xff;
        *(next + 4) = (value >> 24) & 0xff;
        *(next + 5) = (value >> 16) & 0xff;
        *(next + 6) = (value >> 8) & 0xff;
        *(next + 7) = value & 0xff;
        IncreaseLength(8);
        return true;
      }
      return false;
    }
    // 4-byte encoding (0b10 prefix).
    if ((value & kVarInt62Mask4Bytes) != 0) {
      if (remaining_bytes >= 4) {
        *(next + 0) = ((value >> 24) & 0x3f) + 0x80;
        *(next + 1) = (value >> 16) & 0xff;
        *(next + 2) = (value >> 8) & 0xff;
        *(next + 3) = value & 0xff;
        IncreaseLength(4);
        return true;
      }
      return false;
    }
    // 2-byte encoding (0b01 prefix).
    if ((value & kVarInt62Mask2Bytes) != 0) {
      if (remaining_bytes >= 2) {
        *(next + 0) = ((value >> 8) & 0x3f) + 0x40;
        *(next + 1) = (value)&0xff;
        IncreaseLength(2);
        return true;
      }
      return false;
    }
    // 1-byte encoding (0b00 prefix): value fits in 6 bits.
    if (remaining_bytes >= 1) {
      *next = (value & 0x3f);
      IncreaseLength(1);
      return true;
    }
    return false;
  }
  return false;
}
// Writes a length-prefixed string: a varint62 length followed by the raw
// bytes. An empty string writes only the (zero) length prefix.
bool QuicheDataWriter::WriteStringPieceVarInt62(
    const absl::string_view& string_piece) {
  if (!WriteVarInt62(string_piece.size())) {
    return false;
  }
  return string_piece.empty() ||
         WriteBytes(string_piece.data(), string_piece.size());
}
// Returns the number of bytes the shortest varint62 encoding of `value`
// occupies, or LENGTH_0 (with a QUICHE_BUG) if the value exceeds 62 bits.
QuicheVariableLengthIntegerLength QuicheDataWriter::GetVarInt62Len(
    uint64_t value) {
  if ((value & kVarInt62ErrorMask) != 0) {
    QUICHE_BUG(invalid_varint) << "Attempted to encode a value, " << value
                               << ", that is too big for VarInt62";
    return VARIABLE_LENGTH_INTEGER_LENGTH_0;
  }
  if ((value & kVarInt62Mask8Bytes) != 0) {
    return VARIABLE_LENGTH_INTEGER_LENGTH_8;
  }
  if ((value & kVarInt62Mask4Bytes) != 0) {
    return VARIABLE_LENGTH_INTEGER_LENGTH_4;
  }
  if ((value & kVarInt62Mask2Bytes) != 0) {
    return VARIABLE_LENGTH_INTEGER_LENGTH_2;
  }
  return VARIABLE_LENGTH_INTEGER_LENGTH_1;
}
// Writes `value` as a varint62 padded out to exactly `write_length` bytes
// (useful when a fixed-size encoding is required, e.g. for later overwrite).
// Fails if the buffer is too small or `write_length` is shorter than the
// minimal encoding of `value`.
bool QuicheDataWriter::WriteVarInt62WithForcedLength(
    uint64_t value, QuicheVariableLengthIntegerLength write_length) {
  QUICHE_DCHECK_EQ(endianness(), NETWORK_BYTE_ORDER);

  size_t remaining_bytes = remaining();
  if (remaining_bytes < write_length) {
    return false;
  }

  const QuicheVariableLengthIntegerLength min_length = GetVarInt62Len(value);
  if (write_length < min_length) {
    QUICHE_BUG(invalid_varint_forced) << "Cannot write value " << value
                                      << " with write_length " << write_length;
    return false;
  }
  if (write_length == min_length) {
    return WriteVarInt62(value);
  }

  // Longer-than-minimal encodings: emit the length prefix byte followed by
  // zero padding, then the value in the trailing bytes.
  if (write_length == VARIABLE_LENGTH_INTEGER_LENGTH_2) {
    return WriteUInt8(0b01000000) && WriteUInt8(value);
  }
  if (write_length == VARIABLE_LENGTH_INTEGER_LENGTH_4) {
    return WriteUInt8(0b10000000) && WriteUInt8(0) && WriteUInt16(value);
  }
  if (write_length == VARIABLE_LENGTH_INTEGER_LENGTH_8) {
    return WriteUInt8(0b11000000) && WriteUInt8(0) && WriteUInt16(0) &&
           WriteUInt32(value);
  }

  QUICHE_BUG(invalid_write_length)
      << "Invalid write_length " << static_cast<int>(write_length);
  return false;
}
// Advances the write position by `length` bytes without writing; the skipped
// bytes are left with whatever contents the buffer already had.
bool QuicheDataWriter::Seek(size_t length) {
  if (!BeginWrite(length)) {
    return false;
  }
  length_ += length;
  return true;
}
// Human-readable summary of the writer's state, for logging.
std::string QuicheDataWriter::DebugString() const {
  return absl::StrCat(" { capacity: ", capacity_, ", length: ", length_, " }");
}
} | #include "quiche/common/quiche_data_writer.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_data_reader.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quiche {
namespace test {
namespace {
// Reinterprets an unsigned byte array as char* for APIs taking char buffers.
char* AsChars(unsigned char* data) { return reinterpret_cast<char*>(data); }
// Test parameter: the byte order the writer/reader pair is constructed with.
struct TestParams {
  explicit TestParams(quiche::Endianness endianness) : endianness(endianness) {}

  quiche::Endianness endianness;
};
// Used by gtest to name parameterized test instances.
std::string PrintToString(const TestParams& p) {
  return absl::StrCat(
      (p.endianness == quiche::NETWORK_BYTE_ORDER ? "Network" : "Host"),
      "ByteOrder");
}
// Every test runs once per byte order.
std::vector<TestParams> GetTestParams() {
  std::vector<TestParams> params;
  for (quiche::Endianness endianness :
       {quiche::NETWORK_BYTE_ORDER, quiche::HOST_BYTE_ORDER}) {
    params.push_back(TestParams(endianness));
  }
  return params;
}

class QuicheDataWriterTest : public QuicheTestWithParam<TestParams> {};

INSTANTIATE_TEST_SUITE_P(QuicheDataWriterTests, QuicheDataWriterTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// Round-trips a 16-bit value through both the fixed-width and the
// variable-byte-count APIs, checking the on-the-wire byte order.
TEST_P(QuicheDataWriterTest, Write16BitUnsignedIntegers) {
  char little_endian16[] = {0x22, 0x11};
  char big_endian16[] = {0x11, 0x22};
  char buffer16[2];
  {
    uint16_t in_memory16 = 0x1122;
    QuicheDataWriter writer(2, buffer16, GetParam().endianness);
    writer.WriteUInt16(in_memory16);
    test::CompareCharArraysWithHexError(
        "uint16_t", buffer16, 2,
        GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian16
                                                            : little_endian16,
        2);

    uint16_t read_number16;
    QuicheDataReader reader(buffer16, 2, GetParam().endianness);
    reader.ReadUInt16(&read_number16);
    EXPECT_EQ(in_memory16, read_number16);
  }
  {
    uint64_t in_memory16 = 0x0000000000001122;
    QuicheDataWriter writer(2, buffer16, GetParam().endianness);
    writer.WriteBytesToUInt64(2, in_memory16);
    test::CompareCharArraysWithHexError(
        "uint16_t", buffer16, 2,
        GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian16
                                                            : little_endian16,
        2);

    uint64_t read_number16;
    QuicheDataReader reader(buffer16, 2, GetParam().endianness);
    reader.ReadBytesToUInt64(2, &read_number16);
    EXPECT_EQ(in_memory16, read_number16);
  }
}
// 24-bit values have no fixed-width API; only WriteBytesToUInt64 is tested.
TEST_P(QuicheDataWriterTest, Write24BitUnsignedIntegers) {
  char little_endian24[] = {0x33, 0x22, 0x11};
  char big_endian24[] = {0x11, 0x22, 0x33};
  char buffer24[3];
  uint64_t in_memory24 = 0x0000000000112233;
  QuicheDataWriter writer(3, buffer24, GetParam().endianness);
  writer.WriteBytesToUInt64(3, in_memory24);
  test::CompareCharArraysWithHexError(
      "uint24", buffer24, 3,
      GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian24
                                                          : little_endian24,
      3);

  uint64_t read_number24;
  QuicheDataReader reader(buffer24, 3, GetParam().endianness);
  reader.ReadBytesToUInt64(3, &read_number24);
  EXPECT_EQ(in_memory24, read_number24);
}
// Round-trips a 32-bit value through both write APIs.
TEST_P(QuicheDataWriterTest, Write32BitUnsignedIntegers) {
  char little_endian32[] = {0x44, 0x33, 0x22, 0x11};
  char big_endian32[] = {0x11, 0x22, 0x33, 0x44};
  char buffer32[4];
  {
    uint32_t in_memory32 = 0x11223344;
    QuicheDataWriter writer(4, buffer32, GetParam().endianness);
    writer.WriteUInt32(in_memory32);
    test::CompareCharArraysWithHexError(
        "uint32_t", buffer32, 4,
        GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian32
                                                            : little_endian32,
        4);

    uint32_t read_number32;
    QuicheDataReader reader(buffer32, 4, GetParam().endianness);
    reader.ReadUInt32(&read_number32);
    EXPECT_EQ(in_memory32, read_number32);
  }
  {
    uint64_t in_memory32 = 0x11223344;
    QuicheDataWriter writer(4, buffer32, GetParam().endianness);
    writer.WriteBytesToUInt64(4, in_memory32);
    test::CompareCharArraysWithHexError(
        "uint32_t", buffer32, 4,
        GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian32
                                                            : little_endian32,
        4);

    uint64_t read_number32;
    QuicheDataReader reader(buffer32, 4, GetParam().endianness);
    reader.ReadBytesToUInt64(4, &read_number32);
    EXPECT_EQ(in_memory32, read_number32);
  }
}
// 40-bit round trip via WriteBytesToUInt64.
TEST_P(QuicheDataWriterTest, Write40BitUnsignedIntegers) {
  uint64_t in_memory40 = 0x0000001122334455;
  char little_endian40[] = {0x55, 0x44, 0x33, 0x22, 0x11};
  char big_endian40[] = {0x11, 0x22, 0x33, 0x44, 0x55};
  char buffer40[5];
  QuicheDataWriter writer(5, buffer40, GetParam().endianness);
  writer.WriteBytesToUInt64(5, in_memory40);
  test::CompareCharArraysWithHexError(
      "uint40", buffer40, 5,
      GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian40
                                                          : little_endian40,
      5);

  uint64_t read_number40;
  QuicheDataReader reader(buffer40, 5, GetParam().endianness);
  reader.ReadBytesToUInt64(5, &read_number40);
  EXPECT_EQ(in_memory40, read_number40);
}
// 48-bit round trip via WriteBytesToUInt64, checking on-the-wire byte order.
TEST_P(QuicheDataWriterTest, Write48BitUnsignedIntegers) {
  uint64_t in_memory48 = 0x0000112233445566;
  char little_endian48[] = {0x66, 0x55, 0x44, 0x33, 0x22, 0x11};
  char big_endian48[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
  char buffer48[6];
  QuicheDataWriter writer(6, buffer48, GetParam().endianness);
  writer.WriteBytesToUInt64(6, in_memory48);
  test::CompareCharArraysWithHexError(
      "uint48", buffer48, 6,
      GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian48
                                                          : little_endian48,
      6);

  uint64_t read_number48;
  QuicheDataReader reader(buffer48, 6, GetParam().endianness);
  // Fixed: the byte count was written as the floating-point literal `6.`,
  // which only worked via an implicit double -> size_t conversion.
  reader.ReadBytesToUInt64(6, &read_number48);
  EXPECT_EQ(in_memory48, read_number48);
}
// 56-bit round trip via WriteBytesToUInt64.
TEST_P(QuicheDataWriterTest, Write56BitUnsignedIntegers) {
  uint64_t in_memory56 = 0x0011223344556677;
  char little_endian56[] = {0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11};
  char big_endian56[] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
  char buffer56[7];
  QuicheDataWriter writer(7, buffer56, GetParam().endianness);
  writer.WriteBytesToUInt64(7, in_memory56);
  test::CompareCharArraysWithHexError(
      "uint56", buffer56, 7,
      GetParam().endianness == quiche::NETWORK_BYTE_ORDER ? big_endian56
                                                          : little_endian56,
      7);

  uint64_t read_number56;
  QuicheDataReader reader(buffer56, 7, GetParam().endianness);
  reader.ReadBytesToUInt64(7, &read_number56);
  EXPECT_EQ(in_memory56, read_number56);
}
// 64-bit round trip through both WriteBytesToUInt64 and WriteUInt64; the two
// must produce identical wire bytes.
TEST_P(QuicheDataWriterTest, Write64BitUnsignedIntegers) {
  uint64_t in_memory64 = 0x1122334455667788;
  unsigned char little_endian64[] = {0x88, 0x77, 0x66, 0x55,
                                     0x44, 0x33, 0x22, 0x11};
  unsigned char big_endian64[] = {0x11, 0x22, 0x33, 0x44,
                                  0x55, 0x66, 0x77, 0x88};
  char buffer64[8];
  QuicheDataWriter writer(8, buffer64, GetParam().endianness);
  writer.WriteBytesToUInt64(8, in_memory64);
  test::CompareCharArraysWithHexError(
      "uint64_t", buffer64, 8,
      GetParam().endianness == quiche::NETWORK_BYTE_ORDER
          ? AsChars(big_endian64)
          : AsChars(little_endian64),
      8);

  uint64_t read_number64;
  QuicheDataReader reader(buffer64, 8, GetParam().endianness);
  reader.ReadBytesToUInt64(8, &read_number64);
  EXPECT_EQ(in_memory64, read_number64);

  QuicheDataWriter writer2(8, buffer64, GetParam().endianness);
  writer2.WriteUInt64(in_memory64);
  test::CompareCharArraysWithHexError(
      "uint64_t", buffer64, 8,
      GetParam().endianness == quiche::NETWORK_BYTE_ORDER
          ? AsChars(big_endian64)
          : AsChars(little_endian64),
      8);
  read_number64 = 0u;
  QuicheDataReader reader2(buffer64, 8, GetParam().endianness);
  reader2.ReadUInt64(&read_number64);
  EXPECT_EQ(in_memory64, read_number64);
}
// Interleaves fixed-width writes with variable-byte-count writes of every
// width 0..8, then reads everything back. Widths > 8 must fail.
TEST_P(QuicheDataWriterTest, WriteIntegers) {
  // Total bytes written: 0 + (1+1) + (2+2) + 3 + (4+4) + 5 + 6 + 7 + 8 = 43,
  // which exactly fills the buffer. The writer/reader capacity must match the
  // array size (it was previously declared as 46, 3 bytes past the end of
  // `buf`).
  char buf[43];
  uint8_t i8 = 0x01;
  uint16_t i16 = 0x0123;
  uint32_t i32 = 0x01234567;
  uint64_t i64 = 0x0123456789ABCDEF;
  QuicheDataWriter writer(43, buf, GetParam().endianness);
  for (size_t i = 0; i < 10; ++i) {
    switch (i) {
      case 0u:
        EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
        break;
      case 1u:
        EXPECT_TRUE(writer.WriteUInt8(i8));
        EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
        break;
      case 2u:
        EXPECT_TRUE(writer.WriteUInt16(i16));
        EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
        break;
      case 3u:
        EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
        break;
      case 4u:
        EXPECT_TRUE(writer.WriteUInt32(i32));
        EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
        break;
      case 5u:
      case 6u:
      case 7u:
      case 8u:
        EXPECT_TRUE(writer.WriteBytesToUInt64(i, i64));
        break;
      default:
        // Widths greater than sizeof(uint64_t) are rejected.
        EXPECT_FALSE(writer.WriteBytesToUInt64(i, i64));
    }
  }

  QuicheDataReader reader(buf, 43, GetParam().endianness);
  for (size_t i = 0; i < 10; ++i) {
    uint8_t read8;
    uint16_t read16;
    uint32_t read32;
    uint64_t read64;
    switch (i) {
      case 0u:
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(0u, read64);
        break;
      case 1u:
        EXPECT_TRUE(reader.ReadUInt8(&read8));
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(i8, read8);
        EXPECT_EQ(0xEFu, read64);
        break;
      case 2u:
        EXPECT_TRUE(reader.ReadUInt16(&read16));
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(i16, read16);
        EXPECT_EQ(0xCDEFu, read64);
        break;
      case 3u:
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(0xABCDEFu, read64);
        break;
      case 4u:
        EXPECT_TRUE(reader.ReadUInt32(&read32));
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(i32, read32);
        EXPECT_EQ(0x89ABCDEFu, read64);
        break;
      case 5u:
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(0x6789ABCDEFu, read64);
        break;
      case 6u:
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(0x456789ABCDEFu, read64);
        break;
      case 7u:
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(0x23456789ABCDEFu, read64);
        break;
      case 8u:
        EXPECT_TRUE(reader.ReadBytesToUInt64(i, &read64));
        EXPECT_EQ(0x0123456789ABCDEFu, read64);
        break;
      default:
        EXPECT_FALSE(reader.ReadBytesToUInt64(i, &read64));
    }
  }
}
// Raw bytes written via WriteBytes appear in the buffer verbatim, regardless
// of configured endianness.
TEST_P(QuicheDataWriterTest, WriteBytes) {
  char bytes[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
  char buf[ABSL_ARRAYSIZE(bytes)];
  QuicheDataWriter writer(ABSL_ARRAYSIZE(buf), buf, GetParam().endianness);
  EXPECT_TRUE(writer.WriteBytes(bytes, ABSL_ARRAYSIZE(bytes)));
  for (unsigned int i = 0; i < ABSL_ARRAYSIZE(bytes); ++i) {
    EXPECT_EQ(bytes[i], buf[i]);
  }
}
const int kVarIntBufferLength = 1024;

// Round-trips |value_in| through the IETF QUIC variable-length integer
// encoding (RFC 9000 section 16): encodes it into |buffer|, checks that the
// minimal legal encoding length was used, decodes it back, and verifies the
// decoded value matches and the reader consumed exactly the encoding.
// Returns false as soon as any step fails.
bool EncodeDecodeValue(uint64_t value_in, char* buffer, size_t size_of_buffer) {
  memset(buffer, 0, size_of_buffer);
  QuicheDataWriter writer(size_of_buffer, buffer,
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  if (!writer.WriteVarInt62(value_in)) {
    return false;
  }
  // Minimal varint62 encodings are 1/2/4/8 bytes for 6/14/30/62-bit payloads.
  size_t expected_length;
  if (value_in <= 0x3f) {
    expected_length = 1;
  } else if (value_in <= 0x3fff) {
    expected_length = 2;
  } else if (value_in <= 0x3fffffff) {
    expected_length = 4;
  } else {
    expected_length = 8;
  }
  if (writer.length() != expected_length) {
    return false;
  }
  QuicheDataReader reader(buffer, expected_length,
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  uint64_t value_out;
  if (!reader.ReadVarInt62(&value_out)) {
    return false;
  }
  return value_in == value_out && reader.IsDoneReading();
}
// Verifies the exact wire layout of an 8-byte varint62: the top two bits of
// the first byte are the length prefix 0b11 (0xc0), followed by the value in
// network byte order.
TEST_P(QuicheDataWriterTest, VarInt8Layout) {
  char buffer[1024];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  EXPECT_TRUE(writer.WriteVarInt62(UINT64_C(0x3142f3e4d5c6b7a8)));
  // First byte carries the 0xc0 length prefix OR'd with the top value bits.
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 0)), (0x31 + 0xc0));
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 1)), 0x42);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 2)), 0xf3);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 3)), 0xe4);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 4)), 0xd5);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 5)), 0xc6);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 6)), 0xb7);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 7)), 0xa8);
}

// Verifies the exact wire layout of a 4-byte varint62 (length prefix 0b10 /
// 0x80 in the first byte).
TEST_P(QuicheDataWriterTest, VarInt4Layout) {
  char buffer[1024];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  EXPECT_TRUE(writer.WriteVarInt62(0x3243f4e5));
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 0)), (0x32 + 0x80));
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 1)), 0x43);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 2)), 0xf4);
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 3)), 0xe5);
}

// Verifies the exact wire layout of a 2-byte varint62 (length prefix 0b01 /
// 0x40 in the first byte).
TEST_P(QuicheDataWriterTest, VarInt2Layout) {
  char buffer[1024];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  EXPECT_TRUE(writer.WriteVarInt62(0x3647));
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 0)), (0x36 + 0x40));
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 1)), 0x47);
}

// Verifies the exact wire layout of a 1-byte varint62 (length prefix 0b00, so
// the byte is the value itself).
TEST_P(QuicheDataWriterTest, VarInt1Layout) {
  char buffer[1024];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  EXPECT_TRUE(writer.WriteVarInt62(0x3f));
  EXPECT_EQ(static_cast<unsigned char>(*(writer.data() + 0)), 0x3f);
}
// Round-trips values at and around every varint62 encoding-length boundary
// (0x3f/0x40, 0x3fff/0x4000, 0x3fffffff/0x40000000) plus values straddling
// each byte-width boundary, verifying encode/decode via EncodeDecodeValue.
TEST_P(QuicheDataWriterTest, VarIntGoodTargetedValues) {
  char buffer[kVarIntBufferLength];
  uint64_t passing_values[] = {
      0,
      1,
      0x3e,
      0x3f,
      0x40,
      0x41,
      0x3ffe,
      0x3fff,
      0x4000,
      0x4001,
      0x3ffffffe,
      0x3fffffff,
      0x40000000,
      0x40000001,
      0x3ffffffffffffffe,
      0x3fffffffffffffff,
      0xfe,
      0xff,
      0x100,
      0x101,
      0xfffe,
      0xffff,
      0x10000,
      0x10001,
      0xfffffe,
      0xffffff,
      0x1000000,
      0x1000001,
      0xfffffffe,
      0xffffffff,
      0x100000000,
      0x100000001,
      0xfffffffffe,
      0xffffffffff,
      0x10000000000,
      0x10000000001,
      0xfffffffffffe,
      0xffffffffffff,
      0x1000000000000,
      0x1000000000001,
      0xfffffffffffffe,
      0xffffffffffffff,
      0x100000000000000,
      0x100000000000001,
  };
  for (uint64_t test_val : passing_values) {
    EXPECT_TRUE(
        EncodeDecodeValue(test_val, static_cast<char*>(buffer), sizeof(buffer)))
        << " encode/decode of " << test_val << " failed";
  }
}

// Values above the 62-bit maximum (0x3fffffffffffffff) cannot be encoded as a
// varint62; WriteVarInt62 must reject them.
TEST_P(QuicheDataWriterTest, VarIntBadTargetedValues) {
  char buffer[kVarIntBufferLength];
  uint64_t failing_values[] = {
      0x4000000000000000,
      0x4000000000000001,
      0xfffffffffffffffe,
      0xffffffffffffffff,
  };
  for (uint64_t test_val : failing_values) {
    EXPECT_FALSE(
        EncodeDecodeValue(test_val, static_cast<char*>(buffer), sizeof(buffer)))
        << " encode/decode of " << test_val << " succeeded, but was an "
        << "invalid value";
  }
}
// Writes each value with every legal forced (non-minimal) encoding length and
// verifies that the reader decodes the same value back regardless of the
// padding, consuming the buffer exactly. The 90-byte buffer is sized to hold
// all of the writes below with no slack.
TEST_P(QuicheDataWriterTest, WriteVarInt62WithForcedLength) {
  char buffer[90];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer));
  // Each value is written once per encoding length that can represent it.
  writer.WriteVarInt62WithForcedLength(1, VARIABLE_LENGTH_INTEGER_LENGTH_1);
  writer.WriteVarInt62WithForcedLength(1, VARIABLE_LENGTH_INTEGER_LENGTH_2);
  writer.WriteVarInt62WithForcedLength(1, VARIABLE_LENGTH_INTEGER_LENGTH_4);
  writer.WriteVarInt62WithForcedLength(1, VARIABLE_LENGTH_INTEGER_LENGTH_8);
  writer.WriteVarInt62WithForcedLength(63, VARIABLE_LENGTH_INTEGER_LENGTH_1);
  writer.WriteVarInt62WithForcedLength(63, VARIABLE_LENGTH_INTEGER_LENGTH_2);
  writer.WriteVarInt62WithForcedLength(63, VARIABLE_LENGTH_INTEGER_LENGTH_4);
  writer.WriteVarInt62WithForcedLength(63, VARIABLE_LENGTH_INTEGER_LENGTH_8);
  writer.WriteVarInt62WithForcedLength(64, VARIABLE_LENGTH_INTEGER_LENGTH_2);
  writer.WriteVarInt62WithForcedLength(64, VARIABLE_LENGTH_INTEGER_LENGTH_4);
  writer.WriteVarInt62WithForcedLength(64, VARIABLE_LENGTH_INTEGER_LENGTH_8);
  writer.WriteVarInt62WithForcedLength(16383, VARIABLE_LENGTH_INTEGER_LENGTH_2);
  writer.WriteVarInt62WithForcedLength(16383, VARIABLE_LENGTH_INTEGER_LENGTH_4);
  writer.WriteVarInt62WithForcedLength(16383, VARIABLE_LENGTH_INTEGER_LENGTH_8);
  writer.WriteVarInt62WithForcedLength(16384, VARIABLE_LENGTH_INTEGER_LENGTH_4);
  writer.WriteVarInt62WithForcedLength(16384, VARIABLE_LENGTH_INTEGER_LENGTH_8);
  writer.WriteVarInt62WithForcedLength(1073741823,
                                       VARIABLE_LENGTH_INTEGER_LENGTH_4);
  writer.WriteVarInt62WithForcedLength(1073741823,
                                       VARIABLE_LENGTH_INTEGER_LENGTH_8);
  writer.WriteVarInt62WithForcedLength(1073741824,
                                       VARIABLE_LENGTH_INTEGER_LENGTH_8);
  QuicheDataReader reader(buffer, sizeof(buffer));
  uint64_t test_val = 0;
  // Read back each group of writes and check the decoded value.
  for (int i = 0; i < 4; ++i) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, 1u);
  }
  for (int i = 0; i < 4; ++i) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, 63u);
  }
  for (int i = 0; i < 3; ++i) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, 64u);
  }
  for (int i = 0; i < 3; ++i) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, 16383u);
  }
  for (int i = 0; i < 2; ++i) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, 16384u);
  }
  for (int i = 0; i < 2; ++i) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, 1073741823u);
  }
  EXPECT_TRUE(reader.ReadVarInt62(&test_val));
  EXPECT_EQ(test_val, 1073741824u);
  // The buffer is exactly full, so a further read must fail.
  EXPECT_FALSE(reader.ReadVarInt62(&test_val));
}
// Number of consecutive varints written/read by each MultiVarIntN test below.
const int kMultiVarCount = 1000;

// Fills a buffer with kMultiVarCount back-to-back 8-byte varints, verifies
// the writer rejects one more write when full, then reads them all back.
TEST_P(QuicheDataWriterTest, MultiVarInt8) {
  uint64_t test_val;
  char buffer[8 * kMultiVarCount];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(writer.WriteVarInt62(UINT64_C(0x3142f3e4d5c6b7a8) + i));
  }
  EXPECT_EQ(writer.length(), 8u * kMultiVarCount);
  // Buffer is exactly full; one more write must fail.
  EXPECT_FALSE(writer.WriteVarInt62(UINT64_C(0x3142f3e4d5c6b7a8)));
  QuicheDataReader reader(buffer, sizeof(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, (UINT64_C(0x3142f3e4d5c6b7a8) + i));
  }
  EXPECT_FALSE(reader.ReadVarInt62(&test_val));
}

// Same as MultiVarInt8, but with values that encode in 4 bytes.
TEST_P(QuicheDataWriterTest, MultiVarInt4) {
  uint64_t test_val;
  char buffer[4 * kMultiVarCount];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(writer.WriteVarInt62(UINT64_C(0x3142f3e4) + i));
  }
  EXPECT_EQ(writer.length(), 4u * kMultiVarCount);
  EXPECT_FALSE(writer.WriteVarInt62(UINT64_C(0x3142f3e4)));
  QuicheDataReader reader(buffer, sizeof(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, (UINT64_C(0x3142f3e4) + i));
  }
  EXPECT_FALSE(reader.ReadVarInt62(&test_val));
}

// Same as MultiVarInt8, but with values that encode in 2 bytes.
TEST_P(QuicheDataWriterTest, MultiVarInt2) {
  uint64_t test_val;
  char buffer[2 * kMultiVarCount];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(writer.WriteVarInt62(UINT64_C(0x3142) + i));
  }
  EXPECT_EQ(writer.length(), 2u * kMultiVarCount);
  EXPECT_FALSE(writer.WriteVarInt62(UINT64_C(0x3142)));
  QuicheDataReader reader(buffer, sizeof(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, (UINT64_C(0x3142) + i));
  }
  EXPECT_FALSE(reader.ReadVarInt62(&test_val));
}

// Same as MultiVarInt8, but with values that encode in a single byte. The
// `i & 0xf` mask keeps every value <= 0x3f so the 1-byte encoding is used.
TEST_P(QuicheDataWriterTest, MultiVarInt1) {
  uint64_t test_val;
  char buffer[1 * kMultiVarCount];
  memset(buffer, 0, sizeof(buffer));
  QuicheDataWriter writer(sizeof(buffer), static_cast<char*>(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(writer.WriteVarInt62(UINT64_C(0x30) + (i & 0xf)));
  }
  EXPECT_EQ(writer.length(), 1u * kMultiVarCount);
  EXPECT_FALSE(writer.WriteVarInt62(UINT64_C(0x31)));
  QuicheDataReader reader(buffer, sizeof(buffer),
                          quiche::Endianness::NETWORK_BYTE_ORDER);
  for (int i = 0; i < kMultiVarCount; i++) {
    EXPECT_TRUE(reader.ReadVarInt62(&test_val));
    EXPECT_EQ(test_val, (UINT64_C(0x30) + (i & 0xf)));
  }
  EXPECT_FALSE(reader.ReadVarInt62(&test_val));
}
// Seek advances the write position without writing, leaving the skipped byte
// untouched (zero-initialized here).
TEST_P(QuicheDataWriterTest, Seek) {
  char buffer[3] = {};
  QuicheDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
                          GetParam().endianness);
  EXPECT_TRUE(writer.WriteUInt8(42));
  EXPECT_TRUE(writer.Seek(1));
  EXPECT_TRUE(writer.WriteUInt8(3));
  char expected[] = {42, 0, 3};
  for (size_t i = 0; i < ABSL_ARRAYSIZE(expected); ++i) {
    EXPECT_EQ(buffer[i], expected[i]);
  }
}

// Seeking past the end of the buffer must fail, including on overflow-prone
// inputs such as SIZE_MAX.
TEST_P(QuicheDataWriterTest, SeekTooFarFails) {
  char buffer[20];
  // Seeking to exactly the end succeeds; any further seek fails.
  {
    QuicheDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
                            GetParam().endianness);
    EXPECT_TRUE(writer.Seek(20));
    EXPECT_FALSE(writer.Seek(1));
  }
  // Seeking beyond the buffer in a single step fails.
  {
    QuicheDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
                            GetParam().endianness);
    EXPECT_FALSE(writer.Seek(100));
  }
  // A huge offset must not wrap around via integer overflow.
  {
    QuicheDataWriter writer(ABSL_ARRAYSIZE(buffer), buffer,
                            GetParam().endianness);
    EXPECT_TRUE(writer.Seek(10));
    EXPECT_FALSE(writer.Seek(std::numeric_limits<size_t>::max()));
  }
}
// Exercises the QuicheDataReader payload views: PreviouslyReadPayload,
// PeekRemainingPayload (non-consuming), FullPayload, and
// ReadRemainingPayload (consuming), before and after partial reads.
TEST_P(QuicheDataWriterTest, PayloadReads) {
  char buffer[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  char expected_first_read[4] = {1, 2, 3, 4};
  char expected_remaining[12] = {5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
  QuicheDataReader reader(buffer, sizeof(buffer));
  // Nothing read yet.
  absl::string_view previously_read_payload1 = reader.PreviouslyReadPayload();
  EXPECT_TRUE(previously_read_payload1.empty());
  char first_read_buffer[4] = {};
  EXPECT_TRUE(reader.ReadBytes(first_read_buffer, sizeof(first_read_buffer)));
  test::CompareCharArraysWithHexError(
      "first read", first_read_buffer, sizeof(first_read_buffer),
      expected_first_read, sizeof(expected_first_read));
  // Peek does not advance the read position.
  absl::string_view peeked_remaining_payload = reader.PeekRemainingPayload();
  test::CompareCharArraysWithHexError(
      "peeked_remaining_payload", peeked_remaining_payload.data(),
      peeked_remaining_payload.length(), expected_remaining,
      sizeof(expected_remaining));
  absl::string_view full_payload = reader.FullPayload();
  test::CompareCharArraysWithHexError("full_payload", full_payload.data(),
                                      full_payload.length(), buffer,
                                      sizeof(buffer));
  absl::string_view previously_read_payload2 = reader.PreviouslyReadPayload();
  test::CompareCharArraysWithHexError(
      "previously_read_payload2", previously_read_payload2.data(),
      previously_read_payload2.length(), first_read_buffer,
      sizeof(first_read_buffer));
  // ReadRemainingPayload consumes the rest of the buffer.
  absl::string_view read_remaining_payload = reader.ReadRemainingPayload();
  test::CompareCharArraysWithHexError(
      "read_remaining_payload", read_remaining_payload.data(),
      read_remaining_payload.length(), expected_remaining,
      sizeof(expected_remaining));
  EXPECT_TRUE(reader.IsDoneReading());
  // Full/previously-read views still cover the whole buffer after exhaustion.
  absl::string_view full_payload2 = reader.FullPayload();
  test::CompareCharArraysWithHexError("full_payload2", full_payload2.data(),
                                      full_payload2.length(), buffer,
                                      sizeof(buffer));
  absl::string_view previously_read_payload3 = reader.PreviouslyReadPayload();
  test::CompareCharArraysWithHexError(
      "previously_read_payload3", previously_read_payload3.data(),
      previously_read_payload3.length(), buffer, sizeof(buffer));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_data_writer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_data_writer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
c84094b4-b932-4a99-bebd-df271b170370 | cpp | google/tensorstore | bytes | tensorstore/driver/zarr3/codec/bytes.cc | tensorstore/driver/zarr3/codec/bytes_test.cc | #include "tensorstore/driver/zarr3/codec/bytes.h"
#include <assert.h>
#include <stdint.h>
#include <optional>
#include <string_view>
#include <utility>
#include "absl/status/status.h"
#include "riegeli/bytes/reader.h"
#include "riegeli/bytes/writer.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/registry.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/internal/global_initializer.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/riegeli/array_endian_codec.h"
#include "tensorstore/internal/unaligned_data_type_functions.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/endian.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_zarr3 {
namespace {
// Builds the error returned when a data type is not trivially copyable and
// therefore cannot be serialized by the "bytes" codec.
absl::Status InvalidDataTypeError(DataType dtype) {
  return absl::InvalidArgumentError(tensorstore::StrCat(
      "Data type ", dtype, " not compatible with \"bytes\" codec"));
}

// Array->bytes codec that serializes array elements contiguously in C order
// with the endianness chosen when the spec was resolved.
class BytesCodec : public ZarrArrayToBytesCodec {
 public:
  explicit BytesCodec(DataType decoded_dtype, endian endianness)
      : dtype_(decoded_dtype), endianness_(endianness) {}

  // Computes the fixed encoded size for `decoded_shape` and returns a
  // prepared state able to encode/decode chunks of that shape.
  Result<PreparedState::Ptr> Prepare(
      span<const Index> decoded_shape) const final;

 private:
  DataType dtype_;      // Element type of the decoded array.
  endian endianness_;   // Byte order used on the wire.
};
}
// Fills in the chunk layout implied by this codec: C-order (identity) inner
// order, and a read-chunk shape equal to the full decoded shape (the "bytes"
// codec has no sub-chunking). Fails for non-trivial data types.
absl::Status BytesCodecSpec::GetDecodedChunkLayout(
    const ArrayDataTypeAndShapeInfo& array_info,
    ArrayCodecChunkLayoutInfo& decoded) const {
  if (array_info.dtype.valid() &&
      !internal::IsTrivialDataType(array_info.dtype)) {
    return InvalidDataTypeError(array_info.dtype);
  }
  const DimensionIndex rank = array_info.rank;
  if (rank != dynamic_rank) {
    // "bytes" always stores elements in C order: identity permutation.
    auto& inner_order = decoded.inner_order.emplace();
    for (DimensionIndex i = 0; i < rank; ++i) {
      inner_order[i] = i;
    }
  }
  if (array_info.shape) {
    // Read chunks cover the entire decoded array.
    auto& shape = *array_info.shape;
    auto& read_chunk_shape = decoded.read_chunk_shape.emplace();
    for (DimensionIndex i = 0; i < rank; ++i) {
      read_chunk_shape[i] = shape[i];
    }
  }
  return absl::OkStatus();
}

// Returns true iff the requested inner order is unconstrained or already
// C order; otherwise reports C order as the preferred order and returns
// false so the caller can insert a transpose.
bool BytesCodecSpec::SupportsInnerOrder(
    const ArrayCodecResolveParameters& decoded,
    span<DimensionIndex> preferred_inner_order) const {
  if (!decoded.inner_order) return true;
  if (PermutationMatchesOrder(span(decoded.inner_order->data(), decoded.rank),
                              c_order)) {
    return true;
  }
  SetPermutation(c_order, preferred_inner_order);
  return false;
}
// Resolves this spec against concrete array parameters, producing a
// BytesCodec instance. Validates that the data type is trivial, that an
// endianness is specified when the type is endian-sensitive, that no codec
// chunk shape was requested, and that any requested inner order is C order.
// On success fills `encoded.item_bits` and, if `resolved_spec` is non-null,
// a fully-constrained copy of this spec.
Result<ZarrArrayToBytesCodec::Ptr> BytesCodecSpec::Resolve(
    ArrayCodecResolveParameters&& decoded, BytesCodecResolveParameters& encoded,
    ZarrArrayToBytesCodecSpec::Ptr* resolved_spec) const {
  assert(decoded.dtype.valid());
  if (!internal::IsTrivialDataType(decoded.dtype)) {
    return InvalidDataTypeError(decoded.dtype);
  }
  const bool is_endian_invariant =
      internal::IsEndianInvariantDataType(decoded.dtype);
  // Endianness must be explicit for multi-byte types when resolving a
  // fully-specified (non-constraint) spec.
  if (!options.constraints && !is_endian_invariant && !options.endianness) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("\"bytes\" codec requires that \"endian\" option "
                            "is specified for data type ",
                            decoded.dtype));
  }
  encoded.item_bits = decoded.dtype.size() * 8;
  DimensionIndex rank = decoded.rank;
  if (decoded.codec_chunk_shape) {
    // Fixed: the error message previously left the '(' unbalanced
    // (" was specified" without a closing parenthesis).
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "\"bytes\" codec does not support codec_chunk_shape (",
        span<const Index>(decoded.codec_chunk_shape->data(), rank),
        " was specified)"));
  }
  if (decoded.inner_order) {
    // Only the identity (C order) permutation is representable.
    auto& decoded_inner_order = *decoded.inner_order;
    for (DimensionIndex i = 0; i < rank; ++i) {
      if (decoded_inner_order[i] != i) {
        return absl::InvalidArgumentError(tensorstore::StrCat(
            "\"bytes\" codec does not support inner_order of ",
            span<const DimensionIndex>(decoded_inner_order.data(), rank)));
      }
    }
  }
  endian resolved_endianness = options.endianness.value_or(endian::native);
  if (resolved_spec) {
    // Endian-invariant types omit the endian option in the resolved spec.
    resolved_spec->reset(new BytesCodecSpec(Options{
        is_endian_invariant ? std::optional<endian>()
                            : std::optional<endian>(resolved_endianness)}));
  }
  return internal::MakeIntrusivePtr<BytesCodec>(decoded.dtype,
                                                resolved_endianness);
}
namespace {
namespace jb = ::tensorstore::internal_json_binding;
// JSON binder mapping the "endian" member values "little"/"big" to the
// corresponding `endian` enum values.
constexpr auto EndiannessBinder() {
  return jb::Enum<endian, std::string_view>({
      {endian::little, "little"},
      {endian::big, "big"},
  });
}
}
// Merges the endianness constraint from `other` into this spec, failing if
// the two specs request conflicting "endian" values.
absl::Status BytesCodecSpec::MergeFrom(const ZarrCodecSpec& other,
                                       bool strict) {
  using Self = BytesCodecSpec;
  const auto& other_options = static_cast<const Self&>(other).options;
  TENSORSTORE_RETURN_IF_ERROR(MergeConstraint<&Options::endianness>(
      "endian", options, other_options, EndiannessBinder()));
  return absl::OkStatus();
}

// Returns a deep copy of this spec.
ZarrCodecSpec::Ptr BytesCodecSpec::Clone() const {
  return internal::MakeIntrusivePtr<BytesCodecSpec>(*this);
}
namespace {
// Prepared state for a fixed decoded shape: encodes/decodes arrays as raw
// C-order element bytes with a fixed endianness and a known encoded size.
class BytesCodecPreparedState : public ZarrArrayToBytesCodec::PreparedState {
 public:
  // Total encoded size in bytes (element size times number of elements).
  int64_t encoded_size() const final { return encoded_size_; }

  // Streams `decoded` to `writer` in C order with `endianness_` byte order.
  // On failure propagates the writer's status.
  absl::Status EncodeArray(SharedArrayView<const void> decoded,
                           riegeli::Writer& writer) const final {
    if (internal::EncodeArrayEndian(std::move(decoded), endianness_, c_order,
                                    writer)) {
      return absl::OkStatus();
    }
    assert(!writer.ok());
    return writer.status();
  }

  // Reads an array of `decoded_shape` from `reader`, converting from
  // `endianness_` to native byte order.
  Result<SharedArray<const void>> DecodeArray(
      span<const Index> decoded_shape, riegeli::Reader& reader) const final {
    return internal::DecodeArrayEndian(reader, dtype_, decoded_shape,
                                       endianness_, c_order);
  }

  DataType dtype_;        // Element type being (de)serialized.
  endian endianness_;     // Wire byte order.
  int64_t encoded_size_;  // Precomputed total byte size.
};
}
// Computes the encoded byte size for `decoded_shape` (guarding against
// integer overflow) and packages it with the dtype/endianness into a
// prepared state.
Result<ZarrArrayToBytesCodec::PreparedState::Ptr> BytesCodec::Prepare(
    span<const Index> decoded_shape) const {
  int64_t bytes = dtype_.size();
  for (auto size : decoded_shape) {
    if (internal::MulOverflow(size, bytes, &bytes)) {
      return absl::OutOfRangeError(tensorstore::StrCat(
          "Integer overflow computing encoded size of array of shape ",
          decoded_shape));
    }
  }
  auto state = internal::MakeIntrusivePtr<BytesCodecPreparedState>();
  state->dtype_ = dtype_;
  state->endianness_ = endianness_;
  state->encoded_size_ = bytes;
  return state;
}

// Returns the default "bytes" codec spec: native endianness, no constraints.
internal::IntrusivePtr<const BytesCodecSpec> DefaultBytesCodec() {
  return internal::MakeIntrusivePtr<BytesCodecSpec>(
      BytesCodecSpec::Options{endian::native});
}
// Registers the "bytes" codec with the zarr3 codec registry at startup,
// binding the optional "endian" JSON member and propagating whether the spec
// is being loaded as a constraint.
TENSORSTORE_GLOBAL_INITIALIZER {
  using Self = BytesCodecSpec;
  using Options = Self::Options;
  RegisterCodec<Self>(
      "bytes",
      jb::Projection<&Self::options>(jb::Sequence(
          // When loading, record whether this spec is a constraint so that
          // Resolve can relax the mandatory-"endian" check accordingly.
          [](auto is_loading, const auto& options, auto* obj, auto* j) {
            if constexpr (is_loading) {
              obj->constraints = options.constraints;
            }
            return absl::OkStatus();
          },
          jb::Member("endian",
                     jb::Projection<&Options::endianness>(
                         jb::Optional(EndiannessBinder())))
          )));
}
}
} | #include <stdint.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/driver/zarr3/codec/codec_chain_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_spec.h"
#include "tensorstore/driver/zarr3/codec/codec_test_util.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_zarr3::ArrayCodecResolveParameters;
using ::tensorstore::internal_zarr3::CodecRoundTripTestParams;
using ::tensorstore::internal_zarr3::CodecSpecRoundTripTestParams;
using ::tensorstore::internal_zarr3::GetDefaultBytesCodecJson;
using ::tensorstore::internal_zarr3::TestCodecRoundTrip;
using ::tensorstore::internal_zarr3::TestCodecSpecRoundTrip;
using ::tensorstore::internal_zarr3::ZarrCodecChainSpec;
// A bare "bytes" spec normalizes to the fully-specified default codec JSON.
TEST(BytesTest, SpecRoundTrip) {
  CodecSpecRoundTripTestParams p;
  p.orig_spec = {"bytes"};
  p.expected_spec = ::nlohmann::json::array_t{GetDefaultBytesCodecJson()};
  TestCodecSpecRoundTrip(p);
}

// A codec chain may contain only one array->bytes codec; a second "bytes"
// entry must be rejected when parsing the chain.
TEST(BytesTest, DuplicateArrayToBytes) {
  EXPECT_THAT(
      ZarrCodecChainSpec::FromJson({
          {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
          {{"name", "bytes"}, {"configuration", {{"endian", "little"}}}},
      }),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Expected bytes -> bytes codec, but received: .*"));
}

// Data survives an encode/decode round trip through the "bytes" codec.
TEST(BytesTest, RoundTrip) {
  CodecRoundTripTestParams p;
  p.spec = {"bytes"};
  TestCodecRoundTrip(p);
}
// A non-C inner order cannot be represented by "bytes" directly; resolving
// must automatically prepend a "transpose" codec.
TEST(BytesTest, AutomaticTranspose) {
  ArrayCodecResolveParameters p;
  p.dtype = dtype_v<uint16_t>;
  p.rank = 2;
  auto& inner_order = p.inner_order.emplace();
  inner_order[0] = 1;
  inner_order[1] = 0;
  EXPECT_THAT(
      TestCodecSpecResolve(
          ::nlohmann::json::array_t{GetDefaultBytesCodecJson()}, p),
      ::testing::Optional(MatchesJson({
          {{"name", "transpose"}, {"configuration", {{"order", {1, 0}}}}},
          GetDefaultBytesCodecJson(),
      })));
}

// Single-byte (endian-invariant) types resolve without an "endian" option.
TEST(BytesTest, EndianInvariantDataType) {
  ArrayCodecResolveParameters p;
  p.dtype = dtype_v<uint8_t>;
  p.rank = 2;
  EXPECT_THAT(
      TestCodecSpecResolve(::nlohmann::json::array_t{{{"name", "bytes"}}}, p,
                           false),
      ::testing::Optional(
          MatchesJson(::nlohmann::json::array_t{{{"name", "bytes"}}})));
}

// Multi-byte types require an explicit "endian" option when resolving a
// non-constraint spec; its absence is an error.
TEST(BytesTest, MissingEndianEndianInvariantDataType) {
  ArrayCodecResolveParameters p;
  p.dtype = dtype_v<uint16_t>;
  p.rank = 2;
  EXPECT_THAT(
      TestCodecSpecResolve(::nlohmann::json::array_t{{{"name", "bytes"}}}, p,
                           false),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    ".*: \"bytes\" codec requires that \"endian\" option is "
                    "specified for data type uint16"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/bytes.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr3/codec/bytes_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
770b6edd-c724-4e1f-9105-36781d3f9b9e | cpp | tensorflow/tensorflow | xla_legalize_tf | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf.cc | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_test.cc | #include <memory>
#include <optional>
#include <string>
#include <utility>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_with_tf2xla_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/rewriters.h"
#include "xla/mlir_hlo/mhlo/utils/type_conversion.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace mlir {
namespace mhlo {
namespace {
#define GEN_PASS_DEF_LEGALIZETF
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
auto *mlir_legalization_count = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_count",
"Counts the attempts of legalization of ops", "op_name");
auto *mlir_failed_legalization_count = tensorflow::monitoring::Counter<2>::New(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count",
"Counts the failure of legalization of ops", "op_name", "legality");
// Pass that lowers TF dialect ops to MHLO, optionally lowering CHLO as well
// and optionally falling back to TF2XLA kernel-based legalization for the
// given device type.
class LegalizeTF : public impl::LegalizeTFBase<LegalizeTF> {
 public:
  explicit LegalizeTF(bool legalize_chlo,
                      std::optional<StringRef> tf2xla_fallback_device_type,
                      bool prefer_tf2xla) {
    legalize_chlo_ = legalize_chlo;
    prefer_tf2xla_ = prefer_tf2xla;
    // A present device type both enables the TF2XLA fallback and selects
    // which device's kernels are used.
    use_tf2xla_fallback_ = tf2xla_fallback_device_type.has_value();
    if (tf2xla_fallback_device_type.has_value()) {
      device_type_ = tf2xla_fallback_device_type.value().str();
    }
  }
  void runOnOperation() override;
};
#define GEN_PASS_DEF_LEGALIZETFMODULEPASS
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
// Filters `from` down to patterns whose root op is configured for MLIR-based
// legalization (plus patterns with no fixed root op), moving them into a new
// set. PDL patterns are carried over unconditionally.
RewritePatternSet PatternsIncludeOps(RewritePatternSet &from) {
  RewritePatternSet to(from.getContext());
  for (auto &pattern : from.getNativePatterns()) {
    std::optional<OperationName> pat_op_name = pattern->getRootKind();
    // Patterns without a root kind can match any op, so always keep them.
    bool include =
        !pat_op_name ||
        IsTypeLegalizedWithMlir(pat_op_name->getRegisteredInfo()->getTypeID());
    if (include) to.add(std::move(pattern));
  }
  to.add(std::move(from.getPDLPatterns()));
  return to;
}
// Maps the conversion target's registered legalization action for `op` to a
// human-readable label used as a metrics dimension. Ops with no registered
// action report "Unknown"; unrecognized enum values report "Invalid".
std::string OperationLegalityString(Operation *op,
                                    const ConversionTarget &target) {
  const auto action = target.getOpAction(op->getName());
  if (!action.has_value()) {
    return "Unknown";
  }
  if (*action == ConversionTarget::LegalizationAction::Legal) {
    return "Legal";
  }
  if (*action == ConversionTarget::LegalizationAction::Dynamic) {
    return "Dynamic";
  }
  if (*action == ConversionTarget::LegalizationAction::Illegal) {
    return "Illegal";
  }
  return "Invalid";
}
// Bumps the failed-legalization counter for `op`, labeled by op name and its
// legality status under `target`.
void IncrementFailedLegalizationCount(Operation *op,
                                      const ConversionTarget &target) {
  auto op_name = op->getName();
  auto name_string = op_name.getStringRef().str();
  auto op_legality = OperationLegalityString(op, target);
  mlir_failed_legalization_count->GetCell(name_string, op_legality)
      ->IncrementBy(1);
}
// Runs a partial dialect conversion on `op` with the default legal targets,
// recording a failure metric for the root op on overall failure and for each
// op that could not be converted.
mlir::LogicalResult ApplyPatterns(Operation *op, RewritePatternSet &patterns,
                                  bool legalize_chlo) {
  ConversionTarget target =
      GetDefaultLegalConversionTargets(*op->getContext(), legalize_chlo);
  // Collect ops left unconverted so they can be reported individually.
  DenseSet<Operation *> unconverted_ops;
  ConversionConfig config;
  config.unlegalizedOps = &unconverted_ops;
  auto result = applyPartialConversion(op, target, std::move(patterns), config);
  if (failed(result)) {
    IncrementFailedLegalizationCount(op, target);
  }
  for (const auto &unconverted_op : unconverted_ops) {
    IncrementFailedLegalizationCount(unconverted_op, target);
  }
  return result;
}
// Assembles the full TF->MHLO pattern set and applies it to `op`.
// - Native MLIR patterns and TF pre-lowering patterns are always included.
// - If `tf2xla_fallback_device_type` is set, TF2XLA kernel fallback patterns
//   are added; with `prefer_tf2xla`, native patterns are first filtered to
//   ops explicitly configured for MLIR legalization.
// - If `legalize_chlo` is true, CHLO->MHLO patterns are added too.
LogicalResult legalizeTF(Operation *op, bool legalize_chlo,
                         std::optional<StringRef> tf2xla_fallback_device_type,
                         bool prefer_tf2xla) {
  MLIRContext *context = op->getContext();
  RewritePatternSet legalize_lower_patterns(context);
  PopulateLegalizeTfPatterns(context, &legalize_lower_patterns);
  TF::PopulateTFLoweringBeforeHLOPatterns(context, &legalize_lower_patterns);
  if (tf2xla_fallback_device_type && prefer_tf2xla) {
    VLOG(1) << "TF to XLA legalization patterns are partitioned by op into "
               "either native MLIR legalization, or TF2XLA fallback "
               "legalzation, with a preference toward TF2XLA.";
  } else if (tf2xla_fallback_device_type) {
    VLOG(1) << "TF to XLA legalization patterns include all native patterns "
               "and TF2XLA fallback patterns.";
  } else {
    VLOG(1) << "TF to XLA legalization patterns are native patterns only.";
  }
  // With prefer_tf2xla, restrict native patterns to ops opted into MLIR
  // legalization; otherwise use the full native set.
  RewritePatternSet patterns = (tf2xla_fallback_device_type && prefer_tf2xla)
                                   ? PatternsIncludeOps(legalize_lower_patterns)
                                   : std::move(legalize_lower_patterns);
  Tf2XlaTypeConverter converter;
  if (tf2xla_fallback_device_type) {
    PopulateLegalizeTfWithTf2XlaPatterns(tf2xla_fallback_device_type.value(),
                                         patterns, context, converter,
                                         prefer_tf2xla);
  }
  stablehlo::StablehloToHloTypeConverter hlo_converter;
  if (legalize_chlo) {
    chlo::populateChloToHloPatterns(context, &hlo_converter, &patterns);
  }
  // Canonicalize chlo.constant_like so it can fold away before conversion.
  chlo::ConstantLikeOp::getCanonicalizationPatterns(patterns, context);
  return ApplyPatterns(op, patterns, legalize_chlo);
}
// Pass entry point: records an attempt metric, then runs legalizeTF with the
// configured options, signaling pass failure on error.
void LegalizeTF::runOnOperation() {
  auto op = getOperation();
  auto op_name = op->getName().getStringRef().str();
  mlir_legalization_count->GetCell(op_name)->IncrementBy(1);
  std::optional<StringRef> tf2xla_fallback_device_type = std::nullopt;
  if (use_tf2xla_fallback_) {
    tf2xla_fallback_device_type = device_type_;
  }
  if (failed(legalizeTF(op, legalize_chlo_, tf2xla_fallback_device_type,
                        prefer_tf2xla_))) {
    signalPassFailure();
  }
}
}
// Factory for the TF->MHLO legalization pass. A present
// `tf2xla_fallback_device_type` enables TF2XLA kernel fallback for that
// device; `prefer_tf2xla` biases toward the fallback where available.
std::unique_ptr<OperationPass<ModuleOp>> createLegalizeTFPass(
    bool legalize_chlo, std::optional<StringRef> tf2xla_fallback_device_type,
    bool prefer_tf2xla) {
  return std::make_unique<LegalizeTF>(
      legalize_chlo, tf2xla_fallback_device_type, prefer_tf2xla);
}
}
} | #include <functional>
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::PassManager;
using ::tensorflow::monitoring::testing::CellReader;
// Parses `module_string` into an MLIR module, registering all TensorFlow
// dialects on `context` first. Returns the parse error on failure.
absl::StatusOr<OwningOpRef<ModuleOp>> GetMlirModuleFromString(
    absl::string_view module_string, MLIRContext* context) {
  mlir::DialectRegistry mlir_registry;
  RegisterAllTensorFlowDialects(mlir_registry);
  context->appendDialectRegistry(mlir_registry);
  OwningOpRef<ModuleOp> mlir_module;
  auto status =
      tensorflow::DeserializeMlirModule(module_string, context, &mlir_module);
  if (!status.ok()) {
    return status;
  }
  return mlir_module;
}

// Parses `module_string`, builds a pass manager via the `passes` callback,
// and runs it over the module. Returns true iff the pipeline succeeded.
bool BuildAndRunPipeline(absl::string_view module_string,
                         const std::function<void(PassManager*)>& passes) {
  mlir::registerPassManagerCLOptions();
  MLIRContext context;
  OwningOpRef<ModuleOp> module =
      GetMlirModuleFromString(module_string, &context).value();
  PassManager pm(&context);
  if (mlir::failed(mlir::applyPassManagerCLOptions(pm))) return false;
  passes(&pm);
  return pm.run(module.get()).succeeded();
}
// Builds the pipeline under test: the TF->MHLO legalization pass with CHLO
// legalization enabled and TF2XLA fallback for device "gpu/xpu".
std::function<void(PassManager*)> legalizeTFPasses() {
  return [](PassManager* pm) {
    pm->addPass(mlir::mhlo::createLegalizeTFPass(
        true, llvm::StringRef("gpu/xpu"),
        false));
  };
}
// An unknown op must not abort the pipeline (partial conversion), but it must
// be counted once in the failed-legalization metric with legality "Unknown".
TEST(XlaLegalizeTest, IllegalOp) {
  constexpr char kMlirIllegalOpStr[] = R"(
  module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
    func.func @main() -> tensor<1xi32> {
      %0 = "tf.DoesntExist"() : () -> tensor<1xi32>
      func.return %0 : tensor<1xi32>
    }
  })";
  CellReader<int64_t> legalize_failure_count(
      "/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");

  auto status = BuildAndRunPipeline(kMlirIllegalOpStr, legalizeTFPasses());

  EXPECT_TRUE(status);
  EXPECT_EQ(legalize_failure_count.Read("tf.DoesntExist", "Unknown"), 1);
}

// An op the pass cannot legalize (dynamic-shaped InfeedDequeueTuple) is also
// recorded in the failure metric even though the pipeline itself succeeds.
TEST(XlaLegalizeTest, LegalOp) {
  static constexpr char kMlirLegalOpStr[] = R"(
  func.func @infeed_dequeue_tuple_dynamic_error() -> (tensor<3x3xf32>, tensor<4x?xf32>) {
    %0:2 = "tf.InfeedDequeueTuple"() : () -> (tensor<3x3xf32>, tensor<4x?xf32>) func.return %0#0, %0#1 : tensor<3x3xf32>, tensor<4x?xf32>
  })";
  CellReader<int64_t> legalize_failure_count(
      "/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");

  auto status = BuildAndRunPipeline(kMlirLegalOpStr, legalizeTFPasses());

  EXPECT_TRUE(status);
  EXPECT_EQ(legalize_failure_count.Read("tf.InfeedDequeueTuple", "Unknown"), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e7f4235a-4f85-4973-acd0-fdcd09f31976 | cpp | google/quiche | pacing_sender | quiche/quic/core/congestion_control/pacing_sender.cc | quiche/quic/core/congestion_control/pacing_sender_test.cc | #include "quiche/quic/core/congestion_control/pacing_sender.h"
#include <algorithm>
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
static const uint32_t kInitialUnpacedBurst = 10;
}
PacingSender::PacingSender()
: sender_(nullptr),
max_pacing_rate_(QuicBandwidth::Zero()),
application_driven_pacing_rate_(QuicBandwidth::Infinite()),
burst_tokens_(kInitialUnpacedBurst),
ideal_next_packet_send_time_(QuicTime::Zero()),
initial_burst_size_(kInitialUnpacedBurst),
lumpy_tokens_(0),
pacing_limited_(false) {}
PacingSender::~PacingSender() {}
void PacingSender::set_sender(SendAlgorithmInterface* sender) {
QUICHE_DCHECK(sender != nullptr);
sender_ = sender;
}
void PacingSender::OnCongestionEvent(bool rtt_updated,
QuicByteCount bytes_in_flight,
QuicTime event_time,
const AckedPacketVector& acked_packets,
const LostPacketVector& lost_packets,
QuicPacketCount num_ect,
QuicPacketCount num_ce) {
QUICHE_DCHECK(sender_ != nullptr);
if (!lost_packets.empty()) {
burst_tokens_ = 0;
}
sender_->OnCongestionEvent(rtt_updated, bytes_in_flight, event_time,
acked_packets, lost_packets, num_ect, num_ce);
}
void PacingSender::OnPacketSent(
QuicTime sent_time, QuicByteCount bytes_in_flight,
QuicPacketNumber packet_number, QuicByteCount bytes,
HasRetransmittableData has_retransmittable_data) {
QUICHE_DCHECK(sender_ != nullptr);
QUIC_DVLOG(3) << "Packet " << packet_number << " with " << bytes
<< " bytes sent at " << sent_time
<< ". bytes_in_flight: " << bytes_in_flight;
sender_->OnPacketSent(sent_time, bytes_in_flight, packet_number, bytes,
has_retransmittable_data);
if (has_retransmittable_data != HAS_RETRANSMITTABLE_DATA) {
return;
}
if (remove_non_initial_burst_) {
QUIC_RELOADABLE_FLAG_COUNT_N(quic_pacing_remove_non_initial_burst, 1, 2);
} else {
if (bytes_in_flight == 0 && !sender_->InRecovery()) {
burst_tokens_ =
std::min(initial_burst_size_,
static_cast<uint32_t>(sender_->GetCongestionWindow() /
kDefaultTCPMSS));
}
}
if (burst_tokens_ > 0) {
--burst_tokens_;
ideal_next_packet_send_time_ = QuicTime::Zero();
pacing_limited_ = false;
return;
}
QuicTime::Delta delay =
PacingRate(bytes_in_flight + bytes).TransferTime(bytes);
if (!pacing_limited_ || lumpy_tokens_ == 0) {
lumpy_tokens_ = std::max(
1u, std::min(static_cast<uint32_t>(GetQuicFlag(quic_lumpy_pacing_size)),
static_cast<uint32_t>(
(sender_->GetCongestionWindow() *
GetQuicFlag(quic_lumpy_pacing_cwnd_fraction)) /
kDefaultTCPMSS)));
if (sender_->BandwidthEstimate() <
QuicBandwidth::FromKBitsPerSecond(
GetQuicFlag(quic_lumpy_pacing_min_bandwidth_kbps))) {
lumpy_tokens_ = 1u;
}
if ((bytes_in_flight + bytes) >= sender_->GetCongestionWindow()) {
lumpy_tokens_ = 1u;
}
}
--lumpy_tokens_;
if (pacing_limited_) {
ideal_next_packet_send_time_ = ideal_next_packet_send_time_ + delay;
} else {
ideal_next_packet_send_time_ =
std::max(ideal_next_packet_send_time_ + delay, sent_time + delay);
}
pacing_limited_ = sender_->CanSend(bytes_in_flight + bytes);
}
void PacingSender::OnApplicationLimited() {
pacing_limited_ = false;
}
void PacingSender::SetBurstTokens(uint32_t burst_tokens) {
initial_burst_size_ = burst_tokens;
burst_tokens_ = std::min(
initial_burst_size_,
static_cast<uint32_t>(sender_->GetCongestionWindow() / kDefaultTCPMSS));
}
QuicTime::Delta PacingSender::TimeUntilSend(
QuicTime now, QuicByteCount bytes_in_flight) const {
QUICHE_DCHECK(sender_ != nullptr);
if (!sender_->CanSend(bytes_in_flight)) {
return QuicTime::Delta::Infinite();
}
if (remove_non_initial_burst_) {
QUIC_RELOADABLE_FLAG_COUNT_N(quic_pacing_remove_non_initial_burst, 2, 2);
if (burst_tokens_ > 0 || lumpy_tokens_ > 0) {
QUIC_DVLOG(1) << "Can send packet now. burst_tokens:" << burst_tokens_
<< ", lumpy_tokens:" << lumpy_tokens_;
return QuicTime::Delta::Zero();
}
} else {
if (burst_tokens_ > 0 || bytes_in_flight == 0 || lumpy_tokens_ > 0) {
QUIC_DVLOG(1) << "Sending packet now. burst_tokens:" << burst_tokens_
<< ", bytes_in_flight:" << bytes_in_flight
<< ", lumpy_tokens:" << lumpy_tokens_;
return QuicTime::Delta::Zero();
}
}
if (ideal_next_packet_send_time_ > now + kAlarmGranularity) {
QUIC_DVLOG(1) << "Delaying packet: "
<< (ideal_next_packet_send_time_ - now).ToMicroseconds();
return ideal_next_packet_send_time_ - now;
}
QUIC_DVLOG(1) << "Can send packet now. ideal_next_packet_send_time: "
<< ideal_next_packet_send_time_ << ", now: " << now;
return QuicTime::Delta::Zero();
}
QuicBandwidth PacingSender::PacingRate(QuicByteCount bytes_in_flight) const {
QUICHE_DCHECK(sender_ != nullptr);
if (!max_pacing_rate_.IsZero()) {
return QuicBandwidth::FromBitsPerSecond(
std::min(max_pacing_rate_.ToBitsPerSecond(),
sender_->PacingRate(bytes_in_flight).ToBitsPerSecond()));
}
return sender_->PacingRate(bytes_in_flight);
}
} | #include "quiche/quic/core/congestion_control/pacing_sender.h"
#include <memory>
#include <utility>
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
using testing::_;
using testing::AtMost;
using testing::Return;
using testing::StrictMock;
namespace quic {
namespace test {
const QuicByteCount kBytesInFlight = 1024;
const int kInitialBurstPackets = 10;
class TestPacingSender : public PacingSender {
public:
using PacingSender::lumpy_tokens;
using PacingSender::PacingSender;
QuicTime ideal_next_packet_send_time() const {
return GetNextReleaseTime().release_time;
}
};
class PacingSenderTest : public QuicTest {
protected:
PacingSenderTest()
: zero_time_(QuicTime::Delta::Zero()),
infinite_time_(QuicTime::Delta::Infinite()),
packet_number_(1),
mock_sender_(new StrictMock<MockSendAlgorithm>()),
pacing_sender_(new TestPacingSender) {
pacing_sender_->set_sender(mock_sender_.get());
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(9));
}
~PacingSenderTest() override {}
void InitPacingRate(QuicPacketCount burst_size, QuicBandwidth bandwidth) {
mock_sender_ = std::make_unique<StrictMock<MockSendAlgorithm>>();
pacing_sender_ = std::make_unique<TestPacingSender>();
pacing_sender_->set_sender(mock_sender_.get());
EXPECT_CALL(*mock_sender_, PacingRate(_)).WillRepeatedly(Return(bandwidth));
EXPECT_CALL(*mock_sender_, BandwidthEstimate())
.WillRepeatedly(Return(bandwidth));
if (burst_size == 0) {
EXPECT_CALL(*mock_sender_, OnCongestionEvent(_, _, _, _, _, _, _));
LostPacketVector lost_packets;
lost_packets.push_back(
LostPacket(QuicPacketNumber(1), kMaxOutgoingPacketSize));
AckedPacketVector empty;
pacing_sender_->OnCongestionEvent(true, 1234, clock_.Now(), empty,
lost_packets, 0, 0);
} else if (burst_size != kInitialBurstPackets) {
QUIC_LOG(FATAL) << "Unsupported burst_size " << burst_size
<< " specificied, only 0 and " << kInitialBurstPackets
<< " are supported.";
}
}
void CheckPacketIsSentImmediately(HasRetransmittableData retransmittable_data,
QuicByteCount prior_in_flight,
bool in_recovery, QuicPacketCount cwnd) {
for (int i = 0; i < 2; ++i) {
EXPECT_CALL(*mock_sender_, CanSend(prior_in_flight))
.WillOnce(Return(true));
EXPECT_EQ(zero_time_,
pacing_sender_->TimeUntilSend(clock_.Now(), prior_in_flight))
<< "Next packet to send is " << packet_number_;
}
if (prior_in_flight == 0 &&
!GetQuicReloadableFlag(quic_pacing_remove_non_initial_burst)) {
EXPECT_CALL(*mock_sender_, InRecovery()).WillOnce(Return(in_recovery));
}
EXPECT_CALL(*mock_sender_,
OnPacketSent(clock_.Now(), prior_in_flight, packet_number_,
kMaxOutgoingPacketSize, retransmittable_data));
EXPECT_CALL(*mock_sender_, GetCongestionWindow())
.WillRepeatedly(Return(cwnd * kDefaultTCPMSS));
EXPECT_CALL(*mock_sender_,
CanSend(prior_in_flight + kMaxOutgoingPacketSize))
.Times(AtMost(1))
.WillRepeatedly(Return((prior_in_flight + kMaxOutgoingPacketSize) <
(cwnd * kDefaultTCPMSS)));
pacing_sender_->OnPacketSent(clock_.Now(), prior_in_flight,
packet_number_++, kMaxOutgoingPacketSize,
retransmittable_data);
}
void CheckPacketIsSentImmediately() {
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, kBytesInFlight,
false, 10);
}
void CheckPacketIsDelayed(QuicTime::Delta delay) {
for (int i = 0; i < 2; ++i) {
EXPECT_CALL(*mock_sender_, CanSend(kBytesInFlight))
.WillOnce(Return(true));
EXPECT_EQ(delay.ToMicroseconds(),
pacing_sender_->TimeUntilSend(clock_.Now(), kBytesInFlight)
.ToMicroseconds());
}
}
void UpdateRtt() {
EXPECT_CALL(*mock_sender_,
OnCongestionEvent(true, kBytesInFlight, _, _, _, _, _));
AckedPacketVector empty_acked;
LostPacketVector empty_lost;
pacing_sender_->OnCongestionEvent(true, kBytesInFlight, clock_.Now(),
empty_acked, empty_lost, 0, 0);
}
void OnApplicationLimited() { pacing_sender_->OnApplicationLimited(); }
const QuicTime::Delta zero_time_;
const QuicTime::Delta infinite_time_;
MockClock clock_;
QuicPacketNumber packet_number_;
std::unique_ptr<StrictMock<MockSendAlgorithm>> mock_sender_;
std::unique_ptr<TestPacingSender> pacing_sender_;
};
TEST_F(PacingSenderTest, NoSend) {
for (int i = 0; i < 2; ++i) {
EXPECT_CALL(*mock_sender_, CanSend(kBytesInFlight)).WillOnce(Return(false));
EXPECT_EQ(infinite_time_,
pacing_sender_->TimeUntilSend(clock_.Now(), kBytesInFlight));
}
}
TEST_F(PacingSenderTest, SendNow) {
for (int i = 0; i < 2; ++i) {
EXPECT_CALL(*mock_sender_, CanSend(kBytesInFlight)).WillOnce(Return(true));
EXPECT_EQ(zero_time_,
pacing_sender_->TimeUntilSend(clock_.Now(), kBytesInFlight));
}
}
TEST_F(PacingSenderTest, VariousSending) {
InitPacingRate(
0, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
UpdateRtt();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(2));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(4));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(8));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(8));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
OnApplicationLimited();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
CheckPacketIsSentImmediately();
}
TEST_F(PacingSenderTest, InitialBurst) {
InitPacingRate(
10, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
UpdateRtt();
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
if (GetQuicReloadableFlag(quic_pacing_remove_non_initial_burst)) {
for (int i = 0; i < 6; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
return;
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, false, 10);
for (int i = 0; i < kInitialBurstPackets - 1; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(PacingSenderTest, InitialBurstNoRttMeasurement) {
InitPacingRate(
10, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
if (GetQuicReloadableFlag(quic_pacing_remove_non_initial_burst)) {
for (int i = 0; i < 6; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
return;
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, false, 10);
for (int i = 0; i < kInitialBurstPackets - 1; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(PacingSenderTest, FastSending) {
InitPacingRate(10, QuicBandwidth::FromBytesAndTimeDelta(
2 * kMaxOutgoingPacketSize,
QuicTime::Delta::FromMilliseconds(1)));
UpdateRtt();
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMicroseconds(2000));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
if (GetQuicReloadableFlag(quic_pacing_remove_non_initial_burst)) {
for (int i = 0; i < 10; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
return;
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, false, 10);
for (int i = 0; i < kInitialBurstPackets - 1; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMicroseconds(2000));
}
TEST_F(PacingSenderTest, NoBurstEnteringRecovery) {
InitPacingRate(
0, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
CheckPacketIsSentImmediately();
LostPacketVector lost_packets;
lost_packets.push_back(
LostPacket(QuicPacketNumber(1), kMaxOutgoingPacketSize));
AckedPacketVector empty_acked;
EXPECT_CALL(*mock_sender_, OnCongestionEvent(true, kMaxOutgoingPacketSize, _,
testing::IsEmpty(), _, _, _));
pacing_sender_->OnCongestionEvent(true, kMaxOutgoingPacketSize, clock_.Now(),
empty_acked, lost_packets, 0, 0);
CheckPacketIsSentImmediately();
EXPECT_CALL(*mock_sender_, CanSend(kMaxOutgoingPacketSize))
.WillOnce(Return(true));
EXPECT_EQ(
QuicTime::Delta::FromMilliseconds(2),
pacing_sender_->TimeUntilSend(clock_.Now(), kMaxOutgoingPacketSize));
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(PacingSenderTest, NoBurstInRecovery) {
InitPacingRate(
0, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
UpdateRtt();
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, true, 10);
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(PacingSenderTest, CwndLimited) {
InitPacingRate(
0, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
UpdateRtt();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(2));
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA,
2 * kMaxOutgoingPacketSize, false, 2);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(PacingSenderTest, LumpyPacingWithInitialBurstToken) {
SetQuicFlag(quic_lumpy_pacing_size, 3);
SetQuicFlag(quic_lumpy_pacing_cwnd_fraction, 0.5f);
InitPacingRate(
10, QuicBandwidth::FromBytesAndTimeDelta(
kMaxOutgoingPacketSize, QuicTime::Delta::FromMilliseconds(1)));
UpdateRtt();
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
OnApplicationLimited();
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3));
CheckPacketIsSentImmediately();
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA,
20 * kMaxOutgoingPacketSize, false, 20);
clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, kBytesInFlight, false,
5);
CheckPacketIsSentImmediately();
CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
}
TEST_F(PacingSenderTest, NoLumpyPacingForLowBandwidthFlows) {
SetQuicFlag(quic_lumpy_pacing_size, 3);
SetQuicFlag(quic_lumpy_pacing_cwnd_fraction, 0.5f);
QuicTime::Delta inter_packet_delay = QuicTime::Delta::FromMilliseconds(100);
InitPacingRate(kInitialBurstPackets,
QuicBandwidth::FromBytesAndTimeDelta(kMaxOutgoingPacketSize,
inter_packet_delay));
UpdateRtt();
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
for (int i = 0; i < 200; ++i) {
CheckPacketIsDelayed(inter_packet_delay);
}
}
TEST_F(PacingSenderTest, NoBurstsForLumpyPacingWithAckAggregation) {
QuicTime::Delta inter_packet_delay = QuicTime::Delta::FromMilliseconds(1);
InitPacingRate(kInitialBurstPackets,
QuicBandwidth::FromBytesAndTimeDelta(kMaxOutgoingPacketSize,
inter_packet_delay));
UpdateRtt();
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA,
10 * kMaxOutgoingPacketSize, false, 10);
EXPECT_EQ(0u, pacing_sender_->lumpy_tokens());
CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA,
10 * kMaxOutgoingPacketSize, false, 10);
EXPECT_EQ(0u, pacing_sender_->lumpy_tokens());
CheckPacketIsDelayed(2 * inter_packet_delay);
}
TEST_F(PacingSenderTest, IdealNextPacketSendTimeWithLumpyPacing) {
SetQuicFlag(quic_lumpy_pacing_size, 3);
SetQuicFlag(quic_lumpy_pacing_cwnd_fraction, 0.5f);
QuicTime::Delta inter_packet_delay = QuicTime::Delta::FromMilliseconds(1);
InitPacingRate(kInitialBurstPackets,
QuicBandwidth::FromBytesAndTimeDelta(kMaxOutgoingPacketSize,
inter_packet_delay));
for (int i = 0; i < kInitialBurstPackets; ++i) {
CheckPacketIsSentImmediately();
}
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 2u);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + 2 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 1u);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + 3 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 0u);
CheckPacketIsDelayed(3 * inter_packet_delay);
clock_.AdvanceTime(3 * inter_packet_delay);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 2u);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + 2 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 1u);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + 3 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 0u);
CheckPacketIsDelayed(3 * inter_packet_delay);
clock_.AdvanceTime(4.5 * inter_packet_delay);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() - 0.5 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 2u);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + 0.5 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 1u);
CheckPacketIsSentImmediately();
EXPECT_EQ(pacing_sender_->ideal_next_packet_send_time(),
clock_.Now() + 1.5 * inter_packet_delay);
EXPECT_EQ(pacing_sender_->lumpy_tokens(), 0u);
CheckPacketIsDelayed(1.5 * inter_packet_delay);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/pacing_sender.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/congestion_control/pacing_sender_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
c899a3c4-6fd5-4e1e-a310-16b62a1d4c34 | cpp | tensorflow/tensorflow | quantization_wrapper_utils | tensorflow/lite/tools/optimize/quantization_wrapper_utils.cc | tensorflow/lite/tools/optimize/quantization_wrapper_utils_test.cc | #include "tensorflow/lite/tools/optimize/quantization_wrapper_utils.h"
#include <fstream>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/compiler/mlir/lite/tools/optimize/operator_property.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace impl {
class FlatBufferModel;
}
namespace optimize {
namespace {
#ifdef TFLITE_CUSTOM_LSTM
constexpr bool kUseCustomLSTM = true;
#else
constexpr bool kUseCustomLSTM = false;
#endif
void MakeTensor(const string& name, std::unique_ptr<TensorT>* tensor) {
TensorT* tensor_raw = new TensorT;
tensor_raw->name = name;
tensor_raw->shape = {0};
tensor_raw->type = TensorType_FLOAT32;
tensor->reset(tensor_raw);
}
string CreateTensorName(int op_index, int tensor_index) {
return "intermediate_" + std::to_string(op_index) + "_" +
std::to_string(tensor_index);
}
bool IntermediateTensorExists(ModelT* model) {
for (int subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
++subgraph_idx) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
OperatorT* op = subgraph->operators[op_idx].get();
if (!op->intermediates.empty()) {
return true;
}
}
}
return false;
}
}
TfLiteStatus LoadModel(const string& path, ModelT* model) {
auto input_model = impl::FlatBufferModel::BuildFromFile(path.c_str());
if (!input_model) {
return kTfLiteError;
}
auto readonly_model = input_model->GetModel();
if (!readonly_model) {
return kTfLiteError;
}
readonly_model->UnPackTo(model);
return kTfLiteOk;
}
TfLiteStatus AddIntermediateTensorsToFusedOp(
flatbuffers::FlatBufferBuilder* builder, ModelT* model) {
if (model->subgraphs.size() == 1 && model->subgraphs[0]->operators.empty()) {
return kTfLiteOk;
}
if (IntermediateTensorExists(model)) {
return kTfLiteOk;
}
for (int subgraph_idx = 0; subgraph_idx < model->subgraphs.size();
++subgraph_idx) {
SubGraphT* subgraph = model->subgraphs.at(subgraph_idx).get();
for (size_t op_idx = 0; op_idx < subgraph->operators.size(); op_idx++) {
OperatorT* op = subgraph->operators[op_idx].get();
operator_property::OperatorProperty property =
operator_property::GetOperatorProperty(model, subgraph_idx, op_idx);
if (property.intermediates.empty()) {
continue;
}
const int next_tensor_index = subgraph->tensors.size();
int num_intermediates = property.intermediates.size();
if (kUseCustomLSTM) {
num_intermediates = 12;
}
for (int i = 0; i < num_intermediates; ++i) {
std::unique_ptr<TensorT> intermediate_tensor;
auto name = CreateTensorName(op_idx, i);
MakeTensor(name, &intermediate_tensor);
subgraph->tensors.push_back(std::move(intermediate_tensor));
op->intermediates.push_back(next_tensor_index + i);
}
}
}
flatbuffers::Offset<Model> output_model_location =
Model::Pack(*builder, model);
FinishModelBuffer(*builder, output_model_location);
return kTfLiteOk;
}
bool WriteFile(const std::string& out_file, const uint8_t* bytes,
size_t num_bytes) {
std::fstream stream(out_file, std::ios::binary | std::ios::out);
for (size_t i = 0; i < num_bytes; i++) {
stream << bytes[i];
}
return (!stream.bad() && !stream.fail());
}
}
} | #include "tensorflow/lite/tools/optimize/quantization_wrapper_utils.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
namespace optimize {
namespace {
using ::testing::ElementsAreArray;
TEST(LstmPreprocess, Add2Tensors) {
auto model = std::make_unique<ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto buffer = std::make_unique<tflite::BufferT>();
auto lstm_op_code = std::make_unique<OperatorCodeT>();
auto lstm_op = std::make_unique<OperatorT>();
lstm_op_code->builtin_code = BuiltinOperator_LSTM;
lstm_op_code->deprecated_builtin_code =
static_cast<int8_t>(BuiltinOperator_LSTM);
lstm_op_code->version = 2;
lstm_op->opcode_index = 0;
lstm_op->inputs = {0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
lstm_op->outputs = {24};
model->subgraphs.push_back(std::move(subgraph));
for (int i = 0; i < lstm_op->inputs.size(); ++i) {
const int index = lstm_op->inputs[i];
if (index == -1) {
continue;
}
auto tensor = std::make_unique<TensorT>();
tensor->name = "lstm_tensor" + std::to_string(index);
tensor->shape = {2, 3, 4};
tensor->type = TensorType_FLOAT32;
model->subgraphs[0]->tensors.push_back(std::move(tensor));
}
model->subgraphs[0]->operators.push_back(std::move(lstm_op));
model->operator_codes.push_back(std::move(lstm_op_code));
model->buffers.push_back(std::move(buffer));
flatbuffers::FlatBufferBuilder builder;
tflite::optimize::AddIntermediateTensorsToFusedOp(&builder, model.get());
EXPECT_EQ(model->operator_codes.size(), 1);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 26);
EXPECT_EQ(model->buffers.size(), 1);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_LSTM);
EXPECT_EQ(model->subgraphs[0]->tensors[0]->name, "lstm_tensor0");
EXPECT_EQ(model->subgraphs[0]->tensors[21]->name, "intermediate_0_0");
EXPECT_EQ(model->subgraphs[0]->tensors[22]->name, "intermediate_0_1");
EXPECT_EQ(model->subgraphs[0]->tensors[23]->name, "intermediate_0_2");
EXPECT_EQ(model->subgraphs[0]->tensors[24]->name, "intermediate_0_3");
EXPECT_EQ(model->subgraphs[0]->tensors[25]->name, "intermediate_0_4");
EXPECT_THAT(
model->subgraphs[0]->operators[0]->inputs,
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->outputs,
ElementsAreArray({24}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->intermediates,
ElementsAreArray({21, 22, 23, 24, 25}));
tflite::optimize::AddIntermediateTensorsToFusedOp(&builder, model.get());
EXPECT_EQ(model->operator_codes.size(), 1);
EXPECT_EQ(model->subgraphs.size(), 1);
EXPECT_EQ(model->subgraphs[0]->operators.size(), 1);
EXPECT_EQ(model->subgraphs[0]->tensors.size(), 26);
EXPECT_EQ(model->buffers.size(), 1);
EXPECT_EQ(GetBuiltinCode(model->operator_codes[0].get()),
BuiltinOperator_LSTM);
EXPECT_EQ(model->subgraphs[0]->tensors[0]->name, "lstm_tensor0");
EXPECT_EQ(model->subgraphs[0]->tensors[21]->name, "intermediate_0_0");
EXPECT_EQ(model->subgraphs[0]->tensors[22]->name, "intermediate_0_1");
EXPECT_EQ(model->subgraphs[0]->tensors[23]->name, "intermediate_0_2");
EXPECT_EQ(model->subgraphs[0]->tensors[24]->name, "intermediate_0_3");
EXPECT_EQ(model->subgraphs[0]->tensors[25]->name, "intermediate_0_4");
EXPECT_THAT(
model->subgraphs[0]->operators[0]->inputs,
ElementsAreArray({0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->outputs,
ElementsAreArray({24}));
EXPECT_THAT(model->subgraphs[0]->operators[0]->intermediates,
ElementsAreArray({21, 22, 23, 24, 25}));
}
}
}
}
int main(int argc, char** argv) { return RUN_ALL_TESTS(); } | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/quantization_wrapper_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/quantization_wrapper_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a0a5c0c3-6389-4956-96d2-7c74e77154df | cpp | google/libaddressinput | string_split | cpp/src/util/string_split.cc | cpp/test/util/string_split_unittest.cc | #include "string_split.h"
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
namespace i18n {
namespace addressinput {
void SplitString(const std::string& str, char s, std::vector<std::string>* r) {
assert(r != nullptr);
r->clear();
size_t last = 0;
size_t c = str.size();
for (size_t i = 0; i <= c; ++i) {
if (i == c || str[i] == s) {
std::string tmp(str, last, i - last);
if (i != c || !r->empty() || !tmp.empty()) {
r->push_back(tmp);
}
last = i + 1;
}
}
}
}
} | #include "util/string_split.h"
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::SplitString;
// Exercises SplitString() across the documented edge cases: empty input,
// simple tokens, whitespace preservation, empty pieces from adjacent /
// leading / trailing separators, and a separator that never occurs.
TEST(StringSplitTest, SplitString) {
  std::vector<std::string> r;
  // Empty input yields an empty result vector (not one empty piece).
  SplitString(std::string(), ',', &r);
  EXPECT_EQ(0U, r.size());
  // Basic three-way split.
  SplitString("a,b,c", ',', &r);
  ASSERT_EQ(3U, r.size());
  EXPECT_EQ(r[0], "a");
  EXPECT_EQ(r[1], "b");
  EXPECT_EQ(r[2], "c");
  // Whitespace around pieces is preserved, not trimmed.
  SplitString("a, b, c", ',', &r);
  ASSERT_EQ(3U, r.size());
  EXPECT_EQ(r[0], "a");
  EXPECT_EQ(r[1], " b");
  EXPECT_EQ(r[2], " c");
  // Adjacent separators produce an empty middle piece.
  SplitString("a,,c", ',', &r);
  ASSERT_EQ(3U, r.size());
  EXPECT_EQ(r[0], "a");
  EXPECT_EQ(r[1], "");
  EXPECT_EQ(r[2], "c");
  // A separator that never occurs yields the whole input as one piece.
  SplitString(" ", '*', &r);
  EXPECT_EQ(1U, r.size());
  SplitString("foo", '*', &r);
  ASSERT_EQ(1U, r.size());
  EXPECT_EQ(r[0], "foo");
  // A trailing separator yields a trailing empty piece.
  SplitString("foo ,", ',', &r);
  ASSERT_EQ(2U, r.size());
  EXPECT_EQ(r[0], "foo ");
  EXPECT_EQ(r[1], "");
  // A lone separator yields two empty pieces.
  SplitString(",", ',', &r);
  ASSERT_EQ(2U, r.size());
  EXPECT_EQ(r[0], "");
  EXPECT_EQ(r[1], "");
  // Leading/adjacent/trailing tabs: N separators always give N + 1 pieces.
  SplitString("\t\ta\t", '\t', &r);
  ASSERT_EQ(4U, r.size());
  EXPECT_EQ(r[0], "");
  EXPECT_EQ(r[1], "");
  EXPECT_EQ(r[2], "a");
  EXPECT_EQ(r[3], "");
  // Splitting on newline keeps embedded tabs intact inside each piece.
  SplitString("\ta\t\nb\tcc", '\n', &r);
  ASSERT_EQ(2U, r.size());
  EXPECT_EQ(r[0], "\ta\t");
  EXPECT_EQ(r[1], "b\tcc");
  // Non-occurring separator again: the input comes back verbatim.
  SplitString(" ", '*', &r);
  ASSERT_EQ(1U, r.size());
  EXPECT_EQ(r[0], " ");
  // Mixed tab/space input split on tab: spaces survive inside pieces.
  SplitString("\t \ta\t ", '\t', &r);
  ASSERT_EQ(4U, r.size());
  EXPECT_EQ(r[0], "");
  EXPECT_EQ(r[1], " ");
  EXPECT_EQ(r[2], "a");
  EXPECT_EQ(r[3], " ");
  // Repeat of the newline case to confirm |r| is fully replaced each call.
  SplitString("\ta\t\nb\tcc", '\n', &r);
  ASSERT_EQ(2U, r.size());
  EXPECT_EQ(r[0], "\ta\t");
  EXPECT_EQ(r[1], "b\tcc");
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/util/string_split.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/util/string_split_unittest.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
815416d7-7bea-456d-bc05-1a7d561ca943 | cpp | google/tensorstore | file_util | tensorstore/internal/os/file_util.h | tensorstore/internal/os/file_util_test.cc | #ifndef TENSORSTORE_INTERNAL_OS_FILE_UTIL_H_
#define TENSORSTORE_INTERNAL_OS_FILE_UTIL_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <string_view>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "tensorstore/internal/os/unique_handle.h"
#include "tensorstore/util/result.h"
#ifndef _WIN32
#include <fcntl.h>
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#endif
#include "tensorstore/internal/os/include_windows.h"
namespace tensorstore {
namespace internal_os {
#ifdef _WIN32
using FileDescriptor = HANDLE;
struct FileDescriptorTraits {
static FileDescriptor Invalid() { return ((FileDescriptor)-1); }
static void Close(FileDescriptor fd);
};
using FileInfo = ::BY_HANDLE_FILE_INFORMATION;
constexpr inline bool IsDirSeparator(char c) { return c == '\\' || c == '/'; }
#else
using FileDescriptor = int;
struct FileDescriptorTraits {
static FileDescriptor Invalid() { return -1; }
static void Close(FileDescriptor fd) { ::close(fd); }
};
typedef struct ::stat FileInfo;
constexpr inline bool IsDirSeparator(char c) { return c == '/'; }
#endif
inline constexpr std::string_view kLockSuffix = ".__lock";
using UniqueFileDescriptor = UniqueHandle<FileDescriptor, FileDescriptorTraits>;
Result<UniqueFileDescriptor> OpenExistingFileForReading(
const std::string& path);
Result<UniqueFileDescriptor> OpenFileForWriting(const std::string& path);
Result<ptrdiff_t> ReadFromFile(FileDescriptor fd, void* buf, size_t count,
int64_t offset);
Result<ptrdiff_t> WriteToFile(FileDescriptor fd, const void* buf, size_t count);
Result<ptrdiff_t> WriteCordToFile(FileDescriptor fd, absl::Cord value);
absl::Status TruncateFile(FileDescriptor fd);
absl::Status RenameOpenFile(FileDescriptor fd, const std::string& old_name,
const std::string& new_name);
absl::Status DeleteOpenFile(FileDescriptor fd, const std::string& path);
absl::Status DeleteFile(const std::string& path);
absl::Status FsyncFile(FileDescriptor fd);
using UnlockFn = void (*)(FileDescriptor fd);
Result<UnlockFn> AcquireFdLock(FileDescriptor fd);
absl::Status GetFileInfo(FileDescriptor fd, FileInfo* info);
absl::Status GetFileInfo(const std::string& path, FileInfo* info);
// Returns true if `info` describes a regular file.
// NOTE(review): on Windows this only checks that the directory attribute is
// absent, so other non-regular kinds (e.g. devices) would also report true —
// confirm that is the intended contract.
inline bool IsRegularFile(const FileInfo& info) {
#ifdef _WIN32
  return !(info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
#else
  return S_ISREG(info.st_mode);
#endif
}
// Returns true if `info` describes a directory.
inline bool IsDirectory(const FileInfo& info) {
#ifdef _WIN32
  return (info.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY);
#else
  return S_ISDIR(info.st_mode);
#endif
}
// Returns the file size in bytes. On Windows the 64-bit size is reassembled
// from the high/low 32-bit halves of BY_HANDLE_FILE_INFORMATION.
inline uint64_t GetSize(const FileInfo& info) {
#ifdef _WIN32
  return (static_cast<int64_t>(info.nFileSizeHigh) << 32) +
         static_cast<int64_t>(info.nFileSizeLow);
#else
  return info.st_size;
#endif
}
// Returns an identifier for the device/volume holding the file
// (volume serial number on Windows, st_dev on POSIX). The return type is
// platform-dependent, hence `auto`.
inline auto GetDeviceId(const FileInfo& info) {
#ifdef _WIN32
  return info.dwVolumeSerialNumber;
#else
  return info.st_dev;
#endif
}
// Returns a per-volume unique file identifier (the 64-bit file index on
// Windows, the inode number on POSIX).
inline uint64_t GetFileId(const FileInfo& info) {
#ifdef _WIN32
  return (static_cast<uint64_t>(info.nFileIndexHigh) << 32) |
         static_cast<uint64_t>(info.nFileIndexLow);
#else
  return info.st_ino;
#endif
}
// Returns the last-modification time of the file described by `info` as an
// absl::Time.
inline absl::Time GetMTime(const FileInfo& info) {
#ifdef _WIN32
  // FILETIME counts 100-ns intervals since 1601-01-01 (hence the division by
  // 10^7 to obtain seconds below); 11644473600 is the number of seconds
  // between the Windows epoch (1601) and the Unix epoch (1970).
  uint64_t windowsTicks =
      (static_cast<uint64_t>(info.ftLastWriteTime.dwHighDateTime) << 32) |
      static_cast<uint64_t>(info.ftLastWriteTime.dwLowDateTime);
  // The remainder is in 100-ns ticks, so it must be scaled by 100 to get
  // nanoseconds; previously it was passed to absl::Nanoseconds() unscaled,
  // which made the sub-second component 100x too small.
  return absl::UnixEpoch() +
         absl::Seconds((windowsTicks / 10000000) - 11644473600ULL) +
         absl::Nanoseconds((windowsTicks % 10000000) * 100);
#else
#if defined(__APPLE__)
  // macOS spells the nanosecond-resolution field st_mtimespec.
  const struct ::timespec t = info.st_mtimespec;
#else
  const struct ::timespec t = info.st_mtim;
#endif
  return absl::FromTimeT(t.tv_sec) + absl::Nanoseconds(t.tv_nsec);
#endif
}
Result<UniqueFileDescriptor> OpenDirectoryDescriptor(const std::string& path);
absl::Status MakeDirectory(const std::string& path);
absl::Status FsyncDirectory(FileDescriptor fd);
#ifdef _WIN32
Result<std::string> GetWindowsTempDir();
#endif
}
}
#endif | #include "tensorstore/internal/os/file_util.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::IsOk;
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::StatusIs;
using ::tensorstore::internal_os::DeleteFile;
using ::tensorstore::internal_os::DeleteOpenFile;
using ::tensorstore::internal_os::FileInfo;
using ::tensorstore::internal_os::FsyncFile;
using ::tensorstore::internal_os::GetDeviceId;
using ::tensorstore::internal_os::GetFileId;
using ::tensorstore::internal_os::GetFileInfo;
using ::tensorstore::internal_os::GetMTime;
using ::tensorstore::internal_os::GetSize;
using ::tensorstore::internal_os::IsDirSeparator;
using ::tensorstore::internal_os::IsRegularFile;
using ::tensorstore::internal_os::OpenExistingFileForReading;
using ::tensorstore::internal_os::OpenFileForWriting;
using ::tensorstore::internal_os::ReadFromFile;
using ::tensorstore::internal_os::RenameOpenFile;
using ::tensorstore::internal_os::TruncateFile;
using ::tensorstore::internal_os::WriteCordToFile;
using ::tensorstore::internal_os::WriteToFile;
// End-to-end smoke test of the file_util API: open/write/fsync, read back,
// stat-style accessors, rename-while-open, truncate of a read-only handle,
// and delete-while-open. Each scoped block closes its descriptor on exit.
TEST(FileUtilTest, Basics) {
  tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
  std::string foo_txt = tempdir.path() + "/foo.txt";
  std::string renamed_txt = tempdir.path() + "/renamed.txt";
  EXPECT_TRUE(IsDirSeparator('/'));
  // Baseline for the mtime check below; one second of slack absorbs
  // filesystem timestamp granularity.
  auto now = absl::Now() - absl::Seconds(1);
  {
    // Operations on a nonexistent path report kNotFound.
    auto f = OpenExistingFileForReading(foo_txt);
    EXPECT_THAT(f, StatusIs(absl::StatusCode::kNotFound));
    EXPECT_THAT(DeleteFile(foo_txt), StatusIs(absl::StatusCode::kNotFound));
  }
  {
    // Create the file and write "foo" + "bar" (6 bytes total), then fsync.
    auto f = OpenFileForWriting(foo_txt);
    EXPECT_THAT(f, IsOk());
    EXPECT_THAT(TruncateFile(f->get()), IsOk());
    EXPECT_THAT(WriteCordToFile(f->get(), absl::Cord("foo")), IsOkAndHolds(3));
    EXPECT_THAT(WriteToFile(f->get(), "bar", 3), IsOkAndHolds(3));
    EXPECT_THAT(FsyncFile(f->get()), IsOk());
  }
  {
    // Read back and verify the stat-style accessors, then rename via the
    // open handle.
    char buf[16];
    auto f = OpenExistingFileForReading(foo_txt);
    EXPECT_THAT(f, IsOk());
    EXPECT_THAT(ReadFromFile(f->get(), buf, 3, 0), IsOkAndHolds(3));
    FileInfo info;
    EXPECT_THAT(GetFileInfo(f->get(), &info), IsOk());
    EXPECT_TRUE(IsRegularFile(info));
    EXPECT_THAT(GetSize(info), 6);
    EXPECT_TRUE(IsRegularFile(info));
    EXPECT_THAT(GetFileId(info), ::testing::Ne(0));
    EXPECT_THAT(GetDeviceId(info), ::testing::Ne(0));
    EXPECT_THAT(GetMTime(info), ::testing::Ge(now));
    EXPECT_THAT(RenameOpenFile(f->get(), foo_txt, renamed_txt), IsOk());
  }
  {
    // Truncating a handle opened read-only must fail; the exact status code
    // is platform-dependent.
    auto f = OpenExistingFileForReading(renamed_txt);
    EXPECT_THAT(f, IsOk());
    EXPECT_THAT(
        TruncateFile(f->get()),
        ::testing::AnyOf(StatusIs(absl::StatusCode::kInvalidArgument),
                         StatusIs(absl::StatusCode::kPermissionDenied)));
  }
  {
    // Deleting a file through its still-open descriptor succeeds.
    std::string bar_txt = tempdir.path() + "/bar.txt";
    auto f = OpenFileForWriting(bar_txt);
    EXPECT_THAT(WriteToFile(f->get(), "bar", 3), IsOkAndHolds(3));
    EXPECT_THAT(DeleteOpenFile(f->get(), bar_txt), IsOk());
  }
}
// Verifies that a lock can be acquired on a freshly created lock file
// (named with the kLockSuffix convention) and released via the returned
// unlock function.
TEST(FileUtilTest, LockFile) {
  tensorstore::internal_testing::ScopedTemporaryDirectory tempdir;
  std::string foo_txt = absl::StrCat(tempdir.path(), "/foo.txt",
                                     tensorstore::internal_os::kLockSuffix);
  auto f = OpenFileForWriting(foo_txt);
  EXPECT_THAT(f, IsOk());
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto lock, tensorstore::internal_os::AcquireFdLock(f->get()));
  // AcquireFdLock returns the matching unlock function; invoke it to
  // release the lock before the descriptor is closed.
  lock(f->get());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_util.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/os/file_util_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c4be7536-a004-445e-9ae3-3c93ab000533 | cpp | google/quiche | quic_poll_event_loop | quiche/quic/core/io/quic_poll_event_loop.cc | quiche/quic/core/io/quic_poll_event_loop_test.cc | #include "quiche/quic/core/io/quic_poll_event_loop.h"
#include <algorithm>
#include <cerrno>
#include <cmath>
#include <memory>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_alarm.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
namespace {
using PollMask = decltype(::pollfd().events);
// Translates a QUIC socket event bitmask into the equivalent poll(2)
// events mask (readable -> POLLIN, writable -> POLLOUT, error -> POLLERR).
PollMask GetPollMask(QuicSocketEventMask event_mask) {
  PollMask poll_mask = 0;
  if (event_mask & kSocketEventReadable) {
    poll_mask |= POLLIN;
  }
  if (event_mask & kSocketEventWritable) {
    poll_mask |= POLLOUT;
  }
  if (event_mask & kSocketEventError) {
    poll_mask |= POLLERR;
  }
  return poll_mask;
}
// Translates a poll(2) revents mask back into QUIC socket event bits;
// any poll bits other than POLLIN/POLLOUT/POLLERR are dropped.
QuicSocketEventMask GetEventMask(PollMask poll_mask) {
  QuicSocketEventMask event_mask = 0;
  if (poll_mask & POLLIN) {
    event_mask |= kSocketEventReadable;
  }
  if (poll_mask & POLLOUT) {
    event_mask |= kSocketEventWritable;
  }
  if (poll_mask & POLLERR) {
    event_mask |= kSocketEventError;
  }
  return event_mask;
}
}
// The event loop does not own `clock`; it must outlive the loop.
QuicPollEventLoop::QuicPollEventLoop(QuicClock* clock) : clock_(clock) {}
// Registers `fd` with an initial event mask and listener. Fails (returns
// false) if `fd` is already registered; the existing registration is left
// untouched in that case. Registrations are held via shared_ptr so that
// in-flight callbacks can observe removal through weak_ptr.
bool QuicPollEventLoop::RegisterSocket(SocketFd fd, QuicSocketEventMask events,
                                       QuicSocketEventListener* listener) {
  auto [it, success] =
      registrations_.insert({fd, std::make_shared<Registration>()});
  if (!success) {
    return false;
  }
  Registration& registration = *it->second;
  registration.events = events;
  registration.listener = listener;
  return true;
}
// Returns true if `fd` was registered (and is now removed).
bool QuicPollEventLoop::UnregisterSocket(SocketFd fd) {
  return registrations_.erase(fd);
}
// Re-arms `fd` for additional events by OR-ing them into the current mask
// (events are consumed on delivery elsewhere, so callers re-arm to keep
// receiving them). Returns false if `fd` is not registered.
bool QuicPollEventLoop::RearmSocket(SocketFd fd, QuicSocketEventMask events) {
  auto it = registrations_.find(fd);
  if (it == registrations_.end()) {
    return false;
  }
  it->second->events |= events;
  return true;
}
// Queues synthetic events for `fd` to be delivered on the next loop
// iteration, and flags the loop so the next poll() does not block waiting
// for real I/O. Returns false if `fd` is not registered.
bool QuicPollEventLoop::ArtificiallyNotifyEvent(SocketFd fd,
                                                QuicSocketEventMask events) {
  auto it = registrations_.find(fd);
  if (it == registrations_.end()) {
    return false;
  }
  has_artificial_events_pending_ = true;
  it->second->artificially_notify_at_next_iteration |= events;
  return true;
}
// Runs a single loop iteration: fire alarms that are already due, poll for
// I/O (bounded by the earliest alarm deadline), then fire any alarms that
// became due while polling.
void QuicPollEventLoop::RunEventLoopOnce(QuicTime::Delta default_timeout) {
  const QuicTime start_time = clock_->Now();
  ProcessAlarmsUpTo(start_time);
  QuicTime::Delta timeout = ComputePollTimeout(start_time, default_timeout);
  ProcessIoEvents(start_time, timeout);
  const QuicTime end_time = clock_->Now();
  ProcessAlarmsUpTo(end_time);
}
// Computes how long the upcoming poll() may block: zero if artificial
// events are pending (they must be delivered immediately), otherwise the
// smaller of `default_timeout` and the time until the earliest alarm
// (alarms_ is ordered by deadline, so begin() is the earliest), clamped to
// zero if that alarm is already overdue.
QuicTime::Delta QuicPollEventLoop::ComputePollTimeout(
    QuicTime now, QuicTime::Delta default_timeout) const {
  default_timeout = std::max(default_timeout, QuicTime::Delta::Zero());
  if (has_artificial_events_pending_) {
    return QuicTime::Delta::Zero();
  }
  if (alarms_.empty()) {
    return default_timeout;
  }
  QuicTime end_time = std::min(now + default_timeout, alarms_.begin()->first);
  if (end_time < now) {
    return QuicTime::Delta::Zero();
  }
  return end_time - now;
}
// Calls poll() and transparently retries when it is interrupted by a signal
// (EINTR), recomputing the remaining timeout each retry so the overall
// deadline `start_time + timeout` is respected. Returns the last poll()
// result (ready-fd count, 0 on timeout, or -1 on a non-EINTR error).
int QuicPollEventLoop::PollWithRetries(absl::Span<pollfd> fds,
                                       QuicTime start_time,
                                       QuicTime::Delta timeout) {
  const QuicTime timeout_at = start_time + timeout;
  int poll_result;
  for (;;) {
    // Round up to whole milliseconds so that a small nonzero timeout does
    // not get truncated to 0 (which would make poll() non-blocking).
    float timeout_ms = std::ceil(timeout.ToMicroseconds() / 1000.f);
    poll_result =
        PollSyscall(fds.data(), fds.size(), static_cast<int>(timeout_ms));
    // Done on any readiness, or on any failure other than EINTR.
    bool done = poll_result > 0 || (poll_result < 0 && errno != EINTR);
    if (done) {
      break;
    }
    QuicTime now = clock_->Now();
    if (now >= timeout_at) {
      break;
    }
    timeout = timeout_at - now;
  }
  return poll_result;
}
// Builds a pollfd array from the current registrations, polls (with EINTR
// retries), then collects ready sockets into a list and dispatches their
// callbacks. Collection happens before any callback runs so that handlers
// may register/unregister sockets without invalidating this iteration.
void QuicPollEventLoop::ProcessIoEvents(QuicTime start_time,
                                        QuicTime::Delta timeout) {
  // Snapshot the registration count: callbacks run later may mutate
  // registrations_, but the pollfd array is fixed from this point on.
  const size_t registration_count = registrations_.size();
  auto pollfds = std::make_unique<pollfd[]>(registration_count);
  size_t i = 0;
  for (auto& [fd, registration] : registrations_) {
    QUICHE_CHECK_LT(
        i, registration_count);
    pollfds[i].fd = fd;
    pollfds[i].events = GetPollMask(registration->events);
    pollfds[i].revents = 0;
    ++i;
  }
  int poll_result =
      PollWithRetries(absl::Span<pollfd>(pollfds.get(), registration_count),
                      start_time, timeout);
  // Nothing ready and no artificial events queued: nothing to dispatch.
  if (poll_result == 0 && !has_artificial_events_pending_) {
    return;
  }
  std::vector<ReadyListEntry> ready_list;
  ready_list.reserve(registration_count);
  for (i = 0; i < registration_count; i++) {
    DispatchIoEvent(ready_list, pollfds[i].fd, pollfds[i].revents);
  }
  // Artificial events queued so far have now been merged into ready_list.
  has_artificial_events_pending_ = false;
  RunReadyCallbacks(ready_list);
}
// Converts one pollfd result into a ready-list entry: merges in any queued
// artificial events, filters by the events the caller actually asked for,
// and consumes delivered events from the registration (delivery is
// edge-triggered; the caller must RearmSocket to receive them again).
void QuicPollEventLoop::DispatchIoEvent(std::vector<ReadyListEntry>& ready_list,
                                        SocketFd fd, PollMask mask) {
  auto it = registrations_.find(fd);
  if (it == registrations_.end()) {
    QUIC_BUG(poll returned an unregistered fd) << fd;
    return;
  }
  Registration& registration = *it->second;
  // Fold in artificial notifications, then keep only events that are either
  // requested or artificially queued.
  mask |= GetPollMask(registration.artificially_notify_at_next_iteration);
  mask &= GetPollMask(registration.events |
                      registration.artificially_notify_at_next_iteration);
  registration.artificially_notify_at_next_iteration = QuicSocketEventMask();
  if (!mask) {
    return;
  }
  // Store a weak_ptr to the registration so RunReadyCallbacks can detect
  // sockets unregistered by earlier callbacks in the same batch.
  ready_list.push_back(ReadyListEntry{fd, it->second, GetEventMask(mask)});
  // Consume the delivered events (edge-trigger semantics).
  registration.events &= ~GetEventMask(mask);
}
// Invokes the listener callback for every entry whose registration is still
// alive; entries whose socket was unregistered mid-batch are skipped.
void QuicPollEventLoop::RunReadyCallbacks(
    std::vector<ReadyListEntry>& ready_list) {
  for (ReadyListEntry& entry : ready_list) {
    std::shared_ptr<Registration> registration = entry.registration.lock();
    if (!registration) {
      continue;
    }
    registration->listener->OnSocketEvent(this, entry.fd, entry.events);
  }
  ready_list.clear();
}
// Fires every alarm whose deadline is at or before `time`. Due alarms are
// first detached from alarms_ into a local list and only then fired, so an
// alarm callback that sets or cancels alarms cannot invalidate the
// iteration. Each entry is a weak_ptr to the alarm's schedule handle;
// cancellation (which resets the handle) makes lock() fail, so alarms
// cancelled by earlier callbacks in the same batch are skipped.
void QuicPollEventLoop::ProcessAlarmsUpTo(QuicTime time) {
  std::vector<std::weak_ptr<Alarm*>> alarms_to_call;
  while (!alarms_.empty() && alarms_.begin()->first <= time) {
    auto& [deadline, schedule_handle_weak] = *alarms_.begin();
    alarms_to_call.push_back(std::move(schedule_handle_weak));
    alarms_.erase(alarms_.begin());
  }
  for (std::weak_ptr<Alarm*>& schedule_handle_weak : alarms_to_call) {
    std::shared_ptr<Alarm*> schedule_handle = schedule_handle_weak.lock();
    if (!schedule_handle) {
      continue;
    }
    (*schedule_handle)->DoFire();
  }
  // Garbage-collect already-cancelled entries from the front of the queue so
  // ComputePollTimeout sees a live earliest deadline; stop at the first live
  // entry (later stale entries are cleaned up on subsequent passes).
  while (!alarms_.empty()) {
    if (alarms_.begin()->second.expired()) {
      alarms_.erase(alarms_.begin());
    } else {
      break;
    }
  }
}
// Creates a heap-allocated alarm; the returned pointer is owned by the
// caller (per the QuicAlarmFactory contract).
QuicAlarm* QuicPollEventLoop::AlarmFactory::CreateAlarm(
    QuicAlarm::Delegate* delegate) {
  return new Alarm(loop_, QuicArenaScopedPtr<QuicAlarm::Delegate>(delegate));
}
// Creates an alarm in `arena` when one is provided, otherwise on the heap.
QuicArenaScopedPtr<QuicAlarm> QuicPollEventLoop::AlarmFactory::CreateAlarm(
    QuicArenaScopedPtr<QuicAlarm::Delegate> delegate,
    QuicConnectionArena* arena) {
  if (arena != nullptr) {
    return arena->New<Alarm>(loop_, std::move(delegate));
  }
  return QuicArenaScopedPtr<QuicAlarm>(new Alarm(loop_, std::move(delegate)));
}
// The alarm keeps an unowned pointer back to the loop whose alarms_ map it
// schedules itself into.
QuicPollEventLoop::Alarm::Alarm(
    QuicPollEventLoop* loop, QuicArenaScopedPtr<QuicAlarm::Delegate> delegate)
    : QuicAlarm(std::move(delegate)), loop_(loop) {}
// Publishes a fresh shared schedule handle and inserts it, keyed by
// deadline, into the loop's alarm queue. The queue only holds weak_ptrs, so
// resetting the handle (see CancelImpl) is enough to invalidate the entry.
void QuicPollEventLoop::Alarm::SetImpl() {
  current_schedule_handle_ = std::make_shared<Alarm*>(this);
  loop_->alarms_.insert({deadline(), current_schedule_handle_});
}
// Cancels by dropping the schedule handle; the stale queue entry is skipped
// and eventually erased by ProcessAlarmsUpTo.
void QuicPollEventLoop::Alarm::CancelImpl() {
  current_schedule_handle_.reset();
}
std::unique_ptr<QuicAlarmFactory> QuicPollEventLoop::CreateAlarmFactory() {
  return std::make_unique<AlarmFactory>(this);
}
// Thin, overridable wrapper over the platform poll primitive (WSAPoll on
// Windows, poll(2) elsewhere); tests override it to fake time and errors.
int QuicPollEventLoop::PollSyscall(pollfd* fds, size_t nfds, int timeout) {
#if defined(_WIN32)
  return WSAPoll(fds, nfds, timeout);
#else
  return ::poll(fds, nfds, timeout);
#endif
}
} | #include "quiche/quic/core/io/quic_poll_event_loop.h"
#include <fcntl.h>
#include <unistd.h>
#include <cerrno>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "quiche/quic/core/io/quic_event_loop.h"
#include "quiche/quic/core/quic_alarm.h"
#include "quiche/quic/core/quic_alarm_factory.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
// Test-only peer that exposes the loop's private ComputePollTimeout() so
// tests can assert on the timeout the next poll() would use.
class QuicPollEventLoopPeer {
 public:
  static QuicTime::Delta ComputePollTimeout(const QuicPollEventLoop& loop,
                                            QuicTime now,
                                            QuicTime::Delta default_timeout) {
    return loop.ComputePollTimeout(now, default_timeout);
  }
};
}
namespace quic::test {
namespace {
using testing::_;
using testing::AtMost;
using testing::ElementsAre;
constexpr QuicSocketEventMask kAllEvents =
kSocketEventReadable | kSocketEventWritable | kSocketEventError;
constexpr QuicTime::Delta kDefaultTimeout = QuicTime::Delta::FromSeconds(100);
class MockQuicSocketEventListener : public QuicSocketEventListener {
public:
MOCK_METHOD(void, OnSocketEvent,
(QuicEventLoop* , SocketFd ,
QuicSocketEventMask ),
(override));
};
class MockDelegate : public QuicAlarm::Delegate {
public:
QuicConnectionContext* GetConnectionContext() override { return nullptr; }
MOCK_METHOD(void, OnAlarm, (), (override));
};
// Event loop subclass that instruments PollSyscall(): it records every
// timeout passed to poll(), can fake one EINTR or one early return, and
// advances the MockClock as if the corresponding wall time had elapsed.
class QuicPollEventLoopForTest : public QuicPollEventLoop {
 public:
  QuicPollEventLoopForTest(MockClock* clock)
      : QuicPollEventLoop(clock), clock_(clock) {}
  int PollSyscall(pollfd* fds, nfds_t nfds, int timeout) override {
    timeouts_.push_back(timeout);
    // One-shot fake EINTR: advance the clock partway and fail, so the loop's
    // retry logic is exercised with a shortened remaining timeout.
    if (eintr_after_ != QuicTime::Delta::Infinite()) {
      errno = EINTR;
      clock_->AdvanceTime(eintr_after_);
      eintr_after_ = QuicTime::Delta::Infinite();
      return -1;
    }
    // One-shot early return: advance the clock by less than the requested
    // timeout before actually polling; otherwise simulate a full wait.
    if (poll_return_after_ != QuicTime::Delta::Infinite()) {
      clock_->AdvanceTime(poll_return_after_);
      poll_return_after_ = QuicTime::Delta::Infinite();
    } else {
      clock_->AdvanceTime(QuicTime::Delta::FromMilliseconds(timeout));
    }
    return QuicPollEventLoop::PollSyscall(fds, nfds, timeout);
  }
  // Arms the one-shot EINTR / early-return behaviors for the next call.
  void TriggerEintrAfter(QuicTime::Delta time) { eintr_after_ = time; }
  void ReturnFromPollAfter(QuicTime::Delta time) { poll_return_after_ = time; }
  // Timeouts (ms) passed to poll(), in call order.
  const std::vector<int>& timeouts() const { return timeouts_; }
 private:
  MockClock* clock_;
  // Infinite() acts as the "disarmed" sentinel for both one-shot behaviors.
  QuicTime::Delta eintr_after_ = QuicTime::Delta::Infinite();
  QuicTime::Delta poll_return_after_ = QuicTime::Delta::Infinite();
  std::vector<int> timeouts_;
};
// Fixture providing an instrumented loop plus a non-blocking pipe: read_fd_
// becomes readable when the test writes to write_fd_, and write_fd_ is
// writable until the pipe buffer fills.
class QuicPollEventLoopTest : public QuicTest {
 public:
  QuicPollEventLoopTest()
      : loop_(&clock_), factory_(loop_.CreateAlarmFactory()) {
    int fds[2];
    int result = ::pipe(fds);
    QUICHE_CHECK(result >= 0) << "Failed to create a pipe, errno: " << errno;
    read_fd_ = fds[0];
    write_fd_ = fds[1];
    // Both ends must be non-blocking so tests can drain/fill the pipe with
    // plain read()/write() loops that terminate on EAGAIN.
    QUICHE_CHECK(::fcntl(read_fd_, F_SETFL,
                         ::fcntl(read_fd_, F_GETFL) | O_NONBLOCK) == 0)
        << "Failed to mark pipe FD non-blocking, errno: " << errno;
    QUICHE_CHECK(::fcntl(write_fd_, F_SETFL,
                         ::fcntl(write_fd_, F_GETFL) | O_NONBLOCK) == 0)
        << "Failed to mark pipe FD non-blocking, errno: " << errno;
    // Start well away from time zero so "deadline in the past" tests can
    // subtract freely without underflowing.
    clock_.AdvanceTime(10 * kDefaultTimeout);
  }
  ~QuicPollEventLoopTest() {
    close(read_fd_);
    close(write_fd_);
  }
  // Timeout the next poll() would use, given kDefaultTimeout and the current
  // alarm queue (via the test peer).
  QuicTime::Delta ComputePollTimeout() {
    return QuicPollEventLoopPeer::ComputePollTimeout(loop_, clock_.Now(),
                                                     kDefaultTimeout);
  }
  // Creates an alarm with a strict mock delegate; the alarm owns the
  // delegate, and the raw pointer is returned for setting expectations.
  std::pair<std::unique_ptr<QuicAlarm>, MockDelegate*> CreateAlarm() {
    auto delegate = std::make_unique<testing::StrictMock<MockDelegate>>();
    MockDelegate* delegate_unowned = delegate.get();
    auto alarm = absl::WrapUnique(factory_->CreateAlarm(delegate.release()));
    return std::make_pair(std::move(alarm), delegate_unowned);
  }
 protected:
  MockClock clock_;
  QuicPollEventLoopForTest loop_;
  std::unique_ptr<QuicAlarmFactory> factory_;
  int read_fd_;
  int write_fd_;
};
TEST_F(QuicPollEventLoopTest, NothingHappens) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
ASSERT_TRUE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
EXPECT_FALSE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
EXPECT_CALL(listener, OnSocketEvent(_, write_fd_, kSocketEventWritable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(4));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(5));
EXPECT_THAT(loop_.timeouts(), ElementsAre(4, 5));
}
TEST_F(QuicPollEventLoopTest, RearmWriter) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
EXPECT_CALL(listener, OnSocketEvent(_, write_fd_, kSocketEventWritable))
.Times(2);
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
ASSERT_TRUE(loop_.RearmSocket(write_fd_, kSocketEventWritable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicPollEventLoopTest, Readable) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
ASSERT_EQ(4, write(write_fd_, "test", 4));
EXPECT_CALL(listener, OnSocketEvent(_, read_fd_, kSocketEventReadable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicPollEventLoopTest, RearmReader) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
ASSERT_EQ(4, write(write_fd_, "test", 4));
EXPECT_CALL(listener, OnSocketEvent(_, read_fd_, kSocketEventReadable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicPollEventLoopTest, WriterUnblocked) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
EXPECT_CALL(listener, OnSocketEvent(_, write_fd_, kSocketEventWritable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
int io_result;
std::string data(2048, 'a');
do {
io_result = write(write_fd_, data.data(), data.size());
} while (io_result > 0);
ASSERT_EQ(errno, EAGAIN);
ASSERT_TRUE(loop_.RearmSocket(write_fd_, kSocketEventWritable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
EXPECT_CALL(listener, OnSocketEvent(_, write_fd_, kSocketEventWritable));
do {
io_result = read(read_fd_, data.data(), data.size());
} while (io_result > 0);
ASSERT_EQ(errno, EAGAIN);
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicPollEventLoopTest, ArtificialEvent) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
ASSERT_TRUE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
ASSERT_TRUE(loop_.ArtificiallyNotifyEvent(read_fd_, kSocketEventReadable));
EXPECT_EQ(ComputePollTimeout(), QuicTime::Delta::Zero());
{
testing::InSequence s;
EXPECT_CALL(listener, OnSocketEvent(_, read_fd_, kSocketEventReadable));
EXPECT_CALL(listener, OnSocketEvent(_, write_fd_, kSocketEventWritable));
}
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
}
TEST_F(QuicPollEventLoopTest, Unregister) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
ASSERT_TRUE(loop_.UnregisterSocket(write_fd_));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
EXPECT_FALSE(loop_.UnregisterSocket(write_fd_));
EXPECT_FALSE(loop_.RearmSocket(write_fd_, kSocketEventWritable));
EXPECT_FALSE(loop_.ArtificiallyNotifyEvent(write_fd_, kSocketEventWritable));
}
TEST_F(QuicPollEventLoopTest, UnregisterInsideEventHandler) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
ASSERT_TRUE(loop_.RegisterSocket(write_fd_, kAllEvents, &listener));
EXPECT_CALL(listener, OnSocketEvent(_, read_fd_, kSocketEventReadable))
.WillOnce([this]() { ASSERT_TRUE(loop_.UnregisterSocket(write_fd_)); });
EXPECT_CALL(listener, OnSocketEvent(_, write_fd_, kSocketEventWritable))
.Times(0);
ASSERT_TRUE(loop_.ArtificiallyNotifyEvent(read_fd_, kSocketEventReadable));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(1));
}
TEST_F(QuicPollEventLoopTest, EintrHandler) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
loop_.TriggerEintrAfter(QuicTime::Delta::FromMilliseconds(25));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(100));
EXPECT_THAT(loop_.timeouts(), ElementsAre(100, 75));
}
TEST_F(QuicPollEventLoopTest, PollReturnsEarly) {
testing::StrictMock<MockQuicSocketEventListener> listener;
ASSERT_TRUE(loop_.RegisterSocket(read_fd_, kAllEvents, &listener));
loop_.ReturnFromPollAfter(QuicTime::Delta::FromMilliseconds(25));
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(100));
EXPECT_THAT(loop_.timeouts(), ElementsAre(100, 75));
}
TEST_F(QuicPollEventLoopTest, AlarmInFuture) {
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
constexpr auto kAlarmTimeout = QuicTime::Delta::FromMilliseconds(5);
auto [alarm, delegate] = CreateAlarm();
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
alarm->Set(clock_.Now() + kAlarmTimeout);
EXPECT_EQ(ComputePollTimeout(), kAlarmTimeout);
EXPECT_CALL(*delegate, OnAlarm());
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(100));
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
}
TEST_F(QuicPollEventLoopTest, AlarmsInPast) {
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
constexpr auto kAlarmTimeout = QuicTime::Delta::FromMilliseconds(5);
auto [alarm1, delegate1] = CreateAlarm();
auto [alarm2, delegate2] = CreateAlarm();
alarm1->Set(clock_.Now() - 2 * kAlarmTimeout);
alarm2->Set(clock_.Now() - kAlarmTimeout);
{
testing::InSequence s;
EXPECT_CALL(*delegate1, OnAlarm());
EXPECT_CALL(*delegate2, OnAlarm());
}
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(100));
}
TEST_F(QuicPollEventLoopTest, AlarmCancelled) {
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
constexpr auto kAlarmTimeout = QuicTime::Delta::FromMilliseconds(5);
auto [alarm, delegate] = CreateAlarm();
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
alarm->Set(clock_.Now() + kAlarmTimeout);
alarm->Cancel();
alarm->Set(clock_.Now() + 2 * kAlarmTimeout);
EXPECT_EQ(ComputePollTimeout(), kAlarmTimeout);
EXPECT_CALL(*delegate, OnAlarm());
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(100));
EXPECT_THAT(loop_.timeouts(), ElementsAre(10));
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
}
TEST_F(QuicPollEventLoopTest, AlarmCancelsAnotherAlarm) {
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
constexpr auto kAlarmTimeout = QuicTime::Delta::FromMilliseconds(5);
auto [alarm1_ptr, delegate1] = CreateAlarm();
auto [alarm2_ptr, delegate2] = CreateAlarm();
QuicAlarm& alarm1 = *alarm1_ptr;
QuicAlarm& alarm2 = *alarm2_ptr;
alarm1.Set(clock_.Now() - kAlarmTimeout);
alarm2.Set(clock_.Now() - kAlarmTimeout);
int alarms_called = 0;
EXPECT_CALL(*delegate1, OnAlarm()).Times(AtMost(1)).WillOnce([&]() {
alarm2.Cancel();
++alarms_called;
});
EXPECT_CALL(*delegate2, OnAlarm()).Times(AtMost(1)).WillOnce([&]() {
alarm1.Cancel();
++alarms_called;
});
loop_.RunEventLoopOnce(QuicTime::Delta::FromMilliseconds(100));
EXPECT_EQ(alarms_called, 1);
EXPECT_EQ(ComputePollTimeout(), kDefaultTimeout);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/io/quic_poll_event_loop.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/io/quic_poll_event_loop_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7d8d55ae-5c41-44a2-905e-07d95bfb1b0c | cpp | tensorflow/tensorflow | writer_lib | tensorflow/lite/tools/serialization/writer_lib.cc | tensorflow/lite/tools/serialization/writer_lib_test.cc | #include "tensorflow/lite/tools/serialization/writer_lib.h"
#include <cstdlib>
#include <cstring>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "flatbuffers/base.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/string.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/interpreter.h"
#if FLATBUFFERS_LITTLEENDIAN == 0
#include "tensorflow/lite/core/model_builder.h"
#endif
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/tools/serialization/enum_mapping.h"
#include "tensorflow/lite/tools/versioning/op_version.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
// Serializes the operator-code table into the flatbuffer being built.
// Builtin ops get version 0 here (final versions are assigned elsewhere);
// custom ops carry their custom name and version 1.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
CreateOpCodeTableImpl(flatbuffers::FlatBufferBuilder* fbb,
                      std::vector<OpCode>* opcodes) {
  std::vector<flatbuffers::Offset<OperatorCode>> codes;
  for (const auto& it : *opcodes) {
    const char* custom_name = it.custom.empty() ? nullptr : it.custom.c_str();
    int32_t op_version = it.builtin != tflite::BuiltinOperator_CUSTOM ? 0 : 1;
    codes.push_back(
        CreateOperatorCodeDirect(*fbb, static_cast<BuiltinOperator>(it.builtin),
                                 custom_name, op_version));
  }
  return fbb->template CreateVector<flatbuffers::Offset<OperatorCode>>(codes);
}
// Copies each (data pointer, byte length) pair into a flatbuffer Buffer
// table and returns the serialized buffer vector.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
ExportBuffersImpl(flatbuffers::FlatBufferBuilder* fbb,
                  std::vector<std::pair<const uint8_t*, size_t>>* buffers) {
  std::vector<flatbuffers::Offset<Buffer>> buffer_vector;
  for (auto buffer : *buffers) {
    auto data_offset = fbb->CreateVector(buffer.first, buffer.second);
    buffer_vector.push_back(CreateBuffer(*fbb, data_offset));
  }
  return fbb->template CreateVector<flatbuffers::Offset<Buffer>>(buffer_vector);
}
// Writes `size` bytes from `data` to `filename`, returning kTfLiteError on
// any I/O failure. On big-endian hosts the flatbuffer is byte-swapped in
// place to the little-endian on-disk format before writing.
TfLiteStatus WriteImpl(const std::string& filename, void* data, size_t size) {
  FILE* fp = fopen(filename.c_str(), "wb");
  if (!fp) return kTfLiteError;
#if FLATBUFFERS_LITTLEENDIAN == 0
  const tflite::Model* input_model = tflite::GetModel(data);
  tflite::FlatBufferModel::ByteSwapTFLiteModel(input_model);
#endif
  // fwrite returns size_t; keep the result unsigned so the comparison with
  // `size` is well-defined for models larger than INT_MAX bytes.
  const size_t result_size = fwrite(data, 1, size, fp);
  // fclose flushes buffered data and can itself fail; a failed close means
  // the file on disk may be incomplete, so report it as a write error.
  const bool close_ok = (fclose(fp) == 0);
  if (result_size != size || !close_ok) return kTfLiteError;
  return kTfLiteOk;
}
// Serializes the builtin-options union for operator `op` from its parsed
// `builtin_op_data`. The switch body is generated code: each case is emitted
// by option_writer_generated.h and returns the (options type, offset) pair.
// Ops with no options fall through to the BuiltinOptions_NONE default.
std::pair<BuiltinOptions, flatbuffers::Offset<void>> CreateBuiltinUnion(
    flatbuffers::FlatBufferBuilder* fbb, enum BuiltinOperator op,
    void* builtin_op_data, int node_inputs_size) {
  switch (op) {
#include "tensorflow/lite/tools/serialization/option_writer_generated.h"
  }
  return std::make_pair(BuiltinOptions_NONE, flatbuffers::Offset<void>());
}
}
// Copies any iterable `v` (e.g. a TfLiteIntArrayView) into a std::vector of
// the target element type and serializes it into the flatbuffer.
template <class T_OUTPUT, class T_INPUT>
flatbuffers::Offset<flatbuffers::Vector<T_OUTPUT>> SubgraphWriter::ExportVector(
    flatbuffers::FlatBufferBuilder* fbb, const T_INPUT& v) {
  std::vector<T_OUTPUT> inputs(v.begin(), v.end());
  return fbb->template CreateVector<T_OUTPUT>(inputs);
}
// Serializes every node in the execution plan as a flatbuffer Operator,
// resolving opcode-table indices and builtin/custom options along the way.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Operator>>>
SubgraphWriter::ExportOperators(flatbuffers::FlatBufferBuilder* fbb) {
  std::vector<flatbuffers::Offset<Operator>> operators;
  // First pass: assign each executed node an index into the opcode table.
  std::vector<int> operator_to_opcode;
  operator_to_opcode.resize(subgraph_->nodes_size(), -1);
  for (int op_index : execution_plan_) {
    const auto* node_and_registration =
        subgraph_->node_and_registration(op_index);
    const TfLiteRegistration* registration = &node_and_registration->second;
    if (!registration->custom_name) {
      operator_to_opcode[op_index] =
          GetOpCodeForBuiltin(registration->builtin_code);
    } else {
      operator_to_opcode[op_index] =
          GetOpCodeForCustom(registration->custom_name);
    }
  }
  // Second pass: emit one Operator per node, carrying either a builtin
  // options union (from builtin_data) or a custom options byte blob.
  for (int op_index : execution_plan_) {
    const auto* node_and_registration =
        subgraph_->node_and_registration(op_index);
    const TfLiteNode& node = node_and_registration->first;
    const TfLiteRegistration& registration = node_and_registration->second;
    flatbuffers::Offset<void> builtin_options;
    BuiltinOptions builtin_options_type = BuiltinOptions_NONE;
    auto custom_options_format = CustomOptionsFormat_FLEXBUFFERS;
    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom_options = 0;
    if (!registration.custom_name) {
      auto builtin_options_and_type = CreateBuiltinUnion(
          fbb, static_cast<enum BuiltinOperator>(registration.builtin_code),
          node.builtin_data, node.inputs->size);
      builtin_options = builtin_options_and_type.second;
      builtin_options_type = builtin_options_and_type.first;
    } else {
      // Prefer a user-registered CustomWriter; otherwise fall back to the
      // node's opaque custom_initial_data bytes.
      auto custom_writer = custom_op_to_writer_.find(registration.custom_name);
      if (custom_writer != custom_op_to_writer_.end() &&
          custom_writer->second) {
        custom_writer->second(fbb, subgraph_, op_index, &custom_options,
                              &custom_options_format);
      } else {
        custom_options = fbb->CreateVector(
            reinterpret_cast<const uint8_t*>(node.custom_initial_data),
            node.custom_initial_data_size);
      }
    }
    int opcode_index = operator_to_opcode[op_index];
    // Translate subgraph tensor indices into the written model's numbering.
    std::vector<int> written_inputs =
        RemapTensorIndicesToWritten(TfLiteIntArrayView(node.inputs));
    std::vector<int> written_outputs =
        RemapTensorIndicesToWritten(TfLiteIntArrayView(node.outputs));
    auto inputs = ExportVector<int32_t>(fbb, written_inputs);
    auto outputs = ExportVector<int32_t>(fbb, written_outputs);
    operators.push_back(CreateOperator(*fbb, opcode_index, inputs, outputs,
                                       builtin_options_type, builtin_options,
                                       custom_options, custom_options_format));
  }
  return fbb->template CreateVector<flatbuffers::Offset<Operator>>(operators);
}
// Serializes the subgraph's tensors, skipping temporaries and tensors marked
// unused, and records the subgraph-index -> written-index mapping in
// tensor_to_written_tensor_ (used later by RemapTensorIndicesToWritten).
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Tensor>>>
SubgraphWriter::ExportTensors(flatbuffers::FlatBufferBuilder* fbb) {
  tensor_to_written_tensor_.resize(subgraph_->tensors_size(), -1);
  std::vector<flatbuffers::Offset<Tensor>> tensors;
  // Mark every tensor that appears in some node's temporaries list; those
  // are scratch space and are not written out.
  std::vector<bool> tensor_is_temporary(subgraph_->tensors_size(), false);
  for (int op_index = 0; op_index < subgraph_->nodes_size(); ++op_index) {
    const auto* node_and_registration =
        subgraph_->node_and_registration(op_index);
    for (auto tensor_index :
         TfLiteIntArrayView(node_and_registration->first.temporaries))
      tensor_is_temporary[tensor_index] = true;
  }
  // Assign consecutive written indices to the tensors that are kept.
  int curr_output_index = 0;
  for (int tensor_index = 0; tensor_index < subgraph_->tensors_size();
       tensor_index++) {
    if (!tensor_is_temporary[tensor_index] &&
        unused_tensors_.find(tensor_index) == unused_tensors_.end()) {
      tensor_to_written_tensor_[tensor_index] = curr_output_index++;
    }
  }
  for (int tensor_index = 0; tensor_index < subgraph_->tensors_size();
       ++tensor_index) {
    if (tensor_to_written_tensor_[tensor_index] == -1) continue;
    if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
      // Constant (memory-mapped read-only) data gets its own Buffer entry;
      // everything else references buffer 0, the empty sentinel.
      int buffer_index = 0;
      if (tensor->allocation_type == kTfLiteMmapRo) {
        buffer_index = buffers_->size();
        buffers_->push_back(std::make_pair(
            reinterpret_cast<const uint8_t*>(tensor->data.raw), tensor->bytes));
      }
      TensorType type = TfLiteTypeToSchemaType(tensor->type);
      flatbuffers::Offset<QuantizationParameters> quantization_params;
      const flatbuffers::Offset<flatbuffers::Vector<float>> null_array;
      flatbuffers::Offset<flatbuffers::Vector<float>> scale_array;
      flatbuffers::Offset<flatbuffers::Vector<int64_t>> zero_point_array;
      if (tensor->quantization.type == kTfLiteAffineQuantization) {
        // Legacy per-tensor params live in tensor->params; a zero scale
        // signals per-channel params stored in tensor->quantization.params.
        if (tensor->params.scale != 0.f) {
          scale_array = fbb->CreateVector<float>({tensor->params.scale});
          zero_point_array =
              fbb->CreateVector<int64_t>({tensor->params.zero_point});
          quantization_params = CreateQuantizationParameters(
              *fbb, null_array, null_array, scale_array, zero_point_array);
        } else {
          const TfLiteAffineQuantization* params =
              reinterpret_cast<TfLiteAffineQuantization*>(
                  tensor->quantization.params);
          const size_t num_scales = params->scale->size;
          std::vector<float> scale_vector(params->scale->data,
                                          params->scale->data + num_scales);
          std::vector<int64_t> zero_point_vector(
              params->zero_point->data, params->zero_point->data + num_scales);
          scale_array = fbb->CreateVector<float>(scale_vector);
          zero_point_array = fbb->CreateVector<int64_t>(zero_point_vector);
          quantization_params = CreateQuantizationParameters(
              *fbb, null_array, null_array, scale_array, zero_point_array,
              QuantizationDetails_NONE, 0, params->quantized_dimension);
        }
      }
      // NOTE(review): tensors with a null `dims` are silently dropped here
      // even though they were assigned a written index above.
      if (tensor->dims) {
        TfLiteIntArrayView shape_view(tensor->dims);
        std::vector<int> shape =
            std::vector<int>(shape_view.begin(), shape_view.end());
        Offset<flatbuffers::String> tensor_name_offset = 0;
        if (tensor->name != nullptr) {
          tensor_name_offset = fbb->CreateString(tensor->name);
        }
        // dims_signature (with -1 for dynamic dims) is only serialized when
        // the writer was configured to keep it.
        flatbuffers::Offset<flatbuffers::Vector<int32_t>>
            shape_signature_offset = 0;
        if (serialize_dims_signature_ && tensor->dims_signature != nullptr) {
          TfLiteIntArrayView shape_signature_view(tensor->dims_signature);
          std::vector<int32_t> shape_signature(shape_signature_view.begin(),
                                               shape_signature_view.end());
          shape_signature_offset = ExportVector<int32_t>(fbb, shape_signature);
        }
        bool has_rank = true;
        tensors.push_back(CreateTensor(
            *fbb, ExportVector<int32_t>(fbb, shape), type, buffer_index,
            tensor_name_offset, quantization_params, tensor->is_variable,
            0, shape_signature_offset, has_rank));
      }
    }
  }
  return fbb->template CreateVector<flatbuffers::Offset<Tensor>>(tensors);
}
// Serializes the accumulated data buffers; thin wrapper over the shared impl.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
SubgraphWriter::ExportBuffers(flatbuffers::FlatBufferBuilder* fbb) {
  return ExportBuffersImpl(fbb, buffers_);
}
// Serializes the collected opcode table; thin wrapper over the shared impl.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
SubgraphWriter::CreateOpCodeTable(flatbuffers::FlatBufferBuilder* fbb) {
  return CreateOpCodeTableImpl(fbb, opcodes_);
}
// Translates tensor indices from the subgraph's numbering into the written
// model's numbering. The -1 optional-tensor sentinel is preserved as-is;
// tensors that were dropped from the output are skipped entirely.
template <class T>
std::vector<int> SubgraphWriter::RemapTensorIndicesToWritten(const T& input) {
  std::vector<int> remapped;
  remapped.reserve(input.size());
  for (const int index : input) {
    if (index == -1) {
      remapped.push_back(index);
    } else if (tensor_to_written_tensor_[index] != -1) {
      remapped.push_back(tensor_to_written_tensor_[index]);
    }
  }
  return remapped;
}
// Serializes this subgraph into a freshly allocated flatbuffer model.
// On success `*out` owns the bytes and `*size` holds their count.
TfLiteStatus SubgraphWriter::GetBuffer(std::unique_ptr<uint8_t[]>* out,
                                       size_t* size) {
  if (!out || !size) return kTfLiteError;
  flatbuffers::FlatBufferBuilder builder(10240);
  std::vector<flatbuffers::Offset<SubGraph>> subgraphs_as_vector;
  subgraphs_as_vector.push_back(
      PopulateAndGetOffset(&builder, subgraph_->GetName()));
  // ExportBuffers must run after PopulateAndGetOffset: serializing tensors
  // appends constant data to buffers_.
  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
      buffers = ExportBuffers(&builder);
  auto description = builder.CreateString("Exported from Subgraph.");
  auto op_codes = CreateOpCodeTable(&builder);
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, op_codes,
                           builder.CreateVector(subgraphs_as_vector),
                           description, buffers);
  ::tflite::FinishModelBuffer(builder, model);
  // Rewrite each op's version field based on how the op is actually used.
  ::tflite::UpdateOpVersion(builder.GetBufferPointer());
  const uint8_t* buffer = builder.GetBufferPointer();
  *size = builder.GetSize();
  (*out).reset(new uint8_t[*size]);
  memcpy(out->get(), buffer, *size);
  return kTfLiteOk;
}
// Serializes this subgraph's tensors, I/O lists, and operators into
// `builder` and returns the resulting SubGraph offset.
flatbuffers::Offset<SubGraph> SubgraphWriter::PopulateAndGetOffset(
    flatbuffers::FlatBufferBuilder* builder, const std::string& subgraph_name) {
  // ExportTensors must run first: it populates tensor_to_written_tensor_,
  // which the remapping calls below depend on.
  auto tensors = ExportTensors(builder);
  std::vector<int> written_inputs = RemapTensorIndicesToWritten(inputs_);
  std::vector<int> written_outputs = RemapTensorIndicesToWritten(outputs_);
  auto inputs = ExportVector<int32_t>(builder, written_inputs);
  auto outputs = ExportVector<int32_t>(builder, written_outputs);
  auto ops = ExportOperators(builder);
  auto name = builder->CreateString(subgraph_name);
  return CreateSubGraph(*builder, tensors, inputs, outputs, ops, name);
}
// Serializes the subgraph and writes the resulting model to `filename`.
TfLiteStatus SubgraphWriter::Write(const std::string& filename) {
  std::unique_ptr<uint8_t[]> buffer;
  size_t size;
  TF_LITE_ENSURE_STATUS(GetBuffer(&buffer, &size));
  return WriteImpl(filename, buffer.get(), size);
}
// Registers `custom_writer` as the serializer for the custom op
// `custom_name`. Returns kTfLiteError if a writer is already registered for
// that name; the existing registration is left untouched.
TfLiteStatus SubgraphWriter::RegisterCustomWriter(
    const std::string& custom_name, CustomWriter custom_writer) {
  // Single-lookup insert: emplace reports failure (second == false) when the
  // key already exists, matching the original find-then-insert behavior
  // while probing the map only once.
  const bool inserted =
      custom_op_to_writer_.emplace(custom_name, custom_writer).second;
  return inserted ? kTfLiteOk : kTfLiteError;
}
// Validates that `execution_plan` is self-contained given `inputs`: every
// node input must be a declared input, a variable, a constant, or produced
// by an earlier node in the plan, and every entry of `outputs` must be
// produced (or constant). Returns kTfLiteError on the first violation.
TfLiteStatus SubgraphWriter::CheckInputOutput(
    const std::vector<int>& inputs, const std::vector<int>& outputs,
    const std::vector<int>& execution_plan) {
  // Seed the known set with the declared inputs and the subgraph variables.
  absl::flat_hash_set<int> known_tensors(inputs.begin(), inputs.end());
  known_tensors.insert(subgraph_->variables().begin(),
                       subgraph_->variables().end());
  for (int op_index : execution_plan) {
    const auto* node_and_registration =
        subgraph_->node_and_registration(op_index);
    const TfLiteNode& node = node_and_registration->first;
    for (int tensor_index : TfLiteIntArrayView(node.inputs)) {
      if (tensor_index < 0) {
        // Only the optional-tensor sentinel is an acceptable negative index.
        if (tensor_index == kTfLiteOptionalTensor) {
          continue;
        } else {
          return kTfLiteError;
        }
      }
      // Constant (read-only) tensors are always available.
      if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
        if (tensor->allocation_type == kTfLiteMmapRo) {
          continue;
        }
      }
      if (known_tensors.find(tensor_index) == known_tensors.end()) {
        subgraph_->context()->ReportError(
            subgraph_->context(),
            "Node (%d) uses an input (%d) that is not provided.", op_index,
            tensor_index);
        return kTfLiteError;
      }
    }
    // NOTE(review): this local shadows the `outputs` parameter; it holds the
    // current node's outputs, which become known for subsequent nodes.
    TfLiteIntArrayView outputs(node.outputs);
    known_tensors.insert(outputs.begin(), outputs.end());
  }
  // Finally, every requested graph output must be constant or produced.
  for (int tensor_index : outputs) {
    if (TfLiteTensor* tensor = subgraph_->tensor(tensor_index)) {
      if (tensor->allocation_type == kTfLiteMmapRo) {
        continue;
      }
    }
    if (known_tensors.find(tensor_index) == known_tensors.end()) {
      subgraph_->context()->ReportError(
          subgraph_->context(),
          "Output (%d) is not produced by the execution plan.", tensor_index);
      return kTfLiteError;
    }
  }
  return kTfLiteOk;
}
// Overrides the subgraph's inputs, outputs, and execution plan for writing.
// Validates the combination first; on failure no state is modified.
TfLiteStatus SubgraphWriter::SetCustomInputOutput(
    const std::vector<int>& inputs, const std::vector<int>& outputs,
    const std::vector<int>& execution_plan) {
  TF_LITE_ENSURE_STATUS(CheckInputOutput(inputs, outputs, execution_plan));
  inputs_ = inputs;
  outputs_ = outputs;
  execution_plan_ = execution_plan;
  return kTfLiteOk;
}
// Constructs a ModelWriter covering every subgraph owned by `interpreter`.
ModelWriter::ModelWriter(Interpreter* interpreter,
                         bool serialize_dims_signature) {
  const int num_subgraphs = interpreter->subgraphs_size();
  std::vector<Subgraph*> all_subgraphs;
  all_subgraphs.reserve(num_subgraphs);
  for (int index = 0; index < num_subgraphs; ++index) {
    all_subgraphs.push_back(interpreter->subgraph(index));
  }
  Init(all_subgraphs, serialize_dims_signature);
}
// Constructs a ModelWriter over an explicit subset of subgraphs.
ModelWriter::ModelWriter(const std::vector<Subgraph*>& subgraphs,
                         bool serialize_dims_signature) {
  Init(subgraphs, serialize_dims_signature);
}
// Shared constructor body: creates one SubgraphWriter per subgraph (all
// sharing the model-level buffer/opcode tables) and records how original
// subgraph indices map to indices in the written model.
void ModelWriter::Init(const std::vector<Subgraph*>& subgraphs,
                       bool serialize_dims_signature) {
  // Reserve index 0 as the empty buffer; tensors without constant data
  // reference it.
  buffers_.push_back(std::make_pair(nullptr, 0));
  subgraph_writers_.reserve(subgraphs.size());
  for (auto* subgraph : subgraphs) {
    SubgraphWriter writer(subgraph, &buffers_, &opcodes_,
                          &builtin_op_to_opcode_, serialize_dims_signature);
    subgraph_writers_.push_back(writer);
  }
  if (!subgraphs.empty()) {
    // Map each original subgraph index to its position in the written model
    // (only subgraphs passed to this writer get an entry). Used later to
    // fix up WHILE/IF subgraph references.
    absl::flat_hash_map<Subgraph*, int> subgraph_to_new_subgraph_index;
    for (int i = 0; i < subgraphs.size(); ++i) {
      subgraph_to_new_subgraph_index[subgraphs[i]] = i;
    }
    auto* all_subgraphs = subgraphs[0]->GetSubgraphs();
    for (int i = 0; i < all_subgraphs->size(); ++i) {
      auto it = subgraph_to_new_subgraph_index.find(all_subgraphs->at(i));
      if (it != subgraph_to_new_subgraph_index.end()) {
        subgraph_index_mapper_[i] = it->second;
      }
    }
  }
}
// Serializes the model-level buffer table; thin wrapper over the shared impl.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
ModelWriter::ExportBuffers(flatbuffers::FlatBufferBuilder* fbb) {
  return ExportBuffersImpl(fbb, &buffers_);
}
// Serializes the model-level opcode table; thin wrapper over the shared impl.
flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<OperatorCode>>>
ModelWriter::CreateOpCodeTable(flatbuffers::FlatBufferBuilder* fbb) {
  return CreateOpCodeTableImpl(fbb, &opcodes_);
}
// Serializes all subgraphs into a freshly allocated flatbuffer model.
// On success `*out` owns the bytes and `*size` holds their count.
TfLiteStatus ModelWriter::GetBuffer(std::unique_ptr<uint8_t[]>* out,
                                    size_t* size) {
  if (!out || !size) return kTfLiteError;
  flatbuffers::FlatBufferBuilder builder(10240);
  std::vector<flatbuffers::Offset<SubGraph>> subgraphs_as_vector;
  subgraphs_as_vector.reserve(subgraph_writers_.size());
  for (auto& subgraph_writer : subgraph_writers_) {
    subgraphs_as_vector.push_back(subgraph_writer.PopulateAndGetOffset(
        &builder, subgraph_writer.subgraph_->GetName()));
  }
  // Buffers must be exported after the subgraphs: serializing tensors
  // appends constant data to buffers_.
  flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
      buffers = ExportBuffers(&builder);
  auto description = builder.CreateString("Exported from Subgraph.");
  auto op_codes = CreateOpCodeTable(&builder);
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, op_codes,
                           builder.CreateVector(subgraphs_as_vector),
                           description, buffers);
  ::tflite::FinishModelBuffer(builder, model);
  ::tflite::UpdateOpVersion(builder.GetBufferPointer());
  // Rewrite WHILE/IF subgraph indices to the written model's numbering.
  UpdateSubgraphReferences(&builder);
  const uint8_t* buffer = builder.GetBufferPointer();
  *size = builder.GetSize();
  (*out).reset(new uint8_t[*size]);
  memcpy(out->get(), buffer, *size);
  return kTfLiteOk;
}
// Serializes the model and writes it to `filename`.
TfLiteStatus ModelWriter::Write(const std::string& filename) {
  std::unique_ptr<uint8_t[]> buffer;
  size_t size;
  TF_LITE_ENSURE_STATUS(GetBuffer(&buffer, &size));
  return WriteImpl(filename, buffer.get(), size);
}
// Marks tensors of one subgraph to be excluded from the written model.
// NOTE(review): `subgraph_index` is not bounds-checked; an out-of-range
// index is undefined behavior.
void ModelWriter::SetUnusedTensors(int subgraph_index,
                                   const std::set<int>& unused_tensors) {
  subgraph_writers_[subgraph_index].SetUnusedTensors(unused_tensors);
}
// Forwards a custom input/output/execution-plan override to the writer for
// `subgraph_index` (not bounds-checked).
TfLiteStatus ModelWriter::SetCustomInputOutput(
    int subgraph_index, const std::vector<int>& inputs,
    const std::vector<int>& outputs, const std::vector<int>& execution_plan) {
  return subgraph_writers_[subgraph_index].SetCustomInputOutput(inputs, outputs,
                                                                execution_plan);
}
// Registers `custom_writer` with every subgraph writer.
// NOTE(review): per-subgraph registration failures (writer already present)
// are swallowed; this always returns kTfLiteOk.
TfLiteStatus ModelWriter::RegisterCustomWriter(const std::string& custom_name,
                                               CustomWriter custom_writer) {
  for (auto& subgraph_writer : subgraph_writers_) {
    subgraph_writer.RegisterCustomWriter(custom_name, custom_writer);
  }
  return kTfLiteOk;
}
// Rewrites, in place in the finished buffer, the subgraph indices embedded
// in WHILE (cond/body) and IF (then/else) options so they refer to the
// written model's subgraph numbering. Fails if any referenced subgraph was
// not included in this writer.
TfLiteStatus ModelWriter::UpdateSubgraphReferences(
    flatbuffers::FlatBufferBuilder* fbb) {
  // Uses the mutable flatbuffers API to patch scalar fields in place.
  auto model = tflite::GetMutableModel(fbb->GetBufferPointer());
  for (SubGraph* subgraph : *model->mutable_subgraphs()) {
    for (Operator* op : *subgraph->mutable_operators()) {
      if (op->builtin_options_type() == BuiltinOptions_WhileOptions) {
        auto while_options =
            static_cast<tflite::WhileOptions*>(op->mutable_builtin_options());
        auto new_cond_index =
            subgraph_index_mapper_.find(while_options->cond_subgraph_index());
        auto new_body_index =
            subgraph_index_mapper_.find(while_options->body_subgraph_index());
        if (new_cond_index == subgraph_index_mapper_.end() ||
            new_body_index == subgraph_index_mapper_.end()) {
          return kTfLiteError;
        }
        while_options->mutate_cond_subgraph_index(new_cond_index->second);
        while_options->mutate_body_subgraph_index(new_body_index->second);
      } else if (op->builtin_options_type() == BuiltinOptions_IfOptions) {
        auto if_options =
            static_cast<tflite::IfOptions*>(op->mutable_builtin_options());
        auto new_then_index =
            subgraph_index_mapper_.find(if_options->then_subgraph_index());
        auto new_else_index =
            subgraph_index_mapper_.find(if_options->else_subgraph_index());
        if (new_then_index == subgraph_index_mapper_.end() ||
            new_else_index == subgraph_index_mapper_.end()) {
          return kTfLiteError;
        }
        if_options->mutate_then_subgraph_index(new_then_index->second);
        if_options->mutate_else_subgraph_index(new_else_index->second);
      }
    }
  }
  return kTfLiteOk;
}
} | #include "tensorflow/lite/tools/serialization/writer_lib.h"
#include <cstdlib>
#include <fstream>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/compiler/mlir/lite/schema/mutable/schema_generated.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
#include "tsl/platform/logging.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::FillIntTensor;
// Returns `file_name` prefixed with $TEST_TMPDIR, or "./" when unset.
std::string CreateFilePath(const std::string& file_name) {
  const char* tmp_dir = getenv("TEST_TMPDIR");
  const std::string prefix = tmp_dir ? tmp_dir : "./";
  return prefix + file_name;
}
// Parameterized over the writer kind: true => SubgraphWriter on the primary
// subgraph, false => ModelWriter over the whole interpreter.
class SingleSubgraphTest : public ::testing::TestWithParam<bool> {
 protected:
  // Serializes `interpreter` to `filename` with the parameterized writer;
  // CHECK-fails if writing does not succeed.
  void WriteToFile(Interpreter* interpreter, const std::string& filename,
                   bool use_subgraph_writer) {
    if (use_subgraph_writer) {
      SubgraphWriter writer(&interpreter->primary_subgraph());
      CHECK_EQ(writer.Write(filename), kTfLiteOk);
    } else {
      ModelWriter writer(interpreter);
      CHECK_EQ(writer.Write(filename), kTfLiteOk);
    }
  }
};
// Writing to an empty filename and passing a null output buffer must both
// fail cleanly with kTfLiteError rather than crashing.
TEST_P(SingleSubgraphTest, InvalidDestinations) {
  // Build a minimal one-node (ADD) graph: c = a + b, with b constant.
  Interpreter interpreter;
  interpreter.AddTensors(3);
  float foo[] = {1, 2, 3};
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadOnly(
      1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
      reinterpret_cast<char*>(foo), sizeof(foo));
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
                                           TfLiteQuantization());
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({2});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  // Empty filename: fopen fails, so Write must report an error.
  if (GetParam()) {
    SubgraphWriter writer(&interpreter.primary_subgraph());
    CHECK_EQ(writer.Write(""), kTfLiteError);
  } else {
    ModelWriter writer(&interpreter);
    CHECK_EQ(writer.Write(""), kTfLiteError);
  }
  // Null `out` pointer: GetBuffer must reject it up front.
  size_t size;
  if (GetParam()) {
    SubgraphWriter writer(&interpreter.primary_subgraph());
    CHECK_EQ(writer.GetBuffer(nullptr, &size), kTfLiteError);
  } else {
    ModelWriter writer(&interpreter);
    CHECK_EQ(writer.GetBuffer(nullptr, &size), kTfLiteError);
  }
}
// Round-trips a float ADD model: write it to disk, rebuild an interpreter
// from the file, and verify tensors can be allocated.
TEST_P(SingleSubgraphTest, FloatModelTest) {
  Interpreter interpreter;
  interpreter.AddTensors(3);
  float foo[] = {1, 2, 3};
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadOnly(
      1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
      reinterpret_cast<char*>(foo), sizeof(foo));
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
                                           TfLiteQuantization());
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({2});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  const std::string test_file = CreateFilePath("test_float.tflite");
  WriteToFile(&interpreter, test_file, GetParam());
  // Reload the written file and check it is a loadable, allocatable model.
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
// Writes only a slice of a two-node graph (just the RELU node, with tensor 2
// as input and 3 as output) via SetCustomInputOutput, and checks the result
// is still a loadable model.
TEST_P(SingleSubgraphTest, CustomInputOutputTest) {
  Interpreter interpreter;
  interpreter.AddTensors(4);
  constexpr float kFoo[] = {1, 2, 3};
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadOnly(
      1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
      reinterpret_cast<const char*>(kFoo), sizeof(kFoo));
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "d", {3},
                                           TfLiteQuantization());
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({3});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  const TfLiteRegistration* reg2 = resolver.FindOp(BuiltinOperator_RELU, 1);
  interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, reg2);
  const std::string test_file = CreateFilePath("test_custom.tflite");
  SubgraphWriter writer(&interpreter.primary_subgraph());
  // Keep only node 1 (RELU) with input tensor 2 and output tensor 3.
  EXPECT_EQ(writer.SetCustomInputOutput({2}, {3},
                                        {1}),
            kTfLiteOk);
  writer.SetUnusedTensors({0, 1});
  writer.Write(test_file);
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
// SetCustomInputOutput must reject plans whose inputs are not provided or
// whose outputs are not produced, and accept a consistent combination.
TEST_P(SingleSubgraphTest, CustomInputOutputErrorCasesTest) {
  // Three-node chain: ADD -> RELU -> RELU6 over tensors 0..4.
  Interpreter interpreter;
  interpreter.AddTensors(5);
  constexpr float kFoo[] = {1, 2, 3};
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadOnly(
      1, kTfLiteFloat32, "b", {3}, TfLiteQuantization(),
      reinterpret_cast<const char*>(kFoo), sizeof(kFoo));
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadWrite(3, kTfLiteFloat32, "d", {3},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadWrite(4, kTfLiteFloat32, "e", {3},
                                           TfLiteQuantization());
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({4});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  const TfLiteRegistration* reg2 = resolver.FindOp(BuiltinOperator_RELU, 1);
  interpreter.AddNodeWithParameters({2}, {3}, nullptr, 0, nullptr, reg2);
  const TfLiteRegistration* reg3 = resolver.FindOp(BuiltinOperator_RELU6, 1);
  interpreter.AddNodeWithParameters({3}, {4}, nullptr, 0, nullptr, reg3);
  SubgraphWriter writer(&interpreter.primary_subgraph());
  // Node 0 (ADD) needs tensors 0 and 1, but only 2 is declared an input.
  EXPECT_EQ(writer.SetCustomInputOutput({2}, {3},
                                        {0, 1}),
            kTfLiteError);
  // Output 4 is produced by node 2, which is not in the plan {0, 1}.
  EXPECT_EQ(writer.SetCustomInputOutput({0, 1}, {4},
                                        {0, 1}),
            kTfLiteError);
  // Consistent: plan {0, 1} consumes {0, 1} and produces 3.
  EXPECT_EQ(writer.SetCustomInputOutput({0, 1}, {3},
                                        {0, 1}),
            kTfLiteOk);
}
// A variable tensor consumed by a node must count as "provided" for custom
// input/output validation, and the written model must still load.
TEST_P(SingleSubgraphTest, CustomInputOutputVariableTensorTest) {
  Interpreter interpreter;
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  interpreter.AddTensors(3);
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {3},
                                           TfLiteQuantization());
  // Tensor 1 is a variable (last argument: is_variable = true).
  interpreter.SetTensorParametersReadWrite(1, kTfLiteFloat32, "b", {3},
                                           TfLiteQuantization(),
                                           true);
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {3},
                                           TfLiteQuantization());
  interpreter.SetInputs({0});
  interpreter.SetOutputs({2});
  interpreter.SetVariables({1});
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  interpreter.AddNodeWithParameters({0, 1}, {2}, nullptr, 0,
                                    reinterpret_cast<void*>(builtin_data),
                                    resolver.FindOp(BuiltinOperator_ADD, 1));
  const std::string test_file = CreateFilePath("test_variables.tflite");
  SubgraphWriter writer(&interpreter.primary_subgraph());
  // Input {0} alone suffices: variable tensor 1 is implicitly available.
  EXPECT_EQ(writer.SetCustomInputOutput({0}, {2},
                                        {0}),
            kTfLiteOk);
  writer.Write(test_file);
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
// Round-trips a uint8 ADD model with legacy per-tensor quantization params
// (scale/zero-point) and verifies the written file loads and allocates.
TEST_P(SingleSubgraphTest, PerTensorQuantizedModelTest) {
  Interpreter interpreter;
  interpreter.AddTensors(3);
  interpreter.SetTensorParametersReadWrite(
      0, kTfLiteUInt8, "a", {3}, TfLiteQuantizationParams({1 / 256., 128}));
  interpreter.SetTensorParametersReadWrite(
      1, kTfLiteUInt8, "b", {3}, TfLiteQuantizationParams({1 / 256., 128}));
  interpreter.SetTensorParametersReadWrite(
      2, kTfLiteUInt8, "c", {3}, TfLiteQuantizationParams({1 / 256., 128}));
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({2});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  const std::string test_file = CreateFilePath("test_uint8.tflite");
  WriteToFile(&interpreter, test_file, GetParam());
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
// The writer must preserve op versioning: a BROADCAST_TO registered at
// version 2 must still resolve to version 2 after a write/reload cycle.
TEST_P(SingleSubgraphTest, OpVersioningTest) {
  Interpreter interpreter;
  interpreter.AddTensors(3);
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32, "a", {1, 4},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadWrite(1, kTfLiteInt32, "b", {2},
                                           TfLiteQuantization());
  interpreter.SetTensorParametersReadWrite(2, kTfLiteFloat32, "c", {4, 4},
                                           TfLiteQuantization());
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({2});
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  const TfLiteRegistration* reg =
      resolver.FindOp(BuiltinOperator_BROADCAST_TO, 2);
  interpreter.AddNodeWithParameters({0, 1}, {2},
                                    nullptr, 0,
                                    nullptr, reg);
  const std::string test_file = CreateFilePath("test_float.tflite");
  WriteToFile(&interpreter, test_file, GetParam());
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
  ASSERT_EQ(new_interpreter->nodes_size(), 1);
  TfLiteRegistration output_reg =
      new_interpreter->node_and_registration(0)->second;
  ASSERT_EQ(output_reg.builtin_code, BuiltinOperator_BROADCAST_TO);
  CHECK_EQ(output_reg.version, 2);
}
// The writer must serialize dims_signature: a tensor with dynamic batch
// ({-1, 3}) must keep that signature after a write/reload cycle.
TEST_P(SingleSubgraphTest, DynamicShapeTest) {
  Interpreter interpreter;
  interpreter.AddTensors(3);
  std::vector<int> dims = {1, 3};
  std::vector<int> dims_signature = {-1, 3};
  interpreter.SetTensorParametersReadWrite(
      0, kTfLiteFloat32, "a", dims, TfLiteQuantizationParams{1.0, 0},
      false, &dims_signature);
  interpreter.SetTensorParametersReadWrite(
      1, kTfLiteFloat32, "b", dims, TfLiteQuantizationParams{1.0, 0},
      false, &dims_signature);
  interpreter.SetTensorParametersReadWrite(
      2, kTfLiteFloat32, "c", dims, TfLiteQuantizationParams{1.0, 0},
      false, &dims_signature);
  interpreter.SetInputs({0, 1});
  interpreter.SetOutputs({2});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteAddParams* builtin_data =
      reinterpret_cast<TfLiteAddParams*>(malloc(sizeof(TfLiteAddParams)));
  builtin_data->activation = kTfLiteActNone;
  builtin_data->pot_scale_int16 = false;
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_ADD, 1);
  interpreter.AddNodeWithParameters({0, 1}, {2}, initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  const std::string test_file = CreateFilePath("test_dynamic_shape.tflite");
  WriteToFile(&interpreter, test_file, GetParam());
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  CHECK_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
  // The reloaded tensor must still carry the {-1, 3} signature.
  TfLiteTensor* tensor0 = new_interpreter->tensor(0);
  CHECK_NOTNULL(tensor0->dims_signature);
  TfLiteIntArrayView shape_view(tensor0->dims_signature);
  CHECK_EQ(shape_view.size(), 2);
  CHECK_EQ(shape_view[0], -1);
}
// Run every SingleSubgraphTest once per writer kind (SubgraphWriter and
// ModelWriter).
INSTANTIATE_TEST_SUITE_P(Writer, SingleSubgraphTest, ::testing::Bool());
// Parameters for the RESHAPE serialization tests below.
struct ReshapeTestPattern {
  // 1 = shape comes from builtin params only; 2 = a shape tensor is added.
  int num_inputs;
  // Whether the builtin params carry a usable (3-dim) target shape.
  bool is_param_valid;
  // If true, the shape tensor has non-flattened dims {3, 1} instead of {3}.
  bool has_buggy_non_flatten_shape;
};
class ReshapeLayerTest : public ::testing::TestWithParam<ReshapeTestPattern> {};
// Builds a single-RESHAPE graph with the parameterized combination of shape
// tensor / builtin params, writes it, and verifies the file reloads and
// allocates.
TEST_P(ReshapeLayerTest, ReshapeLayerTest) {
  const auto param = GetParam();
  Interpreter interpreter;
  const int total_tensors = param.num_inputs + 1;
  interpreter.AddTensors(total_tensors);
  int output_shape[] = {1, 2, 3};
  interpreter.SetTensorParametersReadWrite(0, kTfLiteFloat32,
                                           "a", {6},
                                           TfLiteQuantization());
  ASSERT_LE(param.num_inputs, 2);
  if (param.num_inputs == 2) {
    // Optional second input: constant shape tensor, with either correct
    // flat dims {3} or the deliberately buggy {3, 1}.
    if (param.has_buggy_non_flatten_shape) {
      interpreter.SetTensorParametersReadOnly(
          1, kTfLiteInt32, "b", {3, 1},
          TfLiteQuantization(), reinterpret_cast<char*>(output_shape),
          sizeof(output_shape));
    } else {
      interpreter.SetTensorParametersReadOnly(
          1, kTfLiteInt32, "b", {3},
          TfLiteQuantization(), reinterpret_cast<char*>(output_shape),
          sizeof(output_shape));
    }
  }
  interpreter.SetTensorParametersReadWrite(total_tensors - 1,
                                           kTfLiteFloat32, "c",
                                           {3}, TfLiteQuantization());
  std::vector<int> input_tensors(param.num_inputs);
  std::iota(input_tensors.begin(), input_tensors.end(), 0);
  interpreter.SetInputs(input_tensors);
  interpreter.SetOutputs({total_tensors - 1});
  const char* initial_data = "";
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  TfLiteReshapeParams* builtin_data = reinterpret_cast<TfLiteReshapeParams*>(
      malloc(sizeof(TfLiteReshapeParams)));
  memset(builtin_data, 0, sizeof(TfLiteReshapeParams));
  if (param.is_param_valid) {
    builtin_data->num_dimensions = 3;
    for (int dim = 0; dim < builtin_data->num_dimensions; ++dim) {
      builtin_data->shape[dim] = output_shape[dim];
    }
  }
  const TfLiteRegistration* reg = resolver.FindOp(BuiltinOperator_RESHAPE, 1);
  interpreter.AddNodeWithParameters(input_tensors,
                                    {total_tensors - 1},
                                    initial_data, 0,
                                    reinterpret_cast<void*>(builtin_data), reg);
  SubgraphWriter writer(&interpreter.primary_subgraph());
  // Encode the parameter combination into the output filename.
  std::stringstream ss;
  ss << CreateFilePath("test_reshape_") << param.num_inputs
     << param.is_param_valid << ".tflite";
  std::string filename = ss.str();
  writer.Write(filename);
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(filename.c_str());
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
}
// Instantiates the reshape test over the interesting parameter combinations;
// the lambda builds a readable test-name suffix from the parameters.
INSTANTIATE_TEST_SUITE_P(
    Writer, ReshapeLayerTest,
    ::testing::Values(ReshapeTestPattern{2,
                                         true,
                                         false},
                      ReshapeTestPattern{2,
                                         false,
                                         false},
                      ReshapeTestPattern{1,
                                         true,
                                         false},
                      ReshapeTestPattern{2,
                                         true,
                                         true}),
    [](const ::testing::TestParamInfo<ReshapeLayerTest::ParamType>& info) {
      std::stringstream ss;
      ss << "num_inputs_" << info.param.num_inputs << "_valid_param_"
         << info.param.is_param_valid << "_buggy_shape_"
         << info.param.has_buggy_non_flatten_shape;
      std::string name = ss.str();
      return name;
    });
// Fixture for WHILE-op serialization tests. Provides aligned custom tensor
// allocations whose backing buffers stay alive for the fixture's lifetime.
class WhileTest : public subgraph_test_util::ControlFlowOpTest {
 protected:
  // Returns a TfLiteCustomAllocation of `num_bytes` whose data pointer is
  // aligned to `required_alignment`. The (over-sized) backing buffer is owned
  // by the fixture and released when the fixture is destroyed.
  TfLiteCustomAllocation NewCustomAlloc(size_t num_bytes,
                                        int required_alignment) {
    char* raw = new char[num_bytes + required_alignment];
    auto* aligned = reinterpret_cast<char*>(
        AlignTo(required_alignment, reinterpret_cast<intptr_t>(raw)));
    custom_alloc_buffers_.emplace_back(raw);
    return TfLiteCustomAllocation({aligned, num_bytes});
  }

  // Rounds `offset` up to the next multiple of `alignment`.
  intptr_t AlignTo(size_t alignment, intptr_t offset) {
    const intptr_t remainder = offset % alignment;
    return remainder == 0 ? offset : offset + (alignment - remainder);
  }

  // Owns the unaligned backing buffers handed out via NewCustomAlloc().
  std::vector<std::unique_ptr<char[]>> custom_alloc_buffers_;
};
// Runs a WHILE loop computing triangular numbers, serializes the whole model
// (including both WHILE subgraphs) with ModelWriter, reloads it, and checks
// the reloaded model produces the same results.
TEST_F(WhileTest, TestTriangularNumberSequence) {
  const int kSeqNumber = 4;
  const int kExpectedValue = 15;
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber);
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
  // The second input uses a custom allocation to exercise serialization of
  // tensors backed by user-provided memory.
  auto alloc =
      NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
                     kDefaultTensorAlignment);
  auto* input_data = reinterpret_cast<int*>(alloc.data);
  input_data[0] = 1;
  interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {kSeqNumber + 1});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {1}, {kExpectedValue});
  // Round-trip: serialize, reload, re-run, and compare outputs.
  ModelWriter writer(interpreter_.get());
  const std::string test_file = CreateFilePath("test_while.tflite");
  writer.Write(test_file);
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  new_interpreter->ResizeInputTensor(new_interpreter->inputs()[0], {1});
  new_interpreter->ResizeInputTensor(new_interpreter->inputs()[1], {1});
  ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
  FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[0]), {1});
  FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[1]), {1});
  ASSERT_EQ(new_interpreter->Invoke(), kTfLiteOk);
  output1 = new_interpreter->tensor(new_interpreter->outputs()[0]);
  CheckIntTensor(output1, {1}, {kSeqNumber + 1});
  output2 = new_interpreter->tensor(new_interpreter->outputs()[1]);
  CheckIntTensor(output2, {1}, {kExpectedValue});
}
// Verifies that serializing via ModelWriter(Interpreter*) and via
// ModelWriter(vector<Subgraph*>) produces byte-identical files for the same
// WHILE model.
TEST_F(WhileTest, TestModelWriterFromSubgraphs) {
  const int kSeqNumber = 4;
  const int kExpectedValue = 15;
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber);
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildWhileSubgraph(&interpreter_->primary_subgraph());
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
  auto alloc =
      NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
                     kDefaultTensorAlignment);
  auto* input_data = reinterpret_cast<int*>(alloc.data);
  input_data[0] = 1;
  interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {kSeqNumber + 1});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {1}, {kExpectedValue});
  // Writer built from the interpreter.
  ModelWriter writer_1(interpreter_.get());
  const std::string test_file_1 = CreateFilePath("test_while_1.tflite");
  writer_1.Write(test_file_1);
  // Writer built from the explicit subgraph list (same subgraphs, same order).
  std::vector<Subgraph*> subgraphs;
  for (int i = 0; i < interpreter_->subgraphs_size(); ++i) {
    subgraphs.push_back(interpreter_->subgraph(i));
  }
  ModelWriter writer_2(subgraphs);
  const std::string test_file_2 = CreateFilePath("test_while_2.tflite");
  writer_2.Write(test_file_2);
  // Both serialized files must be non-empty and identical byte-for-byte.
  std::ifstream file_ifs_1(test_file_1, std::ios::in);
  std::ostringstream model_content_1;
  model_content_1 << file_ifs_1.rdbuf();
  std::ifstream file_ifs_2(test_file_2, std::ios::in);
  std::ostringstream model_content_2;
  model_content_2 << file_ifs_2.rdbuf();
  EXPECT_FALSE(model_content_1.str().empty());
  EXPECT_EQ(model_content_1.str(), model_content_2.str());
}
// Builds a model with TWO independent WHILE loops (subgraphs 1/2 and 3/4),
// then serializes only subgraphs {0, 3, 4} while restricting the primary
// subgraph to the second WHILE node. Verifies that ModelWriter remaps the
// cond/body subgraph indices so the reloaded model still runs correctly.
TEST_F(WhileTest, TestUpdateSubgraphIndices) {
  const int kSeqNumber1 = 4;
  const int kSeqNumber2 = 5;
  const int kExpectedValue1 = 15;
  const int kExpectedValue2 = 21;
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(4);
  builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(1), kSeqNumber1);
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildLessEqualCondSubgraph(interpreter_->subgraph(3), kSeqNumber2);
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(4));
  // Hand-build the primary subgraph: two shared inputs, two WHILE nodes with
  // separate (unused-counter, output) tensor pairs.
  Subgraph* primary_subgraph = &interpreter_->primary_subgraph();
  const int kInput1 = 0;
  const int kInput2 = 1;
  const int kUnused1 = 2;
  const int kUnused2 = 3;
  const int kOutput1 = 4;
  const int kOutput2 = 5;
  const int kTensorCount = 6;
  int first_new_tensor_index;
  ASSERT_EQ(primary_subgraph->AddTensors(kTensorCount, &first_new_tensor_index),
            kTfLiteOk);
  ASSERT_EQ(first_new_tensor_index, 0);
  ASSERT_EQ(primary_subgraph->SetInputs({kInput1, kInput2}), kTfLiteOk);
  ASSERT_EQ(primary_subgraph->SetOutputs({kOutput1, kOutput2}), kTfLiteOk);
  for (int i = 0; i < kTensorCount; ++i) {
    ASSERT_EQ(primary_subgraph->SetTensorParametersReadWrite(
                  i, kTfLiteInt32, "", 0, nullptr, {}, false),
              kTfLiteOk);
  }
  auto* while_reg = ops::builtin::Register_WHILE();
  while_reg->builtin_code = kTfLiteBuiltinWhile;
  // First WHILE uses subgraphs 1/2; second uses 3/4. Params are freed by the
  // interpreter, hence the raw malloc.
  TfLiteWhileParams* params1 =
      reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
  params1->cond_subgraph_index = 1;
  params1->body_subgraph_index = 2;
  TfLiteWhileParams* params2 =
      reinterpret_cast<TfLiteWhileParams*>(malloc(sizeof(TfLiteWhileParams)));
  params2->cond_subgraph_index = 3;
  params2->body_subgraph_index = 4;
  int while1_index, while2_index;
  primary_subgraph->AddNodeWithParameters({kInput1, kInput2},
                                          {kUnused1, kOutput1}, {}, nullptr, 0,
                                          params1, while_reg, &while1_index);
  primary_subgraph->AddNodeWithParameters({kInput1, kInput2},
                                          {kUnused2, kOutput2}, {}, nullptr, 0,
                                          params2, while_reg, &while2_index);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[0]), {1});
  auto alloc =
      NewCustomAlloc(interpreter_->tensor(interpreter_->inputs()[1])->bytes,
                     kDefaultTensorAlignment);
  auto* input_data = reinterpret_cast<int*>(alloc.data);
  input_data[0] = 1;
  interpreter_->SetCustomAllocationForTensor(interpreter_->inputs()[1], alloc);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {kExpectedValue1});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {1}, {kExpectedValue2});
  // Serialize only the second WHILE (subgraphs 0, 3, 4); the writer must
  // rewrite cond/body indices 3->1 and 4->2 in the output file.
  ModelWriter writer({interpreter_->subgraph(0), interpreter_->subgraph(3),
                      interpreter_->subgraph(4)});
  writer.SetCustomInputOutput(0, {kInput1, kInput2},
                              {kOutput2}, {while2_index});
  const std::string test_file = CreateFilePath("test_while.tflite");
  writer.Write(test_file);
  std::unique_ptr<FlatBufferModel> model =
      FlatBufferModel::BuildFromFile(test_file.c_str());
  tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates resolver;
  InterpreterBuilder builder(*model, resolver);
  std::unique_ptr<Interpreter> new_interpreter;
  builder(&new_interpreter);
  new_interpreter->ResizeInputTensor(new_interpreter->inputs()[0], {1});
  new_interpreter->ResizeInputTensor(new_interpreter->inputs()[1], {1});
  ASSERT_EQ(new_interpreter->AllocateTensors(), kTfLiteOk);
  FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[0]), {1});
  FillIntTensor(new_interpreter->tensor(new_interpreter->inputs()[1]), {1});
  ASSERT_EQ(new_interpreter->Invoke(), kTfLiteOk);
  // Only the second loop's output survived the restricted serialization.
  ASSERT_EQ(new_interpreter->outputs().size(), 1);
  TfLiteTensor* output = new_interpreter->tensor(new_interpreter->outputs()[0]);
  CheckIntTensor(output, {1}, {kExpectedValue2});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/serialization/writer_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90a28717-bee7-48d5-95db-a5733d865788 | cpp | google/arolla | peephole_optimizer | arolla/expr/optimization/peephole_optimizer.cc | arolla/expr/optimization/peephole_optimizer_test.cc | #include "arolla/expr/optimization/peephole_optimizer.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
// A pair of nodes compared during pattern matching: `candidate` belongs to
// the expression being optimized, `pattern` is the corresponding node of the
// optimization's `from` pattern. Holds references only; both expressions must
// outlive the candidate.
struct MatchingCandidate {
  const ExprNodePtr& candidate;
  const ExprNodePtr& pattern;
};
using MatchersMap =
absl::flat_hash_map<std::string, PeepholeOptimization::NodeMatcher>;
// Returns true iff `candidate` is acceptable for placeholder `key`:
// placeholders without a registered matcher accept any node.
bool PlaceholderMatches(absl::string_view key,
                        const MatchersMap& placeholder_matchers,
                        const ExprNodePtr& candidate) {
  const auto matcher_it = placeholder_matchers.find(key);
  if (matcher_it == placeholder_matchers.end()) {
    return true;  // No restriction registered for this placeholder.
  }
  return matcher_it->second(candidate);
}
// Materializes a `to` pattern: replaces every placeholder with its
// substitution from `subs` (error if one is missing) and rebinds every
// ReferenceToRegisteredOperator node to the real registered operator of the
// same name.
absl::StatusOr<ExprNodePtr> DecayReferencesToRegisteredOperator(
    const PostOrder& node_visitor_order,
    const absl::flat_hash_map<std::string, ExprNodePtr>& subs) {
  return TransformOnPostOrder(
      node_visitor_order, [&](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
        if (node->is_placeholder()) {
          const auto it = subs.find(node->placeholder_key());
          if (it == subs.end()) {
            return absl::InvalidArgumentError(absl::StrFormat(
                "No value was provided for P.%s.", node->placeholder_key()));
          }
          return it->second;
        }
        if (node->is_op() &&
            typeid(*node->op()) == typeid(ReferenceToRegisteredOperator)) {
          return BindOp(node->op()->display_name(), node->node_deps(), {});
        }
        return node;
      });
}
// Immutable description of one pattern optimization `from` -> `to`.
struct PatternOptimizationData {
  // The pattern to search for; placeholders act as wildcards.
  ExprNodePtr from;
  // The replacement expression, pre-traversed in post-order for substitution.
  PostOrder to_visitor_order;
  // Optional per-placeholder predicates restricting what may match.
  MatchersMap placeholder_matchers;
  // Index key derived from the root of `from`.
  PeepholeOptimization::PatternKey key;
};
// Pattern-based optimization: matches `data_.from` against a candidate root
// via a BFS over (candidate, pattern) node pairs and, on success, emits the
// `to` expression with matched placeholders substituted in.
class PatternOptimization : public PeepholeOptimization {
 public:
  explicit PatternOptimization(PatternOptimizationData data)
      : data_(std::move(data)) {}
  // Pattern optimizations are indexed by the root of `from`.
  std::optional<PeepholeOptimization::PatternKey> GetKey() const final {
    return data_.key;
  }
  // Returns the rewritten expression on a match, or `root` unchanged when the
  // pattern does not apply.
  absl::StatusOr<ExprNodePtr> ApplyToRoot(
      const ExprNodePtr& root) const override {
    // Maps each pattern-node fingerprint to the fingerprint of the candidate
    // node it was matched against; a pattern node occurring twice (e.g. P.a
    // used in two places) must match the *same* candidate node.
    absl::flat_hash_map<Fingerprint, Fingerprint> opt2root;
    std::queue<MatchingCandidate> queue;
    queue.push({.candidate = root, .pattern = data_.from});
    // Returns false (match failure) if this pattern node was already matched
    // against a different candidate; otherwise enqueues the pair once.
    auto add_to_queue = [&](MatchingCandidate candidate) -> bool {
      if (auto [it, success] =
              opt2root.emplace(candidate.pattern->fingerprint(),
                               candidate.candidate->fingerprint());
          !success) {
        return it->second == candidate.candidate->fingerprint();
      }
      queue.push(std::move(candidate));
      return true;
    };
    absl::flat_hash_map<std::string, ExprNodePtr> placeholder_subs;
    while (!queue.empty()) {
      MatchingCandidate candidate = queue.front();
      queue.pop();
      // Literal pattern nodes must match the identical literal.
      if (candidate.pattern->is_literal()) {
        if (!candidate.candidate->is_literal() ||
            (candidate.pattern->fingerprint() !=
             candidate.candidate->fingerprint())) {
          return root;
        }
        continue;
      }
      // Leaves are rejected at optimization-construction time.
      if (candidate.pattern->is_leaf()) {
        LOG(FATAL) << "Internal error: leaves are not expected.";
        return root;
      }
      // Placeholders match any candidate accepted by their matcher; record
      // the substitution for the rewrite step.
      if (candidate.pattern->is_placeholder()) {
        absl::string_view key = candidate.pattern->placeholder_key();
        if (!PlaceholderMatches(key, data_.placeholder_matchers,
                                candidate.candidate)) {
          return root;
        }
        auto [it, success] = placeholder_subs.emplace(key, candidate.candidate);
        DCHECK(success)
            << "Internal error: each node of the pattern with the same "
               "fingerprint must be added to the queue only once.";
        continue;
      }
      // Operator pattern node: compare by display name (so registered and
      // decayed forms of the same operator both match).
      DCHECK(candidate.pattern->is_op())
          << "Internal error: unexpected node type: "
          << ToDebugString(candidate.pattern);
      if (!candidate.candidate->is_op()) {
        return root;
      }
      if (candidate.pattern->op()->display_name() !=
          candidate.candidate->op()->display_name()) {
        return root;
      }
      ASSIGN_OR_RETURN(auto decayed_op,
                       DecayRegisteredOperator(candidate.candidate->op()));
      // Only backend/builtin operators are safe to match structurally.
      if (!HasBackendExprOperatorTag(decayed_op) &&
          !HasBuiltinExprOperatorTag(decayed_op)) {
        return absl::InvalidArgumentError(absl::StrFormat(
            "tried applying a peephole optimization to operator %s."
            " which is neither backend nor builtin. Is "
            "your peephole optimization correct?",
            decayed_op->display_name()));
      }
      const auto& opt_deps = candidate.pattern->node_deps();
      const auto& root_deps = candidate.candidate->node_deps();
      if (opt_deps.size() != root_deps.size()) {
        return root;
      }
      // Recurse into the children pairwise.
      for (int64_t dep_id = 0; dep_id != root_deps.size(); ++dep_id) {
        if (!add_to_queue({.candidate = root_deps[dep_id],
                           .pattern = opt_deps[dep_id]})) {
          return root;
        }
      }
    }
    // Full match: build `to` with the collected placeholder substitutions.
    return DecayReferencesToRegisteredOperator(data_.to_visitor_order,
                                               placeholder_subs);
  }

 private:
  PatternOptimizationData data_;
};
// Optimization defined by an arbitrary node-transformation callback; it has
// no pattern key and is therefore tried on every node.
class TransformOptimization : public PeepholeOptimization {
 public:
  explicit TransformOptimization(
      std::function<absl::StatusOr<ExprNodePtr>(ExprNodePtr)> fn)
      : fn_(std::move(fn)) {}

  // Delegates directly to the wrapped callback.
  absl::StatusOr<ExprNodePtr> ApplyToRoot(const ExprNodePtr& root) const final {
    return fn_(root);
  }

 private:
  std::function<absl::StatusOr<ExprNodePtr>(ExprNodePtr)> fn_;
};
}
// A lightweight stand-in for a registered operator, identified purely by
// name; it is rebound to the real operator when an optimization fires.
ReferenceToRegisteredOperator::ReferenceToRegisteredOperator(
    absl::string_view name)
    : ExprOperator(
          name, FingerprintHasher("arolla::expr::ReferenceToRegisteredOperator")
                    .Combine(name)
                    .Finish()) {}
// Accepts any number of arguments: the real arity is checked only when the
// reference is rebound to the registered operator.
absl::StatusOr<ExprOperatorSignature>
ReferenceToRegisteredOperator::GetSignature() const {
  return ExprOperatorSignature::MakeVariadicArgs();
}
// Infers nothing: attribute/type propagation happens after the reference is
// replaced by the actual registered operator.
absl::StatusOr<ExprAttributes> ReferenceToRegisteredOperator::InferAttributes(
    absl::Span<const ExprAttributes> /*inputs*/) const {
  return ExprAttributes{};
}
// Builds an expression node that calls a ReferenceToRegisteredOperator named
// `op_name` with the given arguments. Used to author optimization patterns
// without resolving operators at pattern-construction time.
absl::StatusOr<ExprNodePtr> CallOpReference(
    absl::string_view op_name,
    std::initializer_list<absl::StatusOr<ExprNodePtr>> status_or_args) {
  auto op_reference = std::make_shared<ReferenceToRegisteredOperator>(op_name);
  return CallOp(std::move(op_reference), status_or_args);
}
// Builds a coarse lookup key for `expr`: operator nodes are keyed by operator
// display name (so different nodes of the same op share a key), literals by
// value fingerprint, everything else by the node's own fingerprint.
PeepholeOptimization::PatternKey::PatternKey(const ExprNodePtr& expr) {
  if (expr->is_op()) {
    tpe_ = Type::kOperator;
    fingerprint_ =
        FingerprintHasher("").Combine(expr->op()->display_name()).Finish();
  } else if (expr->is_literal()) {
    tpe_ = Type::kLiteral;
    fingerprint_ = expr->qvalue()->GetFingerprint();
  } else {
    tpe_ = Type::kOther;
    fingerprint_ = expr->fingerprint();
  }
}
// Two keys are equal iff both the node-type tag and the fingerprint agree.
bool PeepholeOptimization::PatternKey::operator==(
    const PatternKey& other) const {
  if (tpe_ != other.tpe_) {
    return false;
  }
  return fingerprint_ == other.fingerprint_;
}
// Defined as the negation of operator== to keep the two consistent.
bool PeepholeOptimization::PatternKey::operator!=(
    const PatternKey& other) const {
  return !operator==(other);
}
// Validates a (from, to) pattern pair and wraps it into a PatternOptimization.
// Fails if `from` is a bare placeholder (it would match any expression), if
// either side contains leaves, or if `to` / the matchers reference placeholder
// keys that `from` does not declare.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>>
PeepholeOptimization::CreatePatternOptimization(
    ExprNodePtr from, ExprNodePtr to,
    absl::flat_hash_map<std::string, NodeMatcher> placeholder_matchers) {
  if (from->is_placeholder()) {
    return absl::FailedPreconditionError(
        absl::StrFormat("from EXPRession is placeholder, which would match "
                        "everything: %s -> %s",
                        ToDebugString(from), ToDebugString(to)));
  }
  if (!GetLeafKeys(from).empty() || !GetLeafKeys(to).empty()) {
    return absl::FailedPreconditionError(
        absl::StrFormat("leaves are not allowed in optimizations: %s -> %s",
                        ToDebugString(from), ToDebugString(to)));
  }
  // Placeholder keys declared by `from`; everything else is unknown.
  const auto from_keys = GetPlaceholderKeys(from);
  const absl::flat_hash_set<std::string> known_keys(from_keys.begin(),
                                                    from_keys.end());
  std::vector<std::string> unknown_to_keys;
  for (const auto& key : GetPlaceholderKeys(to)) {
    if (!known_keys.contains(key)) {
      unknown_to_keys.push_back(key);
    }
  }
  if (!unknown_to_keys.empty()) {
    return absl::FailedPreconditionError(
        absl::StrFormat("unknown placeholder keys in to expression: %s, %s->%s",
                        absl::StrJoin(unknown_to_keys, ","),
                        ToDebugString(from), ToDebugString(to)));
  }
  std::vector<std::string> unknown_matcher_keys;
  for (const auto& [key, unused_matcher] : placeholder_matchers) {
    if (!known_keys.contains(key)) {
      unknown_matcher_keys.push_back(key);
    }
  }
  if (!unknown_matcher_keys.empty()) {
    return absl::FailedPreconditionError(
        absl::StrFormat("unknown placeholder matcher keys: %s, %s->%s",
                        absl::StrJoin(unknown_matcher_keys, ","),
                        ToDebugString(from), ToDebugString(to)));
  }
  // Compute the key before `from` is moved into the data struct.
  const PatternKey key(from);
  return std::make_unique<PatternOptimization>(PatternOptimizationData{
      std::move(from), PostOrder(to), std::move(placeholder_matchers), key});
}
// Wraps an arbitrary node-transformation callback into an optimization that
// is tried on every node (no pattern key).
absl::StatusOr<std::unique_ptr<PeepholeOptimization>>
PeepholeOptimization::CreateTransformOptimization(
    std::function<absl::StatusOr<ExprNodePtr>(ExprNodePtr)> transform_fn) {
  return std::make_unique<TransformOptimization>(std::move(transform_fn));
}
// Optimizations partitioned by dispatch strategy: pattern optimizations are
// bucketed by their root key for O(1) lookup; transform optimizations are
// tried on every node.
struct PeepholeOptimizer::Data {
  absl::flat_hash_map<PeepholeOptimization::PatternKey,
                      std::vector<std::unique_ptr<PeepholeOptimization>>>
      pattern_optimizations;
  std::vector<std::unique_ptr<PeepholeOptimization>> transform_optimizations;
};
// Applies all matching optimizations to a single node: first every pattern
// optimization bucketed under the node's key (in registration order), then
// every transform optimization. Each step feeds the (possibly rewritten)
// node into the next.
absl::StatusOr<ExprNodePtr> PeepholeOptimizer::ApplyToNode(
    ExprNodePtr node) const {
  const PeepholeOptimization::PatternKey key(node);
  const auto it = data_->pattern_optimizations.find(key);
  if (it != data_->pattern_optimizations.end()) {
    for (const auto& optimization : it->second) {
      ASSIGN_OR_RETURN(node, optimization->ApplyToRoot(node));
    }
  }
  for (const auto& optimization : data_->transform_optimizations) {
    ASSIGN_OR_RETURN(node, optimization->ApplyToRoot(node));
  }
  return node;
}
// Applies the optimizer bottom-up over the whole expression via Transform.
absl::StatusOr<ExprNodePtr> PeepholeOptimizer::Apply(ExprNodePtr root) const {
  const auto apply_once =
      [this](ExprNodePtr node) -> absl::StatusOr<ExprNodePtr> {
    return ApplyToNode(std::move(node));
  };
  return Transform(root, apply_once);
}
// Out-of-line so that the unique_ptr<Data> member can destroy the
// forward-declared Data type.
PeepholeOptimizer::~PeepholeOptimizer() = default;
// Private constructor; instances are built via PeepholeOptimizer::Create.
PeepholeOptimizer::PeepholeOptimizer(std::unique_ptr<Data> data)
    : data_(std::move(data)) {}
// Builds an optimizer, routing each optimization into the keyed pattern
// buckets when it exposes a key, or into the per-node transform list
// otherwise. Registration order is preserved within each bucket.
absl::StatusOr<std::unique_ptr<PeepholeOptimizer>> PeepholeOptimizer::Create(
    std::vector<std::unique_ptr<PeepholeOptimization>> optimizations) {
  auto data = std::make_unique<Data>();
  for (auto& optimization : optimizations) {
    if (auto key = optimization->GetKey(); key.has_value()) {
      data->pattern_optimizations[*key].push_back(std::move(optimization));
    } else {
      data->transform_optimizations.push_back(std::move(optimization));
    }
  }
  // WrapUnique because the constructor is private.
  return absl::WrapUnique(new PeepholeOptimizer(std::move(data)));
}
// Convenience factory: runs every pack factory, concatenates the resulting
// optimization packs in order, and builds a PeepholeOptimizer from them.
absl::StatusOr<std::unique_ptr<PeepholeOptimizer>> CreatePeepholeOptimizer(
    absl::Span<const PeepholeOptimizationPackFactory>
        optimization_pack_factories) {
  PeepholeOptimizationPack all_optimizations;
  for (const auto& factory : optimization_pack_factories) {
    ASSIGN_OR_RETURN(PeepholeOptimizationPack pack, factory());
    all_optimizations.reserve(all_optimizations.size() + pack.size());
    for (auto& optimization : pack) {
      all_optimizations.push_back(std::move(optimization));
    }
  }
  return PeepholeOptimizer::Create(std::move(all_optimizations));
}
} | #include "arolla/expr/optimization/peephole_optimizer.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/hash/hash_testing.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/memory/optional_value.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsExpr;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::Ne;
// Verifies that CreatePatternOptimization rejects malformed patterns: leaves
// on either side, undeclared placeholder keys, a bare-placeholder `from`,
// and matchers for keys `from` does not declare.
TEST(Optimization, Errors) {
  ExprNodePtr leaf = Leaf("x");
  ExprNodePtr px = Placeholder("x");
  ASSERT_OK_AND_ASSIGN(ExprNodePtr opx, CallOp("math.add", {px, px}));
  ExprNodePtr py = Placeholder("y");
  ASSERT_OK_AND_ASSIGN(ExprNodePtr opy, CallOp("math.add", {py, py}));
  EXPECT_THAT(PeepholeOptimization::CreatePatternOptimization(opx, leaf),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("leaves are not allowed")));
  EXPECT_THAT(PeepholeOptimization::CreatePatternOptimization(leaf, opx),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("leaves are not allowed")));
  EXPECT_THAT(PeepholeOptimization::CreatePatternOptimization(opy, opx),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("unknown placeholder keys")));
  EXPECT_THAT(PeepholeOptimization::CreatePatternOptimization(px, opx),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("match everything")));
  EXPECT_THAT(PeepholeOptimization::CreatePatternOptimization(
                  opx, opx, {{"y", [](auto) { return true; }}}),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("unknown placeholder matcher keys")));
}
// Test optimization rewriting P.a + P.b into P.a - P.b.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>> Plus2MinusOptimization() {
  ASSIGN_OR_RETURN(
      ExprNodePtr apb,
      CallOpReference("math.add", {Placeholder("a"), Placeholder("b")}));
  ASSIGN_OR_RETURN(
      ExprNodePtr amb,
      CallOpReference("math.subtract", {Placeholder("a"), Placeholder("b")}));
  return PeepholeOptimization::CreatePatternOptimization(apb, amb);
}
// Test optimization rewriting make_tuple(P.a, P.b) into just P.a.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>> Pair2FirstOptimization() {
  ASSIGN_OR_RETURN(
      ExprNodePtr from,
      CallOpReference("core.make_tuple", {Placeholder("a"), Placeholder("b")}));
  ExprNodePtr to = Placeholder("a");
  return PeepholeOptimization::CreatePatternOptimization(from, to);
}
// Verifies that non-matching expressions pass through unchanged: different
// operators, placeholders, literals, and arity mismatches.
TEST(Optimization, NoOptimizations) {
  ASSERT_OK_AND_ASSIGN(auto optimization, Plus2MinusOptimization());
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.multiply", {Leaf("a"), Leaf("b")}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.subtract", {Leaf("a"), Leaf("b")}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  {
    ExprNodePtr expr = Placeholder("x");
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  // NOTE(review): this case duplicates the previous one verbatim; presumably
  // intended to cover a different node kind.
  {
    ExprNodePtr expr = Placeholder("x");
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  {
    ExprNodePtr expr = Literal(1.);
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  // A one-element tuple must not match the two-placeholder pattern.
  ASSERT_OK_AND_ASSIGN(auto pair_optimization, Pair2FirstOptimization());
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("core.make_tuple", {Leaf("x")}));
    EXPECT_THAT(pair_optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
}
// Verifies PatternKey equality/inequality and that the set of keys for
// operators, leaves, literals, and placeholders hashes correctly.
TEST(Optimization, Key) {
  ASSERT_OK_AND_ASSIGN(auto optimization, Plus2MinusOptimization());
  ASSERT_OK_AND_ASSIGN(ExprNodePtr plus,
                       CallOp("math.add", {Leaf("x"), Leaf("y")}));
  ASSERT_OK_AND_ASSIGN(ExprNodePtr minus,
                       CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
  ExprNodePtr leaf = Leaf("x");
  ExprNodePtr leaf2 = Leaf("y");
  ExprNodePtr placeholder = Placeholder("x");
  ExprNodePtr placeholder2 = Placeholder("y");
  ExprNodePtr literal = Literal(1.0);
  ExprNodePtr literal2 = Literal(1.0f);
  // The optimization is keyed by its root operator (math.add).
  EXPECT_THAT(optimization->GetKey(),
              Eq(PeepholeOptimization::PatternKey(plus)));
  EXPECT_THAT(optimization->GetKey(),
              Ne(PeepholeOptimization::PatternKey(minus)));
  EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
      PeepholeOptimization::PatternKey(plus),
      PeepholeOptimization::PatternKey(minus),
      PeepholeOptimization::PatternKey(leaf),
      PeepholeOptimization::PatternKey(leaf2),
      PeepholeOptimization::PatternKey(literal),
      PeepholeOptimization::PatternKey(literal2),
      PeepholeOptimization::PatternKey(placeholder),
      PeepholeOptimization::PatternKey(placeholder2),
  }));
}
// Verifies the basic rewrite a+b -> a-b on leaves, repeated arguments,
// subexpressions, and literal arguments.
TEST(Optimization, SimpleOptimizations) {
  ASSERT_OK_AND_ASSIGN(auto optimization, Plus2MinusOptimization());
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.add", {Leaf("x"), Leaf("y")}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
  // P.a and P.b may match the same node.
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.add", {Leaf("x"), Leaf("x")}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.subtract", {Leaf("x"), Leaf("x")}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
  // Placeholders match whole subexpressions, not just leaves.
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr subexpr,
                         CallOp("math.multiply", {Leaf("a"), Leaf("b")}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.add", {Placeholder("x"), subexpr}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.subtract", {Placeholder("x"), subexpr}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.add", {Literal(1.f), Literal(2.f)}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.subtract", {Literal(1.f), Literal(2.f)}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
}
// Verifies matching works when the candidate uses the decayed backend
// operator rather than the registered wrapper (matching is by display name).
TEST(Optimization, BackendWrapperOperatorOptimizations) {
  ASSERT_OK_AND_ASSIGN(auto optimization, Plus2MinusOptimization());
  {
    ASSERT_OK_AND_ASSIGN(
        auto add_backend,
        DecayRegisteredOperator(LookupOperator("math.add").value()));
    ASSERT_TRUE(HasBackendExprOperatorTag(add_backend));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp(add_backend, {Leaf("x"), Leaf("y")}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.subtract", {Leaf("x"), Leaf("y")}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
}
// Node matcher accepting only literal nodes.
constexpr auto kIsLiteral = [](const ExprNodePtr& expr) {
  return expr->is_literal();
};
// Test optimization distributing has() over presence_or, restricted by a
// matcher so that it only applies when P.b is a literal.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>> HasLiteralOptimization() {
  ASSIGN_OR_RETURN(
      ExprNodePtr from,
      CallOpReference("core.has._array",
                      {CallOpReference("core.presence_or",
                                       {Placeholder("a"), Placeholder("b")})}));
  ASSIGN_OR_RETURN(
      ExprNodePtr to,
      CallOpReference("core.presence_or",
                      {CallOpReference("core.has", {Placeholder("a")}),
                       CallOpReference("core.has", {Placeholder("b")})}));
  return PeepholeOptimization::CreatePatternOptimization(from, to,
                                                         {{"b", kIsLiteral}});
}
// Verifies placeholder matchers: the rewrite fires only when P.b is a
// literal, and is a no-op otherwise.
TEST(Optimization, RestrictedOptimizations) {
  ASSERT_OK_AND_ASSIGN(auto optimization, HasLiteralOptimization());
  OptionalValue<float> opt1 = 1.0f;
  // P.b is a leaf -> the matcher rejects it, no rewrite.
  {
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expr,
        CallOp("core.has._array",
               {CallOp("core.presence_or", {Leaf("x"), Leaf("y")})}));
    ASSERT_OK_AND_ASSIGN(expr, ToLowest(expr));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  // P.b is a literal -> the rewrite fires.
  {
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expr,
        CallOp("core.has._array",
               {CallOp("core.presence_or", {Leaf("x"), Literal(opt1)})}));
    ASSERT_OK_AND_ASSIGN(expr, ToLowest(expr));
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expected_expr,
        CallOp("core.presence_or", {CallOp("core.has", {Leaf("x")}),
                                    CallOp("core.has", {Literal(opt1)})}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
}
// Test optimization rewriting pow(P.a, 2.f) into P.a * P.a; the literal
// exponent must match exactly (value and type).
absl::StatusOr<std::unique_ptr<PeepholeOptimization>>
SquareA2AxAOptimization() {
  ASSIGN_OR_RETURN(
      ExprNodePtr square_a,
      CallOpReference("math.pow", {Placeholder("a"), Literal(2.f)}));
  ASSIGN_OR_RETURN(
      ExprNodePtr axa,
      CallOpReference("math.multiply", {Placeholder("a"), Placeholder("a")}));
  return PeepholeOptimization::CreatePatternOptimization(square_a, axa);
}
// Verifies literal matching is exact: 2.f matches, while 3.f and the
// double-typed 2. do not.
TEST(Optimization, LiteralOptimizations) {
  ASSERT_OK_AND_ASSIGN(auto optimization, SquareA2AxAOptimization());
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.pow", {Leaf("x"), Literal(2.f)}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.multiply", {Leaf("x"), Leaf("x")}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.pow", {Leaf("x"), Literal(3.f)}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  // Same value but different type (double vs float) must not match.
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                         CallOp("math.pow", {Leaf("x"), Literal(2.)}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
}
// Test optimization rewriting (P.a + P.b) * (P.a - P.b) into
// P.a * P.a - P.b * P.b; exercises placeholders reused across subtrees.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>> ApBxAmBOptimization() {
  ASSIGN_OR_RETURN(
      ExprNodePtr from,
      CallOpReference(
          "math.multiply",
          {CallOpReference("math.add", {Placeholder("a"), Placeholder("b")}),
           CallOpReference("math.subtract",
                           {Placeholder("a"), Placeholder("b")})}));
  ASSIGN_OR_RETURN(
      ExprNodePtr to,
      CallOpReference("math.subtract",
                      {CallOpReference("math.multiply",
                                       {Placeholder("a"), Placeholder("a")}),
                       CallOpReference("math.multiply",
                                       {Placeholder("b"), Placeholder("b")})}));
  return PeepholeOptimization::CreatePatternOptimization(from, to);
}
// Verifies that a placeholder occurring in several pattern positions must
// match the same candidate node in all of them.
TEST(Optimization, SamePartsInOptimization) {
  ASSERT_OK_AND_ASSIGN(auto optimization, ApBxAmBOptimization());
  {
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expr,
        CallOp("math.multiply",
               {CallOp("math.add", {Leaf("x"), Leaf("y")}),
                CallOp("math.subtract", {Leaf("x"), Leaf("y")})}));
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expected_expr,
        CallOp("math.subtract",
               {CallOp("math.multiply", {Leaf("x"), Leaf("x")}),
                CallOp("math.multiply", {Leaf("y"), Leaf("y")})}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
  // P.b would have to match both y and c -> no rewrite.
  {
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expr,
        CallOp("math.multiply",
               {CallOp("math.add", {Leaf("x"), Leaf("y")}),
                CallOp("math.subtract", {Leaf("x"), Leaf("c")})}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
  // P.b would have to match both y and x -> no rewrite.
  {
    ASSERT_OK_AND_ASSIGN(
        ExprNodePtr expr,
        CallOp("math.multiply",
               {CallOp("math.add", {Leaf("x"), Leaf("y")}),
                CallOp("math.subtract", {Leaf("x"), Leaf("x")})}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
}
// Test optimization rewriting (P.a + P.b) multiplied by itself n times into
// pow(P.a + P.b, n); stresses patterns with many identical subtrees.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>> ApBPowerNOptimization(
    int64_t n) {
  ASSIGN_OR_RETURN(
      ExprNodePtr apb,
      CallOpReference("math.add", {Placeholder("a"), Placeholder("b")}));
  ExprNodePtr from = apb;
  for (int64_t i = 1; i != n; ++i) {
    ASSIGN_OR_RETURN(from, CallOpReference("math.multiply", {from, apb}));
  }
  ASSIGN_OR_RETURN(ExprNodePtr to,
                   CallOpReference("math.pow", {apb, Literal<int64_t>(n)}));
  return PeepholeOptimization::CreatePatternOptimization(from, to);
}
// Verifies matching stays correct (and does not blow up) on a deep chain of
// 25 structurally identical multiplications, and that a single swapped-order
// factor (y+x vs x+y) defeats the match.
TEST(Optimization, ManySimilarNodes) {
  constexpr int64_t n = 25;
  ASSERT_OK_AND_ASSIGN(auto optimization, ApBPowerNOptimization(n));
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr xpy,
                         CallOp("math.add", {Leaf("x"), Leaf("y")}));
    ExprNodePtr expr = xpy;
    for (int64_t i = 1; i != n; ++i) {
      ASSERT_OK_AND_ASSIGN(expr, CallOp("math.multiply", {expr, xpy}));
    }
    ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                         CallOp("math.pow", {xpy, Literal<int64_t>(n)}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expected_expr)));
  }
  // One factor is y+x instead of x+y -> the pattern must not match.
  {
    ASSERT_OK_AND_ASSIGN(ExprNodePtr xpy,
                         CallOp("math.add", {Leaf("x"), Leaf("y")}));
    ASSERT_OK_AND_ASSIGN(ExprNodePtr ypx,
                         CallOp("math.add", {Leaf("y"), Leaf("x")}));
    ExprNodePtr expr = xpy;
    for (int64_t i = 1; i != n - 2; ++i) {
      ASSERT_OK_AND_ASSIGN(expr, CallOp("math.multiply", {expr, xpy}));
    }
    ASSERT_OK_AND_ASSIGN(expr, CallOp("math.multiply", {expr, ypx}));
    ASSERT_OK_AND_ASSIGN(expr, CallOp("math.multiply", {expr, xpy}));
    EXPECT_THAT(optimization->ApplyToRoot(expr),
                IsOkAndHolds(EqualsExpr(expr)));
  }
}
// Builds a pseudo-random expression over `placeholder_count` placeholders,
// mixing `op_count` random binary combinations with a final fold that wraps
// every intermediate in random unary ops. Used as fuzz input for the
// optimizer stress test.
absl::StatusOr<ExprNodePtr> BigRandomExpr(int64_t placeholder_count,
                                          int64_t op_count) {
  std::vector<ExprNodePtr> exprs;
  for (int64_t i = 0; i != placeholder_count; ++i) {
    exprs.push_back(Placeholder(std::to_string(i)));
  }
  absl::BitGen gen;
  auto binary_op = [&]() -> std::string {
    std::vector<std::string> names = {"math.add", "math.multiply", "math.pow"};
    return names[absl::Uniform(gen, 0u, names.size())];
  };
  for (int64_t i = 0; i != op_count; ++i) {
    auto x = exprs[absl::Uniform(gen, 0u, exprs.size())];
    auto y = exprs[absl::Uniform(gen, 0u, exprs.size())];
    ASSIGN_OR_RETURN(ExprNodePtr op, CallOp(binary_op(), {x, y}));
    // BUGFIX: previously `op` was discarded, so `op_count` had no effect on
    // the generated expression. Feed it back so later draws can reuse it.
    exprs.push_back(std::move(op));
  }
  auto unary_op = [&]() -> std::string {
    std::vector<std::string> names = {"math.neg", "math.log", "math.log1p"};
    return names[absl::Uniform(gen, 0u, names.size())];
  };
  // Fold everything together so that every generated subexpression is
  // reachable from the result.
  ExprNodePtr res = exprs.back();
  for (const ExprNodePtr& expr : exprs) {
    ASSIGN_OR_RETURN(res, CallOp(binary_op(), {CallOp(unary_op(), {res}),
                                               CallOp(unary_op(), {expr})}));
  }
  return res;
}
// Stress test: for increasingly large random pattern/replacement pairs,
// substituting the same placeholder bindings into both sides must make
// ApplyToRoot rewrite the instantiated pattern into the instantiated
// replacement.
TEST(Optimization, StressTest) {
  for (int64_t placeholder_count = 1; placeholder_count <= 64;
       placeholder_count *= 4) {
    for (int64_t op_count = 1; op_count <= 256; op_count *= 4) {
      ASSERT_OK_AND_ASSIGN(ExprNodePtr from,
                           BigRandomExpr(placeholder_count, op_count));
      ASSERT_OK_AND_ASSIGN(ExprNodePtr to,
                           BigRandomExpr(placeholder_count, op_count));
      ASSERT_OK_AND_ASSIGN(
          auto optimization,
          PeepholeOptimization::CreatePatternOptimization(from, to));
      // Bind each placeholder "i" to its own random subexpression.
      absl::flat_hash_map<std::string, ExprNodePtr> subs;
      for (int i = 0; i != placeholder_count; ++i) {
        ASSERT_OK_AND_ASSIGN(ExprNodePtr sub, BigRandomExpr(i + 1, i * 2 + 1));
        subs.emplace(std::to_string(i), sub);
      }
      ASSERT_OK_AND_ASSIGN(ExprNodePtr expr,
                           SubstitutePlaceholders(from, subs));
      ASSERT_OK_AND_ASSIGN(ExprNodePtr expected_expr,
                           SubstitutePlaceholders(to, subs));
      EXPECT_THAT(optimization->ApplyToRoot(expr),
                  IsOkAndHolds(EqualsExpr(expected_expr)));
    }
  }
}
// Verifies that an optimizer holding two optimizations applies both through
// the tree, and that re-applying it to an already-optimized expression is a
// fixed point (no further changes).
TEST(Optimization, TwoOptimizations) {
  std::vector<std::unique_ptr<PeepholeOptimization>> optimizations;
  // pow(a, 2) -> a * a (defined earlier in this file).
  ASSERT_OK_AND_ASSIGN(auto a2_opt, SquareA2AxAOptimization());
  optimizations.push_back(std::move(a2_opt));
  // (a+b)*(a+b)*(a+b) -> pow(a+b, 3).
  ASSERT_OK_AND_ASSIGN(auto a3_opt, ApBPowerNOptimization(3));
  optimizations.push_back(std::move(a3_opt));
  ASSERT_OK_AND_ASSIGN(ExprNodePtr square,
                       CallOp("math.pow", {Leaf("x"), Literal(2.f)}));
  ASSERT_OK_AND_ASSIGN(ExprNodePtr square2,
                       CallOp("math.add", {square, square}));
  // (square2 * square2) * square2 — a cube of the same node.
  ASSERT_OK_AND_ASSIGN(
      ExprNodePtr cubic_square2,
      CallOp("math.multiply",
             {CallOp("math.multiply", {square2, square2}), square2}));
  ASSERT_OK_AND_ASSIGN(ExprNodePtr x2,
                       CallOp("math.multiply", {Leaf("x"), Leaf("x")}));
  // Expected after both rewrites: pow((x*x) + (x*x), 3).
  ASSERT_OK_AND_ASSIGN(
      ExprNodePtr expected_cubic_square2_optimized,
      CallOp("math.pow", {CallOp("math.add", {x2, x2}), Literal(int64_t{3})}));
  ASSERT_OK_AND_ASSIGN(auto optimizer,
                       PeepholeOptimizer::Create(std::move(optimizations)));
  EXPECT_THAT(optimizer->Apply(cubic_square2),
              IsOkAndHolds(EqualsExpr(expected_cubic_square2_optimized)));
  // Idempotence: applying again leaves the optimized form unchanged.
  EXPECT_THAT(optimizer->Apply(expected_cubic_square2_optimized),
              IsOkAndHolds(EqualsExpr(expected_cubic_square2_optimized)));
}
// Builds a transform optimization that replaces every math.add /
// math.multiply node with its first argument; calls with no arguments and
// all other nodes are returned unchanged.
absl::StatusOr<std::unique_ptr<PeepholeOptimization>>
RemoveArithmeticOptimization() {
  return PeepholeOptimization::CreateTransformOptimization(
      [](ExprNodePtr expr) {
        if (expr->is_op()) {
          const auto& op_name = expr->op()->display_name();
          if (op_name == "math.add" || op_name == "math.multiply") {
            if (!expr->node_deps().empty()) {
              return expr->node_deps()[0];
            }
          }
        }
        return expr;
      });
}
// Checks that a transform optimization is applied recursively: both the
// outer add-with-0 and the inner multiply-by-1 collapse down to the leaf z.
TEST(Optimization, TransformOptimization) {
  std::vector<std::unique_ptr<PeepholeOptimization>> optimizations;
  ASSERT_OK_AND_ASSIGN(auto opt, RemoveArithmeticOptimization());
  optimizations.push_back(std::move(opt));
  ASSERT_OK_AND_ASSIGN(auto optimizer,
                       PeepholeOptimizer::Create(std::move(optimizations)));
  ExprNodePtr z = Leaf("z");
  // zx1 = z * 1, zx1p0 = (z * 1) + 0.
  ASSERT_OK_AND_ASSIGN(ExprNodePtr zx1,
                       CallOp("math.multiply", {z, Literal(1.f)}));
  ASSERT_OK_AND_ASSIGN(ExprNodePtr zx1p0,
                       CallOp("math.add", {zx1, Literal(0.f)}));
  EXPECT_THAT(optimizer->Apply(zx1), IsOkAndHolds(EqualsExpr(z)));
  EXPECT_THAT(optimizer->Apply(zx1p0), IsOkAndHolds(EqualsExpr(z)));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizer.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/optimization/peephole_optimizer_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
8ded5b53-11c9-465a-a926-7211f30afb8b | cpp | tensorflow/tensorflow | mhlo_import | third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.cc | third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import_test.cc | #include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/DialectConversion.h"
#include "shardy/dialect/sdy/ir/constants.h"
#include "shardy/dialect/sdy/ir/dialect.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/hlo/translate/mhlo_to_hlo/attribute_exporter.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/service/spmd/shardy/mhlo_round_trip/shard_map_import.h"
#include "xla/service/spmd/shardy/round_trip_common/pipeline_passes.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace sdy {
namespace {
using ::llvm::SmallDenseMap;
using ::llvm::SmallDenseSet;
using ::mlir::ArrayRef;
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::OpBuilder;
using ::mlir::OperationPass;
using ::mlir::Pass;
using ::mlir::PassWrapper;
using ::mlir::ShapedType;
using ::mlir::SmallVector;
using ::mlir::StringAttr;
using ::mlir::StringRef;
using ::mlir::func::FuncOp;
using ::mlir::sdy::AxisRefAttr;
using ::mlir::sdy::DimensionShardingAttr;
using ::mlir::sdy::kShardingAttr;
using ::mlir::sdy::MeshAttr;
using ::mlir::sdy::MeshAxisAttr;
using ::mlir::sdy::MeshOp;
using ::mlir::sdy::SdyDialect;
using ::mlir::sdy::TensorShardingAttr;
using ::mlir::sdy::TensorShardingPerValueAttr;
// One factor in the common factorization of a tile assignment: the factor
// `size` splits tile dimension `tileDimIndex` at sub-position
// `tileSubDimIndex` within that dimension (0 appears to be the minor-most
// factor — see getOrderedSubDimsFromIotaTileAssignment), and originates from
// reshape dimension `reshapeDimIndex` of the iota tile assignment.
struct SubDimInfo {
  int64_t tileDimIndex;
  int64_t tileSubDimIndex;
  int64_t reshapeDimIndex;
  int64_t size;
};
// Decomposition of a tile assignment: the ordered sub-dimensions plus the
// implied local mesh (the sub-dimension sizes, in `subDims` order).
struct AnalyzeTileAssignmentResult {
  SmallVector<SubDimInfo> subDims;
  SmallVector<int64_t> localMesh;
};
}
// Parses a textual sharding attribute value into an xla::HloSharding,
// CHECK-failing if the string is not a valid sharding representation.
xla::HloSharding parseShardingFromString(const StringAttr& sharding) {
  const std::optional<xla::OpSharding> proto =
      xla::ConvertSharding(sharding.getValue());
  CHECK(proto) << sharding.getValue().str();
  absl::StatusOr<HloSharding> parsed = xla::HloSharding::FromProto(*proto);
  CHECK_OK(parsed) << proto->DebugString();
  return *parsed;
}
namespace {
// Returns the shortest sequence of factors (each > 1) that refines both
// `array1` and `array2`: multiplying consecutive result entries can
// reproduce every (non-1) element of either input in order. Returns an
// empty vector when no such common factorization exists.
SmallVector<int64_t> shortestCommonFactorization(ArrayRef<int64_t> array1,
                                                 ArrayRef<int64_t> array2) {
  SmallVector<int64_t> result;
  result.reserve(std::max(array1.size(), array2.size()));
  // Skips 1-sized entries, which contribute nothing to the product.
  auto nextIndexWithNonOneElement = [](ArrayRef<int64_t> array,
                                       int64_t index) -> int64_t {
    while (index < array.size() && array[index] == 1) {
      index++;
    }
    return index;
  };
  int64_t index1 = nextIndexWithNonOneElement(array1, 0);
  int64_t index2 = nextIndexWithNonOneElement(array2, 0);
  // Invariants: nextStride{1,2} is the product of the consumed prefix of the
  // corresponding array; accumulatedFactor is the product of `result`.
  int64_t nextStride1 = 1;
  int64_t nextStride2 = 1;
  int64_t accumulatedFactor = 1;
  while (index1 < array1.size() || index2 < array2.size()) {
    // Consume the next element of an array once the emitted factors have
    // fully covered its current prefix product.
    if (index1 < array1.size() && nextStride1 == accumulatedFactor) {
      nextStride1 *= array1[index1++];
    }
    if (index2 < array2.size() && nextStride2 == accumulatedFactor) {
      nextStride2 *= array2[index2++];
    }
    // Emit the smaller of the two pending factors; it must divide the
    // larger one, otherwise the factorizations are incompatible.
    const auto [smallFactor, largeFactor] = std::minmax(
        {nextStride1 / accumulatedFactor, nextStride2 / accumulatedFactor});
    if (largeFactor % smallFactor != 0 || smallFactor == 1) {
      return {};
    }
    result.push_back(smallFactor);
    accumulatedFactor *= smallFactor;
    CHECK_EQ(accumulatedFactor, Product(result));
    index1 = nextIndexWithNonOneElement(array1, index1);
    index2 = nextIndexWithNonOneElement(array2, index2);
  }
  return result;
}
// Decomposes an iota tile assignment into sub-dimensions by taking the
// shortest common factorization between the tile dims and the transposed
// reshape dims, then attributing each factor to the tile dimension and the
// reshape dimension it falls into. Returns an empty vector when no common
// factorization exists. The result is sorted by
// (reshapeDimIndex, tileDimIndex).
SmallVector<SubDimInfo> getOrderedSubDimsFromIotaTileAssignment(
    const xla::IotaTileAssignment& iota) {
  // The device shape: reshape_dims permuted by transpose_perm.
  SmallVector<int64_t> deviceShape(iota.transpose_perm().size());
  for (auto [index, perm_i] : llvm::enumerate(iota.transpose_perm())) {
    deviceShape[index] = iota.reshape_dims()[perm_i];
  }
  const SmallVector<int64_t> axisSizes = shortestCommonFactorization(
      ArrayRef<int64_t>(iota.dims().begin(), iota.dims().end()), deviceShape);
  if (axisSizes.empty()) {
    return {};
  }
  SmallVector<SubDimInfo> subDims;
  subDims.reserve(axisSizes.size());
  // Walk the factors minor-to-major, tracking which tile dimension and
  // which transposed device dimension each factor belongs to.
  int64_t tileDimIndex = iota.ndims() - 1;
  int64_t transPermIndex = iota.transpose_perm().size() - 1;
  int64_t accTileSize = 1;
  int64_t accDeviceSize = 1;
  int64_t subDim = 0;
  for (const int64_t axisSize : llvm::reverse(axisSizes)) {
    // Size-1 tile dimensions hold no factors; skip them.
    while (iota.dim(tileDimIndex) == 1) {
      tileDimIndex--;
    }
    subDims.push_back(SubDimInfo{
        /*tileDimIndex=*/tileDimIndex,
        /*tileSubDimIndex=*/subDim++,
        /*reshapeDimIndex=*/iota.transpose_perm()[transPermIndex],
        /*size=*/axisSize,
    });
    accTileSize *= axisSize;
    accDeviceSize *= axisSize;
    // Current tile dimension fully covered: advance to the next (more
    // major) one and restart the sub-dimension counter.
    if (iota.dim(tileDimIndex) == accTileSize) {
      tileDimIndex--;
      accTileSize = 1;
      subDim = 0;
    }
    // Current device (reshape) dimension fully covered.
    if (deviceShape[transPermIndex] == accDeviceSize) {
      accDeviceSize = 1;
      transPermIndex--;
    }
  }
  absl::c_sort(subDims, [](const SubDimInfo& a, const SubDimInfo& b) {
    return std::forward_as_tuple(a.reshapeDimIndex, a.tileDimIndex) <
           std::forward_as_tuple(b.reshapeDimIndex, b.tileDimIndex);
  });
  return subDims;
}
// Extracts the sub-dimension decomposition and the corresponding local mesh
// (the ordered sub-dimension sizes) from a tile assignment. CHECK-fails if
// the tile assignment has no iota form or cannot be factorized.
AnalyzeTileAssignmentResult analyzeTileAssignment(
    const xla::TileAssignment& tileAssignment) {
  const std::optional<IotaTileAssignment>& iota = tileAssignment.iota();
  CHECK(iota.has_value()) << "tile assignment: " << tileAssignment.ToString();
  // Not `const`: the std::move into the result below must be a real move
  // (moving from a const object silently degrades to a copy).
  SmallVector<SubDimInfo> subDims =
      getOrderedSubDimsFromIotaTileAssignment(*iota);
  CHECK(!subDims.empty()) << "tile assignment: " << tileAssignment.ToString();
  // The local mesh is simply the ordered sub-dimension sizes.
  SmallVector<int64_t> mesh;
  mesh.reserve(subDims.size());
  for (const SubDimInfo& subDimInfo : subDims) {
    mesh.push_back(subDimInfo.size);
  }
  return AnalyzeTileAssignmentResult{
      std::move(subDims),
      std::move(mesh),
  };
}
// Gathers every distinct HloSharding attached via kXlaShardingAttr to
// function arguments, function results, or ops in the module. Tuple
// shardings on ops are flattened into their elements.
absl::flat_hash_set<xla::HloSharding> collectXlaHloShardings(
    ModuleOp moduleOp) {
  absl::flat_hash_set<xla::HloSharding> oldShardings;
  for (FuncOp funcOp : moduleOp.getOps<FuncOp>()) {
    for (int64_t argNum = 0; argNum < funcOp.getNumArguments(); ++argNum) {
      if (auto oldSharding =
              funcOp.getArgAttrOfType<StringAttr>(argNum, kXlaShardingAttr)) {
        oldShardings.insert(parseShardingFromString(oldSharding));
      }
    }
    for (int64_t resNum = 0; resNum < funcOp.getNumResults(); ++resNum) {
      if (auto oldSharding = funcOp.getResultAttrOfType<StringAttr>(
              resNum, kXlaShardingAttr)) {
        oldShardings.insert(parseShardingFromString(oldSharding));
      }
    }
    // Ops may carry tuple shardings (one element per result); collect the
    // flattened elements.
    funcOp.front().walk([&](mlir::Operation* op) {
      if (auto oldSharding = op->getAttrOfType<StringAttr>(kXlaShardingAttr)) {
        const xla::HloSharding hloSharding =
            parseShardingFromString(oldSharding);
        if (hloSharding.IsTuple()) {
          for (const xla::HloSharding& element : hloSharding.tuple_elements()) {
            oldShardings.insert(element);
          }
        } else {
          oldShardings.insert(hloSharding);
        }
      }
    });
  }
  return oldShardings;
}
// Result of scanning the module's shardings: the named axes of the common
// global mesh, plus the device ids that require a maximal mesh.
struct MeshAxesAndIds {
  SmallVector<MeshAxisAttr> namedAxes;
  SmallVector<int64_t> maximalDeviceIds;  // sorted ascending by the producer
};
// Scans all shardings in the module and derives (a) the axis sizes of a
// common global mesh — the shortest factorization refining every tiled
// sharding's local mesh — and (b) the sorted device ids used by
// unique-device (maximal) shardings. Axes are named "axis_<i>".
MeshAxesAndIds findMeshAxesAndIds(ModuleOp moduleOp) {
  MeshAxesAndIds result;
  auto& [namedAxes, maximalDeviceIds] = result;
  const absl::flat_hash_set<xla::HloSharding> oldShardings =
      collectXlaHloShardings(moduleOp);
  SmallVector<int64_t> axes;
  llvm::SmallDenseSet<int64_t> maximalDeviceIdSet;
  for (const xla::HloSharding& hloSharding : oldShardings) {
    if (hloSharding.HasUniqueDevice()) {
      maximalDeviceIdSet.insert(hloSharding.GetUniqueDevice());
      continue;
    }
    CHECK(!hloSharding.IsTuple());
    // Replicated/manual/unknown shardings put no constraint on the mesh.
    if (hloSharding.IsReplicated() || hloSharding.IsManual() ||
        hloSharding.IsUnknown()) {
      continue;
    }
    CHECK(hloSharding.IsTiled());
    // NOTE: this inner `result` shadows the outer MeshAxesAndIds `result`.
    const AnalyzeTileAssignmentResult result =
        analyzeTileAssignment(hloSharding.tile_assignment());
    // Refine the running axis list so it is compatible with every sharding
    // seen so far; CHECK-fails if no common refinement exists.
    axes = (axes.empty()) ? result.localMesh
                          : shortestCommonFactorization(result.localMesh, axes);
    CHECK(!axes.empty());
  }
  namedAxes.reserve(axes.size());
  for (auto [axisIndex, axisSize] : llvm::enumerate(axes)) {
    auto name = StringAttr::get(moduleOp->getContext(),
                                absl::StrCat("axis_", axisIndex));
    namedAxes.push_back(
        MeshAxisAttr::get(moduleOp->getContext(), name, axisSize));
  }
  maximalDeviceIds = llvm::to_vector(maximalDeviceIdSet);
  llvm::sort(maximalDeviceIds);
  return result;
}
}
// Converts an xla::HloSharding of a rank-`rank` tensor into an SDY
// TensorShardingAttr over `globalMesh`:
// - a unique-device sharding maps to that device's maximal mesh;
// - replicated/manual map to fully closed (or fully open if `openDims`),
//   unknown maps to fully open;
// - a tiled sharding is decomposed into sub-dimensions, each matched to a
//   run of consecutive global-mesh axes.
TensorShardingAttr convertToSdySharding(
    const xla::HloSharding& hloSharding, MeshAttr globalMesh,
    const SmallDenseMap<int64_t, StringRef>& deviceIdToMaximalMeshName,
    int64_t rank, bool openDims) {
  mlir::MLIRContext* ctx = globalMesh.getContext();
  if (hloSharding.HasUniqueDevice()) {
    return TensorShardingAttr::getFullyClosed(
        ctx, rank,
        deviceIdToMaximalMeshName.lookup(hloSharding.GetUniqueDevice()));
  }
  CHECK(!hloSharding.IsTuple());
  if (hloSharding.IsReplicated() || hloSharding.IsManual() ||
      hloSharding.IsUnknown()) {
    return hloSharding.IsUnknown() || openDims
               ? TensorShardingAttr::getFullyOpen(ctx, rank, kGlobalMeshName)
               : TensorShardingAttr::getFullyClosed(ctx, rank, kGlobalMeshName);
  }
  CHECK(hloSharding.IsTiled());
  const AnalyzeTileAssignmentResult result =
      analyzeTileAssignment(hloSharding.tile_assignment());
  // Map each local-mesh axis to the consecutive global-mesh axes whose size
  // product equals it (size-1 global axes are skipped).
  SmallVector<SmallVector<AxisRefAttr>> localAxisIndexToGlobalAxes;
  localAxisIndexToGlobalAxes.reserve(result.localMesh.size());
  int64_t globalAxisIndex = 0;
  for (int64_t localAxisSize : result.localMesh) {
    SmallVector<AxisRefAttr>& globalAxes =
        localAxisIndexToGlobalAxes.emplace_back();
    int64_t product = 1;
    while (product < localAxisSize) {
      MeshAxisAttr axisAttr = globalMesh.getAxes()[globalAxisIndex++];
      if (axisAttr.getSize() == 1) {
        continue;
      }
      globalAxes.push_back(AxisRefAttr::get(ctx, axisAttr.getName()));
      product *= axisAttr.getSize();
    }
    // The global axes must tile the local axis exactly.
    CHECK_EQ(product, localAxisSize);
  }
  // For each tensor dimension, record which local axis occupies each
  // sub-dimension slot. Sub-dims beyond `rank` (e.g. the replicated
  // last-tile dimension) are dropped.
  SmallVector<SmallVector<int64_t>> dimToSubDimToLocalAxisIndex(rank);
  for (auto [localAxisIndex, subDimInfo] : llvm::enumerate(result.subDims)) {
    if (subDimInfo.tileDimIndex >= rank) {
      continue;
    }
    SmallVector<int64_t>& subDimToLocalAxisIndex =
        dimToSubDimToLocalAxisIndex[subDimInfo.tileDimIndex];
    if (subDimInfo.tileSubDimIndex >= subDimToLocalAxisIndex.size()) {
      subDimToLocalAxisIndex.resize(subDimInfo.tileSubDimIndex + 1);
    }
    subDimToLocalAxisIndex[subDimInfo.tileSubDimIndex] = localAxisIndex;
  }
  // Emit per-dimension shardings; sub-dims are reversed so axes come out
  // major-to-minor within each dimension.
  SmallVector<DimensionShardingAttr> dimShardings;
  dimShardings.reserve(rank);
  for (ArrayRef<int64_t> subDimToLocalAxisIndex : dimToSubDimToLocalAxisIndex) {
    SmallVector<AxisRefAttr> axes;
    for (int64_t localAxisIndex : llvm::reverse(subDimToLocalAxisIndex)) {
      absl::c_copy(localAxisIndexToGlobalAxes[localAxisIndex],
                   std::back_inserter(axes));
    }
    dimShardings.push_back(
        DimensionShardingAttr::get(ctx, axes, !openDims));
  }
  return TensorShardingAttr::get(ctx, StringAttr::get(ctx, kGlobalMeshName),
                                 dimShardings, {});
}
namespace {
// Whether the tensor at `index` should be imported with open dimension
// shardings. An empty list means "no" for all tensors; a single entry
// applies uniformly; otherwise the list is indexed per tensor.
bool shouldOpenDims(ArrayRef<bool> allowPropagationToTensors, int64_t index) {
  switch (allowPropagationToTensors.size()) {
    case 0:
      return false;
    case 1:
      return allowPropagationToTensors.front();
    default:
      CHECK_LT(index, allowPropagationToTensors.size());
      return allowPropagationToTensors[index];
  }
}
// Converts the kXlaShardingAttr string attributes on `funcOp`'s arguments,
// results, and body ops into kShardingAttr SDY attributes (removing the
// originals). `allowPropagationToArgs`/`allowPropagationToResults` control
// which argument/result tensors are imported with open dimensions.
LogicalResult importShardings(
    FuncOp funcOp, MeshAttr globalMesh,
    const SmallDenseMap<int64_t, StringRef>& deviceIdToMaximalMeshName,
    ArrayRef<bool> allowPropagationToArgs,
    ArrayRef<bool> allowPropagationToResults) {
  for (auto [argNum, argType] : llvm::enumerate(funcOp.getArgumentTypes())) {
    if (auto oldSharding =
            funcOp.getArgAttrOfType<StringAttr>(argNum, kXlaShardingAttr)) {
      funcOp.setArgAttr(
          argNum, kShardingAttr,
          convertToSdySharding(parseShardingFromString(oldSharding), globalMesh,
                               deviceIdToMaximalMeshName,
                               mlir::cast<ShapedType>(argType).getRank(),
                               shouldOpenDims(allowPropagationToArgs, argNum)));
      funcOp.removeArgAttr(argNum, kXlaShardingAttr);
    }
  }
  for (auto [resNum, resType] : llvm::enumerate(funcOp.getResultTypes())) {
    if (auto oldSharding =
            funcOp.getResultAttrOfType<StringAttr>(resNum, kXlaShardingAttr)) {
      funcOp.setResultAttr(
          resNum, kShardingAttr,
          convertToSdySharding(
              parseShardingFromString(oldSharding), globalMesh,
              deviceIdToMaximalMeshName,
              mlir::cast<ShapedType>(resType).getRank(),
              shouldOpenDims(allowPropagationToResults, resNum)));
      funcOp.removeResultAttr(
          resNum, StringAttr::get(funcOp.getContext(), kXlaShardingAttr));
    }
  }
  // Op results: a tuple sharding carries one element per result; otherwise
  // llvm::zip_equal below requires exactly one result.
  funcOp.front().walk([&](mlir::Operation* op) {
    if (auto oldSharding = op->getAttrOfType<StringAttr>(kXlaShardingAttr)) {
      const xla::HloSharding hloSharding = parseShardingFromString(oldSharding);
      ArrayRef<xla::HloSharding> flatHloSharding = hloSharding;
      if (hloSharding.IsTuple()) {
        flatHloSharding = hloSharding.tuple_elements();
      }
      SmallVector<TensorShardingAttr> newShardings;
      newShardings.reserve(op->getNumResults());
      for (const auto& [resHloSharding, resType] :
           llvm::zip_equal(flatHloSharding, op->getResultTypes())) {
        // Op-result dimensions are always imported closed.
        newShardings.push_back(convertToSdySharding(
            resHloSharding, globalMesh, deviceIdToMaximalMeshName,
            mlir::cast<ShapedType>(resType).getRank(),
            false));
      }
      op->setAttr(kShardingAttr, TensorShardingPerValueAttr::get(
                                     globalMesh.getContext(), newShardings));
      op->removeAttr(kXlaShardingAttr);
    }
  });
  return mlir::success();
}
// Module pass that creates the global mesh (and one maximal mesh per unique
// device id) at the top of the module, then converts every kXlaShardingAttr
// into a kShardingAttr via importShardings().
class ImportShardingsPass
    : public PassWrapper<ImportShardingsPass, OperationPass<ModuleOp>> {
 public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ImportShardingsPass)
  ImportShardingsPass(ArrayRef<bool> allowPropagationToArgs,
                      ArrayRef<bool> allowPropagationToResults)
      : allowPropagationToArgs(allowPropagationToArgs),
        allowPropagationToResults(allowPropagationToResults) {}
  void runOnOperation() final {
    ModuleOp moduleOp = getOperation();
    auto [namedAxes, deviceIdsForMaximalMesh] = findMeshAxesAndIds(moduleOp);
    // No shardings anywhere in the module: nothing to import.
    if (namedAxes.empty() && deviceIdsForMaximalMesh.empty()) {
      return;
    }
    mlir::SymbolTableCollection symbolTableCollection;
    mlir::SymbolTable& symbolTable =
        symbolTableCollection.getSymbolTable(moduleOp);
    OpBuilder opBuilder = mlir::OpBuilder::atBlockBegin(moduleOp.getBody());
    // The single global mesh used by all non-maximal shardings.
    symbolTable.insert(opBuilder.create<MeshOp>(
        moduleOp.getLoc(), kGlobalMeshName,
        MeshAttr::get(moduleOp.getContext(), namedAxes)));
    // One maximal mesh per device id referenced by a unique-device sharding.
    SmallDenseMap<int64_t, StringRef> deviceIdToMaximalMeshName;
    for (int64_t deviceId : deviceIdsForMaximalMesh) {
      std::string meshName = absl::StrCat("maximal_mesh_", deviceId);
      auto meshOp = opBuilder.create<MeshOp>(
          moduleOp.getLoc(), meshName,
          MeshAttr::get(moduleOp.getContext(), deviceId));
      symbolTable.insert(meshOp);
      deviceIdToMaximalMeshName[deviceId] = meshOp.getSymName();
    }
    for (FuncOp funcOp : moduleOp.getOps<FuncOp>()) {
      // Propagation overrides only apply to the entry ("main") function.
      bool isMain = funcOp.getSymName() == "main";
      MeshAttr globalMesh = MeshAttr::get(moduleOp.getContext(), namedAxes);
      if (mlir::failed(importShardings(
              funcOp, globalMesh, deviceIdToMaximalMeshName,
              isMain ? allowPropagationToArgs : ArrayRef<bool>(),
              isMain ? allowPropagationToResults : ArrayRef<bool>()))) {
        signalPassFailure();
      }
    }
  }
  StringRef getArgument() const override { return "xla-sdy-import-shardings"; }
  StringRef getDescription() const override {
    return "Builds the mesh and converts the shardings from kXlaShardingAttr "
           "to kShardingAttr.";
  }
  void getDependentDialects(mlir::DialectRegistry& registry) const final {
    registry.insert<SdyDialect>();
  }
 private:
  ArrayRef<bool> allowPropagationToArgs;
  ArrayRef<bool> allowPropagationToResults;
};
// Factory for ImportShardingsPass; the two lists control which main-function
// argument/result tensors are imported with open (propagatable) dimensions.
std::unique_ptr<mlir::Pass> createImportShardingsPass(
    ArrayRef<bool> allowPropagationToArgs,
    ArrayRef<bool> allowPropagationToResults) {
  auto pass = std::make_unique<ImportShardingsPass>(allowPropagationToArgs,
                                                    allowPropagationToResults);
  return pass;
}
}
// Registers the sharding-import pass with empty propagation settings
// (no argument/result dimensions are opened).
void registerMhloImportShardingsPass() {
  mlir::registerPass([]() {
    return createImportShardingsPass(ArrayRef<bool>(), ArrayRef<bool>());
  });
}
// Assembles the full MHLO->SDY import pipeline: common pre-import passes,
// sharding import, shard_map import, then common post-import passes.
// Pass order is significant.
void addMhloImportPipeline(mlir::OpPassManager& pm,
                           ArrayRef<bool> allowPropagationToArgs,
                           ArrayRef<bool> allowPropagationToResults) {
  addCommonPreImportPasses(pm);
  pm.addPass(createImportShardingsPass(allowPropagationToArgs,
                                       allowPropagationToResults));
  pm.addPass(createMhloRoundTripShardMapImportPass());
  addCommonPostImportPasses(pm);
}
void registerMhloImportPipeline() {
mlir::PassPipelineRegistration<> importPipeline(
"xla-sdy-mhlo-import-pipeline",
"Run passes to import an mhlo module with `mhlo.shardings` into the SDY "
"(Shardy) dialect.",
std::bind(addMhloImportPipeline, std::placeholders::_1, ArrayRef<bool>(),
ArrayRef<bool>()));
}
}
} | #include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "shardy/dialect/sdy/ir/dialect.h"
#include "shardy/dialect/sdy/ir/register.h"
#include "shardy/dialect/sdy/ir/utils.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "tsl/platform/test.h"
namespace mlir::sdy {
namespace {
// A size-1 mesh axis ("x", listed first) contributes nothing to a tiled
// sharding, so the converted sharding references only "y" and "z".
TEST(MhloImportTest, SkipFirstAxisOfSize1) {
  MLIRContext context;
  loadAllRequiredDialects(&context);
  SmallVector<sdy::MeshAxisAttr> axes;
  axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "x", 1));
  axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "y", 4));
  axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "z", 2));
  auto mesh = sdy::MeshAttr::get(&context, axes);
  TensorShardingAttr sharding = xla::sdy::convertToSdySharding(
      /*hloSharding=*/xla::HloSharding::IotaTile({4, 2}),
      /*globalMesh=*/mesh,
      /*deviceIdToMaximalMeshName=*/
      llvm::SmallDenseMap<int64_t, mlir::StringRef>(), /*rank=*/2,
      /*openDims=*/true);
  EXPECT_EQ(attributeToString(sharding),
            "#sdy.sharding<@mesh, [{\"y\", ?}, {\"z\", ?}]>");
}
// Same as above, but the size-1 axis ("x") sits between "y" and "z"; it is
// still skipped when matching global axes to the tile assignment.
TEST(MhloImportTest, SkipSecondAxisOfSize1) {
  MLIRContext context;
  loadAllRequiredDialects(&context);
  SmallVector<sdy::MeshAxisAttr> axes;
  axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "y", 4));
  axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "x", 1));
  axes.emplace_back(mlir::sdy::MeshAxisAttr::get(&context, "z", 2));
  auto mesh = sdy::MeshAttr::get(&context, axes);
  TensorShardingAttr sharding = xla::sdy::convertToSdySharding(
      /*hloSharding=*/xla::HloSharding::IotaTile({4, 2}),
      /*globalMesh=*/mesh,
      /*deviceIdToMaximalMeshName=*/
      llvm::SmallDenseMap<int64_t, mlir::StringRef>(), /*rank=*/2,
      /*openDims=*/true);
  EXPECT_EQ(attributeToString(sharding),
            "#sdy.sharding<@mesh, [{\"y\", ?}, {\"z\", ?}]>");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/mhlo_round_trip/mhlo_import_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6432a53e-abeb-4a52-84d7-ca55e4526dee | cpp | tensorflow/tensorflow | batch_parallelization | tensorflow/core/grappler/optimizers/data/batch_parallelization.cc | tensorflow/core/grappler/optimizers/data/batch_parallelization_test.cc | #include "tensorflow/core/grappler/optimizers/data/batch_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kBatchDataset[] = "BatchDatasetV2";
constexpr char kParallelBatchDataset[] = "ParallelBatchDataset";
// Clones the BatchDatasetV2 node named `name` as a ParallelBatchDataset
// node. BatchDatasetV2 inputs are (input_dataset, batch_size,
// drop_remainder) while ParallelBatchDataset expects (input_dataset,
// batch_size, num_parallel_calls, drop_remainder), so an AUTOTUNE scalar is
// spliced in at input position 2 and drop_remainder is appended last.
NodeDef MakeParallelBatch(const string& name, MutableGraphView* graph) {
  int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
  DCHECK_NE(index, -1) << "Failed to find node " << name
                       << " in the optimized graph.";
  // Start from a copy of the original node so attrs (e.g. parallel_copy)
  // and the dataset/batch_size inputs carry over.
  NodeDef parallel_batch = graph->graph()->node(index);
  graph_utils::SetUniqueGraphNodeName(kParallelBatchDataset, graph->graph(),
                                      &parallel_batch);
  parallel_batch.set_op(kParallelBatchDataset);
  // model::kAutotune lets the runtime choose the parallelism level
  // (the accompanying test asserts this constant is -1).
  auto* num_parallel_calls =
      graph_utils::AddScalarConstNode(data::model::kAutotune, graph);
  string drop_remainder_name = parallel_batch.input(2);
  parallel_batch.set_input(2, num_parallel_calls->name());
  parallel_batch.add_input(drop_remainder_name);
  return parallel_batch;
}
}
// Rewrites every BatchDatasetV2 node in the graph into a
// ParallelBatchDataset node with num_parallel_calls=AUTOTUNE. Skipped
// entirely when autotuning is off or when the item is derived from a
// FunctionDef.
Status BatchParallelization::OptimizeAndCollectStats(Cluster* cluster,
                                                     const GrapplerItem& item,
                                                     GraphDef* output,
                                                     OptimizationStats* stats) {
  *output = item.graph;
  if (!autotune_) {
    VLOG(1) << "The optimization batch_parallelization is not applied if "
               "autotune is off.";
    return absl::OkStatus();
  }
  MutableGraphView graph(output);
  // Graphs lowered from a FunctionDef are left untouched.
  if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
    return absl::OkStatus();
  absl::flat_hash_set<string> nodes_to_delete;
  FunctionLibraryDefinition function_library(OpRegistry::Global(),
                                             item.graph.library());
  auto get_batch_node = [](const NodeDef& node) -> const NodeDef* {
    if (node.op() == kBatchDataset) return &node;
    return nullptr;
  };
  for (const NodeDef& node : item.graph.node()) {
    const NodeDef* batch_node = get_batch_node(node);
    if (!batch_node) continue;
    // Add the parallel replacement, redirect consumers to it, and mark the
    // original batch node for deletion.
    auto* parallel_batch =
        graph.AddNode(MakeParallelBatch(batch_node->name(), &graph));
    TF_RETURN_IF_ERROR(
        graph.UpdateFanouts(batch_node->name(), parallel_batch->name()));
    nodes_to_delete.insert(batch_node->name());
    stats->num_changes++;
  }
  TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
  return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(BatchParallelization, "batch_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/batch_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
// Runs the BatchParallelization optimizer over `item` with the given
// `autotune` setting, writing the rewritten graph into `output`.
Status OptimizeWithBatchParallelization(const GrapplerItem& item,
                                        GraphDef* output, bool autotune) {
  BatchParallelization optimizer;
  RewriterConfig_CustomGraphOptimizer config;
  (*config.mutable_parameter_map())["autotune"].set_s(autotune ? "true"
                                                               : "false");
  TF_RETURN_IF_ERROR(optimizer.Init(&config));
  return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeBatchV2Node;
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
// The rewrite must happen exactly when autotune is enabled: with autotune on
// the BatchDatasetV2 node is replaced by ParallelBatchDataset, with it off
// the graph is unchanged.
TEST_P(AutotuneSetting, BatchParallelizationTest) {
  const bool autotune = GetParam();
  using test::function::NDef;
  GrapplerItem item;
  // range(0, 10, 1).batch(2, drop_remainder=false) -> Identity sink.
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
       NDef("drop_remainder", "Const", {},
            {{"value", false}, {"dtype", DT_BOOL}}),
       MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
                       false),
       NDef("Sink", "Identity", {"batch"}, {})},
      {});
  item.fetch.push_back("Sink");
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, autotune));
  EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output),
            autotune);
  EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("batch", output), !autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
// A "_Retval" sink marks the graph as derived from a FunctionDef, in which
// case the optimizer must leave the graph untouched; an "Identity" sink
// allows the rewrite.
TEST_P(FromFunctionDef, BatchParallelizationTest) {
  const string op = GetParam();
  bool from_function_def = (op == "_Retval");
  using test::function::NDef;
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
       NDef("drop_remainder", "Const", {},
            {{"value", false}, {"dtype", DT_BOOL}}),
       MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
                       false),
       NDef("Sink", op, {"batch"}, {})},
      {});
  item.fetch.push_back("Sink");
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, true));
  EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output),
            !from_function_def);
  EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("batch", output),
            from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
class ValueRewrites : public ::testing::TestWithParam<bool> {};
// Verifies the rewritten node's wiring in detail: input order
// (dataset, batch_size, num_parallel_calls, drop_remainder), preservation of
// the parallel_copy attr, and that num_parallel_calls is AUTOTUNE (-1).
TEST_P(ValueRewrites, BatchParallelizationTest) {
  const bool parallel_copy = GetParam();
  using test::function::NDef;
  GrapplerItem item;
  item.graph = test::function::GDef(
      {NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
       NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
       NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
       NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
       NDef("batch_size", "Const", {}, {{"value", 2}, {"dtype", DT_INT32}}),
       NDef("drop_remainder", "Const", {},
            {{"value", false}, {"dtype", DT_BOOL}}),
       MakeBatchV2Node("batch", "range", "batch_size", "drop_remainder",
                       parallel_copy),
       NDef("Sink", "Identity", {"batch"}, {})},
      {});
  item.fetch.push_back("Sink");
  // Sanity check: the input node actually carries the parallel_copy attr.
  NodeDef batch =
      item.graph.node(graph_utils::FindGraphNodeWithName("batch", item.graph));
  EXPECT_TRUE(batch.attr().find("parallel_copy") != batch.attr().end());
  GraphDef output;
  TF_ASSERT_OK(OptimizeWithBatchParallelization(item, &output, true));
  EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelBatchDataset", output));
  NodeDef parallel_batch = output.node(
      graph_utils::FindGraphNodeWithOp("ParallelBatchDataset", output));
  EXPECT_EQ(parallel_batch.input_size(), 4);
  EXPECT_EQ(parallel_batch.input(0), "range");
  EXPECT_EQ(parallel_batch.input(1), "batch_size");
  EXPECT_EQ(parallel_batch.input(3), "drop_remainder");
  EXPECT_EQ(parallel_batch.attr().at("parallel_copy").b(), parallel_copy);
  // Input 2 is the inserted num_parallel_calls const; AUTOTUNE == -1.
  NodeDef parallelism_val = output.node(
      graph_utils::FindGraphNodeWithName(parallel_batch.input(2), output));
  EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), -1);
}
INSTANTIATE_TEST_SUITE_P(Test, ValueRewrites, ::testing::Values(false, true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/batch_parallelization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/batch_parallelization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0186f4b7-533c-45e9-9879-a8ec368823e9 | cpp | tensorflow/tensorflow | error_reporter | tensorflow/compiler/mlir/lite/core/api/error_reporter.cc | tensorflow/compiler/mlir/lite/core/api/error_reporter_test.cc | #include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include <cstdarg>
namespace tflite {
int ErrorReporter::Report(const char* format, ...) {
va_list args;
va_start(args, format);
int code = Report(format, args);
va_end(args);
return code;
}
int ErrorReporter::ReportError(void*, const char* format, ...) {
va_list args;
va_start(args, format);
int code = Report(format, args);
va_end(args);
return code;
}
} | #include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include <cstdio>
#include <gtest/gtest.h>
namespace tflite {
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() { buffer_[0] = 0; }
int Report(const char* format, va_list args) override {
vsnprintf(buffer_, kBufferSize, format, args);
return 0;
}
char* GetBuffer() { return buffer_; }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
};
TEST(ErrorReporter, TestReport) {
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
reporter->Report("Error: %d", 23);
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
}
TEST(ErrorReporter, TestReportMacro) {
MockErrorReporter mock_reporter;
#ifndef TF_LITE_STRIP_ERROR_STRINGS
ErrorReporter* reporter = &mock_reporter;
#endif
TF_LITE_REPORT_ERROR(reporter, "Error: %d", 23);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
#else
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), ""));
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/core/api/error_reporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/core/api/error_reporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
275d8910-5c63-4163-bbf3-e993c68436c4 | cpp | google/quiche | quiche_mem_slice | quiche/common/platform/api/quiche_mem_slice.h | quiche/common/platform/api/quiche_mem_slice_test.cc | #ifndef QUICHE_COMMON_PLATFORM_API_QUICHE_MEM_SLICE_H_
#define QUICHE_COMMON_PLATFORM_API_QUICHE_MEM_SLICE_H_
#include <cstddef>
#include <memory>
#include <utility>
#include "quiche_platform_impl/quiche_mem_slice_impl.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_callbacks.h"
namespace quiche {
class QUICHE_EXPORT QuicheMemSlice {
public:
QuicheMemSlice() = default;
explicit QuicheMemSlice(QuicheBuffer buffer) : impl_(std::move(buffer)) {}
QuicheMemSlice(std::unique_ptr<char[]> buffer, size_t length)
: impl_(std::move(buffer), length) {}
QuicheMemSlice(const char* buffer, size_t length,
quiche::SingleUseCallback<void(const char*)> done_callback)
: impl_(buffer, length, std::move(done_callback)) {}
struct InPlace {};
template <typename... Args>
explicit QuicheMemSlice(InPlace, Args&&... args)
: impl_{std::forward<Args>(args)...} {}
QuicheMemSlice(const QuicheMemSlice& other) = delete;
QuicheMemSlice& operator=(const QuicheMemSlice& other) = delete;
QuicheMemSlice(QuicheMemSlice&& other) = default;
QuicheMemSlice& operator=(QuicheMemSlice&& other) = default;
~QuicheMemSlice() = default;
void Reset() { impl_.Reset(); }
const char* data() const { return impl_.data(); }
size_t length() const { return impl_.length(); }
absl::string_view AsStringView() const {
return absl::string_view(data(), length());
}
bool empty() const { return impl_.empty(); }
private:
QuicheMemSliceImpl impl_;
};
}
#endif | #include "quiche/common/platform/api/quiche_mem_slice.h"
#include <cstring>
#include <memory>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_callbacks.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quiche {
namespace test {
namespace {
class QuicheMemSliceTest : public QuicheTest {
public:
QuicheMemSliceTest() {
size_t length = 1024;
slice_ = QuicheMemSlice(QuicheBuffer(&allocator_, length));
orig_data_ = slice_.data();
orig_length_ = slice_.length();
}
SimpleBufferAllocator allocator_;
QuicheMemSlice slice_;
const char* orig_data_;
size_t orig_length_;
};
TEST_F(QuicheMemSliceTest, MoveConstruct) {
QuicheMemSlice moved(std::move(slice_));
EXPECT_EQ(moved.data(), orig_data_);
EXPECT_EQ(moved.length(), orig_length_);
EXPECT_EQ(nullptr, slice_.data());
EXPECT_EQ(0u, slice_.length());
EXPECT_TRUE(slice_.empty());
}
TEST_F(QuicheMemSliceTest, MoveAssign) {
QuicheMemSlice moved;
moved = std::move(slice_);
EXPECT_EQ(moved.data(), orig_data_);
EXPECT_EQ(moved.length(), orig_length_);
EXPECT_EQ(nullptr, slice_.data());
EXPECT_EQ(0u, slice_.length());
EXPECT_TRUE(slice_.empty());
}
TEST_F(QuicheMemSliceTest, MoveAssignNonEmpty) {
const absl::string_view data("foo");
auto buffer = std::make_unique<char[]>(data.length());
std::memcpy(buffer.get(), data.data(), data.length());
QuicheMemSlice moved(std::move(buffer), data.length());
EXPECT_EQ(data, moved.AsStringView());
moved = std::move(slice_);
EXPECT_EQ(moved.data(), orig_data_);
EXPECT_EQ(moved.length(), orig_length_);
EXPECT_EQ(nullptr, slice_.data());
EXPECT_EQ(0u, slice_.length());
EXPECT_TRUE(slice_.empty());
}
TEST_F(QuicheMemSliceTest, SliceCustomDoneCallback) {
const absl::string_view data("foo");
bool deleted = false;
char* buffer = new char[data.length()];
std::memcpy(buffer, data.data(), data.length());
{
QuicheMemSlice slice(buffer, data.length(), [&deleted](const char* data) {
deleted = true;
delete[] data;
});
EXPECT_EQ(data, slice.AsStringView());
}
EXPECT_TRUE(deleted);
}
TEST_F(QuicheMemSliceTest, Reset) {
EXPECT_EQ(slice_.data(), orig_data_);
EXPECT_EQ(slice_.length(), orig_length_);
EXPECT_FALSE(slice_.empty());
slice_.Reset();
EXPECT_EQ(slice_.length(), 0u);
EXPECT_TRUE(slice_.empty());
}
TEST_F(QuicheMemSliceTest, SliceAllocatedOnHeap) {
auto buffer = std::make_unique<char[]>(128);
char* orig_data = buffer.get();
size_t used_length = 105;
QuicheMemSlice slice = QuicheMemSlice(std::move(buffer), used_length);
QuicheMemSlice moved = std::move(slice);
EXPECT_EQ(moved.data(), orig_data);
EXPECT_EQ(moved.length(), used_length);
}
TEST_F(QuicheMemSliceTest, SliceFromBuffer) {
const absl::string_view kTestString =
"RFC 9000 Release Celebration Memorial Test String";
auto buffer = QuicheBuffer::Copy(&allocator_, kTestString);
QuicheMemSlice slice(std::move(buffer));
EXPECT_EQ(buffer.data(), nullptr);
EXPECT_EQ(buffer.size(), 0u);
EXPECT_EQ(slice.AsStringView(), kTestString);
EXPECT_EQ(slice.length(), kTestString.length());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_mem_slice.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_mem_slice_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
3e7cdd1d-f8d7-4034-8f45-062e29e11fb3 | cpp | google/googletest | gtest-typed-test | googletest/src/gtest-typed-test.cc | googletest/test/gtest-typed-test_test.cc | #include "gtest/gtest-typed-test.h"
#include <set>
#include <string>
#include <vector>
#include "gtest/gtest.h"
namespace testing {
namespace internal {
static const char* SkipSpaces(const char* str) {
while (IsSpace(*str)) str++;
return str;
}
static std::vector<std::string> SplitIntoTestNames(const char* src) {
std::vector<std::string> name_vec;
src = SkipSpaces(src);
for (; src != nullptr; src = SkipComma(src)) {
name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));
}
return name_vec;
}
const char* TypedTestSuitePState::VerifyRegisteredTestNames(
const char* test_suite_name, const char* file, int line,
const char* registered_tests) {
RegisterTypeParameterizedTestSuite(test_suite_name, CodeLocation(file, line));
typedef RegisteredTestsMap::const_iterator RegisteredTestIter;
registered_ = true;
std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);
Message errors;
std::set<std::string> tests;
for (std::vector<std::string>::const_iterator name_it = name_vec.begin();
name_it != name_vec.end(); ++name_it) {
const std::string& name = *name_it;
if (tests.count(name) != 0) {
errors << "Test " << name << " is listed more than once.\n";
continue;
}
if (registered_tests_.count(name) != 0) {
tests.insert(name);
} else {
errors << "No test named " << name
<< " can be found in this test suite.\n";
}
}
for (RegisteredTestIter it = registered_tests_.begin();
it != registered_tests_.end(); ++it) {
if (tests.count(it->first) == 0) {
errors << "You forgot to list test " << it->first << ".\n";
}
}
const std::string& errors_str = errors.GetString();
if (!errors_str.empty()) {
fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
errors_str.c_str());
fflush(stderr);
posix::Abort();
}
return registered_tests;
}
}
} | #include "test/gtest-typed-test_test.h"
#include <set>
#include <string>
#include <type_traits>
#include <vector>
#include "gtest/gtest.h"
GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127 )
using testing::Test;
template <typename T>
class CommonTest : public Test {
public:
static void SetUpTestSuite() { shared_ = new T(5); }
static void TearDownTestSuite() {
delete shared_;
shared_ = nullptr;
}
protected:
typedef std::vector<T> Vector;
typedef std::set<int> IntSet;
CommonTest() : value_(1) {}
~CommonTest() override { EXPECT_EQ(3, value_); }
void SetUp() override {
EXPECT_EQ(1, value_);
value_++;
}
void TearDown() override {
EXPECT_EQ(2, value_);
value_++;
}
T value_;
static T* shared_;
};
template <typename T>
T* CommonTest<T>::shared_ = nullptr;
using testing::Types;
typedef Types<char, int> TwoTypes;
TYPED_TEST_SUITE(CommonTest, TwoTypes);
TYPED_TEST(CommonTest, ValuesAreCorrect) {
EXPECT_EQ(5, *TestFixture::shared_);
typename TestFixture::Vector empty;
EXPECT_EQ(0U, empty.size());
typename TestFixture::IntSet empty2;
EXPECT_EQ(0U, empty2.size());
EXPECT_EQ(2, this->value_);
}
TYPED_TEST(CommonTest, ValuesAreStillCorrect) {
ASSERT_TRUE(this->shared_ != nullptr);
EXPECT_EQ(5, *this->shared_);
EXPECT_EQ(static_cast<TypeParam>(2), this->value_);
}
template <typename T>
class TypedTest1 : public Test {};
TYPED_TEST_SUITE(TypedTest1, int);
TYPED_TEST(TypedTest1, A) {}
template <typename T>
class TypedTest2 : public Test {};
TYPED_TEST_SUITE(TypedTest2, Types<int>);
TYPED_TEST(TypedTest2, A) {}
namespace library1 {
template <typename T>
class NumericTest : public Test {};
typedef Types<int, long> NumericTypes;
TYPED_TEST_SUITE(NumericTest, NumericTypes);
TYPED_TEST(NumericTest, DefaultIsZero) { EXPECT_EQ(0, TypeParam()); }
}
template <typename T>
class TypedTestWithNames : public Test {};
class TypedTestNames {
public:
template <typename T>
static std::string GetName(int i) {
if (std::is_same<T, char>::value) {
return std::string("char") + ::testing::PrintToString(i);
}
if (std::is_same<T, int>::value) {
return std::string("int") + ::testing::PrintToString(i);
}
}
};
TYPED_TEST_SUITE(TypedTestWithNames, TwoTypes, TypedTestNames);
TYPED_TEST(TypedTestWithNames, TestSuiteName) {
if (std::is_same<TypeParam, char>::value) {
EXPECT_STREQ(::testing::UnitTest::GetInstance()
->current_test_info()
->test_suite_name(),
"TypedTestWithNames/char0");
}
if (std::is_same<TypeParam, int>::value) {
EXPECT_STREQ(::testing::UnitTest::GetInstance()
->current_test_info()
->test_suite_name(),
"TypedTestWithNames/int1");
}
}
using testing::Types;
using testing::internal::TypedTestSuitePState;
class TypedTestSuitePStateTest : public Test {
protected:
void SetUp() override {
state_.AddTestName("foo.cc", 0, "FooTest", "A");
state_.AddTestName("foo.cc", 0, "FooTest", "B");
state_.AddTestName("foo.cc", 0, "FooTest", "C");
}
TypedTestSuitePState state_;
};
TEST_F(TypedTestSuitePStateTest, SucceedsForMatchingList) {
const char* tests = "A, B, C";
EXPECT_EQ(tests,
state_.VerifyRegisteredTestNames("Suite", "foo.cc", 1, tests));
}
TEST_F(TypedTestSuitePStateTest, IgnoresOrderAndSpaces) {
const char* tests = "A,C, B";
EXPECT_EQ(tests,
state_.VerifyRegisteredTestNames("Suite", "foo.cc", 1, tests));
}
using TypedTestSuitePStateDeathTest = TypedTestSuitePStateTest;
TEST_F(TypedTestSuitePStateDeathTest, DetectsDuplicates) {
EXPECT_DEATH_IF_SUPPORTED(
state_.VerifyRegisteredTestNames("Suite", "foo.cc", 1, "A, B, A, C"),
"foo\\.cc.1.?: Test A is listed more than once\\.");
}
TEST_F(TypedTestSuitePStateDeathTest, DetectsExtraTest) {
EXPECT_DEATH_IF_SUPPORTED(
state_.VerifyRegisteredTestNames("Suite", "foo.cc", 1, "A, B, C, D"),
"foo\\.cc.1.?: No test named D can be found in this test suite\\.");
}
TEST_F(TypedTestSuitePStateDeathTest, DetectsMissedTest) {
EXPECT_DEATH_IF_SUPPORTED(
state_.VerifyRegisteredTestNames("Suite", "foo.cc", 1, "A, C"),
"foo\\.cc.1.?: You forgot to list test B\\.");
}
TEST_F(TypedTestSuitePStateDeathTest, DetectsTestAfterRegistration) {
state_.VerifyRegisteredTestNames("Suite", "foo.cc", 1, "A, B, C");
EXPECT_DEATH_IF_SUPPORTED(
state_.AddTestName("foo.cc", 2, "FooTest", "D"),
"foo\\.cc.2.?: Test D must be defined before REGISTER_TYPED_TEST_SUITE_P"
"\\(FooTest, \\.\\.\\.\\)\\.");
}
template <typename T>
class DerivedTest : public CommonTest<T> {};
TYPED_TEST_SUITE_P(DerivedTest);
TYPED_TEST_P(DerivedTest, ValuesAreCorrect) {
EXPECT_EQ(5, *TestFixture::shared_);
EXPECT_EQ(2, this->value_);
}
TYPED_TEST_P(DerivedTest, ValuesAreStillCorrect) {
ASSERT_TRUE(this->shared_ != nullptr);
EXPECT_EQ(5, *this->shared_);
EXPECT_EQ(2, this->value_);
}
REGISTER_TYPED_TEST_SUITE_P(DerivedTest, ValuesAreCorrect,
ValuesAreStillCorrect);
typedef Types<short, long> MyTwoTypes;
INSTANTIATE_TYPED_TEST_SUITE_P(My, DerivedTest, MyTwoTypes);
template <typename T>
class TypeParametrizedTestWithNames : public Test {};
TYPED_TEST_SUITE_P(TypeParametrizedTestWithNames);
TYPED_TEST_P(TypeParametrizedTestWithNames, TestSuiteName) {
if (std::is_same<TypeParam, char>::value) {
EXPECT_STREQ(::testing::UnitTest::GetInstance()
->current_test_info()
->test_suite_name(),
"CustomName/TypeParametrizedTestWithNames/parChar0");
}
if (std::is_same<TypeParam, int>::value) {
EXPECT_STREQ(::testing::UnitTest::GetInstance()
->current_test_info()
->test_suite_name(),
"CustomName/TypeParametrizedTestWithNames/parInt1");
}
}
REGISTER_TYPED_TEST_SUITE_P(TypeParametrizedTestWithNames, TestSuiteName);
class TypeParametrizedTestNames {
public:
template <typename T>
static std::string GetName(int i) {
if (std::is_same<T, char>::value) {
return std::string("parChar") + ::testing::PrintToString(i);
}
if (std::is_same<T, int>::value) {
return std::string("parInt") + ::testing::PrintToString(i);
}
}
};
INSTANTIATE_TYPED_TEST_SUITE_P(CustomName, TypeParametrizedTestWithNames,
TwoTypes, TypeParametrizedTestNames);
template <typename T>
class TypedTestP1 : public Test {};
TYPED_TEST_SUITE_P(TypedTestP1);
using IntAfterTypedTestSuiteP = int;
TYPED_TEST_P(TypedTestP1, A) {}
TYPED_TEST_P(TypedTestP1, B) {}
using IntBeforeRegisterTypedTestSuiteP = int;
REGISTER_TYPED_TEST_SUITE_P(TypedTestP1, A, B);
template <typename T>
class TypedTestP2 : public Test {};
TYPED_TEST_SUITE_P(TypedTestP2);
TYPED_TEST_P(TypedTestP2, A) {}
REGISTER_TYPED_TEST_SUITE_P(TypedTestP2, A);
IntAfterTypedTestSuiteP after = 0;
IntBeforeRegisterTypedTestSuiteP before = 0;
INSTANTIATE_TYPED_TEST_SUITE_P(Int, TypedTestP1, int);
INSTANTIATE_TYPED_TEST_SUITE_P(Int, TypedTestP2, Types<int>);
INSTANTIATE_TYPED_TEST_SUITE_P(Double, TypedTestP2, Types<double>);
typedef Types<std::vector<double>, std::set<char> > MyContainers;
INSTANTIATE_TYPED_TEST_SUITE_P(My, ContainerTest, MyContainers);
namespace library2 {
template <typename T>
class NumericTest : public Test {};
TYPED_TEST_SUITE_P(NumericTest);
TYPED_TEST_P(NumericTest, DefaultIsZero) { EXPECT_EQ(0, TypeParam()); }
TYPED_TEST_P(NumericTest, ZeroIsLessThanOne) {
EXPECT_LT(TypeParam(0), TypeParam(1));
}
REGISTER_TYPED_TEST_SUITE_P(NumericTest, DefaultIsZero, ZeroIsLessThanOne);
typedef Types<int, double> NumericTypes;
INSTANTIATE_TYPED_TEST_SUITE_P(My, NumericTest, NumericTypes);
static const char* GetTestName() {
return testing::UnitTest::GetInstance()->current_test_info()->name();
}
template <typename T>
class TrimmedTest : public Test {};
TYPED_TEST_SUITE_P(TrimmedTest);
TYPED_TEST_P(TrimmedTest, Test1) { EXPECT_STREQ("Test1", GetTestName()); }
TYPED_TEST_P(TrimmedTest, Test2) { EXPECT_STREQ("Test2", GetTestName()); }
TYPED_TEST_P(TrimmedTest, Test3) { EXPECT_STREQ("Test3", GetTestName()); }
TYPED_TEST_P(TrimmedTest, Test4) { EXPECT_STREQ("Test4", GetTestName()); }
TYPED_TEST_P(TrimmedTest, Test5) { EXPECT_STREQ("Test5", GetTestName()); }
REGISTER_TYPED_TEST_SUITE_P(TrimmedTest, Test1, Test2, Test3, Test4,
Test5);
template <typename T1, typename T2>
struct MyPair {};
typedef Types<int, double, MyPair<int, int> > TrimTypes;
INSTANTIATE_TYPED_TEST_SUITE_P(My, TrimmedTest, TrimTypes);
}
GTEST_DISABLE_MSC_WARNINGS_POP_() | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/src/gtest-typed-test.cc | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googletest/test/gtest-typed-test_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
9d63bd8b-9c7a-4345-b260-6a298972320a | cpp | google/googletest | gmock-internal-utils | googlemock/src/gmock-internal-utils.cc | googlemock/test/gmock-internal-utils_test.cc | #include "gmock/internal/gmock-internal-utils.h"
#include <ctype.h>
#include <array>
#include <cctype>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gmock/internal/gmock-port.h"
#include "gtest/gtest.h"
namespace testing {
namespace internal {
GTEST_API_ std::string JoinAsKeyValueTuple(
const std::vector<const char*>& names, const Strings& values) {
GTEST_CHECK_(names.size() == values.size());
if (values.empty()) {
return "";
}
const auto build_one = [&](const size_t i) {
return std::string(names[i]) + ": " + values[i];
};
std::string result = "(" + build_one(0);
for (size_t i = 1; i < values.size(); i++) {
result += ", ";
result += build_one(i);
}
result += ")";
return result;
}
GTEST_API_ std::string ConvertIdentifierNameToWords(const char* id_name) {
std::string result;
char prev_char = '\0';
for (const char* p = id_name; *p != '\0'; prev_char = *(p++)) {
const bool starts_new_word = IsUpper(*p) ||
(!IsAlpha(prev_char) && IsLower(*p)) ||
(!IsDigit(prev_char) && IsDigit(*p));
if (IsAlNum(*p)) {
if (starts_new_word && !result.empty()) result += ' ';
result += ToLower(*p);
}
}
return result;
}
class GoogleTestFailureReporter : public FailureReporterInterface {
public:
void ReportFailure(FailureType type, const char* file, int line,
const std::string& message) override {
AssertHelper(type == kFatal ? TestPartResult::kFatalFailure
: TestPartResult::kNonFatalFailure,
file, line, message.c_str()) = Message();
if (type == kFatal) {
posix::Abort();
}
}
};
GTEST_API_ FailureReporterInterface* GetFailureReporter() {
static FailureReporterInterface* const failure_reporter =
new GoogleTestFailureReporter();
return failure_reporter;
}
static GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex);
GTEST_API_ bool LogIsVisible(LogSeverity severity) {
if (GMOCK_FLAG_GET(verbose) == kInfoVerbosity) {
return true;
} else if (GMOCK_FLAG_GET(verbose) == kErrorVerbosity) {
return false;
} else {
return severity == kWarning;
}
}
GTEST_API_ void Log(LogSeverity severity, const std::string& message,
int stack_frames_to_skip) {
if (!LogIsVisible(severity)) return;
MutexLock l(&g_log_mutex);
if (severity == kWarning) {
std::cout << "\nGMOCK WARNING:";
}
if (message.empty() || message[0] != '\n') {
std::cout << "\n";
}
std::cout << message;
if (stack_frames_to_skip >= 0) {
#ifdef NDEBUG
const int actual_to_skip = 0;
#else
const int actual_to_skip = stack_frames_to_skip + 1;
#endif
if (!message.empty() && *message.rbegin() != '\n') {
std::cout << "\n";
}
std::cout << "Stack trace:\n"
<< ::testing::internal::GetCurrentOsStackTraceExceptTop(
actual_to_skip);
}
std::cout << ::std::flush;
}
GTEST_API_ WithoutMatchers GetWithoutMatchers() { return WithoutMatchers(); }
GTEST_API_ void IllegalDoDefault(const char* file, int line) {
internal::Assert(
false, file, line,
"You are using DoDefault() inside a composite action like "
"DoAll() or WithArgs(). This is not supported for technical "
"reasons. Please instead spell out the default action, or "
"assign the default action to an Action variable and use "
"the variable in various places.");
}
constexpr char UndoWebSafeEncoding(char c) {
return c == '-' ? '+' : c == '_' ? '/' : c;
}
constexpr char UnBase64Impl(char c, const char* const base64, char carry) {
return *base64 == 0 ? static_cast<char>(65)
: *base64 == c
? carry
: UnBase64Impl(c, base64 + 1, static_cast<char>(carry + 1));
}
template <size_t... I>
constexpr std::array<char, 256> UnBase64Impl(std::index_sequence<I...>,
const char* const base64) {
return {
{UnBase64Impl(UndoWebSafeEncoding(static_cast<char>(I)), base64, 0)...}};
}
constexpr std::array<char, 256> UnBase64(const char* const base64) {
return UnBase64Impl(std::make_index_sequence<256>{}, base64);
}
static constexpr char kBase64[] =
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static constexpr std::array<char, 256> kUnBase64 = UnBase64(kBase64);
bool Base64Unescape(const std::string& encoded, std::string* decoded) {
decoded->clear();
size_t encoded_len = encoded.size();
decoded->reserve(3 * (encoded_len / 4) + (encoded_len % 4));
int bit_pos = 0;
char dst = 0;
for (int src : encoded) {
if (std::isspace(src) || src == '=') {
continue;
}
char src_bin = kUnBase64[static_cast<size_t>(src)];
if (src_bin >= 64) {
decoded->clear();
return false;
}
if (bit_pos == 0) {
dst |= static_cast<char>(src_bin << 2);
bit_pos = 6;
} else {
dst |= static_cast<char>(src_bin >> (bit_pos - 2));
decoded->push_back(dst);
dst = static_cast<char>(src_bin << (10 - bit_pos));
bit_pos = (bit_pos + 6) % 8;
}
}
return true;
}
}
} | #include "gmock/internal/gmock-internal-utils.h"
#include <stdlib.h>
#include <cstdint>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include "gmock/gmock.h"
#include "gmock/internal/gmock-port.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#define GTEST_IMPLEMENTATION_ 1
#include "src/gtest-internal-inl.h"
#undef GTEST_IMPLEMENTATION_
#ifdef GTEST_OS_CYGWIN
#include <sys/types.h>
#endif
// Forward-declares proto2::Message so tests can mention the protobuf
// message type without pulling in the protobuf headers.
namespace proto2 {
class Message;
}
namespace testing {
namespace internal {
namespace {
// Tests for JoinAsKeyValueTuple(), which renders parallel name/value
// lists as a "(name: value, ...)" string.

// An empty value list renders as the empty string (no parentheses).
TEST(JoinAsKeyValueTupleTest, JoinsEmptyTuple) {
  EXPECT_EQ("", JoinAsKeyValueTuple({}, Strings()));
}

// A single pair renders with no separating comma.
TEST(JoinAsKeyValueTupleTest, JoinsOneTuple) {
  EXPECT_EQ("(a: 1)", JoinAsKeyValueTuple({"a"}, {"1"}));
}

// Two pairs are separated by ", ".
TEST(JoinAsKeyValueTupleTest, JoinsTwoTuple) {
  EXPECT_EQ("(a: 1, b: 2)", JoinAsKeyValueTuple({"a", "b"}, {"1", "2"}));
}

// Many pairs preserve input order and are all comma-separated.
TEST(JoinAsKeyValueTupleTest, JoinsTenTuple) {
  EXPECT_EQ(
      "(a: 1, b: 2, c: 3, d: 4, e: 5, f: 6, g: 7, h: 8, i: 9, j: 10)",
      JoinAsKeyValueTuple({"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"},
                          {"1", "2", "3", "4", "5", "6", "7", "8", "9", "10"}));
}
// Tests for ConvertIdentifierNameToWords(), which turns a C++
// identifier into a lower-case, space-separated phrase.

// Identifiers made only of underscores contain no words.
TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameContainsNoWord) {
  EXPECT_EQ("", ConvertIdentifierNameToWords(""));
  EXPECT_EQ("", ConvertIdentifierNameToWords("_"));
  EXPECT_EQ("", ConvertIdentifierNameToWords("__"));
}

// Digit runs become their own words; underscores are dropped.
TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameContainsDigits) {
  EXPECT_EQ("1", ConvertIdentifierNameToWords("_1"));
  EXPECT_EQ("2", ConvertIdentifierNameToWords("2_"));
  EXPECT_EQ("34", ConvertIdentifierNameToWords("_34_"));
  EXPECT_EQ("34 56", ConvertIdentifierNameToWords("_34_56"));
}

// CamelCase boundaries split words; output is lower-cased.
TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameContainsCamelCaseWords) {
  EXPECT_EQ("a big word", ConvertIdentifierNameToWords("ABigWord"));
  EXPECT_EQ("foo bar", ConvertIdentifierNameToWords("FooBar"));
  EXPECT_EQ("foo", ConvertIdentifierNameToWords("Foo_"));
  EXPECT_EQ("foo bar", ConvertIdentifierNameToWords("_Foo_Bar_"));
  EXPECT_EQ("foo and bar", ConvertIdentifierNameToWords("_Foo__And_Bar"));
}

// snake_case underscores (including repeated ones) act as separators.
TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameContains_SeparatedWords) {
  EXPECT_EQ("foo bar", ConvertIdentifierNameToWords("foo_bar"));
  EXPECT_EQ("foo", ConvertIdentifierNameToWords("_foo_"));
  EXPECT_EQ("foo bar", ConvertIdentifierNameToWords("_foo_bar_"));
  EXPECT_EQ("foo and bar", ConvertIdentifierNameToWords("_foo__and_bar"));
}

// Mixed camel case, underscores and digits all combine correctly.
TEST(ConvertIdentifierNameToWordsTest, WorksWhenNameIsMixture) {
  EXPECT_EQ("foo bar 123", ConvertIdentifierNameToWords("Foo_bar123"));
  EXPECT_EQ("chapter 11 section 1",
            ConvertIdentifierNameToWords("_Chapter11Section_1_"));
}
// Tests for GetRawPointer(), which extracts the underlying raw pointer
// from smart pointers, raw pointers, and reference wrappers.

// GetRawPointer() unwraps unique_ptr and shared_ptr.
TEST(GetRawPointerTest, WorksForSmartPointers) {
  const char* const raw_p1 = new const char('a');
  const std::unique_ptr<const char> p1(raw_p1);
  EXPECT_EQ(raw_p1, GetRawPointer(p1));
  double* const raw_p2 = new double(2.5);
  const std::shared_ptr<double> p2(raw_p2);
  EXPECT_EQ(raw_p2, GetRawPointer(p2));
}

// A raw pointer (null or not) is returned unchanged.
TEST(GetRawPointerTest, WorksForRawPointers) {
  int* p = nullptr;
  EXPECT_TRUE(nullptr == GetRawPointer(p));
  int n = 1;
  EXPECT_EQ(&n, GetRawPointer(&n));
}

// std::reference_wrapper (from std::ref/std::cref) yields the address
// of the wrapped object.
TEST(GetRawPointerTest, WorksForStdReferenceWrapper) {
  int n = 1;
  EXPECT_EQ(&n, GetRawPointer(std::ref(n)));
  EXPECT_EQ(&n, GetRawPointer(std::cref(n)));
}
// Minimal class hierarchy used as "some class type" by the type-kind
// tests below.
class Base {};
class Derived : public Base {};
// Tests for GMOCK_KIND_OF_, which classifies an arithmetic type as
// bool, integer, floating-point, or "other".

// bool is its own kind, distinct from the integer kinds.
TEST(KindOfTest, Bool) {
  EXPECT_EQ(kBool, GMOCK_KIND_OF_(bool));
}

// All built-in integral types (including character and wide-character
// types, and size_t/ssize_t where available) classify as kInteger.
TEST(KindOfTest, Integer) {
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(char));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(signed char));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned char));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(short));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned short));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(int));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned int));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(long));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned long));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(long long));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(unsigned long long));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(wchar_t));
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(size_t));
#if defined(GTEST_OS_LINUX) || defined(GTEST_OS_MAC) || defined(GTEST_OS_CYGWIN)
  // ssize_t is POSIX-only, so only check it on platforms that have it.
  EXPECT_EQ(kInteger, GMOCK_KIND_OF_(ssize_t));
#endif
}

// All three floating-point types classify as kFloatingPoint.
TEST(KindOfTest, FloatingPoint) {
  EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(float));
  EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(double));
  EXPECT_EQ(kFloatingPoint, GMOCK_KIND_OF_(long double));
}

// Pointers and class types are neither bool, integer, nor FP.
TEST(KindOfTest, Other) {
  EXPECT_EQ(kOther, GMOCK_KIND_OF_(void*));
  EXPECT_EQ(kOther, GMOCK_KIND_OF_(char**));
  EXPECT_EQ(kOther, GMOCK_KIND_OF_(Base));
}
// Tests for LosslessArithmeticConvertible<From, To>, which reports
// whether every value of From can be represented exactly in To.

// bool -> bool is trivially lossless.
TEST(LosslessArithmeticConvertibleTest, BoolToBool) {
  EXPECT_TRUE((LosslessArithmeticConvertible<bool, bool>::value));
}

// bool (0 or 1) fits in any integer type.
TEST(LosslessArithmeticConvertibleTest, BoolToInteger) {
  EXPECT_TRUE((LosslessArithmeticConvertible<bool, char>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<bool, int>::value));
  EXPECT_TRUE(
      (LosslessArithmeticConvertible<bool, unsigned long>::value));
}

// bool (0 or 1) is exactly representable in any FP type.
TEST(LosslessArithmeticConvertibleTest, BoolToFloatingPoint) {
  EXPECT_TRUE((LosslessArithmeticConvertible<bool, float>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<bool, double>::value));
}

// Integer -> bool collapses all non-zero values, so it is lossy.
TEST(LosslessArithmeticConvertibleTest, IntegerToBool) {
  EXPECT_FALSE((LosslessArithmeticConvertible<unsigned char, bool>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<int, bool>::value));
}
// Integer -> integer conversions: lossless only when the destination's
// range fully covers the source's (matching signedness and sufficient
// width); same-type conversions are always lossless.
TEST(LosslessArithmeticConvertibleTest, IntegerToInteger) {
  // Widening with compatible signedness is lossless.
  EXPECT_TRUE((LosslessArithmeticConvertible<unsigned char, int>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<unsigned short,
                                             uint64_t>::value));
  // Signed -> unsigned loses negative values, regardless of width.
  EXPECT_FALSE(
      (LosslessArithmeticConvertible<short, uint64_t>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<signed char,
                                              unsigned int>::value));
  // Identity conversions are lossless.
  EXPECT_TRUE(
      (LosslessArithmeticConvertible<unsigned char, unsigned char>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<int, int>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<wchar_t, wchar_t>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<unsigned long,
                                             unsigned long>::value));
  // Same-width but mismatched signedness is lossy in both directions.
  EXPECT_FALSE(
      (LosslessArithmeticConvertible<unsigned char, signed char>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<int, unsigned int>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<uint64_t, int64_t>::value));
  // Narrowing is lossy.
  EXPECT_FALSE((LosslessArithmeticConvertible<long, char>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<int, signed char>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<int64_t, unsigned int>::value));
}
// Integer -> FP is treated as lossy: wide integers cannot all be
// represented exactly in FP mantissas, so the trait is conservative.
TEST(LosslessArithmeticConvertibleTest, IntegerToFloatingPoint) {
  EXPECT_FALSE((LosslessArithmeticConvertible<char, float>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<int, double>::value));
  EXPECT_FALSE(
      (LosslessArithmeticConvertible<short, long double>::value));
}

// FP -> bool collapses all non-zero values, so it is lossy.
TEST(LosslessArithmeticConvertibleTest, FloatingPointToBool) {
  EXPECT_FALSE((LosslessArithmeticConvertible<float, bool>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<double, bool>::value));
}

// FP -> integer truncates fractional parts, so it is lossy.
TEST(LosslessArithmeticConvertibleTest, FloatingPointToInteger) {
  EXPECT_FALSE((LosslessArithmeticConvertible<float, long>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<double, int64_t>::value));
  EXPECT_FALSE((LosslessArithmeticConvertible<long double, int>::value));
}

// FP -> FP: lossless iff the destination is at least as wide; whether
// long double -> double is lossless depends on whether the platform
// makes them the same size.
TEST(LosslessArithmeticConvertibleTest, FloatingPointToFloatingPoint) {
  // Widening is lossless.
  EXPECT_TRUE((LosslessArithmeticConvertible<float, double>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<float, long double>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<double, long double>::value));
  // Identity is lossless.
  EXPECT_TRUE((LosslessArithmeticConvertible<float, float>::value));
  EXPECT_TRUE((LosslessArithmeticConvertible<double, double>::value));
  // Narrowing is lossy.
  EXPECT_FALSE((LosslessArithmeticConvertible<double, float>::value));
  GTEST_INTENTIONAL_CONST_COND_PUSH_()
  if (sizeof(double) == sizeof(long double)) {
    GTEST_INTENTIONAL_CONST_COND_POP_()
    // Platforms (e.g. MSVC) where long double aliases double.
    EXPECT_TRUE((LosslessArithmeticConvertible<long double, double>::value));
  } else {
    EXPECT_FALSE((LosslessArithmeticConvertible<long double, double>::value));
  }
}
// Tests for TupleMatches(), which checks a tuple of values against a
// same-sized tuple of matchers element-wise.

// Empty matcher/value tuples trivially match.
TEST(TupleMatchesTest, WorksForSize0) {
  std::tuple<> matchers;
  std::tuple<> values;
  EXPECT_TRUE(TupleMatches(matchers, values));
}

// A single-element tuple matches iff its one matcher accepts the value.
TEST(TupleMatchesTest, WorksForSize1) {
  std::tuple<Matcher<int>> matchers(Eq(1));
  std::tuple<int> values1(1), values2(2);
  EXPECT_TRUE(TupleMatches(matchers, values1));
  EXPECT_FALSE(TupleMatches(matchers, values2));
}

// Every element must match; one mismatch fails the whole tuple.
TEST(TupleMatchesTest, WorksForSize2) {
  std::tuple<Matcher<int>, Matcher<char>> matchers(Eq(1), Eq('a'));
  std::tuple<int, char> values1(1, 'a'), values2(1, 'b'), values3(2, 'a'),
      values4(2, 'b');
  EXPECT_TRUE(TupleMatches(matchers, values1));
  EXPECT_FALSE(TupleMatches(matchers, values2));
  EXPECT_FALSE(TupleMatches(matchers, values3));
  EXPECT_FALSE(TupleMatches(matchers, values4));
}

// Larger tuples with mixed element types work the same way.
TEST(TupleMatchesTest, WorksForSize5) {
  std::tuple<Matcher<int>, Matcher<char>, Matcher<bool>,
             Matcher<long>,
             Matcher<std::string>>
      matchers(Eq(1), Eq('a'), Eq(true), Eq(2L), Eq("hi"));
  std::tuple<int, char, bool, long, std::string>
      values1(1, 'a', true, 2L, "hi"), values2(1, 'a', true, 2L, "hello"),
      values3(2, 'a', true, 2L, "hi");
  EXPECT_TRUE(TupleMatches(matchers, values1));
  EXPECT_FALSE(TupleMatches(matchers, values2));
  EXPECT_FALSE(TupleMatches(matchers, values3));
}
// Tests for internal::Assert(), which must be a no-op on true and
// abort the process on false.

// Assert(true, ...) succeeds with and without a message.
TEST(AssertTest, SucceedsOnTrue) {
  Assert(true, __FILE__, __LINE__, "This should succeed.");
  Assert(true, __FILE__, __LINE__);
}

// Assert(false, ...) terminates the process (verified via death test).
TEST(AssertTest, FailsFatallyOnFalse) {
  EXPECT_DEATH_IF_SUPPORTED(
      { Assert(false, __FILE__, __LINE__, "This should fail."); }, "");
  EXPECT_DEATH_IF_SUPPORTED({ Assert(false, __FILE__, __LINE__); }, "");
}
// Tests for internal::Expect(), which must be a no-op on true and
// generate a non-fatal test failure on false.

// Expect(true, ...) succeeds with and without a message.
TEST(ExpectTest, SucceedsOnTrue) {
  Expect(true, __FILE__, __LINE__, "This should succeed.");
  Expect(true, __FILE__, __LINE__);
}

// Expect(false, ...) produces a non-fatal failure containing either the
// given message or the default "Expectation failed" text.
TEST(ExpectTest, FailsNonfatallyOnFalse) {
  EXPECT_NONFATAL_FAILURE(
      {
        Expect(false, __FILE__, __LINE__, "This should fail.");
      },
      "This should fail");
  EXPECT_NONFATAL_FAILURE(
      {
        Expect(false, __FILE__, __LINE__);
      },
      "Expectation failed");
}
// Fixture that saves the --gmock_verbose flag before each test and
// restores it afterwards, so tests can freely change the verbosity.
class LogIsVisibleTest : public ::testing::Test {
 protected:
  void SetUp() override { original_verbose_ = GMOCK_FLAG_GET(verbose); }
  void TearDown() override { GMOCK_FLAG_SET(verbose, original_verbose_); }
  // Verbosity value in effect before the test started.
  std::string original_verbose_;
};
TEST_F(LogIsVisibleTest, AlwaysReturnsTrueIfVerbosityIsInfo) {
GMOCK_FLAG_SET(verbose, kInfoVerbosity);
EXPECT_TRUE(LogIsVisible(kInfo));
EXPECT_TRUE(LogIsVisible(kWarning));
}
TEST_F(LogIsVisibleTest, AlwaysReturnsFalseIfVerbosityIsError) {
GMOCK_FLAG_SET(verbose, kErrorVerbosity);
EXPECT_FALSE(LogIsVisible(kInfo));
EXPECT_FALSE(LogIsVisible(kWarning));
}
TEST_F(LogIsVisibleTest, WorksWhenVerbosityIsWarning) {
GMOCK_FLAG_SET(verbose, kWarningVerbosity);
EXPECT_FALSE(LogIsVisible(kInfo));
EXPECT_TRUE(LogIsVisible(kWarning));
}
#if GTEST_HAS_STREAM_REDIRECTION
// Runs Log() once with the gMock verbosity flag temporarily set to
// `verbosity`, then verifies whether the message reached stdout: when
// `should_print` is true the captured output must match the expected
// header/stack-trace pattern, otherwise it must be empty.  The flag is
// restored before returning.
void TestLogWithSeverity(const std::string& verbosity, LogSeverity severity,
                         bool should_print) {
  const std::string saved_verbosity = GMOCK_FLAG_GET(verbose);
  GMOCK_FLAG_SET(verbose, verbosity);
  CaptureStdout();
  Log(severity, "Test log.\n", 0);
  const std::string captured = GetCapturedStdout();
  if (!should_print) {
    EXPECT_STREQ("", captured.c_str());
  } else {
    // Warnings carry an extra "GMOCK WARNING:" banner before the message.
    const char* expected_pattern;
    if (severity == kWarning) {
      expected_pattern = "^\nGMOCK WARNING:\nTest log\\.\nStack trace:\n";
    } else {
      expected_pattern = "^\nTest log\\.\nStack trace:\n";
    }
    EXPECT_THAT(captured.c_str(), ContainsRegex(expected_pattern));
  }
  GMOCK_FLAG_SET(verbose, saved_verbosity);
}
// Passing a negative stack_frames_to_skip suppresses the stack trace
// entirely: only the raw message is printed.
TEST(LogTest, NoStackTraceWhenStackFramesToSkipIsNegative) {
  const std::string saved_flag = GMOCK_FLAG_GET(verbose);
  GMOCK_FLAG_SET(verbose, kInfoVerbosity);
  CaptureStdout();
  Log(kInfo, "Test log.\n", -1);
  EXPECT_STREQ("\nTest log.\n", GetCapturedStdout().c_str());
  GMOCK_FLAG_SET(verbose, saved_flag);
}
// Fake stack-trace getter that, instead of producing a real trace, encodes
// the arguments it was called with as "max_depth::skip_count\n" so tests can
// inspect what Log() requested.
struct MockStackTraceGetter : testing::internal::OsStackTraceGetterInterface {
  std::string CurrentStackTrace(int max_depth, int skip_count) override {
    return (testing::Message() << max_depth << "::" << skip_count << "\n")
        .GetString();
  }
  void UponLeavingGTest() override {}
};
// In optimized (NDEBUG) builds Log() must not skip any stack frames, since
// inlining makes frame counts unreliable; in debug builds the requested
// skip count (plus Log's own frames) is honored.  The skip count actually
// used is recovered from the fake trace installed above.
TEST(LogTest, NoSkippingStackFrameInOptMode) {
  MockStackTraceGetter* mock_os_stack_trace_getter = new MockStackTraceGetter;
  GetUnitTestImpl()->set_os_stack_trace_getter(mock_os_stack_trace_getter);
  CaptureStdout();
  Log(kWarning, "Test log.\n", 100);
  const std::string log = GetCapturedStdout();
  std::string expected_trace =
      (testing::Message() << GTEST_FLAG_GET(stack_trace_depth) << "::")
          .GetString();
  std::string expected_message =
      "\nGMOCK WARNING:\n"
      "Test log.\n"
      "Stack trace:\n" +
      expected_trace;
  EXPECT_THAT(log, HasSubstr(expected_message));
  // The text after the expected prefix is the skip count the fake getter
  // recorded; parse it back out.
  int skip_count = atoi(log.substr(expected_message.size()).c_str());
#if defined(NDEBUG)
  // Opt mode: no frames skipped at all.
  const int expected_skip_count = 0;
#else
  // Debug mode: the requested 100 frames (plus a few of Log's own).
  const int expected_skip_count = 100;
#endif
  EXPECT_THAT(skip_count,
              AllOf(Ge(expected_skip_count), Le(expected_skip_count + 10)));
  // Restores the default OS stack trace getter.
  GetUnitTestImpl()->set_os_stack_trace_getter(nullptr);
}
// "info" verbosity prints both info and warning logs.
TEST(LogTest, AllLogsArePrintedWhenVerbosityIsInfo) {
  TestLogWithSeverity(kInfoVerbosity, kInfo, true);
  TestLogWithSeverity(kInfoVerbosity, kWarning, true);
}
// "warning" verbosity prints warnings only.
TEST(LogTest, OnlyWarningsArePrintedWhenVerbosityIsWarning) {
  TestLogWithSeverity(kWarningVerbosity, kInfo, false);
  TestLogWithSeverity(kWarningVerbosity, kWarning, true);
}
// "error" verbosity prints nothing.
TEST(LogTest, NoLogsArePrintedWhenVerbosityIsError) {
  TestLogWithSeverity(kErrorVerbosity, kInfo, false);
  TestLogWithSeverity(kErrorVerbosity, kWarning, false);
}
// An unrecognized verbosity value behaves like the default ("warning").
TEST(LogTest, OnlyWarningsArePrintedWhenVerbosityIsInvalid) {
  TestLogWithSeverity("invalid", kInfo, false);
  TestLogWithSeverity("invalid", kWarning, true);
}
// Invokes `logger` with the gMock verbosity flag temporarily set to
// `verbosity` and returns everything the callback wrote to stdout.
std::string GrabOutput(void (*logger)(), const char* verbosity) {
  const std::string previous_verbosity = GMOCK_FLAG_GET(verbose);
  GMOCK_FLAG_SET(verbose, verbosity);
  CaptureStdout();
  logger();
  GMOCK_FLAG_SET(verbose, previous_verbosity);
  const std::string captured_output = GetCapturedStdout();
  return captured_output;
}
// Minimal mock class used to drive EXPECT_CALL / ON_CALL logging below.
class DummyMock {
 public:
  MOCK_METHOD0(TestMethod, void());
  MOCK_METHOD1(TestMethodArg, void(int dummy));
};
// Sets an expectation and satisfies it, which emits an info-level log line.
void ExpectCallLogger() {
  DummyMock mock;
  EXPECT_CALL(mock, TestMethod());
  mock.TestMethod();
}
// EXPECT_CALL's source text appears in the log at "info" verbosity...
TEST(ExpectCallTest, LogsWhenVerbosityIsInfo) {
  EXPECT_THAT(std::string(GrabOutput(ExpectCallLogger, kInfoVerbosity)),
              HasSubstr("EXPECT_CALL(mock, TestMethod())"));
}
// ...but not at "warning"...
TEST(ExpectCallTest, DoesNotLogWhenVerbosityIsWarning) {
  EXPECT_STREQ("", GrabOutput(ExpectCallLogger, kWarningVerbosity).c_str());
}
// ...nor at "error" verbosity.
TEST(ExpectCallTest, DoesNotLogWhenVerbosityIsError) {
  EXPECT_STREQ("", GrabOutput(ExpectCallLogger, kErrorVerbosity).c_str());
}
// Installs a default action, which also logs at info verbosity.
void OnCallLogger() {
  DummyMock mock;
  ON_CALL(mock, TestMethod());
}
// ON_CALL's source text appears in the log at "info" verbosity only.
TEST(OnCallTest, LogsWhenVerbosityIsInfo) {
  EXPECT_THAT(std::string(GrabOutput(OnCallLogger, kInfoVerbosity)),
              HasSubstr("ON_CALL(mock, TestMethod())"));
}
TEST(OnCallTest, DoesNotLogWhenVerbosityIsWarning) {
  EXPECT_STREQ("", GrabOutput(OnCallLogger, kWarningVerbosity).c_str());
}
TEST(OnCallTest, DoesNotLogWhenVerbosityIsError) {
  EXPECT_STREQ("", GrabOutput(OnCallLogger, kErrorVerbosity).c_str());
}
// ON_CALL with a wildcard argument matcher is logged including the "_".
void OnCallAnyArgumentLogger() {
  DummyMock mock;
  ON_CALL(mock, TestMethodArg(_));
}
TEST(OnCallTest, LogsAnythingArgument) {
  EXPECT_THAT(std::string(GrabOutput(OnCallAnyArgumentLogger, kInfoVerbosity)),
              HasSubstr("ON_CALL(mock, TestMethodArg(_)"));
}
#endif
// StlContainerView over an STL container: the view type is the container
// itself, ConstReference() is the identity (no copy), and Copy() yields an
// equal, independent container.
TEST(StlContainerViewTest, WorksForStlContainer) {
  StaticAssertTypeEq<std::vector<int>,
                     StlContainerView<std::vector<int>>::type>();
  StaticAssertTypeEq<const std::vector<double>&,
                     StlContainerView<std::vector<double>>::const_reference>();
  typedef std::vector<char> Chars;
  Chars v1;
  const Chars& v2(StlContainerView<Chars>::ConstReference(v1));
  // ConstReference() must alias the original container, not copy it.
  EXPECT_EQ(&v1, &v2);
  v1.push_back('a');
  Chars v3 = StlContainerView<Chars>::Copy(v1);
  // Compare the copy against its source; the previous `Eq(v3)` compared the
  // copy with itself, a tautology that could never fail.
  EXPECT_THAT(v3, Eq(v1));
}
// StlContainerView over fixed-size C arrays: the view type is NativeArray,
// ConstReference() aliases the original storage, and Copy() deep-copies it.
TEST(StlContainerViewTest, WorksForStaticNativeArray) {
  StaticAssertTypeEq<NativeArray<int>, StlContainerView<int[3]>::type>();
  StaticAssertTypeEq<NativeArray<double>,
                     StlContainerView<const double[4]>::type>();
  StaticAssertTypeEq<NativeArray<char[3]>,
                     StlContainerView<const char[2][3]>::type>();
  StaticAssertTypeEq<const NativeArray<int>,
                     StlContainerView<int[2]>::const_reference>();
  int a1[3] = {0, 1, 2};
  NativeArray<int> a2 = StlContainerView<int[3]>::ConstReference(a1);
  EXPECT_EQ(3U, a2.size());
  // ConstReference() points at the original array's storage.
  EXPECT_EQ(a1, a2.begin());
  const NativeArray<int> a3 = StlContainerView<int[3]>::Copy(a1);
  ASSERT_EQ(3U, a3.size());
  EXPECT_EQ(0, a3.begin()[0]);
  EXPECT_EQ(1, a3.begin()[1]);
  EXPECT_EQ(2, a3.begin()[2]);
  // Mutating the source must not affect the deep copy.
  a1[0] = 3;
  EXPECT_EQ(0, a3.begin()[0]);
}
// StlContainerView over a (pointer, size) tuple describing a dynamic native
// array: same NativeArray view semantics as the static-array case.
TEST(StlContainerViewTest, WorksForDynamicNativeArray) {
  StaticAssertTypeEq<NativeArray<int>,
                     StlContainerView<std::tuple<const int*, size_t>>::type>();
  StaticAssertTypeEq<
      NativeArray<double>,
      StlContainerView<std::tuple<std::shared_ptr<double>, int>>::type>();
  StaticAssertTypeEq<
      const NativeArray<int>,
      StlContainerView<std::tuple<const int*, int>>::const_reference>();
  int a1[3] = {0, 1, 2};
  const int* const p1 = a1;
  NativeArray<int> a2 =
      StlContainerView<std::tuple<const int*, int>>::ConstReference(
          std::make_tuple(p1, 3));
  EXPECT_EQ(3U, a2.size());
  // The view references the underlying buffer rather than copying it.
  EXPECT_EQ(a1, a2.begin());
  const NativeArray<int> a3 = StlContainerView<std::tuple<int*, size_t>>::Copy(
      std::make_tuple(static_cast<int*>(a1), 3));
  ASSERT_EQ(3U, a3.size());
  EXPECT_EQ(0, a3.begin()[0]);
  EXPECT_EQ(1, a3.begin()[1]);
  EXPECT_EQ(2, a3.begin()[2]);
  // Mutating the source must not affect the deep copy.
  a1[0] = 3;
  EXPECT_EQ(0, a3.begin()[0]);
}
// Function<F> trait extraction for a nullary signature.
TEST(FunctionTest, Nullary) {
  typedef Function<int()> F;
  EXPECT_EQ(0u, F::ArgumentCount);
  EXPECT_TRUE((std::is_same<int, F::Result>::value));
  EXPECT_TRUE((std::is_same<std::tuple<>, F::ArgumentTuple>::value));
  EXPECT_TRUE((std::is_same<std::tuple<>, F::ArgumentMatcherTuple>::value));
  EXPECT_TRUE((std::is_same<void(), F::MakeResultVoid>::value));
  EXPECT_TRUE((std::is_same<IgnoredValue(), F::MakeResultIgnoredValue>::value));
}
// Function<F> trait extraction for a unary signature.
TEST(FunctionTest, Unary) {
  typedef Function<int(bool)> F;
  EXPECT_EQ(1u, F::ArgumentCount);
  EXPECT_TRUE((std::is_same<int, F::Result>::value));
  EXPECT_TRUE((std::is_same<bool, F::Arg<0>::type>::value));
  EXPECT_TRUE((std::is_same<std::tuple<bool>, F::ArgumentTuple>::value));
  EXPECT_TRUE((
      std::is_same<std::tuple<Matcher<bool>>, F::ArgumentMatcherTuple>::value));
  EXPECT_TRUE((std::is_same<void(bool), F::MakeResultVoid>::value));
  EXPECT_TRUE((std::is_same<IgnoredValue(bool),
                            F::MakeResultIgnoredValue>::value));
}
// Function<F> trait extraction for a binary signature, including a
// reference parameter type.
TEST(FunctionTest, Binary) {
  typedef Function<int(bool, const long&)> F;
  EXPECT_EQ(2u, F::ArgumentCount);
  EXPECT_TRUE((std::is_same<int, F::Result>::value));
  EXPECT_TRUE((std::is_same<bool, F::Arg<0>::type>::value));
  EXPECT_TRUE((std::is_same<const long&, F::Arg<1>::type>::value));
  EXPECT_TRUE((std::is_same<std::tuple<bool, const long&>,
                            F::ArgumentTuple>::value));
  EXPECT_TRUE(
      (std::is_same<std::tuple<Matcher<bool>, Matcher<const long&>>,
                    F::ArgumentMatcherTuple>::value));
  EXPECT_TRUE((std::is_same<void(bool, const long&),
                            F::MakeResultVoid>::value));
  EXPECT_TRUE((std::is_same<IgnoredValue(bool, const long&),
                            F::MakeResultIgnoredValue>::value));
}
// Function<F> trait extraction for a five-argument signature mixing value,
// pointer, and reference parameters.
TEST(FunctionTest, LongArgumentList) {
  typedef Function<char(bool, int, char*, int&, const long&)> F;
  EXPECT_EQ(5u, F::ArgumentCount);
  EXPECT_TRUE((std::is_same<char, F::Result>::value));
  EXPECT_TRUE((std::is_same<bool, F::Arg<0>::type>::value));
  EXPECT_TRUE((std::is_same<int, F::Arg<1>::type>::value));
  EXPECT_TRUE((std::is_same<char*, F::Arg<2>::type>::value));
  EXPECT_TRUE((std::is_same<int&, F::Arg<3>::type>::value));
  EXPECT_TRUE((std::is_same<const long&, F::Arg<4>::type>::value));
  EXPECT_TRUE(
      (std::is_same<std::tuple<bool, int, char*, int&, const long&>,
                    F::ArgumentTuple>::value));
  EXPECT_TRUE(
      (std::is_same<
          std::tuple<Matcher<bool>, Matcher<int>, Matcher<char*>, Matcher<int&>,
                     Matcher<const long&>>,
          F::ArgumentMatcherTuple>::value));
  EXPECT_TRUE(
      (std::is_same<void(bool, int, char*, int&, const long&),
                    F::MakeResultVoid>::value));
  EXPECT_TRUE((
      std::is_same<IgnoredValue(bool, int, char*, int&, const long&),
                   F::MakeResultIgnoredValue>::value));
}
// Characters outside the base64 alphabet make decoding fail.
TEST(Base64Unescape, InvalidString) {
  std::string unescaped;
  EXPECT_FALSE(Base64Unescape("(invalid)", &unescaped));
}
// A short, exactly-padded input decodes cleanly.
TEST(Base64Unescape, ShortString) {
  std::string unescaped;
  EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQh", &unescaped));
  EXPECT_EQ("Hello world!", unescaped);
}
// Trailing '=' padding is accepted.
TEST(Base64Unescape, ShortStringWithPadding) {
  std::string unescaped;
  EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQ=", &unescaped));
  EXPECT_EQ("Hello world", unescaped);
}
// Omitted padding is also accepted.
TEST(Base64Unescape, ShortStringWithoutPadding) {
  std::string unescaped;
  EXPECT_TRUE(Base64Unescape("SGVsbG8gd29ybGQ", &unescaped));
  EXPECT_EQ("Hello world", unescaped);
}
// Embedded whitespace (newlines in the raw string) is skipped during decode.
TEST(Base64Unescape, LongStringWithWhiteSpaces) {
  std::string escaped =
      R"(TWFuIGlzIGRpc3Rpbmd1aXNoZWQsIG5vdCBvbmx5IGJ5IGhpcyByZWFzb24sIGJ1dCBieSB0aGlz
IHNpbmd1bGFyIHBhc3Npb24gZnJvbSBvdGhlciBhbmltYWxzLCB3aGljaCBpcyBhIGx1c3Qgb2Yg
dGhlIG1pbmQsIHRoYXQgYnkgYSBwZXJzZXZlcmFuY2Ugb2YgZGVsaWdodCBpbiB0aGUgY29udGlu
dWVkIGFuZCBpbmRlZmF0aWdhYmxlIGdlbmVyYXRpb24gb2Yga25vd2xlZGdlLCBleGNlZWRzIHRo
ZSBzaG9ydCB2ZWhlbWVuY2Ugb2YgYW55IGNhcm5hbCBwbGVhc3VyZS4=)";
  std::string expected =
      "Man is distinguished, not only by his reason, but by this singular "
      "passion from other animals, which is a lust of the mind, that by a "
      "perseverance of delight in the continued and indefatigable generation "
      "of knowledge, exceeds the short vehemence of any carnal pleasure.";
  std::string unescaped;
  EXPECT_TRUE(Base64Unescape(escaped, &unescaped));
  EXPECT_EQ(expected, unescaped);
}
}
}
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/src/gmock-internal-utils.cc | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-internal-utils_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
d1892025-52a7-41d5-b579-88f73051835a | cpp | google/quiche | quic_connection | quiche/quic/core/quic_connection.cc | quiche/quic/core/quic_connection_test.cc | #include "quiche/quic/core/quic_connection.h"
#include <string.h>
#include <sys/types.h>
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/congestion_control/send_algorithm_interface.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/quic_bandwidth.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_packet_creator.h"
#include "quiche/quic/core/quic_packet_writer.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_path_validator.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_client_stats.h"
#include "quiche/quic/platform/api/quic_exported_stats.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/platform/api/quiche_testvalue.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
class QuicDecrypter;
class QuicEncrypter;
namespace {
// Cap on back-to-back packets carrying no retransmittable frames (used to
// initialize max_consecutive_num_packets_with_no_retransmittable_frames_).
const QuicPacketCount kMaxConsecutiveNonRetransmittablePackets = 19;
// Minimum value, in milliseconds, related to scheduling packet release times
// into the future.  Usage is outside this view -- TODO confirm exact role.
const int kMinReleaseTimeIntoFutureMs = 1;
// Capacity of received_client_addresses_cache_ (LRU of client addresses).
const size_t kMaxReceivedClientAddressSize = 20;
// NOTE(review): presumably bounds ECN-related behavior across PTOs; the use
// site is outside this view -- verify before relying on this description.
const uint8_t kEcnPtoLimit = 2;
// RAII helper: guarantees the given coalesced packet is Clear()ed when the
// enclosing scope exits, regardless of how it exits.
class ScopedCoalescedPacketClearer {
 public:
  explicit ScopedCoalescedPacketClearer(QuicCoalescedPacket* coalesced)
      : coalesced_(coalesced) {}
  ~ScopedCoalescedPacketClearer() { coalesced_->Clear(); }

 private:
  QuicCoalescedPacket* coalesced_;  // Not owned; must outlive this object.
};
// Returns true when, from a client's point of view, this long-header packet
// (INITIAL or RETRY, on a version with variable-length connection IDs) is
// allowed to replace the server connection ID.
bool PacketCanReplaceServerConnectionId(const QuicPacketHeader& header,
                                        Perspective perspective) {
  if (perspective != Perspective::IS_CLIENT) {
    return false;
  }
  if (header.form != IETF_QUIC_LONG_HEADER_PACKET) {
    return false;
  }
  if (!header.version.IsKnown() ||
      !header.version.AllowsVariableLengthConnectionIds()) {
    return false;
  }
  return header.long_packet_type == INITIAL ||
         header.long_packet_type == RETRY;
}
// Returns true when a client receiving this HANDSHAKE long-header packet
// might legitimately be seeing a new server connection ID -- but only if the
// connection ID has not already been replaced.
bool NewServerConnectionIdMightBeValid(const QuicPacketHeader& header,
                                       Perspective perspective,
                                       bool connection_id_already_replaced) {
  if (connection_id_already_replaced) {
    return false;
  }
  if (perspective != Perspective::IS_CLIENT) {
    return false;
  }
  if (header.form != IETF_QUIC_LONG_HEADER_PACKET ||
      header.long_packet_type != HANDSHAKE) {
    return false;
  }
  return header.version.IsKnown() &&
         header.version.AllowsVariableLengthConnectionIds();
}
// Picks the default congestion controller: BBRv2 when its flag is set,
// otherwise BBR when that flag is set, otherwise CUBIC (bytes).
CongestionControlType GetDefaultCongestionControlType() {
  if (GetQuicReloadableFlag(quic_default_to_bbr_v2)) {
    return kBBRv2;
  }
  return GetQuicReloadableFlag(quic_default_to_bbr) ? kBBR : kCubicBytes;
}
bool ContainsNonProbingFrame(const SerializedPacket& packet) {
for (const QuicFrame& frame : packet.nonretransmittable_frames) {
if (!QuicUtils::IsProbingFrame(frame.type)) {
return true;
}
}
for (const QuicFrame& frame : packet.retransmittable_frames) {
if (!QuicUtils::IsProbingFrame(frame.type)) {
return true;
}
}
return false;
}
}
#define ENDPOINT \
(perspective_ == Perspective::IS_SERVER ? "Server: " : "Client: ")
// Constructs a connection bound to `server_connection_id`, wiring up the
// framer, packet creator, sent-packet manager, detectors, and alarms.  On
// the server side the version is considered negotiated immediately; on the
// client side the initial peer address is recorded as a known server
// address.  Initial obfuscation crypters are installed unconditionally.
QuicConnection::QuicConnection(
    QuicConnectionId server_connection_id,
    QuicSocketAddress initial_self_address,
    QuicSocketAddress initial_peer_address,
    QuicConnectionHelperInterface* helper, QuicAlarmFactory* alarm_factory,
    QuicPacketWriter* writer, bool owns_writer, Perspective perspective,
    const ParsedQuicVersionVector& supported_versions,
    ConnectionIdGeneratorInterface& generator)
    : framer_(supported_versions, helper->GetClock()->ApproximateNow(),
              perspective, server_connection_id.length()),
      current_packet_content_(NO_FRAMES_RECEIVED),
      is_current_packet_connectivity_probing_(false),
      has_path_challenge_in_current_packet_(false),
      current_effective_peer_migration_type_(NO_CHANGE),
      helper_(helper),
      alarm_factory_(alarm_factory),
      per_packet_options_(nullptr),
      writer_(writer),
      owns_writer_(owns_writer),
      encryption_level_(ENCRYPTION_INITIAL),
      clock_(helper->GetClock()),
      random_generator_(helper->GetRandomGenerator()),
      client_connection_id_is_set_(false),
      direct_peer_address_(initial_peer_address),
      default_path_(initial_self_address, QuicSocketAddress(),
                    EmptyQuicConnectionId(),
                    server_connection_id,
                    std::nullopt),
      active_effective_peer_migration_type_(NO_CHANGE),
      support_key_update_for_connection_(false),
      current_packet_data_(nullptr),
      should_last_packet_instigate_acks_(false),
      max_undecryptable_packets_(0),
      max_tracked_packets_(GetQuicFlag(quic_max_tracked_packet_count)),
      idle_timeout_connection_close_behavior_(
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET),
      num_rtos_for_blackhole_detection_(0),
      uber_received_packet_manager_(&stats_),
      pending_retransmission_alarm_(false),
      defer_send_in_response_to_packets_(false),
      arena_(),
      alarms_(this, *alarm_factory_, arena_),
      visitor_(nullptr),
      debug_visitor_(nullptr),
      packet_creator_(server_connection_id, &framer_, random_generator_, this),
      last_received_packet_info_(clock_->ApproximateNow()),
      sent_packet_manager_(perspective, clock_, random_generator_, &stats_,
                           GetDefaultCongestionControlType()),
      version_negotiated_(false),
      perspective_(perspective),
      connected_(true),
      can_truncate_connection_ids_(perspective == Perspective::IS_SERVER),
      mtu_probe_count_(0),
      previous_validated_mtu_(0),
      peer_max_packet_size_(kDefaultMaxPacketSizeTransportParam),
      largest_received_packet_size_(0),
      write_error_occurred_(false),
      consecutive_num_packets_with_no_retransmittable_frames_(0),
      max_consecutive_num_packets_with_no_retransmittable_frames_(
          kMaxConsecutiveNonRetransmittablePackets),
      bundle_retransmittable_with_pto_ack_(false),
      last_control_frame_id_(kInvalidControlFrameId),
      is_path_degrading_(false),
      processing_ack_frame_(false),
      supports_release_time_(false),
      release_time_into_future_(QuicTime::Delta::Zero()),
      blackhole_detector_(this, &alarms_.network_blackhole_detector_alarm()),
      idle_network_detector_(this, clock_->ApproximateNow(),
                             &alarms_.idle_network_detector_alarm()),
      path_validator_(alarm_factory_, &arena_, this, random_generator_, clock_,
                      &context_),
      ping_manager_(perspective, this, &alarms_.ping_alarm()),
      multi_port_probing_interval_(kDefaultMultiPortProbingInterval),
      connection_id_generator_(generator),
      received_client_addresses_cache_(kMaxReceivedClientAddressSize) {
  // A server must always know its own address up front; a client may not.
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT ||
                default_path_.self_address.IsInitialized());
  QUIC_DLOG(INFO) << ENDPOINT << "Created connection with server connection ID "
                  << server_connection_id
                  << " and version: " << ParsedQuicVersionToString(version());
  QUIC_BUG_IF(quic_bug_12714_2, !QuicUtils::IsConnectionIdValidForVersion(
                                    server_connection_id, transport_version()))
      << "QuicConnection: attempted to use server connection ID "
      << server_connection_id << " which is invalid with version " << version();
  framer_.set_visitor(this);
  stats_.connection_creation_time = clock_->ApproximateNow();
  sent_packet_manager_.SetNetworkChangeVisitor(this);
  // Servers start with a smaller default packet size than clients.
  SetMaxPacketLength(perspective_ == Perspective::IS_SERVER
                         ? kDefaultServerMaxPacketSize
                         : kDefaultMaxPacketSize);
  uber_received_packet_manager_.set_max_ack_ranges(255);
  MaybeEnableMultiplePacketNumberSpacesSupport();
  // Servers are created with exactly one (already chosen) version.
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT ||
                supported_versions.size() == 1);
  InstallInitialCrypters(default_path_.server_connection_id);
  // The server never performs version negotiation for itself.
  if (perspective_ == Perspective::IS_SERVER) {
    version_negotiated_ = true;
  }
  if (default_enable_5rto_blackhole_detection_) {
    num_rtos_for_blackhole_detection_ = 5;
    if (GetQuicReloadableFlag(quic_disable_server_blackhole_detection) &&
        perspective_ == Perspective::IS_SERVER) {
      QUIC_RELOADABLE_FLAG_COUNT(quic_disable_server_blackhole_detection);
      blackhole_detection_disabled_ = true;
    }
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    AddKnownServerAddress(initial_peer_address);
  }
  packet_creator_.SetDefaultPeerAddress(initial_peer_address);
}
// Derives and installs the initial obfuscation encrypter/decrypter for
// `connection_id`.  Versions that know which decrypter to use get the
// decrypter installed per-level; older versions get it set directly.
void QuicConnection::InstallInitialCrypters(QuicConnectionId connection_id) {
  CrypterPair crypters;
  CryptoUtils::CreateInitialObfuscators(perspective_, version(), connection_id,
                                        &crypters);
  SetEncrypter(ENCRYPTION_INITIAL, std::move(crypters.encrypter));
  if (version().KnowsWhichDecrypterToUse()) {
    InstallDecrypter(ENCRYPTION_INITIAL, std::move(crypters.decrypter));
  } else {
    SetDecrypter(ENCRYPTION_INITIAL, std::move(crypters.decrypter));
  }
}
// Releases the writer if owned, drops buffered packets, and records code
// counts about post-decrypter-discard 0-RTT packets for metrics.
QuicConnection::~QuicConnection() {
  QUICHE_DCHECK_GE(stats_.max_egress_mtu, long_term_mtu_);
  if (owns_writer_) {
    delete writer_;
  }
  ClearQueuedPackets();
  if (stats_
          .num_tls_server_zero_rtt_packets_received_after_discarding_decrypter >
      0) {
    QUIC_CODE_COUNT_N(
        quic_server_received_tls_zero_rtt_packet_after_discarding_decrypter, 2,
        3);
  } else {
    QUIC_CODE_COUNT_N(
        quic_server_received_tls_zero_rtt_packet_after_discarding_decrypter, 3,
        3);
  }
}
// Discards any packets buffered while the writer was blocked.
void QuicConnection::ClearQueuedPackets() { buffered_packets_.clear(); }
// Validates the connection-ID-related transport parameters from a negotiated
// config (TLS versions only):
//   * initial_source_connection_id must match what this endpoint saw;
//   * on the client, original_destination_connection_id must match, and
//     retry_source_connection_id must be present iff a RETRY was received
//     (and match it).
// On any mismatch the connection is closed with IETF_QUIC_PROTOCOL_VIOLATION
// and false is returned; returns true on success.
bool QuicConnection::ValidateConfigConnectionIds(const QuicConfig& config) {
  QUICHE_DCHECK(config.negotiated());
  if (!version().UsesTls()) {
    // These checks are specific to the TLS-based IETF handshake.
    return true;
  }
  // Which connection ID the peer should have echoed depends on perspective.
  QuicConnectionId expected_initial_source_connection_id;
  if (perspective_ == Perspective::IS_CLIENT) {
    expected_initial_source_connection_id = default_path_.server_connection_id;
  } else {
    expected_initial_source_connection_id = default_path_.client_connection_id;
  }
  if (!config.HasReceivedInitialSourceConnectionId() ||
      config.ReceivedInitialSourceConnectionId() !=
          expected_initial_source_connection_id) {
    std::string received_value;
    if (config.HasReceivedInitialSourceConnectionId()) {
      received_value = config.ReceivedInitialSourceConnectionId().ToString();
    } else {
      received_value = "none";
    }
    std::string error_details =
        absl::StrCat("Bad initial_source_connection_id: expected ",
                     expected_initial_source_connection_id.ToString(),
                     ", received ", received_value);
    CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    // Only the client validates the server's original/retry connection IDs.
    if (!config.HasReceivedOriginalConnectionId() ||
        config.ReceivedOriginalConnectionId() !=
            GetOriginalDestinationConnectionId()) {
      std::string received_value;
      if (config.HasReceivedOriginalConnectionId()) {
        received_value = config.ReceivedOriginalConnectionId().ToString();
      } else {
        received_value = "none";
      }
      std::string error_details =
          absl::StrCat("Bad original_destination_connection_id: expected ",
                       GetOriginalDestinationConnectionId().ToString(),
                       ", received ", received_value);
      CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION, error_details,
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return false;
    }
    if (retry_source_connection_id_.has_value()) {
      // A RETRY was received: the transport parameter must be present and
      // match the connection ID from the RETRY packet.
      if (!config.HasReceivedRetrySourceConnectionId() ||
          config.ReceivedRetrySourceConnectionId() !=
              *retry_source_connection_id_) {
        std::string received_value;
        if (config.HasReceivedRetrySourceConnectionId()) {
          received_value = config.ReceivedRetrySourceConnectionId().ToString();
        } else {
          received_value = "none";
        }
        std::string error_details =
            absl::StrCat("Bad retry_source_connection_id: expected ",
                         retry_source_connection_id_->ToString(), ", received ",
                         received_value);
        CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION, error_details,
                        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
        return false;
      }
    } else {
      // No RETRY was received: the parameter must be absent.
      if (config.HasReceivedRetrySourceConnectionId()) {
        std::string error_details = absl::StrCat(
            "Bad retry_source_connection_id: did not receive RETRY but "
            "received ",
            config.ReceivedRetrySourceConnectionId().ToString());
        CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION, error_details,
                        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
        return false;
      }
    }
  }
  return true;
}
// Applies a (possibly negotiated) QuicConfig to this connection: timeouts,
// idle-close behavior, connection-ID validation, preferred address, MTU
// discovery, blackhole/PTO tuning, ack behavior, pacing release time, and
// multi-port options.  Order matters: ValidateConfigConnectionIds() may
// close the connection, after which the rest of the config is not applied.
void QuicConnection::SetFromConfig(const QuicConfig& config) {
  if (config.negotiated()) {
    if (ShouldFixTimeouts(config)) {
      if (!IsHandshakeComplete()) {
        QUIC_RELOADABLE_FLAG_COUNT_N(quic_fix_timeouts, 1, 2);
        // Only conservative handshake timeouts are used before completion.
        SetNetworkTimeouts(config.max_time_before_crypto_handshake(),
                           config.max_idle_time_before_crypto_handshake());
      } else {
        QUIC_BUG(set_from_config_after_handshake_complete)
            << "SetFromConfig is called after Handshake complete";
      }
    } else {
      SetNetworkTimeouts(QuicTime::Delta::Infinite(),
                         config.IdleNetworkTimeout());
    }
    // Default idle-timeout behavior: clients close silently; servers
    // serialize a close packet to send lazily; kNSLC forces an actual send.
    idle_timeout_connection_close_behavior_ =
        ConnectionCloseBehavior::SILENT_CLOSE;
    if (perspective_ == Perspective::IS_SERVER) {
      idle_timeout_connection_close_behavior_ = ConnectionCloseBehavior::
          SILENT_CLOSE_WITH_CONNECTION_CLOSE_PACKET_SERIALIZED;
    }
    if (config.HasClientRequestedIndependentOption(kNSLC, perspective_)) {
      idle_timeout_connection_close_behavior_ =
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET;
    }
    // May close the connection; nothing further is applied in that case.
    if (!ValidateConfigConnectionIds(config)) {
      return;
    }
    support_key_update_for_connection_ = version().UsesTls();
    framer_.SetKeyUpdateSupportForConnection(
        support_key_update_for_connection_);
  } else {
    SetNetworkTimeouts(config.max_time_before_crypto_handshake(),
                       config.max_idle_time_before_crypto_handshake());
  }
  // A server preferred-address connection ID arrives as sequence number 1.
  if (version().HasIetfQuicFrames() &&
      config.HasReceivedPreferredAddressConnectionIdAndToken()) {
    QuicNewConnectionIdFrame frame;
    std::tie(frame.connection_id, frame.stateless_reset_token) =
        config.ReceivedPreferredAddressConnectionIdAndToken();
    frame.sequence_number = 1u;
    frame.retire_prior_to = 0u;
    OnNewConnectionIdFrameInner(frame);
  }
  if (config.DisableConnectionMigration()) {
    active_migration_disabled_ = true;
  }
  sent_packet_manager_.SetFromConfig(config);
  if (perspective_ == Perspective::IS_SERVER &&
      config.HasClientSentConnectionOption(kAFF2, perspective_)) {
    send_ack_frequency_on_handshake_completion_ = true;
  }
  if (config.HasReceivedBytesForConnectionId() &&
      can_truncate_connection_ids_) {
    packet_creator_.SetServerConnectionIdLength(
        config.ReceivedBytesForConnectionId());
  }
  max_undecryptable_packets_ = config.max_undecryptable_packets();
  // MTU discovery targets requested via connection options.
  if (!GetQuicReloadableFlag(quic_enable_mtu_discovery_at_server)) {
    if (config.HasClientRequestedIndependentOption(kMTUH, perspective_)) {
      SetMtuDiscoveryTarget(kMtuDiscoveryTargetPacketSizeHigh);
    }
  }
  if (config.HasClientRequestedIndependentOption(kMTUL, perspective_)) {
    SetMtuDiscoveryTarget(kMtuDiscoveryTargetPacketSizeLow);
  }
  // Connection options that disable blackhole detection.
  if (default_enable_5rto_blackhole_detection_) {
    if (config.HasClientRequestedIndependentOption(kCBHD, perspective_)) {
      QUIC_CODE_COUNT(quic_client_only_blackhole_detection);
      blackhole_detection_disabled_ = true;
    }
    if (config.HasClientSentConnectionOption(kNBHD, perspective_)) {
      blackhole_detection_disabled_ = true;
    }
  }
  if (config.HasClientRequestedIndependentOption(kFIDT, perspective_)) {
    idle_network_detector_.enable_shorter_idle_timeout_on_sent_packet();
  }
  if (perspective_ == Perspective::IS_CLIENT && version().HasIetfQuicFrames()) {
    if (config.HasClientRequestedIndependentOption(kROWF, perspective_)) {
      retransmittable_on_wire_behavior_ = SEND_FIRST_FORWARD_SECURE_PACKET;
    }
    if (config.HasClientRequestedIndependentOption(kROWR, perspective_)) {
      retransmittable_on_wire_behavior_ = SEND_RANDOM_BYTES;
    }
  }
  // Anti-amplification factor overrides (default is set elsewhere).
  if (config.HasClientRequestedIndependentOption(k3AFF, perspective_)) {
    anti_amplification_factor_ = 3;
  }
  if (config.HasClientRequestedIndependentOption(k10AF, perspective_)) {
    anti_amplification_factor_ = 10;
  }
  if (GetQuicReloadableFlag(quic_enable_server_on_wire_ping) &&
      perspective_ == Perspective::IS_SERVER &&
      config.HasClientSentConnectionOption(kSRWP, perspective_)) {
    QUIC_RELOADABLE_FLAG_COUNT(quic_enable_server_on_wire_ping);
    set_initial_retransmittable_on_wire_timeout(
        QuicTime::Delta::FromMilliseconds(200));
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnSetFromConfig(config);
  }
  uber_received_packet_manager_.SetFromConfig(config, perspective_);
  // Several PTO-related options all map to 5 RTOs for blackhole detection.
  if (config.HasClientSentConnectionOption(k5RTO, perspective_)) {
    num_rtos_for_blackhole_detection_ = 5;
  }
  if (config.HasClientSentConnectionOption(k6PTO, perspective_) ||
      config.HasClientSentConnectionOption(k7PTO, perspective_) ||
      config.HasClientSentConnectionOption(k8PTO, perspective_)) {
    num_rtos_for_blackhole_detection_ = 5;
  }
  if (config.HasReceivedStatelessResetToken()) {
    default_path_.stateless_reset_token = config.ReceivedStatelessResetToken();
  }
  if (config.HasReceivedAckDelayExponent()) {
    framer_.set_peer_ack_delay_exponent(config.ReceivedAckDelayExponent());
  }
  if (config.HasClientSentConnectionOption(kEACK, perspective_)) {
    bundle_retransmittable_with_pto_ack_ = true;
  }
  // kDFER / kCDFR toggle deferred sending in response to packets.
  if (config.HasClientSentConnectionOption(kDFER, perspective_)) {
    defer_send_in_response_to_packets_ = false;
  }
  if (perspective_ == Perspective::IS_CLIENT &&
      config.HasClientSentConnectionOption(kCDFR, perspective_)) {
    defer_send_in_response_to_packets_ = true;
  }
  if (config.HasClientRequestedIndependentOption(kINVC, perspective_)) {
    send_connection_close_for_invalid_version_ = true;
  }
  // Record the server preferred address matching our address family, and
  // optionally surface it immediately (kSPA2).
  if (version().HasIetfQuicFrames() &&
      config.HasReceivedPreferredAddressConnectionIdAndToken() &&
      config.SupportsServerPreferredAddress(perspective_)) {
    if (self_address().host().IsIPv4() &&
        config.HasReceivedIPv4AlternateServerAddress()) {
      received_server_preferred_address_ =
          config.ReceivedIPv4AlternateServerAddress();
    } else if (self_address().host().IsIPv6() &&
               config.HasReceivedIPv6AlternateServerAddress()) {
      received_server_preferred_address_ =
          config.ReceivedIPv6AlternateServerAddress();
    }
    if (received_server_preferred_address_.IsInitialized()) {
      QUICHE_DLOG(INFO) << ENDPOINT << "Received server preferred address: "
                        << received_server_preferred_address_;
      if (config.HasClientRequestedIndependentOption(kSPA2, perspective_)) {
        accelerated_server_preferred_address_ = true;
        visitor_->OnServerPreferredAddressAvailable(
            received_server_preferred_address_);
      }
    }
  }
  // The peer's max packet size caps our packet length.
  if (config.HasReceivedMaxPacketSize()) {
    peer_max_packet_size_ = config.ReceivedMaxPacketSize();
    packet_creator_.SetMaxPacketLength(
        GetLimitedMaxPacketSize(packet_creator_.max_packet_length()));
  }
  if (config.HasReceivedMaxDatagramFrameSize()) {
    packet_creator_.SetMaxDatagramFrameSize(
        config.ReceivedMaxDatagramFrameSize());
  }
  // Pacing-offload release times require writer support and no kNPCO opt-out.
  supports_release_time_ =
      writer_ != nullptr && writer_->SupportsReleaseTime() &&
      !config.HasClientSentConnectionOption(kNPCO, perspective_);
  if (supports_release_time_) {
    UpdateReleaseTimeIntoFuture();
  }
  // Client-side multi-port probing (kMPQC) and migration (kMPQM).
  if (perspective_ == Perspective::IS_CLIENT && version().HasIetfQuicFrames() &&
      config.HasClientRequestedIndependentOption(kMPQC, perspective_)) {
    multi_port_stats_ = std::make_unique<MultiPortStats>();
    if (config.HasClientRequestedIndependentOption(kMPQM, perspective_)) {
      multi_port_migration_enabled_ = true;
    }
  }
  reliable_stream_reset_ = config.SupportsReliableStreamReset();
  framer_.set_process_reset_stream_at(reliable_stream_reset_);
}
// Records packets that the dispatcher sent on this connection's behalf
// before the connection object existed: feeds them to the sent-packet
// manager, notifies the debug visitor, and advances the packet creator's
// packet number past the last dispatcher-sent one.  Must be called before
// this connection has sent anything itself.
void QuicConnection::AddDispatcherSentPackets(
    absl::Span<const DispatcherSentPacket> dispatcher_sent_packets) {
  QUICHE_DCHECK_EQ(stats_.packets_sent, 0u);
  QUICHE_DCHECK_EQ(stats_.packets_sent_by_dispatcher, 0u);
  QUICHE_DCHECK(!sent_packet_manager_.GetLargestSentPacket().IsInitialized());
  if (dispatcher_sent_packets.empty()) {
    return;
  }
  stats_.packets_sent_by_dispatcher = dispatcher_sent_packets.size();
  for (const DispatcherSentPacket& packet : dispatcher_sent_packets) {
    const QuicTransmissionInfo& info =
        sent_packet_manager_.AddDispatcherSentPacket(packet);
    if (debug_visitor_ != nullptr) {
      debug_visitor_->OnPacketSent(
          packet.packet_number, info.bytes_sent, info.has_crypto_handshake,
          info.transmission_type, info.encryption_level,
          info.retransmittable_frames,
          {}, info.sent_time,
          0);
    }
  }
  packet_creator_.set_packet_number(
      dispatcher_sent_packets.back().packet_number);
}
// Client-only: sends a connectivity probe to test whether the network is
// still alive when the idle deadline is near (less than half the idle
// timeout away and under three PTOs).  Returns true iff a probe was sent.
bool QuicConnection::MaybeTestLiveness() {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (liveness_testing_disabled_ ||
      encryption_level_ != ENCRYPTION_FORWARD_SECURE) {
    return false;
  }
  const QuicTime idle_network_deadline =
      idle_network_detector_.GetIdleNetworkDeadline();
  if (!idle_network_deadline.IsInitialized()) {
    return false;
  }
  const QuicTime now = clock_->ApproximateNow();
  if (now > idle_network_deadline) {
    // Too late -- the connection is already past its idle deadline.
    QUIC_DLOG(WARNING) << "Idle network deadline has passed";
    return false;
  }
  const QuicTime::Delta timeout = idle_network_deadline - now;
  if (2 * timeout > idle_network_detector_.idle_network_timeout()) {
    // More than half the idle timeout remains; no need to probe yet.
    return false;
  }
  if (!sent_packet_manager_.IsLessThanThreePTOs(timeout)) {
    // Not enough time left for a probe round-trip to be meaningful.
    return false;
  }
  QUIC_LOG_EVERY_N_SEC(INFO, 60)
      << "Testing liveness, idle_network_timeout: "
      << idle_network_detector_.idle_network_timeout()
      << ", timeout: " << timeout
      << ", Pto delay: " << sent_packet_manager_.GetPtoDelay()
      << ", smoothed_rtt: "
      << sent_packet_manager_.GetRttStats()->smoothed_rtt()
      << ", mean deviation: "
      << sent_packet_manager_.GetRttStats()->mean_deviation();
  SendConnectivityProbingPacket(writer_, peer_address());
  return true;
}
// Forwards connection options to the sent packet manager, which interprets
// the individual tags.
void QuicConnection::ApplyConnectionOptions(
    const QuicTagVector& connection_options) {
  sent_packet_manager_.ApplyConnectionOptions(connection_options);
}
// Forwards the about-to-be-sent cached network parameters to the debug
// visitor, when one is attached. No-op otherwise.
void QuicConnection::OnSendConnectionState(
    const CachedNetworkParameters& cached_network_params) {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnSendConnectionState(cached_network_params);
}
// Forwards received cached network parameters to the debug visitor, when one
// is attached. No-op otherwise.
void QuicConnection::OnReceiveConnectionState(
    const CachedNetworkParameters& cached_network_params) {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnReceiveConnectionState(cached_network_params);
}
// Passes cached network parameters from a prior session to the sent packet
// manager so it can resume connection state (optionally bandwidth-based).
void QuicConnection::ResumeConnectionState(
    const CachedNetworkParameters& cached_network_params,
    bool max_bandwidth_resumption) {
  sent_packet_manager_.ResumeConnectionState(cached_network_params,
                                             max_bandwidth_resumption);
}
// Caps the pacing rate; delegated to the sent packet manager.
void QuicConnection::SetMaxPacingRate(QuicBandwidth max_pacing_rate) {
  sent_packet_manager_.SetMaxPacingRate(max_pacing_rate);
}
// Sets an application-requested pacing rate; delegated to the sent packet
// manager.
void QuicConnection::SetApplicationDrivenPacingRate(
    QuicBandwidth application_driven_pacing_rate) {
  sent_packet_manager_.SetApplicationDrivenPacingRate(
      application_driven_pacing_rate);
}
// Forwards externally supplied network parameter hints to the sent packet
// manager.
void QuicConnection::AdjustNetworkParameters(
    const SendAlgorithmInterface::NetworkParams& params) {
  sent_packet_manager_.AdjustNetworkParameters(params);
}
// Hands ownership of the loss detection tuner to the sent packet manager.
void QuicConnection::SetLossDetectionTuner(
    std::unique_ptr<LossDetectionTunerInterface> tuner) {
  sent_packet_manager_.SetLossDetectionTuner(std::move(tuner));
}
// Invoked when handshake configuration has been negotiated. Notifies the
// sent packet manager and, on servers (behind a reloadable flag), starts MTU
// discovery toward the high target packet size.
void QuicConnection::OnConfigNegotiated() {
  sent_packet_manager_.OnConfigNegotiated();
  if (GetQuicReloadableFlag(quic_enable_mtu_discovery_at_server) &&
      perspective_ == Perspective::IS_SERVER) {
    QUIC_RELOADABLE_FLAG_COUNT(quic_enable_mtu_discovery_at_server);
    SetMtuDiscoveryTarget(kMtuDiscoveryTargetPacketSizeHigh);
  }
}
// Returns the current pacing-rate cap from the sent packet manager.
QuicBandwidth QuicConnection::MaxPacingRate() const {
  return sent_packet_manager_.MaxPacingRate();
}
// Returns the application-requested pacing rate from the sent packet manager.
QuicBandwidth QuicConnection::ApplicationDrivenPacingRate() const {
  return sent_packet_manager_.ApplicationDrivenPacingRate();
}
bool QuicConnection::SelectMutualVersion(
const ParsedQuicVersionVector& available_versions) {
const ParsedQuicVersionVector& supported_versions =
framer_.supported_versions();
for (size_t i = 0; i < supported_versions.size(); ++i) {
const ParsedQuicVersion& version = supported_versions[i];
if (std::find(available_versions.begin(), available_versions.end(),
version) != available_versions.end()) {
framer_.set_version(version);
return true;
}
}
return false;
}
// QuicFramer error callback. Errors arising from packets that were never
// decrypted are ignored (the packet is simply dropped); otherwise the
// framer's error closes the connection with a CONNECTION_CLOSE.
void QuicConnection::OnError(QuicFramer* framer) {
  if (!connected_ || !last_received_packet_info_.decrypted) {
    return;
  }
  CloseConnection(framer->error(), framer->detailed_error(),
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Start-of-packet callback: resets the decrypted flag, which is set again in
// OnDecryptedPacket() once decryption succeeds.
void QuicConnection::OnPacket() {
  last_received_packet_info_.decrypted = false;
}
// Called when a packet carries an unexpected QUIC version. On the client
// this is a bug (versions are fixed after negotiation), so the connection is
// silently closed. Always returns false: the packet is not processed.
bool QuicConnection::OnProtocolVersionMismatch(
    ParsedQuicVersion received_version) {
  QUIC_DLOG(INFO) << ENDPOINT << "Received packet with mismatched version "
                  << ParsedQuicVersionToString(received_version);
  if (perspective_ == Perspective::IS_CLIENT) {
    const std::string error_details = "Protocol version mismatch.";
    QUIC_BUG(quic_bug_10511_3) << ENDPOINT << error_details;
    CloseConnection(QUIC_INTERNAL_ERROR, error_details,
                    ConnectionCloseBehavior::SILENT_CLOSE);
  }
  return false;
}
// Handles a version negotiation packet. Servers must never receive one
// (silent close). Clients ignore it after negotiation completes, treat a
// list containing their own version as invalid (the server should have
// accepted), and otherwise record the server's versions and close so a
// higher layer can retry with a supported version.
void QuicConnection::OnVersionNegotiationPacket(
    const QuicVersionNegotiationPacket& packet) {
  QUICHE_DCHECK_EQ(default_path_.server_connection_id, packet.connection_id);
  if (perspective_ == Perspective::IS_SERVER) {
    const std::string error_details =
        "Server received version negotiation packet.";
    QUIC_BUG(quic_bug_10511_4) << error_details;
    QUIC_CODE_COUNT(quic_tear_down_local_connection_on_version_negotiation);
    CloseConnection(QUIC_INTERNAL_ERROR, error_details,
                    ConnectionCloseBehavior::SILENT_CLOSE);
    return;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnVersionNegotiationPacket(packet);
  }
  // Late (possibly attacker-injected) version negotiation is ignored.
  if (version_negotiated_) {
    return;
  }
  if (std::find(packet.versions.begin(), packet.versions.end(), version()) !=
      packet.versions.end()) {
    // The server claims to support our version yet refused the connection —
    // this packet is invalid.
    const std::string error_details = absl::StrCat(
        "Server already supports client's version ",
        ParsedQuicVersionToString(version()),
        " and should have accepted the connection instead of sending {",
        ParsedQuicVersionVectorToString(packet.versions), "}.");
    QUIC_DLOG(WARNING) << error_details;
    CloseConnection(QUIC_INVALID_VERSION_NEGOTIATION_PACKET, error_details,
                    ConnectionCloseBehavior::SILENT_CLOSE);
    return;
  }
  server_supported_versions_ = packet.versions;
  CloseConnection(
      QUIC_INVALID_VERSION,
      absl::StrCat(
          "Client may support one of the versions in the server's list, but "
          "it's going to close the connection anyway. Supported versions: {",
          ParsedQuicVersionVectorToString(framer_.supported_versions()),
          "}, peer supported versions: {",
          ParsedQuicVersionVectorToString(packet.versions), "}"),
      send_connection_close_for_invalid_version_
          ? ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET
          : ConnectionCloseBehavior::SILENT_CLOSE);
}
// Client-only handler for a RETRY packet. Validates the packet (integrity
// tag for TLS versions, original connection ID otherwise), then switches to
// the server-provided connection ID, stores the retry token for future
// Initials, reinstalls Initial crypters keyed on the new ID, and marks sent
// Initial packets for retransmission.
void QuicConnection::OnRetryPacket(QuicConnectionId original_connection_id,
                                   QuicConnectionId new_connection_id,
                                   absl::string_view retry_token,
                                   absl::string_view retry_integrity_tag,
                                   absl::string_view retry_without_tag) {
  QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective_);
  if (version().UsesTls()) {
    // TLS versions authenticate RETRY via an integrity tag.
    if (!CryptoUtils::ValidateRetryIntegrityTag(
            version(), default_path_.server_connection_id, retry_without_tag,
            retry_integrity_tag)) {
      QUIC_DLOG(ERROR) << "Ignoring RETRY with invalid integrity tag";
      return;
    }
  } else {
    // Pre-TLS versions validate via the echoed original connection ID.
    if (original_connection_id != default_path_.server_connection_id) {
      QUIC_DLOG(ERROR) << "Ignoring RETRY with original connection ID "
                       << original_connection_id << " not matching expected "
                       << default_path_.server_connection_id << " token "
                       << absl::BytesToHexString(retry_token);
      return;
    }
  }
  // Only one RETRY per connection is honored.
  framer_.set_drop_incoming_retry_packets(true);
  stats_.retry_packet_processed = true;
  QUIC_DLOG(INFO) << "Received RETRY, replacing connection ID "
                  << default_path_.server_connection_id << " with "
                  << new_connection_id << ", received token "
                  << absl::BytesToHexString(retry_token);
  if (!original_destination_connection_id_.has_value()) {
    original_destination_connection_id_ = default_path_.server_connection_id;
  }
  QUICHE_DCHECK(!retry_source_connection_id_.has_value())
      << *retry_source_connection_id_;
  retry_source_connection_id_ = new_connection_id;
  ReplaceInitialServerConnectionId(new_connection_id);
  packet_creator_.SetRetryToken(retry_token);
  // Initial keys are derived from the connection ID, so rebuild them.
  InstallInitialCrypters(default_path_.server_connection_id);
  sent_packet_manager_.MarkInitialPacketsForRetransmission();
}
// Notifies the debug visitor, when one is attached, that the ClientHello
// spans multiple packets. No-op otherwise.
void QuicConnection::SetMultiPacketClientHello() {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->SetMultiPacketClientHello();
}
// Records the client's original destination connection ID (before the server
// replaced it), installs Initial crypters keyed on it, and remembers the
// replacement ID so later packets addressed to the original can be mapped.
void QuicConnection::SetOriginalDestinationConnectionId(
    const QuicConnectionId& original_destination_connection_id) {
  QUIC_DLOG(INFO) << "Setting original_destination_connection_id to "
                  << original_destination_connection_id
                  << " on connection with server_connection_id "
                  << default_path_.server_connection_id;
  QUICHE_DCHECK_NE(original_destination_connection_id,
                   default_path_.server_connection_id);
  InstallInitialCrypters(original_destination_connection_id);
  // This must only be set once per connection.
  QUICHE_DCHECK(!original_destination_connection_id_.has_value())
      << *original_destination_connection_id_;
  original_destination_connection_id_ = original_destination_connection_id;
  original_destination_connection_id_replacement_ =
      default_path_.server_connection_id;
}
// Returns the recorded original destination connection ID, falling back to
// the current default-path server connection ID when none was recorded.
QuicConnectionId QuicConnection::GetOriginalDestinationConnectionId() const {
  return original_destination_connection_id_.value_or(
      default_path_.server_connection_id);
}
// Retires the original destination connection ID (if still held): notifies
// the visitor and clears the stored value. Safe to call when already retired.
void QuicConnection::RetireOriginalDestinationConnectionId() {
  if (original_destination_connection_id_.has_value()) {
    visitor_->OnServerConnectionIdRetired(*original_destination_connection_id_);
    original_destination_connection_id_.reset();
  }
}
// Alarm callback: drops the 0-RTT decrypter and retires the original
// destination connection ID, since neither is needed once 1-RTT is in use.
void QuicConnection::OnDiscardZeroRttDecryptionKeysAlarm() {
  QUICHE_DCHECK(connected());
  QUIC_DLOG(INFO) << "0-RTT discard alarm fired";
  RemoveDecrypter(ENCRYPTION_ZERO_RTT);
  RetireOriginalDestinationConnectionId();
}
// Decides whether the server connection ID in an incoming packet header is
// acceptable for this connection. Accepts, in order: client short-header
// packets (no server CID to check), the current or original server CID,
// packets allowed to replace the server CID, server-side self-issued CIDs
// still in use, and possibly-valid new server CIDs.
bool QuicConnection::ValidateServerConnectionId(
    const QuicPacketHeader& header) const {
  // Short-header packets at the client carry no server connection ID.
  if (perspective_ == Perspective::IS_CLIENT &&
      header.form == IETF_QUIC_SHORT_HEADER_PACKET) {
    return true;
  }
  QuicConnectionId server_connection_id =
      GetServerConnectionIdAsRecipient(header, perspective_);
  if (server_connection_id == default_path_.server_connection_id ||
      server_connection_id == original_destination_connection_id_) {
    return true;
  }
  if (PacketCanReplaceServerConnectionId(header, perspective_)) {
    QUIC_DLOG(INFO) << ENDPOINT << "Accepting packet with new connection ID "
                    << server_connection_id << " instead of "
                    << default_path_.server_connection_id;
    return true;
  }
  // A server accepts any connection ID it issued itself and has not retired.
  if (version().HasIetfQuicFrames() && perspective_ == Perspective::IS_SERVER &&
      self_issued_cid_manager_ != nullptr &&
      self_issued_cid_manager_->IsConnectionIdInUse(server_connection_id)) {
    return true;
  }
  if (NewServerConnectionIdMightBeValid(
          header, perspective_, server_connection_id_replaced_by_initial_)) {
    return true;
  }
  return false;
}
// Validates the connection IDs in a not-yet-authenticated packet header.
// Returns false (dropping the packet) when either the server or client
// connection ID does not belong to this connection.
bool QuicConnection::OnUnauthenticatedPublicHeader(
    const QuicPacketHeader& header) {
  last_received_packet_info_.destination_connection_id =
      header.destination_connection_id;
  // Map packets still addressed to the original destination connection ID
  // onto its recorded replacement.
  if (perspective_ == Perspective::IS_SERVER &&
      original_destination_connection_id_.has_value() &&
      last_received_packet_info_.destination_connection_id ==
          *original_destination_connection_id_) {
    last_received_packet_info_.destination_connection_id =
        original_destination_connection_id_replacement_;
  }
  // Once an INITIAL arrives, any subsequent RETRY must be ignored.
  if (header.version_flag && header.long_packet_type == INITIAL) {
    framer_.set_drop_incoming_retry_packets(true);
  }
  if (!ValidateServerConnectionId(header)) {
    ++stats_.packets_dropped;
    QuicConnectionId server_connection_id =
        GetServerConnectionIdAsRecipient(header, perspective_);
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Ignoring packet from unexpected server connection ID "
                    << server_connection_id << " instead of "
                    << default_path_.server_connection_id;
    if (debug_visitor_ != nullptr) {
      debug_visitor_->OnIncorrectConnectionId(server_connection_id);
    }
    QUICHE_DCHECK_NE(Perspective::IS_SERVER, perspective_);
    return false;
  }
  if (!version().SupportsClientConnectionIds()) {
    return true;
  }
  // Short-header packets at the server carry no client connection ID.
  if (perspective_ == Perspective::IS_SERVER &&
      header.form == IETF_QUIC_SHORT_HEADER_PACKET) {
    return true;
  }
  QuicConnectionId client_connection_id =
      GetClientConnectionIdAsRecipient(header, perspective_);
  if (client_connection_id == default_path_.client_connection_id) {
    return true;
  }
  // The server learns the client's connection ID from its first packet.
  if (!client_connection_id_is_set_ && perspective_ == Perspective::IS_SERVER) {
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Setting client connection ID from first packet to "
                    << client_connection_id;
    set_client_connection_id(client_connection_id);
    return true;
  }
  // A client accepts any connection ID it issued itself and has not retired.
  if (version().HasIetfQuicFrames() && perspective_ == Perspective::IS_CLIENT &&
      self_issued_cid_manager_ != nullptr &&
      self_issued_cid_manager_->IsConnectionIdInUse(client_connection_id)) {
    return true;
  }
  ++stats_.packets_dropped;
  QUIC_DLOG(INFO) << ENDPOINT
                  << "Ignoring packet from unexpected client connection ID "
                  << client_connection_id << " instead of "
                  << default_path_.client_connection_id;
  return false;
}
// Called with the full (still unauthenticated) header. Closes the connection
// if frames are pending serialization — incoming packets must never be
// processed with unflushed outgoing frames in the creator.
bool QuicConnection::OnUnauthenticatedHeader(const QuicPacketHeader& header) {
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnUnauthenticatedHeader(header);
  }
  // The connection ID was already validated in OnUnauthenticatedPublicHeader.
  QUICHE_DCHECK(ValidateServerConnectionId(header));
  if (packet_creator_.HasPendingFrames()) {
    const std::string error_details =
        "Pending frames must be serialized before incoming packets are "
        "processed.";
    QUIC_BUG(quic_pending_frames_not_serialized)
        << error_details << ", received header: " << header;
    CloseConnection(QUIC_INTERNAL_ERROR, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  return true;
}
// Informs the visitor (and debug visitor, if attached) that version
// negotiation completed with the current version.
void QuicConnection::OnSuccessfulVersionNegotiation() {
  visitor_->OnSuccessfulVersionNegotiation(version());
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnSuccessfulVersionNegotiation(version());
  }
}
// Client-only: invoked after a successful connection migration. Clears path
// degradation state (unless multi-port tracks it), drops an alternative path
// that just became the default, and resets congestion state for address
// (non-port-only) changes on IETF versions.
void QuicConnection::OnSuccessfulMigration(bool is_port_change) {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (IsPathDegrading() && !multi_port_stats_) {
    // The new path is working; stop treating the connection as degraded.
    OnForwardProgressMade();
  }
  if (IsAlternativePath(default_path_.self_address,
                        default_path_.peer_address)) {
    // The alternative path is now the default; clear the stale record.
    alternative_path_.Clear();
  }
  if (version().HasIetfQuicFrames() && !is_port_change) {
    sent_packet_manager_.OnConnectionMigration(true);
  }
}
// Forwards sent transport parameters to the debug visitor, when one is
// attached. No-op otherwise.
void QuicConnection::OnTransportParametersSent(
    const TransportParameters& transport_parameters) const {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnTransportParametersSent(transport_parameters);
}
// Forwards received transport parameters to the debug visitor, when one is
// attached. No-op otherwise.
void QuicConnection::OnTransportParametersReceived(
    const TransportParameters& transport_parameters) const {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnTransportParametersReceived(transport_parameters);
}
// Forwards resumed transport parameters to the debug visitor, when one is
// attached. No-op otherwise.
void QuicConnection::OnTransportParametersResumed(
    const TransportParameters& transport_parameters) const {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnTransportParametersResumed(transport_parameters);
}
// Forwards the sent encrypted ClientHello bytes to the debug visitor, when
// one is attached. No-op otherwise.
void QuicConnection::OnEncryptedClientHelloSent(
    absl::string_view client_hello) const {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnEncryptedClientHelloSent(client_hello);
}
// Forwards the received encrypted ClientHello bytes to the debug visitor,
// when one is attached. No-op otherwise.
void QuicConnection::OnEncryptedClientHelloReceived(
    absl::string_view client_hello) const {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnEncryptedClientHelloReceived(client_hello);
}
// Forwards parsed ClientHello details to the debug visitor, when one is
// attached. No-op otherwise.
void QuicConnection::OnParsedClientHelloInfo(
    const ParsedClientHello& client_hello) {
  if (debug_visitor_ == nullptr) {
    return;
  }
  debug_visitor_->OnParsedClientHelloInfo(client_hello);
}
// True while the ack alarm is armed, i.e. an ACK is queued to be sent.
bool QuicConnection::HasPendingAcks() const { return ack_alarm().IsSet(); }
// Invoked once the peer's user agent ID becomes known; only the sent packet
// manager is notified (the ID itself is unused here).
void QuicConnection::OnUserAgentIdKnown(const std::string& /*user_agent_id*/) {
  sent_packet_manager_.OnUserAgentIdKnown();
}
// Records a successful decryption at `level`. On the first 1-RTT packet, a
// server arms the alarm that later discards 0-RTT keys (3 PTOs out). A
// handshake/1-RTT decryption also validates the peer address for
// anti-amplification purposes.
void QuicConnection::OnDecryptedPacket(size_t /*packet_length*/,
                                       EncryptionLevel level) {
  last_received_packet_info_.decrypted_level = level;
  last_received_packet_info_.decrypted = true;
  if (level == ENCRYPTION_FORWARD_SECURE &&
      !have_decrypted_first_one_rtt_packet_) {
    have_decrypted_first_one_rtt_packet_ = true;
    if (version().UsesTls() && perspective_ == Perspective::IS_SERVER) {
      // 0-RTT keys are no longer needed once 1-RTT works; discard them soon.
      discard_zero_rtt_decryption_keys_alarm().Set(
          clock_->ApproximateNow() + sent_packet_manager_.GetPtoDelay() * 3);
    }
  }
  if (EnforceAntiAmplificationLimit() && !IsHandshakeConfirmed() &&
      (level == ENCRYPTION_HANDSHAKE || level == ENCRYPTION_FORWARD_SECURE)) {
    // A packet at these levels proves the peer owns its address.
    default_path_.validated = true;
    stats_.address_validated_via_decrypting_packet = true;
  }
  idle_network_detector_.OnPacketReceived(
      last_received_packet_info_.receipt_time);
  visitor_->OnPacketDecrypted(level);
}
// Returns the effective peer address implied by the packet currently being
// processed (its source address).
QuicSocketAddress QuicConnection::GetEffectivePeerAddressFromCurrentPacket()
    const {
  return last_received_packet_info_.source_address;
}
// Processes a decrypted packet header: validates it, resets per-packet
// probing state, detects peer address changes (server) or updates the peer
// address (client), and records receipt with the received-packet manager,
// including ECN accounting and token-based address validation.
bool QuicConnection::OnPacketHeader(const QuicPacketHeader& header) {
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnPacketHeader(header, clock_->ApproximateNow(),
                                   last_received_packet_info_.decrypted_level);
  }
  // Tentatively count the packet as dropped; undone below once accepted.
  ++stats_.packets_dropped;
  if (!ProcessValidatedPacket(header)) {
    return false;
  }
  // Reset per-packet state used for connectivity-probe classification.
  current_packet_content_ = NO_FRAMES_RECEIVED;
  is_current_packet_connectivity_probing_ = false;
  has_path_challenge_in_current_packet_ = false;
  current_effective_peer_migration_type_ = NO_CHANGE;
  if (perspective_ == Perspective::IS_CLIENT) {
    if (!GetLargestReceivedPacket().IsInitialized() ||
        header.packet_number > GetLargestReceivedPacket()) {
      if (version().HasIetfQuicFrames()) {
        // IETF clients do not update the peer address here.
      } else {
        // Google QUIC: track the newest packet's source as the peer address.
        UpdatePeerAddress(last_received_packet_info_.source_address);
        default_path_.peer_address = GetEffectivePeerAddressFromCurrentPacket();
      }
    }
  } else {
    // Server: detect whether the effective peer address changed.
    current_effective_peer_migration_type_ =
        QuicUtils::DetermineAddressChangeType(
            default_path_.peer_address,
            GetEffectivePeerAddressFromCurrentPacket());
    if (version().HasIetfQuicFrames()) {
      auto effective_peer_address = GetEffectivePeerAddressFromCurrentPacket();
      // Remember which server connection ID the peer used on this path.
      if (IsDefaultPath(last_received_packet_info_.destination_address,
                        effective_peer_address)) {
        default_path_.server_connection_id =
            last_received_packet_info_.destination_connection_id;
      } else if (IsAlternativePath(
                     last_received_packet_info_.destination_address,
                     effective_peer_address)) {
        alternative_path_.server_connection_id =
            last_received_packet_info_.destination_connection_id;
      }
    }
    if (last_received_packet_info_.destination_connection_id !=
            default_path_.server_connection_id &&
        (!original_destination_connection_id_.has_value() ||
         last_received_packet_info_.destination_connection_id !=
             *original_destination_connection_id_)) {
      QUIC_CODE_COUNT(quic_connection_id_change);
    }
    QUIC_DLOG_IF(INFO, current_effective_peer_migration_type_ != NO_CHANGE)
        << ENDPOINT << "Effective peer's ip:port changed from "
        << default_path_.peer_address.ToString() << " to "
        << GetEffectivePeerAddressFromCurrentPacket().ToString()
        << ", active_effective_peer_migration_type is "
        << active_effective_peer_migration_type_;
  }
  // The packet was accepted; undo the tentative drop count.
  --stats_.packets_dropped;
  QUIC_DVLOG(1) << ENDPOINT << "Received packet header: " << header;
  last_received_packet_info_.header = header;
  if (!stats_.first_decrypted_packet.IsInitialized()) {
    stats_.first_decrypted_packet =
        last_received_packet_info_.header.packet_number;
  }
  // Tally ECN codepoints observed on received packets.
  switch (last_received_packet_info_.ecn_codepoint) {
    case ECN_NOT_ECT:
      break;
    case ECN_ECT0:
      stats_.num_ecn_marks_received.ect0++;
      break;
    case ECN_ECT1:
      stats_.num_ecn_marks_received.ect1++;
      break;
    case ECN_CE:
      stats_.num_ecn_marks_received.ce++;
      break;
  }
  QuicTime receipt_time = idle_network_detector_.time_of_last_received_packet();
  if (SupportsMultiplePacketNumberSpaces()) {
    receipt_time = last_received_packet_info_.receipt_time;
  }
  uber_received_packet_manager_.RecordPacketReceived(
      last_received_packet_info_.decrypted_level,
      last_received_packet_info_.header, receipt_time,
      last_received_packet_info_.ecn_codepoint);
  // A valid retry token also validates the peer's address.
  if (EnforceAntiAmplificationLimit() && !IsHandshakeConfirmed() &&
      !header.retry_token.empty() &&
      visitor_->ValidateToken(header.retry_token)) {
    QUIC_DLOG(INFO) << ENDPOINT << "Address validated via token.";
    QUIC_CODE_COUNT(quic_address_validated_via_token);
    default_path_.validated = true;
    stats_.address_validated_via_token = true;
  }
  QUICHE_DCHECK(connected_);
  return true;
}
// Handles a STREAM frame. Rejects non-crypto stream data that arrived at
// ENCRYPTION_INITIAL (unencrypted data is never acceptable there), otherwise
// forwards the frame to the visitor and updates ack/stat bookkeeping.
bool QuicConnection::OnStreamFrame(const QuicStreamFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_3, !connected_)
      << "Processing STREAM frame when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(STREAM_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnStreamFrame(frame);
  }
  if (!QuicUtils::IsCryptoStreamId(transport_version(), frame.stream_id) &&
      last_received_packet_info_.decrypted_level == ENCRYPTION_INITIAL) {
    if (MaybeConsiderAsMemoryCorruption(frame)) {
      // The frame looks like crypto data that was corrupted in memory.
      CloseConnection(QUIC_MAYBE_CORRUPTED_MEMORY,
                      "Received crypto frame on non crypto stream.",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return false;
    }
    QUIC_PEER_BUG(quic_peer_bug_10511_6)
        << ENDPOINT << "Received an unencrypted data frame: closing connection"
        << " packet_number:" << last_received_packet_info_.header.packet_number
        << " stream_id:" << frame.stream_id
        << " received_packets:" << ack_frame();
    CloseConnection(QUIC_UNENCRYPTED_STREAM_DATA,
                    "Unencrypted stream data seen.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  MaybeUpdateAckTimeout();
  visitor_->OnStreamFrame(frame);
  stats_.stream_bytes_received += frame.data_length;
  // Real data arrived, so reset the retransmittable-on-wire backoff.
  ping_manager_.reset_consecutive_retransmittable_on_wire_count();
  return connected_;
}
// Handles a CRYPTO frame: updates packet-content classification, notifies
// the debug visitor, schedules an ACK, and forwards to the visitor.
bool QuicConnection::OnCryptoFrame(const QuicCryptoFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_4, !connected_)
      << "Processing CRYPTO frame when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(CRYPTO_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnCryptoFrame(frame);
  }
  MaybeUpdateAckTimeout();
  visitor_->OnCryptoFrame(frame);
  return connected_;
}
// Begins processing an ACK frame. Rejects nested ACK processing, stale ACKs
// (carried in a packet older than the newest ACK-bearing packet), and ACKs
// for packets we never sent (a fatal peer error).
bool QuicConnection::OnAckFrameStart(QuicPacketNumber largest_acked,
                                     QuicTime::Delta ack_delay_time) {
  QUIC_BUG_IF(quic_bug_12714_5, !connected_)
      << "Processing ACK frame start when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  // Re-entrancy guard: a new ACK must not arrive mid-processing.
  if (processing_ack_frame_) {
    CloseConnection(QUIC_INVALID_ACK_DATA,
                    "Received a new ack while processing an ack frame.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  if (!UpdatePacketContent(ACK_FRAME)) {
    return false;
  }
  QUIC_DVLOG(1) << ENDPOINT
                << "OnAckFrameStart, largest_acked: " << largest_acked;
  if (GetLargestReceivedPacketWithAck().IsInitialized() &&
      last_received_packet_info_.header.packet_number <=
          GetLargestReceivedPacketWithAck()) {
    QUIC_DLOG(INFO) << ENDPOINT << "Received an old ack frame: ignoring";
    return true;
  }
  // Acking a packet we never sent is a protocol violation.
  if (!sent_packet_manager_.GetLargestSentPacket().IsInitialized() ||
      largest_acked > sent_packet_manager_.GetLargestSentPacket()) {
    QUIC_DLOG(WARNING) << ENDPOINT
                       << "Peer's observed unsent packet:" << largest_acked
                       << " vs " << sent_packet_manager_.GetLargestSentPacket()
                       << ". SupportsMultiplePacketNumberSpaces():"
                       << SupportsMultiplePacketNumberSpaces()
                       << ", last_received_packet_info_.decrypted_level:"
                       << last_received_packet_info_.decrypted_level;
    CloseConnection(QUIC_INVALID_ACK_DATA, "Largest observed too high.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  processing_ack_frame_ = true;
  sent_packet_manager_.OnAckFrameStart(
      largest_acked, ack_delay_time,
      idle_network_detector_.time_of_last_received_packet());
  return true;
}
// Processes one [start, end) range of an ACK frame; stale ACK-bearing
// packets are ignored (same check as OnAckFrameStart).
bool QuicConnection::OnAckRange(QuicPacketNumber start, QuicPacketNumber end) {
  QUIC_BUG_IF(quic_bug_12714_6, !connected_)
      << "Processing ACK frame range when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  QUIC_DVLOG(1) << ENDPOINT << "OnAckRange: [" << start << ", " << end << ")";
  if (GetLargestReceivedPacketWithAck().IsInitialized() &&
      last_received_packet_info_.header.packet_number <=
          GetLargestReceivedPacketWithAck()) {
    QUIC_DLOG(INFO) << ENDPOINT << "Received an old ack frame: ignoring";
    return true;
  }
  sent_packet_manager_.OnAckRange(start, end);
  return true;
}
// Processes one receive-timestamp entry of an ACK frame; stale ACK-bearing
// packets are ignored (same check as OnAckFrameStart).
bool QuicConnection::OnAckTimestamp(QuicPacketNumber packet_number,
                                    QuicTime timestamp) {
  QUIC_BUG_IF(quic_bug_10511_7, !connected_)
      << "Processing ACK frame time stamp when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  QUIC_DVLOG(1) << ENDPOINT << "OnAckTimestamp: [" << packet_number << ", "
                << timestamp.ToDebuggingValue() << ")";
  if (GetLargestReceivedPacketWithAck().IsInitialized() &&
      last_received_packet_info_.header.packet_number <=
          GetLargestReceivedPacketWithAck()) {
    QUIC_DLOG(INFO) << ENDPOINT << "Received an old ack frame: ignoring";
    return true;
  }
  sent_packet_manager_.OnAckTimestamp(packet_number, timestamp);
  return true;
}
// Completes ACK frame processing: commits the ACK to the sent packet
// manager, fires first-1-RTT / first-0-RTT acknowledgment notifications
// (detected by sampling acked-state before and after), and reschedules
// sending state.
bool QuicConnection::OnAckFrameEnd(
    QuicPacketNumber start, const std::optional<QuicEcnCounts>& ecn_counts) {
  QUIC_BUG_IF(quic_bug_12714_7, !connected_)
      << "Processing ACK frame end when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  QUIC_DVLOG(1) << ENDPOINT << "OnAckFrameEnd, start: " << start;
  if (GetLargestReceivedPacketWithAck().IsInitialized() &&
      last_received_packet_info_.header.packet_number <=
          GetLargestReceivedPacketWithAck()) {
    QUIC_DLOG(INFO) << ENDPOINT << "Received an old ack frame: ignoring";
    return true;
  }
  // Sample before committing the ACK so first-time transitions are visible.
  const bool one_rtt_packet_was_acked =
      sent_packet_manager_.one_rtt_packet_acked();
  const bool zero_rtt_packet_was_acked =
      sent_packet_manager_.zero_rtt_packet_acked();
  const AckResult ack_result = sent_packet_manager_.OnAckFrameEnd(
      idle_network_detector_.time_of_last_received_packet(),
      last_received_packet_info_.header.packet_number,
      last_received_packet_info_.decrypted_level, ecn_counts);
  if (ack_result != PACKETS_NEWLY_ACKED &&
      ack_result != NO_PACKETS_NEWLY_ACKED) {
    // Malformed or internally inconsistent ACK.
    QUIC_DLOG(ERROR) << ENDPOINT
                     << "Error occurred when processing an ACK frame: "
                     << QuicUtils::AckResultToString(ack_result);
    return false;
  }
  if (SupportsMultiplePacketNumberSpaces() && !one_rtt_packet_was_acked &&
      sent_packet_manager_.one_rtt_packet_acked()) {
    visitor_->OnOneRttPacketAcknowledged();
  }
  if (debug_visitor_ != nullptr && version().UsesTls() &&
      !zero_rtt_packet_was_acked &&
      sent_packet_manager_.zero_rtt_packet_acked()) {
    debug_visitor_->OnZeroRttPacketAcked();
  }
  // Cancel the send alarm; it is rearmed later if sending is still blocked.
  if (send_alarm().IsSet()) {
    send_alarm().Cancel();
  }
  if (supports_release_time_) {
    UpdateReleaseTimeIntoFuture();
  }
  SetLargestReceivedPacketWithAck(
      last_received_packet_info_.header.packet_number);
  PostProcessAfterAckFrame(ack_result == PACKETS_NEWLY_ACKED);
  processing_ack_frame_ = false;
  return connected_;
}
// Handles a (legacy) STOP_WAITING frame; only packet-content classification
// is updated, the frame itself is otherwise ignored.
bool QuicConnection::OnStopWaitingFrame(const QuicStopWaitingFrame& /*frame*/) {
  QUIC_BUG_IF(quic_bug_12714_8, !connected_)
      << "Processing STOP_WAITING frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(STOP_WAITING_FRAME)) {
    return false;
  }
  return connected_;
}
// Handles a PADDING frame: classification plus debug-visitor notification;
// padding has no other effect.
bool QuicConnection::OnPaddingFrame(const QuicPaddingFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_9, !connected_)
      << "Processing PADDING frame when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(PADDING_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnPaddingFrame(frame);
  }
  return true;
}
// Handles a PING frame: reports the time since connection creation to the
// debug visitor and schedules an ACK (pings exist to elicit ACKs).
bool QuicConnection::OnPingFrame(const QuicPingFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_10, !connected_)
      << "Processing PING frame when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(PING_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    QuicTime::Delta ping_received_delay = QuicTime::Delta::Zero();
    const QuicTime now = clock_->ApproximateNow();
    if (now > stats_.connection_creation_time) {
      ping_received_delay = now - stats_.connection_creation_time;
    }
    debug_visitor_->OnPingFrame(frame, ping_received_delay);
  }
  MaybeUpdateAckTimeout();
  return true;
}
// Handles a RST_STREAM frame: classification, debug/log reporting, ACK
// scheduling, then delivery to the visitor.
bool QuicConnection::OnRstStreamFrame(const QuicRstStreamFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_11, !connected_)
      << "Processing RST_STREAM frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(RST_STREAM_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnRstStreamFrame(frame);
  }
  QUIC_DLOG(INFO) << ENDPOINT
                  << "RST_STREAM_FRAME received for stream: " << frame.stream_id
                  << " with error: "
                  << QuicRstStreamErrorCodeToString(frame.error_code);
  MaybeUpdateAckTimeout();
  visitor_->OnRstStream(frame);
  return connected_;
}
// Handles a STOP_SENDING frame: classification, debug/log reporting, ACK
// scheduling, then delivery to the visitor.
bool QuicConnection::OnStopSendingFrame(const QuicStopSendingFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_12, !connected_)
      << "Processing STOP_SENDING frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(STOP_SENDING_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnStopSendingFrame(frame);
  }
  QUIC_DLOG(INFO) << ENDPOINT << "STOP_SENDING frame received for stream: "
                  << frame.stream_id
                  << " with error: " << frame.ietf_error_code;
  MaybeUpdateAckTimeout();
  visitor_->OnStopSendingFrame(frame);
  return connected_;
}
// Path validation context used when validating the reverse path back to a
// peer (e.g. after an address change). Probes are written with the
// connection's current default writer.
class ReversePathValidationContext : public QuicPathValidationContext {
 public:
  ReversePathValidationContext(const QuicSocketAddress& self_address,
                               const QuicSocketAddress& peer_address,
                               const QuicSocketAddress& effective_peer_address,
                               QuicConnection* connection)
      : QuicPathValidationContext(self_address, peer_address,
                                  effective_peer_address),
        connection_(connection) {}
  // Reverse path probing reuses the connection's default writer.
  QuicPacketWriter* WriterToUse() override { return connection_->writer(); }
 private:
  // Raw pointer back to the parent connection (assumed to outlive this
  // context — TODO confirm against ValidatePath's lifetime guarantees).
  QuicConnection* connection_;
};
// Handles a PATH_CHALLENGE frame. Only the first challenge per packet is
// answered. The response (and optional proactive reverse-path validation) is
// sent to the address the challenge arrived from, using the connection IDs
// registered for that path.
bool QuicConnection::OnPathChallengeFrame(const QuicPathChallengeFrame& frame) {
  QUIC_BUG_IF(quic_bug_10511_8, !connected_)
      << "Processing PATH_CHALLENGE frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  // Only respond to the first PATH_CHALLENGE in a packet.
  if (has_path_challenge_in_current_packet_) {
    return true;
  }
  // Reset before UpdatePacketContent, which may set it (checked below).
  should_proactively_validate_peer_address_on_path_challenge_ = false;
  if (!UpdatePacketContent(PATH_CHALLENGE_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnPathChallengeFrame(frame);
  }
  // Clients respond on the established path; servers respond to the
  // address the challenge actually came from.
  const QuicSocketAddress effective_peer_address_to_respond =
      perspective_ == Perspective::IS_CLIENT
          ? effective_peer_address()
          : GetEffectivePeerAddressFromCurrentPacket();
  const QuicSocketAddress direct_peer_address_to_respond =
      perspective_ == Perspective::IS_CLIENT
          ? direct_peer_address_
          : last_received_packet_info_.source_address;
  QuicConnectionId client_cid, server_cid;
  FindOnPathConnectionIds(last_received_packet_info_.destination_address,
                          effective_peer_address_to_respond, &client_cid,
                          &server_cid);
  {
    // Temporarily target the responding path while building packets.
    QuicPacketCreator::ScopedPeerAddressContext context(
        &packet_creator_, direct_peer_address_to_respond, client_cid,
        server_cid);
    if (should_proactively_validate_peer_address_on_path_challenge_) {
      QUIC_DVLOG(1) << "Proactively validate the effective peer address "
                    << effective_peer_address_to_respond;
      QUIC_CODE_COUNT_N(quic_kick_off_client_address_validation, 2, 6);
      ValidatePath(
          std::make_unique<ReversePathValidationContext>(
              default_path_.self_address, direct_peer_address_to_respond,
              effective_peer_address_to_respond, this),
          std::make_unique<ReversePathValidationResultDelegate>(this,
                                                                peer_address()),
          PathValidationReason::kReversePathValidation);
    }
    has_path_challenge_in_current_packet_ = true;
    MaybeUpdateAckTimeout();
    if (!SendPathResponse(frame.data_buffer, direct_peer_address_to_respond,
                          effective_peer_address_to_respond)) {
      QUIC_CODE_COUNT(quic_failed_to_send_path_response);
    }
    ++stats_.num_connectivity_probing_received;
  }
  return connected_;
}
// Handles a PATH_RESPONSE frame: forwards the echoed data and the arrival
// address to the path validator to match against outstanding challenges.
bool QuicConnection::OnPathResponseFrame(const QuicPathResponseFrame& frame) {
  QUIC_BUG_IF(quic_bug_10511_9, !connected_)
      << "Processing PATH_RESPONSE frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  ++stats_.num_path_response_received;
  if (!UpdatePacketContent(PATH_RESPONSE_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnPathResponseFrame(frame);
  }
  MaybeUpdateAckTimeout();
  path_validator_.OnPathResponse(
      frame.data_buffer, last_received_packet_info_.destination_address);
  return connected_;
}
// Handles a CONNECTION_CLOSE frame (Google QUIC, IETF transport, or IETF
// application variant): logs the close reason per variant and tears down
// local connection state, attributing the close to the peer.
bool QuicConnection::OnConnectionCloseFrame(
    const QuicConnectionCloseFrame& frame) {
  QUIC_BUG_IF(quic_bug_10511_10, !connected_)
      << "Processing CONNECTION_CLOSE frame when connection is closed. "
         "Received packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(CONNECTION_CLOSE_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnConnectionCloseFrame(frame);
  }
  switch (frame.close_type) {
    case GOOGLE_QUIC_CONNECTION_CLOSE:
      QUIC_DLOG(INFO) << ENDPOINT << "Received ConnectionClose for connection: "
                      << connection_id() << ", with error: "
                      << QuicErrorCodeToString(frame.quic_error_code) << " ("
                      << frame.error_details << ")";
      break;
    case IETF_QUIC_TRANSPORT_CONNECTION_CLOSE:
      QUIC_DLOG(INFO) << ENDPOINT
                      << "Received Transport ConnectionClose for connection: "
                      << connection_id() << ", with error: "
                      << QuicErrorCodeToString(frame.quic_error_code) << " ("
                      << frame.error_details << ")"
                      << ", transport error code: "
                      << QuicIetfTransportErrorCodeString(
                             static_cast<QuicIetfTransportErrorCodes>(
                                 frame.wire_error_code))
                      << ", error frame type: "
                      << frame.transport_close_frame_type;
      break;
    case IETF_QUIC_APPLICATION_CONNECTION_CLOSE:
      QUIC_DLOG(INFO) << ENDPOINT
                      << "Received Application ConnectionClose for connection: "
                      << connection_id() << ", with error: "
                      << QuicErrorCodeToString(frame.quic_error_code) << " ("
                      << frame.error_details << ")"
                      << ", application error code: " << frame.wire_error_code;
      break;
  }
  if (frame.quic_error_code == QUIC_BAD_MULTIPATH_FLAG) {
    QUIC_LOG_FIRST_N(ERROR, 10)
        << "Unexpected QUIC_BAD_MULTIPATH_FLAG error."
        << " last_received_header: " << last_received_packet_info_.header
        << " encryption_level: " << encryption_level_;
  }
  TearDownLocalConnectionState(frame, ConnectionCloseSource::FROM_PEER);
  return connected_;
}
bool QuicConnection::OnMaxStreamsFrame(const QuicMaxStreamsFrame& frame) {
QUIC_BUG_IF(quic_bug_12714_13, !connected_)
<< "Processing MAX_STREAMS frame when connection is closed. Received "
"packet info: "
<< last_received_packet_info_;
if (!UpdatePacketContent(MAX_STREAMS_FRAME)) {
return false;
}
if (debug_visitor_ != nullptr) {
debug_visitor_->OnMaxStreamsFrame(frame);
}
MaybeUpdateAckTimeout();
return visitor_->OnMaxStreamsFrame(frame) && connected_;
}
bool QuicConnection::OnStreamsBlockedFrame(
const QuicStreamsBlockedFrame& frame) {
QUIC_BUG_IF(quic_bug_10511_11, !connected_)
<< "Processing STREAMS_BLOCKED frame when connection is closed. Received "
"packet info: "
<< last_received_packet_info_;
if (!UpdatePacketContent(STREAMS_BLOCKED_FRAME)) {
return false;
}
if (debug_visitor_ != nullptr) {
debug_visitor_->OnStreamsBlockedFrame(frame);
}
MaybeUpdateAckTimeout();
return visitor_->OnStreamsBlockedFrame(frame) && connected_;
}
// Processes a received (Google QUIC) GOAWAY frame: logs the last good stream
// and reason, refreshes the ack timeout, and notifies the visitor. Returns
// whether the connection is still open after processing.
bool QuicConnection::OnGoAwayFrame(const QuicGoAwayFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_14, !connected_)
      << "Processing GOAWAY frame when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(GOAWAY_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnGoAwayFrame(frame);
  }
  QUIC_DLOG(INFO) << ENDPOINT << "GOAWAY_FRAME received with last good stream: "
                  << frame.last_good_stream_id
                  << " and error: " << QuicErrorCodeToString(frame.error_code)
                  << " and reason: " << frame.reason_phrase;
  MaybeUpdateAckTimeout();
  visitor_->OnGoAway(frame);
  return connected_;
}
// Processes a received WINDOW_UPDATE frame: notifies the debug visitor
// (with the receipt time of the last packet), refreshes the ack timeout,
// and forwards the frame to the session visitor.
bool QuicConnection::OnWindowUpdateFrame(const QuicWindowUpdateFrame& frame) {
  QUIC_BUG_IF(quic_bug_10511_12, !connected_)
      << "Processing WINDOW_UPDATE frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(WINDOW_UPDATE_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnWindowUpdateFrame(
        frame, idle_network_detector_.time_of_last_received_packet());
  }
  QUIC_DVLOG(1) << ENDPOINT << "WINDOW_UPDATE_FRAME received " << frame;
  MaybeUpdateAckTimeout();
  visitor_->OnWindowUpdateFrame(frame);
  return connected_;
}
// Server-only: when the peer (client) has issued an unused connection ID,
// install it on whichever path is still missing one — the default path
// first, then a partially-set-up alternative path.
void QuicConnection::OnClientConnectionIdAvailable() {
  QUICHE_DCHECK(perspective_ == Perspective::IS_SERVER);
  if (!peer_issued_cid_manager_->HasUnusedConnectionId()) {
    return;
  }
  if (default_path_.client_connection_id.IsEmpty()) {
    // Fill the default path first; consuming marks the CID as used.
    const QuicConnectionIdData* unused_cid_data =
        peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
    QUIC_DVLOG(1) << ENDPOINT << "Patch connection ID "
                  << unused_cid_data->connection_id << " to default path";
    default_path_.client_connection_id = unused_cid_data->connection_id;
    default_path_.stateless_reset_token =
        unused_cid_data->stateless_reset_token;
    // The packet creator must not have staged frames/CID before the switch.
    QUICHE_DCHECK(!packet_creator_.HasPendingFrames());
    QUICHE_DCHECK(packet_creator_.GetDestinationConnectionId().IsEmpty());
    packet_creator_.SetClientConnectionId(default_path_.client_connection_id);
    return;
  }
  // Default path already has a CID; patch an initialized alternative path
  // that still lacks one.
  if (alternative_path_.peer_address.IsInitialized() &&
      alternative_path_.client_connection_id.IsEmpty()) {
    const QuicConnectionIdData* unused_cid_data =
        peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
    QUIC_DVLOG(1) << ENDPOINT << "Patch connection ID "
                  << unused_cid_data->connection_id << " to alternative path";
    alternative_path_.client_connection_id = unused_cid_data->connection_id;
    alternative_path_.stateless_reset_token =
        unused_cid_data->stateless_reset_token;
  }
}
// Core handling of a NEW_CONNECTION_ID frame. Closes the connection (and
// returns kProtocolViolation) when the peer uses zero-length connection IDs
// or the CID manager rejects the frame; returns kDuplicateFrame for a
// retransmitted frame; otherwise kOk.
NewConnectionIdResult QuicConnection::OnNewConnectionIdFrameInner(
    const QuicNewConnectionIdFrame& frame) {
  // A peer using zero-length CIDs must never issue new CIDs (RFC 9000).
  if (peer_issued_cid_manager_ == nullptr) {
    CloseConnection(
        IETF_QUIC_PROTOCOL_VIOLATION,
        "Receives NEW_CONNECTION_ID while peer uses zero length connection ID",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return NewConnectionIdResult::kProtocolViolation;
  }
  std::string error_detail;
  bool duplicate_new_connection_id = false;
  QuicErrorCode error = peer_issued_cid_manager_->OnNewConnectionIdFrame(
      frame, &error_detail, &duplicate_new_connection_id);
  if (error != QUIC_NO_ERROR) {
    CloseConnection(error, error_detail,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return NewConnectionIdResult::kProtocolViolation;
  }
  if (duplicate_new_connection_id) {
    // Duplicates are benign (retransmission); no further action or timeout
    // update.
    return NewConnectionIdResult::kDuplicateFrame;
  }
  if (perspective_ == Perspective::IS_SERVER) {
    // A new client CID may let us finish setting up a path.
    OnClientConnectionIdAvailable();
  }
  MaybeUpdateAckTimeout();
  return NewConnectionIdResult::kOk;
}
// Framer callback for a NEW_CONNECTION_ID frame (IETF QUIC only). Delegates
// to OnNewConnectionIdFrameInner and, on success, may kick off multi-port
// path creation. Returns false when packet processing must stop.
bool QuicConnection::OnNewConnectionIdFrame(
    const QuicNewConnectionIdFrame& frame) {
  QUICHE_DCHECK(version().HasIetfQuicFrames());
  QUIC_BUG_IF(quic_bug_10511_13, !connected_)
      << "Processing NEW_CONNECTION_ID frame when connection is closed. "
         "Received packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(NEW_CONNECTION_ID_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnNewConnectionIdFrame(frame);
  }
  NewConnectionIdResult result = OnNewConnectionIdFrameInner(frame);
  switch (result) {
    case NewConnectionIdResult::kOk:
      // A fresh CID can enable an additional (multi-port) path.
      if (multi_port_stats_ != nullptr) {
        MaybeCreateMultiPortPath();
      }
      break;
    case NewConnectionIdResult::kProtocolViolation:
      // The inner handler already closed the connection.
      return false;
    case NewConnectionIdResult::kDuplicateFrame:
      break;
  }
  return true;
}
// Framer callback for a RETIRE_CONNECTION_ID frame (IETF QUIC only). It is a
// protocol violation for the peer to retire a CID when we never issued any;
// otherwise the self-issued CID manager validates and applies the
// retirement. Returns false when packet processing must stop.
bool QuicConnection::OnRetireConnectionIdFrame(
    const QuicRetireConnectionIdFrame& frame) {
  QUICHE_DCHECK(version().HasIetfQuicFrames());
  QUIC_BUG_IF(quic_bug_10511_14, !connected_)
      << "Processing RETIRE_CONNECTION_ID frame when connection is closed. "
         "Received packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(RETIRE_CONNECTION_ID_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnRetireConnectionIdFrame(frame);
  }
  if (self_issued_cid_manager_ == nullptr) {
    CloseConnection(
        IETF_QUIC_PROTOCOL_VIOLATION,
        "Receives RETIRE_CONNECTION_ID while new connection ID is never issued",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  std::string error_detail;
  // The PTO delay bounds how long the retired CID must stay usable.
  QuicErrorCode error = self_issued_cid_manager_->OnRetireConnectionIdFrame(
      frame, sent_packet_manager_.GetPtoDelay(), &error_detail);
  if (error != QUIC_NO_ERROR) {
    CloseConnection(error, error_detail,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  MaybeUpdateAckTimeout();
  return true;
}
// Framer callback for a NEW_TOKEN frame. Only a server may send NEW_TOKEN;
// receiving one as a server is a connection error. Otherwise the token is
// handed to the visitor for later address validation use.
bool QuicConnection::OnNewTokenFrame(const QuicNewTokenFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_15, !connected_)
      << "Processing NEW_TOKEN frame when connection is closed. Received "
         "packet info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(NEW_TOKEN_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnNewTokenFrame(frame);
  }
  if (perspective_ == Perspective::IS_SERVER) {
    CloseConnection(QUIC_INVALID_NEW_TOKEN, "Server received new token frame.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  MaybeUpdateAckTimeout();
  visitor_->OnNewTokenReceived(frame.token);
  return true;
}
bool QuicConnection::OnMessageFrame(const QuicMessageFrame& frame) {
QUIC_BUG_IF(quic_bug_12714_16, !connected_)
<< "Processing MESSAGE frame when connection is closed. Received packet "
"info: "
<< last_received_packet_info_;
if (!UpdatePacketContent(MESSAGE_FRAME)) {
return false;
}
if (debug_visitor_ != nullptr) {
debug_visitor_->OnMessageFrame(frame);
}
MaybeUpdateAckTimeout();
visitor_->OnMessageReceived(
absl::string_view(frame.data, frame.message_length));
return connected_;
}
// Framer callback for a HANDSHAKE_DONE frame. HANDSHAKE_DONE is TLS-only and
// server-to-client only; either violation closes the connection. Note the
// version/perspective checks deliberately run BEFORE UpdatePacketContent,
// unlike most frame handlers in this file.
bool QuicConnection::OnHandshakeDoneFrame(const QuicHandshakeDoneFrame& frame) {
  QUIC_BUG_IF(quic_bug_10511_15, !connected_)
      << "Processing HANDSHAKE_DONE frame when connection "
         "is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!version().UsesTls()) {
    CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION,
                    "Handshake done frame is unsupported",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  if (perspective_ == Perspective::IS_SERVER) {
    CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION,
                    "Server received handshake done frame.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  if (!UpdatePacketContent(HANDSHAKE_DONE_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnHandshakeDoneFrame(frame);
  }
  MaybeUpdateAckTimeout();
  visitor_->OnHandshakeDoneReceived();
  return connected_;
}
// Framer callback for an ACK_FREQUENCY frame. The frame is only honored when
// the extension was negotiated (can_receive_ack_frequency_frame_) and only
// in the APPLICATION_DATA packet number space; otherwise it is dropped with
// rate-limited error logging.
bool QuicConnection::OnAckFrequencyFrame(const QuicAckFrequencyFrame& frame) {
  QUIC_BUG_IF(quic_bug_10511_16, !connected_)
      << "Processing ACK_FREQUENCY frame when connection "
         "is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnAckFrequencyFrame(frame);
  }
  if (!UpdatePacketContent(ACK_FREQUENCY_FRAME)) {
    return false;
  }
  if (!can_receive_ack_frequency_frame_) {
    QUIC_LOG_EVERY_N_SEC(ERROR, 120) << "Get unexpected AckFrequencyFrame.";
    return false;
  }
  // Bug fix: the original declared `packet_number_space` inside the if
  // condition as `auto x = GetPacketNumberSpace(...) == APPLICATION_DATA`,
  // which (by operator precedence) bound the variable to the *bool* result
  // of the comparison, so the else-branch logged 0/false instead of the
  // actual packet number space. Declare the space first, then compare.
  const auto packet_number_space = QuicUtils::GetPacketNumberSpace(
      last_received_packet_info_.decrypted_level);
  if (packet_number_space == APPLICATION_DATA) {
    uber_received_packet_manager_.OnAckFrequencyFrame(frame);
  } else {
    QUIC_LOG_EVERY_N_SEC(ERROR, 120)
        << "Get AckFrequencyFrame in packet number space "
        << packet_number_space;
  }
  MaybeUpdateAckTimeout();
  return true;
}
// Framer callback for a RESET_STREAM_AT frame (reliable stream reset
// extension). Receiving one without having negotiated the extension is a
// protocol violation. Note the debug visitor is notified BEFORE
// UpdatePacketContent here, unlike most frame handlers in this file.
bool QuicConnection::OnResetStreamAtFrame(const QuicResetStreamAtFrame& frame) {
  QUIC_BUG_IF(OnResetStreamAtFrame_connection_closed, !connected_)
      << "Processing RESET_STREAM_AT frame while the connection is closed. "
         "Received packet info: "
      << last_received_packet_info_;
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnResetStreamAtFrame(frame);
  }
  if (!UpdatePacketContent(RESET_STREAM_AT_FRAME)) {
    return false;
  }
  if (!reliable_stream_reset_) {
    CloseConnection(IETF_QUIC_PROTOCOL_VIOLATION,
                    "Received RESET_STREAM_AT while not negotiated.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  MaybeUpdateAckTimeout();
  visitor_->OnResetStreamAt(frame);
  return true;
}
// Framer callback for a BLOCKED frame: logs it, refreshes the ack timeout,
// notifies the visitor, and counts the frame in connection stats.
bool QuicConnection::OnBlockedFrame(const QuicBlockedFrame& frame) {
  QUIC_BUG_IF(quic_bug_12714_17, !connected_)
      << "Processing BLOCKED frame when connection is closed. Received packet "
         "info: "
      << last_received_packet_info_;
  if (!UpdatePacketContent(BLOCKED_FRAME)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnBlockedFrame(frame);
  }
  QUIC_DLOG(INFO) << ENDPOINT
                  << "BLOCKED_FRAME received for stream: " << frame.stream_id;
  MaybeUpdateAckTimeout();
  visitor_->OnBlockedFrame(frame);
  stats_.blocked_frames_received++;
  return connected_;
}
// Framer callback invoked after every frame of the current packet has been
// processed. Handles connectivity-probing accounting, (gQUIC-only)
// probing/migration responses, ack-timeout updates for packets that do not
// themselves instigate acks, and end-of-packet cleanup.
void QuicConnection::OnPacketComplete() {
  if (!connected_) {
    ClearLastFrames();
    return;
  }
  if (IsCurrentPacketConnectivityProbing()) {
    // Probing classification only applies to the gQUIC padded-PING style.
    QUICHE_DCHECK(!version().HasIetfQuicFrames() && !ignore_gquic_probing_);
    ++stats_.num_connectivity_probing_received;
  }
  QUIC_DVLOG(1) << ENDPOINT << "Got"
                << (SupportsMultiplePacketNumberSpaces()
                        ? (" " +
                           EncryptionLevelToString(
                               last_received_packet_info_.decrypted_level))
                        : "")
                << " packet " << last_received_packet_info_.header.packet_number
                << " for "
                << GetServerConnectionIdAsRecipient(
                       last_received_packet_info_.header, perspective_);
  QUIC_DLOG_IF(INFO, current_packet_content_ == SECOND_FRAME_IS_PADDING)
      << ENDPOINT << "Received a padded PING packet. is_probing: "
      << IsCurrentPacketConnectivityProbing();
  if (!version().HasIetfQuicFrames() && !ignore_gquic_probing_) {
    MaybeRespondToConnectivityProbingOrMigration();
  }
  current_effective_peer_migration_type_ = NO_CHANGE;
  // NOTE(review): only the non-ack-instigating case updates the ack timeout
  // here; ack-instigating packets presumably updated it already via
  // MaybeUpdateAckTimeout in the individual frame handlers — confirm.
  if (!should_last_packet_instigate_acks_) {
    uber_received_packet_manager_.MaybeUpdateAckTimeout(
        should_last_packet_instigate_acks_,
        last_received_packet_info_.decrypted_level,
        last_received_packet_info_.header.packet_number,
        last_received_packet_info_.receipt_time, clock_->ApproximateNow(),
        sent_packet_manager_.GetRttStats());
  }
  ClearLastFrames();
  CloseIfTooManyOutstandingSentPackets();
}
// gQUIC-only (non-IETF-frames): informs the visitor of a received
// connectivity-probing packet, or — on the client — of a speculative probing
// packet. Servers that did not classify the packet as probing do nothing.
void QuicConnection::MaybeRespondToConnectivityProbingOrMigration() {
  QUICHE_DCHECK(!version().HasIetfQuicFrames());
  if (IsCurrentPacketConnectivityProbing()) {
    // The `true` flag marks the packet as a confirmed connectivity probe.
    visitor_->OnPacketReceived(last_received_packet_info_.destination_address,
                               last_received_packet_info_.source_address,
                               true);
    return;
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    // Clients treat any other packet as a possible (speculative) probe
    // response; the visitor decides what to do with it.
    QUIC_DVLOG(1) << ENDPOINT
                  << "Received a speculative connectivity probing packet for "
                  << GetServerConnectionIdAsRecipient(
                         last_received_packet_info_.header, perspective_)
                  << " from ip:port: "
                  << last_received_packet_info_.source_address.ToString()
                  << " to ip:port: "
                  << last_received_packet_info_.destination_address.ToString();
    visitor_->OnPacketReceived(last_received_packet_info_.destination_address,
                               last_received_packet_info_.source_address,
                               false);
    return;
  }
}
// Client-only: returns true iff `token` matches the stateless reset token
// associated with the default path.
bool QuicConnection::IsValidStatelessResetToken(
    const StatelessResetToken& token) const {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (!default_path_.stateless_reset_token.has_value()) {
    return false;
  }
  return QuicUtils::AreStatelessResetTokensEqual(
      token, *default_path_.stateless_reset_token);
}
// Client-only: handles an authenticated IETF stateless reset. A reset on the
// default path tears the connection down; a reset on a not-yet-validated
// alternative path merely cancels path validation; a reset on an unknown
// socket is a bug.
void QuicConnection::OnAuthenticatedIetfStatelessResetPacket(
    const QuicIetfStatelessResetPacket& ) {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (!IsDefaultPath(last_received_packet_info_.destination_address,
                     last_received_packet_info_.source_address)) {
    if (IsAlternativePath(last_received_packet_info_.destination_address,
                          GetEffectivePeerAddressFromCurrentPacket())) {
      // A reset on an already-validated alternate path should be impossible.
      QUIC_BUG_IF(quic_bug_12714_18, alternative_path_.validated)
          << "STATELESS_RESET received on alternate path after it's "
             "validated.";
      path_validator_.CancelPathValidation();
      ++stats_.num_stateless_resets_on_alternate_path;
    } else {
      QUIC_BUG(quic_bug_10511_17)
          << "Received Stateless Reset on unknown socket.";
    }
    return;
  }
  const std::string error_details = "Received stateless reset.";
  QUIC_CODE_COUNT(quic_tear_down_local_connection_on_stateless_reset);
  TearDownLocalConnectionState(QUIC_PUBLIC_RESET, NO_IETF_QUIC_ERROR,
                               error_details, ConnectionCloseSource::FROM_PEER);
}
// Framer callback fired when the key phase is updated: resets per-phase
// bookkeeping, cancels the pending old-key-discard alarm, and notifies the
// visitor with the update reason.
void QuicConnection::OnKeyUpdate(KeyUpdateReason reason) {
  QUICHE_DCHECK(support_key_update_for_connection_);
  QUIC_DLOG(INFO) << ENDPOINT << "Key phase updated for " << reason;
  // Track the lowest packet sent in the new phase from scratch.
  lowest_packet_sent_in_current_key_phase_.Clear();
  stats_.key_update_count++;
  // Any pending discard of previous keys belongs to the old phase.
  discard_previous_one_rtt_keys_alarm().Cancel();
  visitor_->OnKeyUpdate(reason);
}
void QuicConnection::OnDecryptedFirstPacketInKeyPhase() {
QUIC_DLOG(INFO) << ENDPOINT << "OnDecryptedFirstPacketInKeyPhase";
discard_previous_one_rtt_keys_alarm().Set(
clock_->ApproximateNow() + sent_packet_manager_.GetPtoDelay() * 3);
}
// Framer callback: delegates key advancement to the visitor, which owns the
// crypto state, and returns the decrypter it creates.
std::unique_ptr<QuicDecrypter>
QuicConnection::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
  QUIC_DLOG(INFO) << ENDPOINT << "AdvanceKeysAndCreateCurrentOneRttDecrypter";
  auto decrypter = visitor_->AdvanceKeysAndCreateCurrentOneRttDecrypter();
  return decrypter;
}
// Framer callback: asks the visitor (owner of the crypto state) for an
// encrypter for the current 1-RTT key phase.
std::unique_ptr<QuicEncrypter> QuicConnection::CreateCurrentOneRttEncrypter() {
  QUIC_DLOG(INFO) << ENDPOINT << "CreateCurrentOneRttEncrypter";
  auto encrypter = visitor_->CreateCurrentOneRttEncrypter();
  return encrypter;
}
// Resets per-packet state once the current packet has been fully handled.
void QuicConnection::ClearLastFrames() {
  should_last_packet_instigate_acks_ = false;
}
// Closes the connection when the gap between the largest sent packet and the
// least unacked packet exceeds max_tracked_packets_, which would otherwise
// let sent-packet tracking state grow without bound.
void QuicConnection::CloseIfTooManyOutstandingSentPackets() {
  const bool should_close =
      sent_packet_manager_.GetLargestSentPacket().IsInitialized() &&
      sent_packet_manager_.GetLargestSentPacket() >
          sent_packet_manager_.GetLeastUnacked() + max_tracked_packets_;
  if (should_close) {
    CloseConnection(
        QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS,
        absl::StrCat("More than ", max_tracked_packets_,
                     " outstanding, least_unacked: ",
                     sent_packet_manager_.GetLeastUnacked().ToUint64(),
                     ", packets_processed: ", stats_.packets_processed,
                     ", last_decrypted_packet_level: ",
                     EncryptionLevelToString(
                         last_received_packet_info_.decrypted_level)),
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  }
}
// Returns a freshly updated ACK frame for the packet number space of the
// current encryption level. Callers must only invoke this when there is
// something to ack (enforced by the DCHECK).
const QuicFrame QuicConnection::GetUpdatedAckFrame() {
  QUICHE_DCHECK(!uber_received_packet_manager_.IsAckFrameEmpty(
      QuicUtils::GetPacketNumberSpace(encryption_level_)))
      << "Try to retrieve an empty ACK frame";
  return uber_received_packet_manager_.GetUpdatedAckFrame(
      QuicUtils::GetPacketNumberSpace(encryption_level_),
      clock_->ApproximateNow());
}
// Returns the smallest packet number still tracked as unacked by the
// sent-packet manager.
QuicPacketNumber QuicConnection::GetLeastUnacked() const {
  const QuicPacketNumber least_unacked =
      sent_packet_manager_.GetLeastUnacked();
  return least_unacked;
}
// Returns true — after notifying the visitor — when the writer is currently
// write-blocked; returns false when writing may proceed.
bool QuicConnection::HandleWriteBlocked() {
  if (writer_->IsWriteBlocked()) {
    visitor_->OnWriteBlocked();
    return true;
  }
  return false;
}
// Decides whether (and when) to send in response to a just-processed packet.
// Without deferral, writes immediately. With deferral, manipulates the send
// alarm: saves any existing deadline, and either schedules an immediate
// (deferred-delay) send, keeps/restores the earlier deadline, or does
// nothing when the visitor has nothing to write.
void QuicConnection::MaybeSendInResponseToPacket() {
  if (!connected_) {
    return;
  }
  if (IsMissingDestinationConnectionID()) {
    return;
  }
  // If the writer is blocked, don't attempt to send.
  if (HandleWriteBlocked()) {
    return;
  }
  if (!defer_send_in_response_to_packets_) {
    WriteIfNotBlocked();
    return;
  }
  if (!visitor_->WillingAndAbleToWrite()) {
    QUIC_DVLOG(1)
        << "No send alarm after processing packet. !WillingAndAbleToWrite.";
    return;
  }
  // Remember the alarm's prior deadline so it can be restored if CanWrite
  // (below, which may itself arm the alarm) produces a later one.
  QuicTime max_deadline = QuicTime::Infinite();
  if (send_alarm().IsSet()) {
    QUIC_DVLOG(1) << "Send alarm already set to " << send_alarm().deadline();
    max_deadline = send_alarm().deadline();
    send_alarm().Cancel();
  }
  if (CanWrite(HAS_RETRANSMITTABLE_DATA)) {
    // CanWrite returning true must leave the alarm unset.
    QUIC_BUG_IF(quic_send_alarm_set_with_data_to_send, send_alarm().IsSet());
    QUIC_DVLOG(1) << "Immediate send alarm scheduled after processing packet.";
    send_alarm().Set(clock_->ApproximateNow() +
                     sent_packet_manager_.GetDeferredSendAlarmDelay());
    return;
  }
  if (send_alarm().IsSet()) {
    // CanWrite armed the alarm; never let it fire later than the deadline
    // that was already pending before this packet.
    if (send_alarm().deadline() > max_deadline) {
      QUIC_DVLOG(1)
          << "Send alarm restored after processing packet. previous deadline:"
          << max_deadline
          << ", deadline from CanWrite:" << send_alarm().deadline();
      send_alarm().Update(max_deadline, QuicTime::Delta::Zero());
    } else {
      QUIC_DVLOG(1) << "Future send alarm scheduled after processing packet.";
    }
    return;
  }
  if (max_deadline != QuicTime::Infinite()) {
    // CanWrite did not arm the alarm; restore the pre-existing deadline.
    QUIC_DVLOG(1) << "Send alarm restored after processing packet.";
    send_alarm().Set(max_deadline);
    return;
  }
  QUIC_DVLOG(1) << "No send alarm after processing packet. Other reasons.";
}
// Sends `write_length` bytes of crypto (handshake) data at `offset` for the
// given encryption level via the packet creator. Returns the number of bytes
// consumed; an empty write is a bug and consumes nothing.
size_t QuicConnection::SendCryptoData(EncryptionLevel level,
                                      size_t write_length,
                                      QuicStreamOffset offset) {
  if (write_length == 0) {
    QUIC_BUG(quic_bug_10511_18) << "Attempt to send empty crypto frame";
    return 0;
  }
  // Flush any partially-built packet when the flusher goes out of scope.
  ScopedPacketFlusher flusher(this);
  return packet_creator_.ConsumeCryptoData(level, write_length, offset);
}
// Sends stream data through the packet creator. Before the handshake is
// confirmed, a coalescing server defers half-RTT data while a PTO is in
// flight, and retransmits handshake data when only an INITIAL packet is
// currently coalesced. Returns the amount of data consumed.
QuicConsumedData QuicConnection::SendStreamData(QuicStreamId id,
                                                size_t write_length,
                                                QuicStreamOffset offset,
                                                StreamSendingState state) {
  if (state == NO_FIN && write_length == 0) {
    QUIC_BUG(quic_bug_10511_19) << "Attempt to send empty stream frame";
    return QuicConsumedData(0, false);
  }
  if (perspective_ == Perspective::IS_SERVER &&
      version().CanSendCoalescedPackets() && !IsHandshakeConfirmed()) {
    if (in_probe_time_out_ && coalesced_packet_.NumberOfPackets() == 0u) {
      // During PTO, don't let half-RTT data displace the PTO retransmission.
      QUIC_CODE_COUNT(quic_try_to_send_half_rtt_data_when_pto_fires);
      return QuicConsumedData(0, false);
    }
    if (coalesced_packet_.ContainsPacketOfEncryptionLevel(ENCRYPTION_INITIAL) &&
        coalesced_packet_.NumberOfPackets() == 1u) {
      // Pad the INITIAL-only coalesced packet with handshake data, if any.
      sent_packet_manager_.RetransmitDataOfSpaceIfAny(HANDSHAKE_DATA);
    }
  }
  // Flush any partially-built packet when the flusher goes out of scope.
  ScopedPacketFlusher flusher(this);
  return packet_creator_.ConsumeData(id, write_length, offset, state);
}
// Queues a retransmittable control frame for sending. With multiple packet
// number spaces, non-PING control frames are refused at INITIAL/HANDSHAKE
// encryption levels. PING frames flush immediately and are counted; BLOCKED
// frames are counted. Returns whether the frame was consumed.
bool QuicConnection::SendControlFrame(const QuicFrame& frame) {
  if (SupportsMultiplePacketNumberSpaces() &&
      (encryption_level_ == ENCRYPTION_INITIAL ||
       encryption_level_ == ENCRYPTION_HANDSHAKE) &&
      frame.type != PING_FRAME) {
    QUIC_DVLOG(1) << ENDPOINT << "Failed to send control frame: " << frame
                  << " at encryption level: " << encryption_level_;
    return false;
  }
  ScopedPacketFlusher flusher(this);
  const bool consumed =
      packet_creator_.ConsumeRetransmittableControlFrame(frame);
  if (!consumed) {
    QUIC_DVLOG(1) << ENDPOINT << "Failed to send control frame: " << frame;
    return false;
  }
  if (frame.type == PING_FRAME) {
    // Flush PINGs immediately so RTT measurement isn't delayed by bundling.
    packet_creator_.FlushCurrentPacket();
    stats_.ping_frames_sent++;
    if (debug_visitor_ != nullptr) {
      debug_visitor_->OnPingSent();
    }
  }
  if (frame.type == BLOCKED_FRAME) {
    stats_.blocked_frames_sent++;
  }
  return true;
}
// Called when stream `id` is reset. For an actual error, flushes the current
// packet if it already contains frames for that stream so they are not
// bundled with data the peer will discard.
void QuicConnection::OnStreamReset(QuicStreamId id,
                                   QuicRstStreamErrorCode error) {
  if (error == QUIC_STREAM_NO_ERROR) {
    return;
  }
  if (packet_creator_.HasPendingStreamFramesOfStream(id)) {
    ScopedPacketFlusher flusher(this);
    packet_creator_.FlushCurrentPacket();
  }
}
// Refreshes derived statistics (RTTs, bandwidth estimate, send-algorithm
// stats, MTUs) in stats_ and returns a reference to it.
const QuicConnectionStats& QuicConnection::GetStats() {
  const RttStats* rtt_stats = sent_packet_manager_.GetRttStats();
  // Fall back to the initial RTT when no sample has been taken yet.
  QuicTime::Delta min_rtt = rtt_stats->min_rtt();
  if (min_rtt.IsZero()) {
    min_rtt = rtt_stats->initial_rtt();
  }
  stats_.min_rtt_us = min_rtt.ToMicroseconds();
  QuicTime::Delta srtt = rtt_stats->SmoothedOrInitialRtt();
  stats_.srtt_us = srtt.ToMicroseconds();
  stats_.estimated_bandwidth = sent_packet_manager_.BandwidthEstimate();
  sent_packet_manager_.GetSendAlgorithm()->PopulateConnectionStats(&stats_);
  stats_.egress_mtu = long_term_mtu_;
  stats_.ingress_mtu = largest_received_packet_size_;
  return stats_;
}
// Framer callback: buffers the remainder of a coalesced datagram for later
// processing.
void QuicConnection::OnCoalescedPacket(const QuicEncryptedPacket& packet) {
  QueueCoalescedPacket(packet);
}
// Framer callback for a packet that failed decryption. Possibly buffers the
// packet for retry once keys arrive, enforces the AEAD integrity limit when
// a key was present but authentication failed (TLS), and counts TLS 0-RTT
// packets a server received after discarding its 0-RTT decrypter.
void QuicConnection::OnUndecryptablePacket(const QuicEncryptedPacket& packet,
                                           EncryptionLevel decryption_level,
                                           bool has_decryption_key) {
  QUIC_DVLOG(1) << ENDPOINT << "Received undecryptable packet of length "
                << packet.length() << " with"
                << (has_decryption_key ? "" : "out") << " key at level "
                << decryption_level
                << " while connection is at encryption level "
                << encryption_level_;
  QUICHE_DCHECK(EncryptionLevelIsValid(decryption_level));
  if (encryption_level_ != ENCRYPTION_FORWARD_SECURE) {
    ++stats_.undecryptable_packets_received_before_handshake_complete;
  }
  // Buffer the packet when a later key might decrypt it.
  const bool should_enqueue =
      ShouldEnqueueUnDecryptablePacket(decryption_level, has_decryption_key);
  if (should_enqueue) {
    QueueUndecryptablePacket(packet, decryption_level);
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnUndecryptablePacket(decryption_level,
                                          !should_enqueue);
  }
  if (has_decryption_key) {
    // Authentication failure with the key present counts against the AEAD
    // integrity limit (RFC 9001); exceeding it requires closing.
    stats_.num_failed_authentication_packets_received++;
    if (version().UsesTls()) {
      QUICHE_DCHECK(framer_.GetDecrypter(decryption_level));
      const QuicPacketCount integrity_limit =
          framer_.GetDecrypter(decryption_level)->GetIntegrityLimit();
      QUIC_DVLOG(2) << ENDPOINT << "Checking AEAD integrity limits:"
                    << " num_failed_authentication_packets_received="
                    << stats_.num_failed_authentication_packets_received
                    << " integrity_limit=" << integrity_limit;
      if (stats_.num_failed_authentication_packets_received >=
          integrity_limit) {
        const std::string error_details = absl::StrCat(
            "decrypter integrity limit reached:"
            " num_failed_authentication_packets_received=",
            stats_.num_failed_authentication_packets_received,
            " integrity_limit=", integrity_limit);
        CloseConnection(QUIC_AEAD_LIMIT_REACHED, error_details,
                        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      }
    }
  }
  if (version().UsesTls() && perspective_ == Perspective::IS_SERVER &&
      decryption_level == ENCRYPTION_ZERO_RTT && !has_decryption_key &&
      had_zero_rtt_decrypter_) {
    // The server once had a 0-RTT decrypter but already discarded it.
    QUIC_CODE_COUNT_N(
        quic_server_received_tls_zero_rtt_packet_after_discarding_decrypter, 1,
        3);
    stats_
        .num_tls_server_zero_rtt_packets_received_after_discarding_decrypter++;
  }
}
// Returns whether an undecryptable packet is worth buffering for a retry:
// only when we lacked the key, the handshake is still in progress, the
// buffer has room, and a future key could plausibly decrypt it.
bool QuicConnection::ShouldEnqueueUnDecryptablePacket(
    EncryptionLevel decryption_level, bool has_decryption_key) const {
  if (has_decryption_key) {
    // We had the key and failed anyway; a retry cannot succeed.
    return false;
  }
  if (IsHandshakeComplete()) {
    // No new keys are coming after the handshake completes.
    return false;
  }
  if (undecryptable_packets_.size() >= max_undecryptable_packets_) {
    return false;
  }
  if (version().KnowsWhichDecrypterToUse() &&
      decryption_level == ENCRYPTION_INITIAL) {
    // The INITIAL decrypter is always installed; its absence means it was
    // intentionally dropped, so the packet can never be decrypted.
    return false;
  }
  if (perspective_ == Perspective::IS_CLIENT && version().UsesTls() &&
      decryption_level == ENCRYPTION_ZERO_RTT) {
    // Only the client sends 0-RTT data; receiving it is a peer bug.
    QUIC_PEER_BUG(quic_peer_bug_client_received_zero_rtt)
        << "Client received a Zero RTT packet, not buffering.";
    return false;
  }
  return true;
}
// Produces a human-readable summary of the buffered undecryptable packets:
// the count followed by one "[level, length]" entry per buffered packet.
std::string QuicConnection::UndecryptablePacketsInfo() const {
  std::string summary = absl::StrCat(
      "num_undecryptable_packets: ", undecryptable_packets_.size(), " {");
  for (const auto& buffered : undecryptable_packets_) {
    absl::StrAppend(&summary, "[",
                    EncryptionLevelToString(buffered.encryption_level), ", ",
                    buffered.packet->length(), "]");
  }
  absl::StrAppend(&summary, "}");
  return summary;
}
// Entry point for a received UDP datagram. Records per-packet receive info,
// initializes default-path/peer addresses on first contact, applies
// anti-amplification accounting, parses the packet via the framer, and then
// performs post-processing (coalesced/undecryptable packet handling,
// response sending, ping alarm, CID retirement).
void QuicConnection::ProcessUdpPacket(const QuicSocketAddress& self_address,
                                      const QuicSocketAddress& peer_address,
                                      const QuicReceivedPacket& packet) {
  if (!connected_) {
    return;
  }
  QUIC_DVLOG(2) << ENDPOINT << "Received encrypted " << packet.length()
                << " bytes:" << std::endl
                << quiche::QuicheTextUtils::HexDump(
                       absl::string_view(packet.data(), packet.length()));
  // Re-entrant processing would corrupt current_packet_data_ bookkeeping.
  QUIC_BUG_IF(quic_bug_12714_21, current_packet_data_ != nullptr)
      << "ProcessUdpPacket must not be called while processing a packet.";
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnPacketReceived(self_address, peer_address, packet);
  }
  last_received_packet_info_ =
      ReceivedPacketInfo(self_address, peer_address, packet.receipt_time(),
                         packet.length(), packet.ecn_codepoint());
  current_packet_data_ = packet.data();
  if (!default_path_.self_address.IsInitialized()) {
    default_path_.self_address = last_received_packet_info_.destination_address;
  } else if (default_path_.self_address != self_address &&
             expected_server_preferred_address_.IsInitialized() &&
             self_address.Normalized() ==
                 expected_server_preferred_address_.Normalized()) {
    // A packet sent to the server's preferred address is treated as if it
    // arrived on the default path; remember the actual destination.
    last_received_packet_info_.destination_address = default_path_.self_address;
    last_received_packet_info_.actual_destination_address = self_address;
  }
  if (!direct_peer_address_.IsInitialized()) {
    if (perspective_ == Perspective::IS_CLIENT) {
      AddKnownServerAddress(last_received_packet_info_.source_address);
    }
    UpdatePeerAddress(last_received_packet_info_.source_address);
  }
  if (!default_path_.peer_address.IsInitialized()) {
    const QuicSocketAddress effective_peer_addr =
        GetEffectivePeerAddressFromCurrentPacket();
    // The effective peer address can differ from the direct one (e.g.
    // behind a proxy); prefer it when available.
    default_path_.peer_address = effective_peer_addr.IsInitialized()
                                     ? effective_peer_addr
                                     : direct_peer_address_;
  }
  stats_.bytes_received += packet.length();
  ++stats_.packets_received;
  // Anti-amplification: count bytes received on the default path before the
  // peer address is validated.
  if (IsDefaultPath(last_received_packet_info_.destination_address,
                    last_received_packet_info_.source_address) &&
      EnforceAntiAmplificationLimit()) {
    last_received_packet_info_.received_bytes_counted = true;
    default_path_.bytes_received_before_address_validation +=
        last_received_packet_info_.length;
  }
  // Warn (but proceed) when the packet's receipt time is implausibly far
  // from the current clock (more than two minutes either way).
  if (std::abs((packet.receipt_time() - clock_->ApproximateNow()).ToSeconds()) >
      2 * 60) {
    QUIC_LOG(WARNING) << "(Formerly quic_bug_10511_21): Packet receipt time: "
                      << packet.receipt_time().ToDebuggingValue()
                      << " too far from current time: "
                      << clock_->ApproximateNow().ToDebuggingValue();
  }
  QUIC_DVLOG(1) << ENDPOINT << "time of last received packet: "
                << packet.receipt_time().ToDebuggingValue() << " from peer "
                << last_received_packet_info_.source_address << ", to "
                << last_received_packet_info_.destination_address;
  ScopedPacketFlusher flusher(this);
  if (!framer_.ProcessPacket(packet)) {
    // The packet could not be parsed/decrypted; still try any buffered
    // coalesced packets before giving up on this datagram.
    QUIC_DVLOG(1) << ENDPOINT
                  << "Unable to process packet. Last packet processed: "
                  << last_received_packet_info_.header.packet_number;
    current_packet_data_ = nullptr;
    is_current_packet_connectivity_probing_ = false;
    MaybeProcessCoalescedPackets();
    return;
  }
  ++stats_.packets_processed;
  QUIC_DLOG_IF(INFO, active_effective_peer_migration_type_ != NO_CHANGE)
      << "sent_packet_manager_.GetLargestObserved() = "
      << sent_packet_manager_.GetLargestObserved()
      << ", highest_packet_sent_before_effective_peer_migration_ = "
      << highest_packet_sent_before_effective_peer_migration_;
  // gQUIC peer-migration validation: once the peer acknowledges a packet
  // sent after the migration started, consider the migration validated.
  if (!framer_.version().HasIetfQuicFrames() &&
      active_effective_peer_migration_type_ != NO_CHANGE &&
      sent_packet_manager_.GetLargestObserved().IsInitialized() &&
      (!highest_packet_sent_before_effective_peer_migration_.IsInitialized() ||
       sent_packet_manager_.GetLargestObserved() >
           highest_packet_sent_before_effective_peer_migration_)) {
    if (perspective_ == Perspective::IS_SERVER) {
      OnEffectivePeerMigrationValidated(true);
    }
  }
  if (!MaybeProcessCoalescedPackets()) {
    MaybeProcessUndecryptablePackets();
    MaybeSendInResponseToPacket();
  }
  SetPingAlarm();
  RetirePeerIssuedConnectionIdsNoLongerOnPath();
  current_packet_data_ = nullptr;
  is_current_packet_connectivity_probing_ = false;
}
// Called when a previously blocked writer becomes writable again: clears the
// writer's blocked state and resumes writing.
void QuicConnection::OnBlockedWriterCanWrite() {
  writer_->SetWritable();
  OnCanWrite();
}
// Writes as much pending data as allowed: queued packets first, then any
// overdue ACKs, then visitor data. Re-arms the send alarm when the visitor
// still has data but this round did not arm it.
void QuicConnection::OnCanWrite() {
  if (!connected_) {
    return;
  }
  if (writer_->IsWriteBlocked()) {
    // Being called while blocked is a caller bug; close to avoid spinning.
    const std::string error_details =
        "Writer is blocked while calling OnCanWrite.";
    QUIC_BUG(quic_bug_10511_22) << ENDPOINT << error_details;
    CloseConnection(QUIC_INTERNAL_ERROR, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  ScopedPacketFlusher flusher(this);
  WriteQueuedPackets();
  // Send ACKs whose timeout has already fired before any new data.
  const QuicTime ack_timeout =
      uber_received_packet_manager_.GetEarliestAckTimeout();
  if (ack_timeout.IsInitialized() && ack_timeout <= clock_->ApproximateNow()) {
    if (SupportsMultiplePacketNumberSpaces()) {
      SendAllPendingAcks();
    } else {
      SendAck();
    }
  }
  if (!CanWrite(HAS_RETRANSMITTABLE_DATA)) {
    return;
  }
  visitor_->OnCanWrite();
  // The visitor may not have exhausted its data; make sure a future write
  // is scheduled if we can still send.
  if (visitor_->WillingAndAbleToWrite() && !send_alarm().IsSet() &&
      CanWrite(HAS_RETRANSMITTABLE_DATA)) {
    send_alarm().Set(clock_->ApproximateNow());
  }
}
// Send-alarm callback: attempts a write unless the writer is blocked.
void QuicConnection::OnSendAlarm() {
  QUICHE_DCHECK(connected());
  WriteIfNotBlocked();
}
// Writes pending data unless blocked. Refuses to write while the framer is
// mid-packet (re-entrancy bug) or while the destination connection ID is
// still unknown.
void QuicConnection::WriteIfNotBlocked() {
  if (framer().is_processing_packet()) {
    QUIC_BUG(connection_write_mid_packet_processing)
        << ENDPOINT << "Tried to write in mid of packet processing";
    return;
  }
  if (IsMissingDestinationConnectionID()) {
    return;
  }
  if (!HandleWriteBlocked()) {
    OnCanWrite();
  }
}
// On a path change with IETF frames and a peer-issued CID manager, queued
// packets were built for the old path's connection IDs, so drop them rather
// than send them on the new path.
void QuicConnection::MaybeClearQueuedPacketsOnPathChange() {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  if (peer_issued_cid_manager_ == nullptr) {
    return;
  }
  if (HasQueuedPackets()) {
    ClearQueuedPackets();
  }
}
// Client-only: replaces the server connection ID chosen for the initial
// packets (e.g. after a Retry or the server's first response). With IETF
// frames, also keeps the peer-issued CID manager in sync — dropping it for
// a zero-length CID, replacing the tracked CID, or creating the manager on
// first use.
void QuicConnection::ReplaceInitialServerConnectionId(
    const QuicConnectionId& new_server_connection_id) {
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT);
  if (version().HasIetfQuicFrames()) {
    if (new_server_connection_id.IsEmpty()) {
      // Zero-length server CID: the peer cannot issue CIDs, so no manager.
      peer_issued_cid_manager_ = nullptr;
    } else {
      if (peer_issued_cid_manager_ != nullptr) {
        QUIC_BUG_IF(quic_bug_12714_22,
                    !peer_issued_cid_manager_->IsConnectionIdActive(
                        default_path_.server_connection_id))
            << "Connection ID replaced header is no longer active. old id: "
            << default_path_.server_connection_id
            << " new_id: " << new_server_connection_id;
        peer_issued_cid_manager_->ReplaceConnectionId(
            default_path_.server_connection_id, new_server_connection_id);
      } else {
        peer_issued_cid_manager_ =
            std::make_unique<QuicPeerIssuedConnectionIdManager>(
                kMinNumOfActiveConnectionIds, new_server_connection_id, clock_,
                alarm_factory_, this, context());
      }
    }
  }
  default_path_.server_connection_id = new_server_connection_id;
  packet_creator_.SetServerConnectionId(default_path_.server_connection_id);
}
// Server-only (IETF frames): given a server connection ID, finds the client
// connection ID and stateless reset token to use with it — reusing the
// default or alternative path's pair when the server CID matches, otherwise
// consuming a fresh unused client CID. Outputs are left untouched when no
// unused CID is available.
void QuicConnection::FindMatchingOrNewClientConnectionIdOrToken(
    const PathState& default_path, const PathState& alternative_path,
    const QuicConnectionId& server_connection_id,
    QuicConnectionId* client_connection_id,
    std::optional<StatelessResetToken>* stateless_reset_token) {
  QUICHE_DCHECK(perspective_ == Perspective::IS_SERVER &&
                version().HasIetfQuicFrames());
  if (peer_issued_cid_manager_ == nullptr ||
      server_connection_id == default_path.server_connection_id) {
    *client_connection_id = default_path.client_connection_id;
    *stateless_reset_token = default_path.stateless_reset_token;
    return;
  }
  // NOTE(review): this condition reads the member `alternative_path_` while
  // the body reads the `alternative_path` parameter — verify callers always
  // pass the members so the two cannot diverge.
  if (server_connection_id == alternative_path_.server_connection_id) {
    *client_connection_id = alternative_path.client_connection_id;
    *stateless_reset_token = alternative_path.stateless_reset_token;
    return;
  }
  auto* connection_id_data =
      peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
  if (connection_id_data == nullptr) {
    return;
  }
  *client_connection_id = connection_id_data->connection_id;
  *stateless_reset_token = connection_id_data->stateless_reset_token;
}
// Looks up the client/server connection ID pair in use on the given
// (self, peer) address tuple. Returns true and fills the outputs when the
// tuple matches the default or alternative path; returns false otherwise
// (which on a client indicates a path-bookkeeping bug).
bool QuicConnection::FindOnPathConnectionIds(
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address,
    QuicConnectionId* client_connection_id,
    QuicConnectionId* server_connection_id) const {
  if (IsDefaultPath(self_address, peer_address)) {
    // Fixed: the original chained these assignments with a comma operator
    // ("=," instead of ";") — behaviorally identical but accidental; use
    // two separate statements.
    *client_connection_id = default_path_.client_connection_id;
    *server_connection_id = default_path_.server_connection_id;
    return true;
  }
  if (IsAlternativePath(self_address, peer_address)) {
    *client_connection_id = alternative_path_.client_connection_id;
    *server_connection_id = alternative_path_.server_connection_id;
    return true;
  }
  QUIC_BUG_IF(failed to find on path connection ids,
              perspective_ == Perspective::IS_CLIENT)
      << "Fails to find on path connection IDs";
  return false;
}
// Installs |new_path_state| as the default path and keeps the packet
// creator's connection IDs in sync with it. IETF-frames-only.
void QuicConnection::SetDefaultPathState(PathState new_path_state) {
  QUICHE_DCHECK(version().HasIetfQuicFrames());
  default_path_ = std::move(new_path_state);
  // Propagate the new path's connection IDs to the packet creator so
  // subsequently built packets carry them.
  const PathState& path = default_path_;
  packet_creator_.SetClientConnectionId(path.client_connection_id);
  packet_creator_.SetServerConnectionId(path.server_connection_id);
}
bool QuicConnection::PeerAddressChanged() const {
if (quic_test_peer_addr_change_after_normalize_) {
return direct_peer_address_.Normalized() !=
last_received_packet_info_.source_address.Normalized();
}
return direct_peer_address_ != last_received_packet_info_.source_address;
}
// Runs connection-level validation on a decrypted packet and applies the
// resulting state updates (self-address adoption, server connection ID
// replacement, version negotiation, max packet length). Returns false if the
// packet must be dropped.
bool QuicConnection::ProcessValidatedPacket(const QuicPacketHeader& header) {
  // Client: drop packets whose source address changed to one that is not a
  // known server address (guards against off-path packets).
  if (perspective_ == Perspective::IS_CLIENT && version().HasIetfQuicFrames() &&
      direct_peer_address_.IsInitialized() &&
      last_received_packet_info_.source_address.IsInitialized() &&
      PeerAddressChanged() &&
      !IsKnownServerAddress(last_received_packet_info_.source_address)) {
    return false;
  }
  // Server: the packet arrived on a different self address than the default
  // path's. Unless the change is cosmetic (same port and normalized host) or
  // the visitor explicitly allows self-address changes, drop the packet.
  if (perspective_ == Perspective::IS_SERVER &&
      default_path_.self_address.IsInitialized() &&
      last_received_packet_info_.destination_address.IsInitialized() &&
      default_path_.self_address !=
          last_received_packet_info_.destination_address) {
    if (default_path_.self_address.port() !=
            last_received_packet_info_.destination_address.port() ||
        default_path_.self_address.host().Normalized() !=
            last_received_packet_info_.destination_address.host()
                .Normalized()) {
      if (!visitor_->AllowSelfAddressChange()) {
        const std::string error_details = absl::StrCat(
            "Self address migration is not supported at the server, current "
            "address: ",
            default_path_.self_address.ToString(),
            ", expected server preferred address: ",
            expected_server_preferred_address_.ToString(),
            ", received packet address: ",
            last_received_packet_info_.destination_address.ToString(),
            ", size: ", last_received_packet_info_.length,
            ", packet number: ", header.packet_number.ToString(),
            ", encryption level: ",
            EncryptionLevelToString(
                last_received_packet_info_.decrypted_level));
        QUIC_LOG_EVERY_N_SEC(INFO, 100) << error_details;
        QUIC_CODE_COUNT(quic_dropped_packets_with_changed_server_address);
        return false;
      }
    }
    // The change is acceptable: adopt the new self address.
    default_path_.self_address = last_received_packet_info_.destination_address;
  }
  // Server: remember client source addresses so outgoing packets can be sent
  // from the right self address (see WritePacket's preferred-address logic).
  if (GetQuicReloadableFlag(quic_use_received_client_addresses_cache) &&
      perspective_ == Perspective::IS_SERVER &&
      !last_received_packet_info_.actual_destination_address.IsInitialized() &&
      last_received_packet_info_.source_address.IsInitialized()) {
    QUIC_RELOADABLE_FLAG_COUNT(quic_use_received_client_addresses_cache);
    received_client_addresses_cache_.Insert(
        last_received_packet_info_.source_address,
        std::make_unique<bool>(true));
  }
  // Server, pre-handshake-confirmation: a packet came in on the preferred
  // address from what looks like a new peer address. Rewrite its source
  // address so it does not trigger client-address-migration detection.
  // (Presumably this matches a client spraying packets toward the server
  // preferred address before confirmation — see the DCHECK.)
  if (perspective_ == Perspective::IS_SERVER &&
      last_received_packet_info_.actual_destination_address.IsInitialized() &&
      !IsHandshakeConfirmed() &&
      GetEffectivePeerAddressFromCurrentPacket() !=
          default_path_.peer_address) {
    QUICHE_DCHECK(expected_server_preferred_address_.IsInitialized());
    last_received_packet_info_.source_address = direct_peer_address_;
  }
  // The server may change its connection ID exactly once, via an INITIAL
  // packet. A second change attempt is refused.
  if (PacketCanReplaceServerConnectionId(header, perspective_) &&
      default_path_.server_connection_id != header.source_connection_id) {
    QUICHE_DCHECK_EQ(header.long_packet_type, INITIAL);
    if (server_connection_id_replaced_by_initial_) {
      QUIC_DLOG(ERROR) << ENDPOINT << "Refusing to replace connection ID "
                       << default_path_.server_connection_id << " with "
                       << header.source_connection_id;
      return false;
    }
    server_connection_id_replaced_by_initial_ = true;
    QUIC_DLOG(INFO) << ENDPOINT << "Replacing connection ID "
                    << default_path_.server_connection_id << " with "
                    << header.source_connection_id;
    if (!original_destination_connection_id_.has_value()) {
      original_destination_connection_id_ = default_path_.server_connection_id;
    }
    ReplaceInitialServerConnectionId(header.source_connection_id);
  }
  // Drop duplicates / packets no longer awaited.
  if (!ValidateReceivedPacketNumber(header.packet_number)) {
    return false;
  }
  // First validated packet on the client implies the server accepted our
  // version.
  if (!version_negotiated_) {
    if (perspective_ == Perspective::IS_CLIENT) {
      QUICHE_DCHECK(!header.version_flag || header.form != GOOGLE_QUIC_PACKET);
      version_negotiated_ = true;
      OnSuccessfulVersionNegotiation();
    }
  }
  if (last_received_packet_info_.length > largest_received_packet_size_) {
    largest_received_packet_size_ = last_received_packet_info_.length;
  }
  // Server, during the initial handshake: grow our max packet length to match
  // what the client demonstrated the path can carry (optionally capped for
  // tests).
  if (perspective_ == Perspective::IS_SERVER &&
      encryption_level_ == ENCRYPTION_INITIAL &&
      last_received_packet_info_.length > packet_creator_.max_packet_length()) {
    if (GetQuicFlag(quic_use_lower_server_response_mtu_for_test)) {
      SetMaxPacketLength(
          std::min(last_received_packet_info_.length, QuicByteCount(1250)));
    } else {
      SetMaxPacketLength(last_received_packet_info_.length);
    }
  }
  return true;
}
// Returns true when |packet_number| is still being awaited at the decryption
// level of the last received packet; otherwise logs, notifies the debug
// visitor of the duplicate, and returns false so the packet is discarded.
bool QuicConnection::ValidateReceivedPacketNumber(
    QuicPacketNumber packet_number) {
  const bool still_awaited = uber_received_packet_manager_.IsAwaitingPacket(
      last_received_packet_info_.decrypted_level, packet_number);
  if (still_awaited) {
    return true;
  }
  QUIC_DLOG(INFO) << ENDPOINT << "Packet " << packet_number
                  << " no longer being waited for at level "
                  << static_cast<int>(
                         last_received_packet_info_.decrypted_level)
                  << ". Discarding.";
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnDuplicatePacket(packet_number);
  }
  return false;
}
// Drains buffered (previously write-blocked) packets to the writer, stopping
// when the writer blocks again or a write error occurs. Must not be entered
// while the writer is already blocked.
void QuicConnection::WriteQueuedPackets() {
  QUICHE_DCHECK(!writer_->IsWriteBlocked());
  QUIC_CLIENT_HISTOGRAM_COUNTS("QuicSession.NumQueuedPacketsBeforeWrite",
                               buffered_packets_.size(), 1, 1000, 50, "");
  while (!buffered_packets_.empty()) {
    if (HandleWriteBlocked()) {
      break;
    }
    const BufferedPacket& packet = buffered_packets_.front();
    WriteResult result = SendPacketToWriter(
        packet.data.get(), packet.length, packet.self_address.host(),
        packet.peer_address, writer_, packet.ecn_codepoint);
    QUIC_DVLOG(1) << ENDPOINT << "Sending buffered packet, result: " << result;
    // An oversized buffered packet (likely an MTU probe) is dropped and MTU
    // discovery is stopped, rather than treating it as a fatal write error.
    if (IsMsgTooBig(writer_, result) && packet.length > long_term_mtu_) {
      mtu_discoverer_.Disable();
      mtu_discovery_alarm().Cancel();
      buffered_packets_.pop_front();
      continue;
    }
    if (IsWriteError(result.status)) {
      OnWriteError(result.error_code);
      break;
    }
    // Pop when the writer either took the packet or buffered it internally.
    if (result.status == WRITE_STATUS_OK ||
        result.status == WRITE_STATUS_BLOCKED_DATA_BUFFERED) {
      buffered_packets_.pop_front();
    }
    // Stop draining (but keep remaining packets queued) once blocked.
    if (IsWriteBlockedStatus(result.status)) {
      visitor_->OnWriteBlocked();
      break;
    }
  }
}
// Marks all in-flight 0-RTT packets for retransmission (used when the server
// rejects early data) and reports the rejection reason to the debug visitor
// on TLS-based versions.
void QuicConnection::MarkZeroRttPacketsForRetransmission(int reject_reason) {
  sent_packet_manager_.MarkZeroRttPacketsForRetransmission();
  if (version().UsesTls() && debug_visitor_ != nullptr) {
    debug_visitor_->OnZeroRttRejected(reject_reason);
  }
}
// Abandons all unencrypted (INITIAL-level) packets once they can no longer be
// useful, then refreshes the retransmission alarm and, where applicable,
// resets INITIAL ack state and the blackhole detector.
void QuicConnection::NeuterUnencryptedPackets() {
  sent_packet_manager_.NeuterUnencryptedPackets();
  // Neutering may change what needs retransmitting, so re-arm the alarm.
  SetRetransmissionAlarm();
  if (default_enable_5rto_blackhole_detection_) {
    QUIC_RELOADABLE_FLAG_COUNT_N(quic_default_enable_5rto_blackhole_detection2,
                                 1, 3);
    // Consider this forward progress so blackhole detection restarts cleanly.
    OnForwardProgressMade();
  }
  if (SupportsMultiplePacketNumberSpaces()) {
    // No INITIAL acks will ever be sent again; clear that space's ack state
    // and re-schedule the ack alarm from the remaining spaces.
    uber_received_packet_manager_.ResetAckStates(ENCRYPTION_INITIAL);
    ack_alarm().Update(uber_received_packet_manager_.GetEarliestAckTimeout(),
                       kAlarmGranularity);
  }
}
// Returns true when the peer issues connection IDs but the packet creator
// currently has no destination connection ID to put on outgoing packets.
bool QuicConnection::IsMissingDestinationConnectionID() const {
  if (peer_issued_cid_manager_ == nullptr) {
    return false;
  }
  return packet_creator_.GetDestinationConnectionId().IsEmpty();
}
// Decides whether a new packet of the given kind may be generated right now.
// Fails fast when no destination connection ID is available; otherwise defers
// to CanWrite() on the default path, or to a simpler connected/not-blocked
// check when the packet creator targets a non-default path.
bool QuicConnection::ShouldGeneratePacket(
    HasRetransmittableData retransmittable, IsHandshake handshake) {
  QUICHE_DCHECK(handshake != IS_HANDSHAKE ||
                QuicVersionUsesCryptoFrames(transport_version()))
      << ENDPOINT
      << "Handshake in STREAM frames should not check ShouldGeneratePacket";
  if (IsMissingDestinationConnectionID()) {
    // Only possible with IETF frames; a client should always have one.
    QUICHE_DCHECK(version().HasIetfQuicFrames());
    QUIC_CODE_COUNT(quic_generate_packet_blocked_by_no_connection_id);
    QUIC_BUG_IF(quic_bug_90265_1, perspective_ == Perspective::IS_CLIENT);
    QUIC_DLOG(INFO) << ENDPOINT
                    << "There is no destination connection ID available to "
                       "generate packet.";
    return false;
  }
  if (IsDefaultPath(default_path_.self_address,
                    packet_creator_.peer_address())) {
    // Default path: apply the full CanWrite() policy (congestion, pacing,
    // amplification limits, ...).
    return CanWrite(retransmittable);
  }
  // Non-default path (e.g. probing): only require an open connection and an
  // unblocked writer.
  return connected_ && !HandleWriteBlocked();
}
// Gives other senders a chance to piggyback data on an outgoing packet: sends
// the first ACK_FREQUENCY frame once eligible, lets the visitor bundle
// opportunistically on fresh transmissions, and flushes a pending ACK into
// the packet being built when one is due.
void QuicConnection::MaybeBundleOpportunistically(
    TransmissionType transmission_type) {
  // Send ACK_FREQUENCY exactly once, on a fresh (non-retransmission) packet,
  // after enough packets have gone out to have a meaningful reordering view.
  const bool should_bundle_ack_frequency =
      !ack_frequency_sent_ && sent_packet_manager_.CanSendAckFrequency() &&
      transmission_type == NOT_RETRANSMISSION &&
      packet_creator_.NextSendingPacketNumber() >=
          FirstSendingPacketNumber() + kMinReceivedBeforeAckDecimation;
  if (should_bundle_ack_frequency) {
    QUIC_RELOADABLE_FLAG_COUNT_N(quic_can_send_ack_frequency, 3, 3);
    ack_frequency_sent_ = true;
    auto frame = sent_packet_manager_.GetUpdatedAckFrequencyFrame();
    visitor_->SendAckFrequency(frame);
  }
  if (transmission_type == NOT_RETRANSMISSION) {
    visitor_->MaybeBundleOpportunistically();
  }
  // Nothing more to do if an ACK is already in the packet or we cannot write.
  if (packet_creator_.has_ack() || !CanWrite(NO_RETRANSMITTABLE_DATA)) {
    return;
  }
  QuicFrames frames;
  const bool has_pending_ack =
      uber_received_packet_manager_
          .GetAckTimeout(QuicUtils::GetPacketNumberSpace(encryption_level_))
          .IsInitialized();
  if (!has_pending_ack) {
    // No ACK of the current encryption level is pending.
    return;
  }
  ResetAckStates();
  QUIC_DVLOG(1) << ENDPOINT << "Bundle an ACK opportunistically";
  QuicFrame updated_ack_frame = GetUpdatedAckFrame();
  QUIC_BUG_IF(quic_bug_12714_23, updated_ack_frame.ack_frame->packets.Empty())
      << ENDPOINT << "Attempted to opportunistically bundle an empty "
      << encryption_level_ << " ACK, " << (has_pending_ack ? "" : "!")
      << "has_pending_ack";
  frames.push_back(updated_ack_frame);
  const bool flushed = packet_creator_.FlushAckFrame(frames);
  QUIC_BUG_IF(failed_to_flush_ack, !flushed)
      << ENDPOINT << "Failed to flush ACK frame";
}
// Central send-permission check. Evaluates, in order: connection liveness,
// destination connection ID availability, coalescing/processing interactions,
// pending timer-driven transmissions, the anti-amplification limit, writer
// blockage, and finally congestion-controller pacing (for retransmittable
// data only). May arm or cancel the send alarm as a side effect.
bool QuicConnection::CanWrite(HasRetransmittableData retransmittable) {
  if (!connected_) {
    return false;
  }
  if (IsMissingDestinationConnectionID()) {
    return false;
  }
  // Avoid sending while in the middle of processing a coalesced packet that
  // may still contain an INITIAL; responses should wait for the full datagram.
  if (version().CanSendCoalescedPackets() &&
      framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_INITIAL) &&
      framer_.is_processing_packet()) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Suppress sending in the mid of packet processing";
    return false;
  }
  if (fill_coalesced_packet_) {
    // Currently filling a coalesced packet: writable only while there is
    // still soft-max room left.
    return packet_creator_.HasSoftMaxPacketLength();
  }
  // Timer-driven transmissions (PTO etc.) bypass all remaining limits.
  if (sent_packet_manager_.pending_timer_transmission_count() > 0) {
    return true;
  }
  // Server anti-amplification: do not exceed the allowed multiple of bytes
  // received from an unvalidated address.
  if (LimitedByAmplificationFactor(packet_creator_.max_packet_length())) {
    QUIC_CODE_COUNT(quic_throttled_by_amplification_limit);
    QUIC_DVLOG(1) << ENDPOINT
                  << "Constrained by amplification restriction to peer address "
                  << default_path_.peer_address << " bytes received "
                  << default_path_.bytes_received_before_address_validation
                  << ", bytes sent"
                  << default_path_.bytes_sent_before_address_validation;
    ++stats_.num_amplification_throttling;
    return false;
  }
  if (HandleWriteBlocked()) {
    return false;
  }
  // ACK-only / probing data is not paced by the congestion controller.
  if (retransmittable == NO_RETRANSMITTABLE_DATA) {
    return true;
  }
  if (send_alarm().IsSet()) {
    return false;
  }
  QuicTime now = clock_->Now();
  QuicTime::Delta delay = sent_packet_manager_.TimeUntilSend(now);
  if (delay.IsInfinite()) {
    send_alarm().Cancel();
    return false;
  }
  if (!delay.IsZero()) {
    if (delay <= release_time_into_future_) {
      // Within the release-time window: let the packet writer pace it.
      return true;
    }
    // Too early: arm the send alarm and report not writable.
    send_alarm().Update(now + delay, kAlarmGranularity);
    QUIC_DVLOG(1) << ENDPOINT << "Delaying sending " << delay.ToMilliseconds()
                  << "ms";
    return false;
  }
  return true;
}
// Computes the time at which the next packet should be treated as sent. When
// the writer supports release-time pacing, this is the pacer's next release
// time (never earlier than now), and the writer params are updated with the
// corresponding delay and burst permission.
QuicTime QuicConnection::CalculatePacketSentTime() {
  const QuicTime now = clock_->Now();
  if (!supports_release_time_) {
    // No release-time support: packets are sent immediately.
    return now;
  }
  const auto release_info = sent_packet_manager_.GetNextReleaseTime();
  const QuicTime send_time = std::max(now, release_info.release_time);
  packet_writer_params_.release_time_delay = send_time - now;
  packet_writer_params_.allow_burst = release_info.allow_burst;
  return send_time;
}
// Writes one serialized packet according to its fate (discard, coalesce,
// buffer, or send to the writer), then updates all send-side state: alarms,
// anti-amplification accounting, sent-packet manager, stats and key-update
// limits. Returns false only on an unrecoverable write/coalesce failure;
// returns true otherwise (including when the packet was discarded/buffered).
bool QuicConnection::WritePacket(SerializedPacket* packet) {
  // Packets must be written in increasing packet-number order.
  if (sent_packet_manager_.GetLargestSentPacket().IsInitialized() &&
      packet->packet_number < sent_packet_manager_.GetLargestSentPacket()) {
    QUIC_BUG(quic_bug_10511_23)
        << "Attempt to write packet:" << packet->packet_number
        << " after:" << sent_packet_manager_.GetLargestSentPacket();
    CloseConnection(QUIC_INTERNAL_ERROR, "Packet written out of order.",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return true;
  }
  const bool is_mtu_discovery = QuicUtils::ContainsFrameType(
      packet->nonretransmittable_frames, MTU_DISCOVERY_FRAME);
  const SerializedPacketFate fate = packet->fate;
  QuicErrorCode error_code = QUIC_NO_ERROR;
  const bool is_termination_packet = IsTerminationPacket(*packet, &error_code);
  QuicPacketNumber packet_number = packet->packet_number;
  QuicPacketLength encrypted_length = packet->encrypted_length;
  // Termination (connection close) packets are copied aside so they can be
  // replayed to the peer later; silent idle timeouts are never sent at all.
  if (is_termination_packet) {
    if (termination_packets_ == nullptr) {
      termination_packets_.reset(
          new std::vector<std::unique_ptr<QuicEncryptedPacket>>);
    }
    char* buffer_copy = CopyBuffer(*packet);
    termination_packets_->emplace_back(
        new QuicEncryptedPacket(buffer_copy, encrypted_length, true));
    if (error_code == QUIC_SILENT_IDLE_TIMEOUT) {
      QUICHE_DCHECK_EQ(Perspective::IS_SERVER, perspective_);
      QUIC_DVLOG(1) << ENDPOINT
                    << "Added silent connection close to termination packets, "
                       "num of termination packets: "
                    << termination_packets_->size();
      return true;
    }
  }
  QUICHE_DCHECK_LE(encrypted_length, kMaxOutgoingPacketSize);
  QUICHE_DCHECK(is_mtu_discovery ||
                encrypted_length <= packet_creator_.max_packet_length())
      << " encrypted_length=" << encrypted_length
      << " > packet_creator max_packet_length="
      << packet_creator_.max_packet_length();
  QUIC_DVLOG(1) << ENDPOINT << "Sending packet " << packet_number << " : "
                << (IsRetransmittable(*packet) == HAS_RETRANSMITTABLE_DATA
                        ? "data bearing "
                        : " ack or probing only ")
                << ", encryption level: " << packet->encryption_level
                << ", encrypted length:" << encrypted_length
                << ", fate: " << fate << " to peer " << packet->peer_address;
  QUIC_DVLOG(2) << ENDPOINT << packet->encryption_level << " packet number "
                << packet_number << " of length " << encrypted_length << ": "
                << std::endl
                << quiche::QuicheTextUtils::HexDump(absl::string_view(
                       packet->encrypted_buffer, encrypted_length));
  QuicTime packet_send_time = CalculatePacketSentTime();
  WriteResult result(WRITE_STATUS_OK, encrypted_length);
  QuicSocketAddress send_to_address = packet->peer_address;
  QuicSocketAddress send_from_address = self_address();
  // Server with a preferred address: reply from the preferred address to
  // clients we have not yet heard from on the original address.
  if (perspective_ == Perspective::IS_SERVER &&
      expected_server_preferred_address_.IsInitialized() &&
      received_client_addresses_cache_.Lookup(send_to_address) ==
          received_client_addresses_cache_.end()) {
    send_from_address = expected_server_preferred_address_;
  }
  // Only probing frames may be sent on a path other than the current one.
  const bool send_on_current_path = send_to_address == peer_address();
  if (!send_on_current_path) {
    QUIC_BUG_IF(quic_send_non_probing_frames_on_alternative_path,
                ContainsNonProbingFrame(*packet))
        << "Packet " << packet->packet_number
        << " with non-probing frames was sent on alternative path: "
           "nonretransmittable_frames: "
        << QuicFramesToString(packet->nonretransmittable_frames)
        << " retransmittable_frames: "
        << QuicFramesToString(packet->retransmittable_frames);
  }
  switch (fate) {
    case DISCARD:
      ++stats_.packets_discarded;
      if (debug_visitor_ != nullptr) {
        debug_visitor_->OnPacketDiscarded(*packet);
      }
      return true;
    case COALESCE:
      QUIC_BUG_IF(quic_bug_12714_24,
                  !version().CanSendCoalescedPackets() || coalescing_done_);
      // Try to append to the in-progress coalesced packet; if full, flush it
      // and retry once. A second failure is unrecoverable for this packet.
      if (!coalesced_packet_.MaybeCoalescePacket(
              *packet, send_from_address, send_to_address,
              helper_->GetStreamSendBufferAllocator(),
              packet_creator_.max_packet_length(),
              GetEcnCodepointToSend(send_to_address))) {
        if (!FlushCoalescedPacket()) {
          QUIC_BUG_IF(quic_connection_connected_after_flush_coalesced_failure,
                      connected_)
              << "QUIC connection is still connected after failing to flush "
                 "coalesced packet.";
          return false;
        }
        if (!coalesced_packet_.MaybeCoalescePacket(
                *packet, send_from_address, send_to_address,
                helper_->GetStreamSendBufferAllocator(),
                packet_creator_.max_packet_length(),
                GetEcnCodepointToSend(send_to_address))) {
          QUIC_DLOG(ERROR) << ENDPOINT << "Failed to coalesce packet";
          result.error_code = WRITE_STATUS_FAILED_TO_COALESCE_PACKET;
          break;
        }
      }
      // Leave soft-max room so more packets can be coalesced in.
      if (coalesced_packet_.length() < coalesced_packet_.max_packet_length()) {
        QUIC_DVLOG(1) << ENDPOINT << "Trying to set soft max packet length to "
                      << coalesced_packet_.max_packet_length() -
                             coalesced_packet_.length();
        packet_creator_.SetSoftMaxPacketLength(
            coalesced_packet_.max_packet_length() - coalesced_packet_.length());
      }
      last_ecn_codepoint_sent_ = coalesced_packet_.ecn_codepoint();
      break;
    case BUFFER:
      QUIC_DVLOG(1) << ENDPOINT << "Adding packet: " << packet->packet_number
                    << " to buffered packets";
      last_ecn_codepoint_sent_ = GetEcnCodepointToSend(send_to_address);
      buffered_packets_.emplace_back(*packet, send_from_address,
                                     send_to_address, last_ecn_codepoint_sent_);
      break;
    case SEND_TO_WRITER:
      // The first direct write permanently ends the coalescing phase.
      coalescing_done_ = true;
      // The writer now owns the buffer's lifetime for this call.
      packet->release_encrypted_buffer = nullptr;
      result = SendPacketToWriter(
          packet->encrypted_buffer, encrypted_length, send_from_address.host(),
          send_to_address, writer_, GetEcnCodepointToSend(send_to_address));
      // MTU probes must not linger in a batching writer: flush immediately so
      // an MSG_TOO_BIG error is attributable to the probe.
      if (is_mtu_discovery && writer_->IsBatchMode()) {
        result = writer_->Flush();
      }
      break;
    default:
      QUICHE_DCHECK(false);
      break;
  }
  QUIC_HISTOGRAM_ENUM(
      "QuicConnection.WritePacketStatus", result.status,
      WRITE_STATUS_NUM_VALUES,
      "Status code returned by writer_->WritePacket() in QuicConnection.");
  if (IsWriteBlockedStatus(result.status)) {
    QUICHE_DCHECK(writer_->IsWriteBlocked());
    visitor_->OnWriteBlocked();
    // Buffer the packet ourselves unless the writer already buffered it.
    if (result.status != WRITE_STATUS_BLOCKED_DATA_BUFFERED) {
      QUIC_DVLOG(1) << ENDPOINT << "Adding packet: " << packet->packet_number
                    << " to buffered packets";
      buffered_packets_.emplace_back(*packet, send_from_address,
                                     send_to_address, last_ecn_codepoint_sent_);
    }
  }
  if (IsMsgTooBig(writer_, result)) {
    if (is_mtu_discovery) {
      // An oversized MTU probe is expected to fail; just stop probing.
      QUIC_DVLOG(1) << ENDPOINT
                    << " MTU probe packet too big, size:" << encrypted_length
                    << ", long_term_mtu_:" << long_term_mtu_;
      mtu_discoverer_.Disable();
      mtu_discovery_alarm().Cancel();
      return true;
    }
    if (!send_on_current_path) {
      // MSG_TOO_BIG on an alternative path is not fatal to the connection.
      return true;
    }
  }
  if (IsWriteError(result.status)) {
    QUIC_LOG_FIRST_N(ERROR, 10)
        << ENDPOINT << "Failed writing packet " << packet_number << " of "
        << encrypted_length << " bytes from " << send_from_address.host()
        << " to " << send_to_address << ", with error code "
        << result.error_code << ". long_term_mtu_:" << long_term_mtu_
        << ", previous_validated_mtu_:" << previous_validated_mtu_
        << ", max_packet_length():" << max_packet_length()
        << ", is_mtu_discovery:" << is_mtu_discovery;
    // A recent MTU increase may be the cause; revert before giving up.
    if (MaybeRevertToPreviousMtu()) {
      return true;
    }
    OnWriteError(result.error_code);
    return false;
  }
  if (result.status == WRITE_STATUS_OK) {
    // The writer may report that the packet was released later than planned.
    packet_send_time = packet_send_time + result.send_time_offset;
  }
  if (IsRetransmittable(*packet) == HAS_RETRANSMITTABLE_DATA &&
      !is_termination_packet) {
    // Retransmittable data (re)starts blackhole and idle-network detection.
    if (!blackhole_detector_.IsDetectionInProgress()) {
      blackhole_detector_.RestartDetection(GetPathDegradingDeadline(),
                                           GetNetworkBlackholeDeadline(),
                                           GetPathMtuReductionDeadline());
    }
    idle_network_detector_.OnPacketSent(packet_send_time,
                                        sent_packet_manager_.GetPtoDelay());
  }
  MaybeSetMtuAlarm(packet_number);
  QUIC_DVLOG(1) << ENDPOINT << "time we began writing last sent packet: "
                << packet_send_time.ToDebuggingValue();
  if (IsDefaultPath(default_path_.self_address, send_to_address)) {
    if (EnforceAntiAmplificationLimit()) {
      // Count bytes toward the pre-validation amplification budget.
      default_path_.bytes_sent_before_address_validation += encrypted_length;
    }
  } else {
    MaybeUpdateBytesSentToAlternativeAddress(send_to_address, encrypted_length);
  }
  QUIC_DLOG_IF(INFO, !send_on_current_path)
      << ENDPOINT << " Sent packet " << packet->packet_number
      << " on a different path with remote address " << send_to_address
      << " while current path has peer address " << peer_address();
  const bool in_flight = sent_packet_manager_.OnPacketSent(
      packet, packet_send_time, packet->transmission_type,
      IsRetransmittable(*packet), send_on_current_path,
      last_ecn_codepoint_sent_);
  QUIC_BUG_IF(quic_bug_12714_25,
              perspective_ == Perspective::IS_SERVER &&
                  default_enable_5rto_blackhole_detection_ &&
                  blackhole_detector_.IsDetectionInProgress() &&
                  !sent_packet_manager_.HasInFlightPackets())
      << ENDPOINT
      << "Trying to start blackhole detection without no bytes in flight";
  if (debug_visitor_ != nullptr) {
    if (sent_packet_manager_.unacked_packets().empty()) {
      QUIC_BUG(quic_bug_10511_25)
          << "Unacked map is empty right after packet is sent";
    } else {
      debug_visitor_->OnPacketSent(
          packet->packet_number, packet->encrypted_length,
          packet->has_crypto_handshake, packet->transmission_type,
          packet->encryption_level,
          sent_packet_manager_.unacked_packets()
              .rbegin()
              ->retransmittable_frames,
          packet->nonretransmittable_frames, packet_send_time, result.batch_id);
    }
  }
  if (packet->encryption_level == ENCRYPTION_HANDSHAKE) {
    handshake_packet_sent_ = true;
  }
  if (packet->encryption_level == ENCRYPTION_FORWARD_SECURE) {
    // Track the first 1-RTT packet of the current key phase for AEAD
    // confidentiality-limit accounting.
    if (!lowest_packet_sent_in_current_key_phase_.IsInitialized()) {
      QUIC_DLOG(INFO) << ENDPOINT
                      << "lowest_packet_sent_in_current_key_phase_ = "
                      << packet_number;
      lowest_packet_sent_in_current_key_phase_ = packet_number;
    }
    if (!is_termination_packet &&
        MaybeHandleAeadConfidentialityLimits(*packet)) {
      return true;
    }
  }
  if (in_flight || !retransmission_alarm().IsSet()) {
    SetRetransmissionAlarm();
  }
  SetPingAlarm();
  RetirePeerIssuedConnectionIdsNoLongerOnPath();
  // Widen the packet-number field as the in-flight window grows.
  packet_creator_.UpdatePacketNumberLength(
      sent_packet_manager_.GetLeastPacketAwaitedByPeer(encryption_level_),
      sent_packet_manager_.EstimateMaxPacketsInFlight(max_packet_length()));
  stats_.bytes_sent += encrypted_length;
  ++stats_.packets_sent;
  if (packet->has_ack_ecn) {
    stats_.num_ack_frames_sent_with_ecn++;
  }
  QuicByteCount bytes_not_retransmitted =
      packet->bytes_not_retransmitted.value_or(0);
  if (packet->transmission_type != NOT_RETRANSMISSION) {
    if (static_cast<uint64_t>(encrypted_length) < bytes_not_retransmitted) {
      QUIC_BUG(quic_packet_bytes_written_lt_bytes_not_retransmitted)
          << "Total bytes written to the packet should be larger than the "
             "bytes in not-retransmitted frames. Bytes written: "
          << encrypted_length
          << ", bytes not retransmitted: " << bytes_not_retransmitted;
    } else {
      stats_.bytes_retransmitted +=
          (encrypted_length - bytes_not_retransmitted);
    }
    ++stats_.packets_retransmitted;
  }
  return true;
}
// Enforces the AEAD confidentiality limit on 1-RTT packets: closes the
// connection once the number of packets encrypted in the current key phase
// reaches the cipher's limit, and proactively initiates a key update shortly
// before that point when allowed. Returns true only when the connection was
// closed here.
bool QuicConnection::MaybeHandleAeadConfidentialityLimits(
    const SerializedPacket& packet) {
  if (!version().UsesTls()) {
    return false;
  }
  if (packet.encryption_level != ENCRYPTION_FORWARD_SECURE) {
    QUIC_BUG(quic_bug_12714_26)
        << "MaybeHandleAeadConfidentialityLimits called on non 1-RTT packet";
    return false;
  }
  if (!lowest_packet_sent_in_current_key_phase_.IsInitialized()) {
    QUIC_BUG(quic_bug_10511_26)
        << "lowest_packet_sent_in_current_key_phase_ must be initialized "
           "before calling MaybeHandleAeadConfidentialityLimits";
    return false;
  }
  // Packet numbers must be monotonically increasing within a key phase.
  if (packet.packet_number < lowest_packet_sent_in_current_key_phase_) {
    const std::string error_details =
        absl::StrCat("packet_number(", packet.packet_number.ToString(),
                     ") < lowest_packet_sent_in_current_key_phase_ (",
                     lowest_packet_sent_in_current_key_phase_.ToString(), ")");
    QUIC_BUG(quic_bug_10511_27) << error_details;
    CloseConnection(QUIC_INTERNAL_ERROR, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return true;
  }
  const QuicPacketCount num_packets_encrypted_in_current_key_phase =
      packet.packet_number - lowest_packet_sent_in_current_key_phase_ + 1;
  const QuicPacketCount confidentiality_limit =
      framer_.GetOneRttEncrypterConfidentialityLimit();
  // Start a key update this many packets before the hard limit, so there is
  // slack to finish the update before the limit is reached.
  constexpr QuicPacketCount kKeyUpdateConfidentialityLimitOffset = 1000;
  QuicPacketCount key_update_limit = 0;
  if (confidentiality_limit > kKeyUpdateConfidentialityLimitOffset) {
    key_update_limit =
        confidentiality_limit - kKeyUpdateConfidentialityLimitOffset;
  }
  // A flag may override the computed key-update threshold (e.g. for tests).
  const QuicPacketCount key_update_limit_override =
      GetQuicFlag(quic_key_update_confidentiality_limit);
  if (key_update_limit_override) {
    key_update_limit = key_update_limit_override;
  }
  QUIC_DVLOG(2) << ENDPOINT << "Checking AEAD confidentiality limits: "
                << "num_packets_encrypted_in_current_key_phase="
                << num_packets_encrypted_in_current_key_phase
                << " key_update_limit=" << key_update_limit
                << " confidentiality_limit=" << confidentiality_limit
                << " IsKeyUpdateAllowed()=" << IsKeyUpdateAllowed();
  // Hard limit reached: the key must not be used any further.
  if (num_packets_encrypted_in_current_key_phase >= confidentiality_limit) {
    const std::string error_details = absl::StrCat(
        "encrypter confidentiality limit reached: "
        "num_packets_encrypted_in_current_key_phase=",
        num_packets_encrypted_in_current_key_phase,
        " key_update_limit=", key_update_limit,
        " confidentiality_limit=", confidentiality_limit,
        " IsKeyUpdateAllowed()=", IsKeyUpdateAllowed());
    CloseConnection(QUIC_AEAD_LIMIT_REACHED, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return true;
  }
  // Soft limit reached and a key update is permitted: rotate keys now.
  if (IsKeyUpdateAllowed() &&
      num_packets_encrypted_in_current_key_phase >= key_update_limit) {
    KeyUpdateReason reason = KeyUpdateReason::kLocalAeadConfidentialityLimit;
    if (key_update_limit_override) {
      QUIC_DLOG(INFO) << ENDPOINT
                      << "reached FLAGS_quic_key_update_confidentiality_limit, "
                         "initiating key update: "
                      << "num_packets_encrypted_in_current_key_phase="
                      << num_packets_encrypted_in_current_key_phase
                      << " key_update_limit=" << key_update_limit
                      << " confidentiality_limit=" << confidentiality_limit;
      reason = KeyUpdateReason::kLocalKeyUpdateLimitOverride;
    } else {
      QUIC_DLOG(INFO) << ENDPOINT
                      << "approaching AEAD confidentiality limit, "
                         "initiating key update: "
                      << "num_packets_encrypted_in_current_key_phase="
                      << num_packets_encrypted_in_current_key_phase
                      << " key_update_limit=" << key_update_limit
                      << " confidentiality_limit=" << confidentiality_limit;
    }
    InitiateKeyUpdate(reason);
  }
  return false;
}
// Flushes a batching writer. No-op when disconnected, when the writer does
// not batch, or when it is already blocked. A write error that cannot be
// explained by a recent MTU increase is escalated via OnWriteError.
void QuicConnection::FlushPackets() {
  if (!connected_) {
    return;
  }
  if (!writer_->IsBatchMode()) {
    return;
  }
  if (HandleWriteBlocked()) {
    QUIC_DLOG(INFO) << ENDPOINT << "FlushPackets called while blocked.";
    return;
  }
  WriteResult result = writer_->Flush();
  QUIC_HISTOGRAM_ENUM("QuicConnection.FlushPacketStatus", result.status,
                      WRITE_STATUS_NUM_VALUES,
                      "Status code returned by writer_->Flush() in "
                      "QuicConnection::FlushPackets.");
  // If the flush blocked the writer, the only acceptable status is BLOCKED.
  if (HandleWriteBlocked()) {
    QUICHE_DCHECK_EQ(WRITE_STATUS_BLOCKED, result.status)
        << "Unexpected flush result:" << result;
    QUIC_DLOG(INFO) << ENDPOINT << "Write blocked in FlushPackets.";
    return;
  }
  if (IsWriteError(result.status) && !MaybeRevertToPreviousMtu()) {
    OnWriteError(result.error_code);
  }
}
// Returns true when |result| indicates the packet exceeded the path/socket
// limit — either via the portable MSG_TOO_BIG status or via the writer's
// platform-specific "message too big" errno.
bool QuicConnection::IsMsgTooBig(const QuicPacketWriter* writer,
                                 const WriteResult& result) {
  const std::optional<int> too_big_code = writer->MessageTooBigErrorCode();
  if (result.status == WRITE_STATUS_MSG_TOO_BIG) {
    return true;
  }
  if (!too_big_code.has_value() || !IsWriteError(result.status)) {
    return false;
  }
  return result.error_code == *too_big_code;
}
// Returns true when an outgoing packet of |encryption_level| should be
// dropped instead of written: the connection is already closed, or the packet
// is NULL-encrypted (INITIAL) while the connection is already forward secure.
bool QuicConnection::ShouldDiscardPacket(EncryptionLevel encryption_level) {
  if (!connected_) {
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Not sending packet as connection is disconnected.";
    return true;
  }
  const bool stale_initial_packet =
      encryption_level_ == ENCRYPTION_FORWARD_SECURE &&
      encryption_level == ENCRYPTION_INITIAL;
  if (stale_initial_packet) {
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Dropping NULL encrypted packet since the connection is "
                       "forward secure.";
    return true;
  }
  return false;
}
// Returns the deadline at which the connection should fall back to the
// previously validated MTU, or QuicTime::Zero() when there is no previous
// MTU to revert to (or no configured delay).
QuicTime QuicConnection::GetPathMtuReductionDeadline() const {
  if (previous_validated_mtu_ == 0) {
    return QuicTime::Zero();
  }
  const QuicTime::Delta delay = sent_packet_manager_.GetMtuReductionDelay(
      num_rtos_for_blackhole_detection_);
  return delay.IsZero() ? QuicTime::Zero() : clock_->ApproximateNow() + delay;
}
bool QuicConnection::MaybeRevertToPreviousMtu() {
if (previous_validated_mtu_ == 0) {
return false;
}
SetMaxPacketLength(previous_validated_mtu_);
mtu_discoverer_.Disable();
mtu_discovery_alarm().Cancel();
previous_validated_mtu_ = 0;
return true;
}
// Handles a fatal socket write error (first occurrence only). A
// "message too big" error still sends a connection close; all other write
// errors close silently, since further writes are unlikely to succeed.
void QuicConnection::OnWriteError(int error_code) {
  if (write_error_occurred_) {
    // Already handling a write error; avoid recursive/duplicate handling.
    return;
  }
  write_error_occurred_ = true;
  const std::string error_details = absl::StrCat(
      "Write failed with error: ", error_code, " (", strerror(error_code), ")");
  QUIC_LOG_FIRST_N(ERROR, 2) << ENDPOINT << error_details;
  std::optional<int> writer_error_code = writer_->MessageTooBigErrorCode();
  if (writer_error_code.has_value() && error_code == *writer_error_code) {
    CloseConnection(QUIC_PACKET_WRITE_ERROR, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  QUIC_CODE_COUNT(quic_tear_down_local_connection_on_write_error_ietf);
  CloseConnection(QUIC_PACKET_WRITE_ERROR, error_details,
                  ConnectionCloseBehavior::SILENT_CLOSE);
}
// Returns where the next packet should be serialized. While coalescing is
// still possible, returns a null buffer so the packet is built into its own
// storage; otherwise delegates to the writer's next write location.
QuicPacketBuffer QuicConnection::GetPacketBuffer() {
  const bool may_still_coalesce =
      version().CanSendCoalescedPackets() && !coalescing_done_;
  if (may_still_coalesce) {
    return {nullptr, nullptr};
  }
  return writer_->GetNextWriteLocation(self_address().host(), peer_address());
}
// Receives a freshly serialized packet from the packet creator, performs
// bookkeeping (no-retransmittable-frame streak, first 1-RTT packet capture),
// and hands it to SendOrQueuePacket. Closes the connection if serialization
// produced no encrypted buffer.
void QuicConnection::OnSerializedPacket(SerializedPacket serialized_packet) {
  if (serialized_packet.encrypted_buffer == nullptr) {
    // Encryption failed during serialization; the connection cannot proceed.
    QUIC_CODE_COUNT(quic_tear_down_local_connection_on_serialized_packet_ietf);
    CloseConnection(QUIC_ENCRYPTION_FAILURE,
                    "Serialized packet does not have an encrypted buffer.",
                    ConnectionCloseBehavior::SILENT_CLOSE);
    return;
  }
  // Track consecutive packets without retransmittable frames (used by
  // retransmittable-on-wire logic).
  if (serialized_packet.retransmittable_frames.empty()) {
    ++consecutive_num_packets_with_no_retransmittable_frames_;
  } else {
    consecutive_num_packets_with_no_retransmittable_frames_ = 0;
  }
  // Keep a copy of the first 1-RTT packet if configured to resend it as the
  // retransmittable-on-wire payload.
  if (retransmittable_on_wire_behavior_ == SEND_FIRST_FORWARD_SECURE_PACKET &&
      first_serialized_one_rtt_packet_ == nullptr &&
      serialized_packet.encryption_level == ENCRYPTION_FORWARD_SECURE) {
    first_serialized_one_rtt_packet_ = std::make_unique<BufferedPacket>(
        serialized_packet, self_address(), peer_address(),
        GetEcnCodepointToSend(peer_address()));
  }
  SendOrQueuePacket(std::move(serialized_packet));
}
// Called by the packet creator/framer on an unrecoverable internal error;
// closes the connection silently (no close packet can be trusted to go out).
void QuicConnection::OnUnrecoverableError(QuicErrorCode error,
                                          const std::string& error_details) {
  QUIC_CODE_COUNT(quic_tear_down_local_connection_on_unrecoverable_error_ietf);
  CloseConnection(error, error_details, ConnectionCloseBehavior::SILENT_CLOSE);
}
// Notifies the visitor of a congestion window change and reports the current
// RTT estimate (smoothed if available, otherwise the initial RTT) to the
// debug visitor.
void QuicConnection::OnCongestionChange() {
  visitor_->OnCongestionWindowChange(clock_->ApproximateNow());
  const auto* rtt_stats = sent_packet_manager_.GetRttStats();
  QuicTime::Delta rtt = rtt_stats->smoothed_rtt();
  if (rtt.IsZero()) {
    // No samples yet; fall back to the configured initial RTT.
    rtt = rtt_stats->initial_rtt();
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnRttChanged(rtt);
  }
}
// Reacts to a successful MTU probe: adopts the larger packet size, remembers
// the previously validated MTU for potential rollback, and informs the MTU
// discoverer of the new bounds. Smaller-or-equal sizes are ignored.
void QuicConnection::OnPathMtuIncreased(QuicPacketLength packet_size) {
  if (packet_size <= max_packet_length()) {
    return;
  }
  previous_validated_mtu_ = max_packet_length();
  SetMaxPacketLength(packet_size);
  mtu_discoverer_.OnMaxPacketLengthUpdated(previous_validated_mtu_,
                                           max_packet_length());
}
// Records that an ECN-marked in-flight packet was acknowledged on the active
// path (first time only). Only expected when the ECT(1) support flag is on.
void QuicConnection::OnInFlightEcnPacketAcked() {
  QUIC_BUG_IF(quic_bug_518619343_01, !GetQuicRestartFlag(quic_support_ect1))
      << "Unexpected call to OnInFlightEcnPacketAcked()";
  // Only mark the path on the first acked ECN packet.
  if (!default_path_.ecn_marked_packet_acked) {
    QUIC_DVLOG(1) << ENDPOINT << "First ECT packet acked on active path.";
    QUIC_RESTART_FLAG_COUNT_N(quic_support_ect1, 2, 9);
    default_path_.ecn_marked_packet_acked = true;
  }
}
// Stops ECN marking of outgoing packets after the peer reported invalid ECN
// feedback, unless validation is explicitly disabled (e.g. for tests).
void QuicConnection::OnInvalidEcnFeedback() {
  QUIC_BUG_IF(quic_bug_518619343_02, !GetQuicRestartFlag(quic_support_ect1))
      << "Unexpected call to OnInvalidEcnFeedback().";
  if (disable_ecn_codepoint_validation_) {
    // Validation disabled; keep marking despite the invalid feedback.
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "ECN feedback is invalid, stop marking.";
  packet_writer_params_.ecn_codepoint = ECN_NOT_ECT;
}
std::unique_ptr<QuicSelfIssuedConnectionIdManager>
QuicConnection::MakeSelfIssuedConnectionIdManager() {
QUICHE_DCHECK((perspective_ == Perspective::IS_CLIENT &&
!default_path_.client_connection_id.IsEmpty()) ||
(perspective_ == Perspective::IS_SERVER &&
!default_path_.server_connection_id.IsEmpty()));
return std::make_unique<QuicSelfIssuedConnectionIdManager>(
kMinNumOfActiveConnectionIds,
perspective_ == Perspective::IS_CLIENT
? default_path_.client_connection_id
: default_path_.server_connection_id,
clock_, alarm_factory_, this, context(), connection_id_generator_);
}
// Server-only: asks the self-issued connection ID manager to send any new
// connection IDs the client may need. No-op on the client side.
void QuicConnection::MaybeSendConnectionIdToClient() {
  if (perspective_ != Perspective::IS_SERVER) {
    return;
  }
  QUICHE_DCHECK(self_issued_cid_manager_ != nullptr);
  self_issued_cid_manager_->MaybeSendNewConnectionIds();
}
// Runs once the handshake completes: confirms it with the sent-packet
// manager, issues new connection IDs (server), optionally sends an initial
// ACK_FREQUENCY frame, refreshes alarms, and clears handshake-space ack
// state. Finally surfaces a received server preferred address to the client
// visitor if one was not already acted upon.
void QuicConnection::OnHandshakeComplete() {
  sent_packet_manager_.SetHandshakeConfirmed();
  if (version().HasIetfQuicFrames() && perspective_ == Perspective::IS_SERVER &&
      self_issued_cid_manager_ != nullptr) {
    self_issued_cid_manager_->MaybeSendNewConnectionIds();
  }
  if (send_ack_frequency_on_handshake_completion_ &&
      sent_packet_manager_.CanSendAckFrequency()) {
    QUIC_RELOADABLE_FLAG_COUNT_N(quic_can_send_ack_frequency, 2, 3);
    auto ack_frequency_frame =
        sent_packet_manager_.GetUpdatedAckFrequencyFrame();
    ack_frequency_frame.packet_tolerance =
        kDefaultRetransmittablePacketsBeforeAck;
    visitor_->SendAckFrequency(ack_frequency_frame);
    // Sending may have closed the connection; bail out if so.
    if (!connected_) {
      return;
    }
  }
  // The retransmission alarm may need re-arming: handshake confirmation can
  // neuter handshake packets and change retransmission deadlines.
  SetRetransmissionAlarm();
  if (default_enable_5rto_blackhole_detection_) {
    QUIC_RELOADABLE_FLAG_COUNT_N(quic_default_enable_5rto_blackhole_detection2,
                                 2, 3);
    OnForwardProgressMade();
  }
  if (!SupportsMultiplePacketNumberSpaces()) {
    // Single packet-number space: make sure a pending handshake-completion
    // ack goes out promptly on the client.
    if (perspective_ == Perspective::IS_CLIENT && ack_frame_updated()) {
      ack_alarm().Update(clock_->ApproximateNow(), QuicTime::Delta::Zero());
    }
    return;
  }
  // Handshake space will never be acked again; clear it and re-schedule.
  uber_received_packet_manager_.ResetAckStates(ENCRYPTION_HANDSHAKE);
  ack_alarm().Update(uber_received_packet_manager_.GetEarliestAckTimeout(),
                     kAlarmGranularity);
  if (!accelerated_server_preferred_address_ &&
      received_server_preferred_address_.IsInitialized()) {
    QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective_);
    visitor_->OnServerPreferredAddressAvailable(
        received_server_preferred_address_);
  }
}
// Client-side: asks the visitor to create a context for an additional
// ("multi-port") path, unless the server disabled active migration, a path
// validation is already pending, or the creation limit has been reached.
void QuicConnection::MaybeCreateMultiPortPath() {
  QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective_);
  QUIC_CLIENT_HISTOGRAM_BOOL(
      "QuicConnection.ServerAllowsActiveMigrationForMultiPort",
      !active_migration_disabled_,
      "Whether the server allows active migration that's required for "
      "multi-port");
  if (active_migration_disabled_) {
    return;
  }
  if (path_validator_.HasPendingPathValidation()) {
    QUIC_CLIENT_HISTOGRAM_ENUM("QuicConnection.MultiPortPathCreationCancelled",
                               path_validator_.GetPathValidationReason(),
                               PathValidationReason::kMaxValue,
                               "Reason for cancelled multi port path creation");
    return;
  }
  if (multi_port_stats_->num_multi_port_paths_created >=
      kMaxNumMultiPortPaths) {
    return;
  }
  auto context_observer = std::make_unique<ContextObserver>(this);
  visitor_->CreateContextForMultiPortPath(std::move(context_observer));
}
// Hands the serialized packet to WritePacket, which performs any queueing
// internally.
void QuicConnection::SendOrQueuePacket(SerializedPacket packet) {
  WritePacket(&packet);
}
// Ack alarm handler: flushes pending ACKs, per packet number space when
// multiple spaces are supported, otherwise via the single-space path.
void QuicConnection::OnAckAlarm() {
  QUICHE_DCHECK(ack_frame_updated());
  QUICHE_DCHECK(connected());
  QuicConnection::ScopedPacketFlusher flusher(this);
  if (SupportsMultiplePacketNumberSpaces()) {
    SendAllPendingAcks();
  } else {
    SendAck();
  }
}
// Proactively sends an ACK (single packet number space only), then, when
// ShouldBundleRetransmittableFrameWithAck() holds and nothing else is
// pending, asks the visitor for a retransmittable frame to bundle with it.
void QuicConnection::SendAck() {
  QUICHE_DCHECK(!SupportsMultiplePacketNumberSpaces());
  QUIC_DVLOG(1) << ENDPOINT << "Sending an ACK proactively";
  QuicFrames frames;
  frames.push_back(GetUpdatedAckFrame());
  if (!packet_creator_.FlushAckFrame(frames)) {
    return;
  }
  ResetAckStates();
  if (!ShouldBundleRetransmittableFrameWithAck()) {
    return;
  }
  consecutive_num_packets_with_no_retransmittable_frames_ = 0;
  // Only request a retransmittable frame when nothing is already queued.
  if (packet_creator_.HasPendingRetransmittableFrames() ||
      visitor_->WillingAndAbleToWrite()) {
    return;
  }
  visitor_->OnAckNeedsRetransmittableFrame();
}
// Maps a packet number space to the encryption level a PING for that space
// should be sent at.
EncryptionLevel QuicConnection::GetEncryptionLevelToSendPingForSpace(
    PacketNumberSpace space) const {
  if (space == INITIAL_DATA) {
    return ENCRYPTION_INITIAL;
  }
  if (space == HANDSHAKE_DATA) {
    return ENCRYPTION_HANDSHAKE;
  }
  if (space == APPLICATION_DATA) {
    return framer_.GetEncryptionLevelToSendApplicationData();
  }
  // Unknown packet number space: programming error.
  QUICHE_DCHECK(false);
  return NUM_ENCRYPTION_LEVELS;
}
// Returns true if |address| is one of the server addresses this connection
// has already recorded.
bool QuicConnection::IsKnownServerAddress(
    const QuicSocketAddress& address) const {
  QUICHE_DCHECK(address.IsInitialized());
  for (const auto& known_address : known_server_addresses_) {
    if (known_address == address) {
      return true;
    }
  }
  return false;
}
// Chooses the ECN codepoint for a packet to |destination_address|. ECN is
// only applied when sending to the current peer, and is suppressed during a
// probe timeout until an ECN-marked packet has been acked on this path.
QuicEcnCodepoint QuicConnection::GetEcnCodepointToSend(
    const QuicSocketAddress& destination_address) const {
  const bool sending_to_current_peer =
      (destination_address == peer_address());
  const bool suppress_during_pto =
      in_probe_time_out_ && !default_path_.ecn_marked_packet_acked;
  if (!sending_to_current_peer || suppress_during_pto) {
    return ECN_NOT_ECT;
  }
  return packet_writer_params_.ecn_codepoint;
}
// Writes an encrypted packet through |writer| using |ecn_codepoint|,
// recording the codepoint that was used (read later by packet-sent
// bookkeeping such as WritePacketUsingWriter).
WriteResult QuicConnection::SendPacketToWriter(
    const char* buffer, size_t buf_len, const QuicIpAddress& self_address,
    const QuicSocketAddress& destination_address, QuicPacketWriter* writer,
    const QuicEcnCodepoint ecn_codepoint) {
  QuicPacketWriterParams params = packet_writer_params_;
  params.ecn_codepoint = ecn_codepoint;
  last_ecn_codepoint_sent_ = ecn_codepoint;
  WriteResult result =
      writer->WritePacket(buffer, buf_len, self_address, destination_address,
                          per_packet_options_, params);
  return result;
}
// Retransmission (PTO/loss) alarm handler. Lets the sent-packet manager pick
// the retransmission mode, writes what it can, and falls back to sending a
// PING probe if PTO fired but no packet went out. Also tracks consecutive
// PTOs of ECN-marked packets to detect broken ECN paths.
void QuicConnection::OnRetransmissionAlarm() {
  QUICHE_DCHECK(connected());
  ScopedRetransmissionTimeoutIndicator indicator(this);
#ifndef NDEBUG
  if (sent_packet_manager_.unacked_packets().empty()) {
    QUICHE_DCHECK(sent_packet_manager_.handshake_mode_disabled());
    QUICHE_DCHECK(!IsHandshakeConfirmed());
  }
#endif
  if (!connected_) {
    return;
  }
  QuicPacketNumber previous_created_packet_number =
      packet_creator_.packet_number();
  const auto retransmission_mode =
      sent_packet_manager_.OnRetransmissionTimeout();
  if (retransmission_mode == QuicSentPacketManager::PTO_MODE) {
    // In PTO mode, skip one packet number before probing.
    const QuicPacketCount num_packet_numbers_to_skip = 1;
    packet_creator_.SkipNPacketNumbers(
        num_packet_numbers_to_skip,
        sent_packet_manager_.GetLeastPacketAwaitedByPeer(encryption_level_),
        sent_packet_manager_.EstimateMaxPacketsInFlight(max_packet_length()));
    previous_created_packet_number += num_packet_numbers_to_skip;
    if (debug_visitor_ != nullptr) {
      debug_visitor_->OnNPacketNumbersSkipped(num_packet_numbers_to_skip,
                                              clock_->Now());
    }
  }
  if (default_enable_5rto_blackhole_detection_ &&
      !sent_packet_manager_.HasInFlightPackets() &&
      blackhole_detector_.IsDetectionInProgress()) {
    // Nothing in flight: stop blackhole detection (only possible in loss
    // mode, per the DCHECK).
    QUICHE_DCHECK_EQ(QuicSentPacketManager::LOSS_MODE, retransmission_mode);
    blackhole_detector_.StopDetection(false);
  }
  WriteIfNotBlocked();
  // Writing may have closed the connection.
  if (!connected_) {
    return;
  }
  sent_packet_manager_.MaybeSendProbePacket();
  if (packet_creator_.packet_number() == previous_created_packet_number &&
      retransmission_mode == QuicSentPacketManager::PTO_MODE &&
      !visitor_->WillingAndAbleToWrite()) {
    // PTO fired but nothing was sent and the session has no data: probe with
    // a PING at an appropriate encryption level.
    QUIC_DLOG(INFO) << ENDPOINT
                    << "No packet gets sent when timer fires in mode "
                    << retransmission_mode << ", send PING";
    QUICHE_DCHECK_LT(0u,
                     sent_packet_manager_.pending_timer_transmission_count());
    if (SupportsMultiplePacketNumberSpaces()) {
      PacketNumberSpace packet_number_space;
      if (sent_packet_manager_
              .GetEarliestPacketSentTimeForPto(&packet_number_space)
              .IsInitialized()) {
        SendPingAtLevel(
            GetEncryptionLevelToSendPingForSpace(packet_number_space));
      } else {
        // No in-flight packets to derive a space from; pick the best level
        // for which an encrypter exists.
        QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective_);
        if (framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_HANDSHAKE)) {
          SendPingAtLevel(ENCRYPTION_HANDSHAKE);
        } else if (framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_INITIAL)) {
          SendPingAtLevel(ENCRYPTION_INITIAL);
        } else {
          QUIC_BUG(quic_bug_no_pto) << "PTO fired but nothing was sent.";
        }
      }
    } else {
      SendPingAtLevel(encryption_level_);
    }
  }
  if (retransmission_mode == QuicSentPacketManager::PTO_MODE) {
    QUIC_BUG_IF(
        quic_bug_12714_27,
        packet_creator_.packet_number() == previous_created_packet_number &&
            (!visitor_->WillingAndAbleToWrite() ||
             sent_packet_manager_.pending_timer_transmission_count() == 0u))
        << "retransmission_mode: " << retransmission_mode
        << ", packet_number: " << packet_creator_.packet_number()
        << ", session has data to write: " << visitor_->WillingAndAbleToWrite()
        << ", writer is blocked: " << writer_->IsWriteBlocked()
        << ", pending_timer_transmission_count: "
        << sent_packet_manager_.pending_timer_transmission_count();
  }
  if (!HasQueuedData() && !retransmission_alarm().IsSet()) {
    SetRetransmissionAlarm();
  }
  // ECN validation: count PTOs while sending ECN-marked packets with no
  // ECN-marked ack yet; after kEcnPtoLimit consecutive PTOs, treat ECN
  // feedback as invalid and stop marking.
  if (packet_writer_params_.ecn_codepoint == ECN_NOT_ECT ||
      default_path_.ecn_marked_packet_acked) {
    return;
  }
  ++default_path_.ecn_pto_count;
  if (default_path_.ecn_pto_count == kEcnPtoLimit) {
    QUIC_DVLOG(1) << ENDPOINT << "ECN packets PTO 3 times.";
    OnInvalidEcnFeedback();
  }
}
// Installs the encrypter used for packets sent at |level|.
void QuicConnection::SetEncrypter(EncryptionLevel level,
                                  std::unique_ptr<QuicEncrypter> encrypter) {
  packet_creator_.SetEncrypter(level, std::move(encrypter));
}
// Removes the encrypter for |level| from the framer.
void QuicConnection::RemoveEncrypter(EncryptionLevel level) {
  framer_.RemoveEncrypter(level);
}
// Server-only: forwards the diversification nonce to the packet creator.
void QuicConnection::SetDiversificationNonce(
    const DiversificationNonce& nonce) {
  QUICHE_DCHECK_EQ(Perspective::IS_SERVER, perspective_);
  packet_creator_.SetDiversificationNonce(nonce);
}
// Switches the default encryption level used for outgoing packets. Any
// frames pending at the old level are flushed first so they are not sealed
// with the wrong keys; on an actual level change the packet number length is
// re-derived.
void QuicConnection::SetDefaultEncryptionLevel(EncryptionLevel level) {
  QUIC_DVLOG(1) << ENDPOINT << "Setting default encryption level from "
                << encryption_level_ << " to " << level;
  const bool changing_level = level != encryption_level_;
  if (changing_level && packet_creator_.HasPendingFrames()) {
    // Flush pending frames at the current level before switching.
    ScopedPacketFlusher flusher(this);
    packet_creator_.FlushCurrentPacket();
  }
  encryption_level_ = level;
  packet_creator_.set_encryption_level(level);
  QUIC_BUG_IF(quic_bug_12714_28, !framer_.HasEncrypterOfEncryptionLevel(level))
      << ENDPOINT << "Trying to set encryption level to "
      << EncryptionLevelToString(level) << " while the key is missing";
  if (!changing_level) {
    return;
  }
  packet_creator_.UpdatePacketNumberLength(
      sent_packet_manager_.GetLeastPacketAwaitedByPeer(encryption_level_),
      sent_packet_manager_.EstimateMaxPacketsInFlight(max_packet_length()));
}
// Installs the decrypter for |level|. A newly available key may make
// previously undecryptable packets readable, so schedule reprocessing.
void QuicConnection::SetDecrypter(EncryptionLevel level,
                                  std::unique_ptr<QuicDecrypter> decrypter) {
  framer_.SetDecrypter(level, std::move(decrypter));
  if (!undecryptable_packets_.empty() &&
      !process_undecryptable_packets_alarm().IsSet()) {
    process_undecryptable_packets_alarm().Set(clock_->ApproximateNow());
  }
}
// Installs an alternative decrypter (optionally latched once used); also
// schedules reprocessing of buffered undecryptable packets.
void QuicConnection::SetAlternativeDecrypter(
    EncryptionLevel level, std::unique_ptr<QuicDecrypter> decrypter,
    bool latch_once_used) {
  framer_.SetAlternativeDecrypter(level, std::move(decrypter), latch_once_used);
  if (!undecryptable_packets_.empty() &&
      !process_undecryptable_packets_alarm().IsSet()) {
    process_undecryptable_packets_alarm().Set(clock_->ApproximateNow());
  }
}
// Installs a decrypter for |level|, remembering whether a 0-RTT decrypter
// was ever present; also schedules reprocessing of buffered packets.
void QuicConnection::InstallDecrypter(
    EncryptionLevel level, std::unique_ptr<QuicDecrypter> decrypter) {
  if (level == ENCRYPTION_ZERO_RTT) {
    had_zero_rtt_decrypter_ = true;
  }
  framer_.InstallDecrypter(level, std::move(decrypter));
  if (!undecryptable_packets_.empty() &&
      !process_undecryptable_packets_alarm().IsSet()) {
    process_undecryptable_packets_alarm().Set(clock_->ApproximateNow());
  }
}
// Removes the decrypter for |level| from the framer.
void QuicConnection::RemoveDecrypter(EncryptionLevel level) {
  framer_.RemoveDecrypter(level);
}
// Alarm handler that discards the previous generation of 1-RTT keys.
void QuicConnection::OnDiscardPreviousOneRttKeysAlarm() {
  QUICHE_DCHECK(connected());
  framer_.DiscardPreviousOneRttKeys();
}
// A key update is allowed once a packet sent in the current key phase has
// been acked (and the feature is enabled).
bool QuicConnection::IsKeyUpdateAllowed() const {
  return support_key_update_for_connection_ &&
         GetLargestAckedPacket().IsInitialized() &&
         lowest_packet_sent_in_current_key_phase_.IsInitialized() &&
         GetLargestAckedPacket() >= lowest_packet_sent_in_current_key_phase_;
}
// True when packets have been sent in the current key phase but none of
// them has been acknowledged yet.
bool QuicConnection::HaveSentPacketsInCurrentKeyPhaseButNoneAcked() const {
  return lowest_packet_sent_in_current_key_phase_.IsInitialized() &&
         (!GetLargestAckedPacket().IsInitialized() ||
          GetLargestAckedPacket() < lowest_packet_sent_in_current_key_phase_);
}
// Forwards the framer's count of potential peer-initiated key updates.
QuicPacketCount QuicConnection::PotentialPeerKeyUpdateAttemptCount() const {
  return framer_.PotentialPeerKeyUpdateAttemptCount();
}
// Initiates a local key update; fails (with a QUIC_BUG) if not allowed.
bool QuicConnection::InitiateKeyUpdate(KeyUpdateReason reason) {
  QUIC_DLOG(INFO) << ENDPOINT << "InitiateKeyUpdate";
  if (!IsKeyUpdateAllowed()) {
    QUIC_BUG(quic_bug_10511_28) << "key update not allowed";
    return false;
  }
  return framer_.DoKeyUpdate(reason);
}
// Accessors for the framer's current and alternative decrypters.
const QuicDecrypter* QuicConnection::decrypter() const {
  return framer_.decrypter();
}
const QuicDecrypter* QuicConnection::alternative_decrypter() const {
  return framer_.alternative_decrypter();
}
// Buffers a packet that could not be decrypted yet, deduplicating by data
// pointer and length against already-queued packets. Clients re-arm the
// retransmission alarm after queueing.
void QuicConnection::QueueUndecryptablePacket(
    const QuicEncryptedPacket& packet, EncryptionLevel decryption_level) {
  for (const auto& saved_packet : undecryptable_packets_) {
    if (packet.data() == saved_packet.packet->data() &&
        packet.length() == saved_packet.packet->length()) {
      QUIC_DVLOG(1) << ENDPOINT << "Not queueing known undecryptable packet";
      return;
    }
  }
  QUIC_DVLOG(1) << ENDPOINT << "Queueing undecryptable packet.";
  undecryptable_packets_.emplace_back(packet, decryption_level,
                                      last_received_packet_info_);
  if (perspective_ == Perspective::IS_CLIENT) {
    SetRetransmissionAlarm();
  }
}
// Alarm handler that retries decryption of buffered packets under a packet
// flusher so any resulting responses are batched.
void QuicConnection::OnProcessUndecryptablePacketsAlarm() {
  QUICHE_DCHECK(connected());
  ScopedPacketFlusher flusher(this);
  MaybeProcessUndecryptablePackets();
}
// Retries decryption of each buffered undecryptable packet. Packets that now
// decrypt are processed and removed; packets that still fail with a
// decryption error may be kept for a later retry; other failures are
// dropped. Once the handshake completes, remaining buffered packets are
// discarded (all keys that will ever exist are available).
void QuicConnection::MaybeProcessUndecryptablePackets() {
  process_undecryptable_packets_alarm().Cancel();
  if (undecryptable_packets_.empty() ||
      encryption_level_ == ENCRYPTION_INITIAL) {
    return;
  }
  auto iter = undecryptable_packets_.begin();
  while (connected_ && iter != undecryptable_packets_.end()) {
    // Making sure there is no pending frames when processing next undecrypted
    // packet because the queued frames may change the encryption state.
    packet_creator_.FlushCurrentPacket();
    if (!connected_) {
      return;
    }
    UndecryptablePacket* undecryptable_packet = &*iter;
    QUIC_DVLOG(1) << ENDPOINT << "Attempting to process undecryptable packet";
    if (debug_visitor_ != nullptr) {
      debug_visitor_->OnAttemptingToProcessUndecryptablePacket(
          undecryptable_packet->encryption_level);
    }
    // Restore the receive-time context the packet originally arrived with.
    last_received_packet_info_ = undecryptable_packet->packet_info;
    current_packet_data_ = undecryptable_packet->packet->data();
    const bool processed = framer_.ProcessPacket(*undecryptable_packet->packet);
    current_packet_data_ = nullptr;
    if (processed) {
      QUIC_DVLOG(1) << ENDPOINT << "Processed undecryptable packet!";
      iter = undecryptable_packets_.erase(iter);
      ++stats_.packets_processed;
      continue;
    }
    const bool has_decryption_key = version().KnowsWhichDecrypterToUse() &&
                                    framer_.HasDecrypterOfEncryptionLevel(
                                        undecryptable_packet->encryption_level);
    if (framer_.error() == QUIC_DECRYPTION_FAILURE &&
        ShouldEnqueueUnDecryptablePacket(undecryptable_packet->encryption_level,
                                         has_decryption_key)) {
      // Keep the packet buffered for another attempt when new keys arrive.
      QUIC_DVLOG(1)
          << ENDPOINT
          << "Need to attempt to process this undecryptable packet later";
      ++iter;
      continue;
    }
    iter = undecryptable_packets_.erase(iter);
  }
  if (IsHandshakeComplete()) {
    if (debug_visitor_ != nullptr) {
      for (const auto& undecryptable_packet : undecryptable_packets_) {
        debug_visitor_->OnUndecryptablePacket(
            undecryptable_packet.encryption_level, /*dropped=*/true);
      }
    }
    undecryptable_packets_.clear();
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    SetRetransmissionAlarm();
  }
}
// Buffers a packet extracted from a coalesced datagram for later processing.
void QuicConnection::QueueCoalescedPacket(const QuicEncryptedPacket& packet) {
  QUIC_DVLOG(1) << ENDPOINT << "Queueing coalesced packet.";
  received_coalesced_packets_.push_back(packet.Clone());
  ++stats_.num_coalesced_packets_received;
}
// Processes all queued coalesced packets; returns whether any of them was
// successfully processed. On success, also retries undecryptable packets and
// may send a response.
bool QuicConnection::MaybeProcessCoalescedPackets() {
  bool processed = false;
  while (connected_ && !received_coalesced_packets_.empty()) {
    // Flush first so queued frames cannot interfere with the packet about to
    // be processed (processing may change encryption state).
    packet_creator_.FlushCurrentPacket();
    if (!connected_) {
      return processed;
    }
    std::unique_ptr<QuicEncryptedPacket> packet =
        std::move(received_coalesced_packets_.front());
    received_coalesced_packets_.pop_front();
    QUIC_DVLOG(1) << ENDPOINT << "Processing coalesced packet";
    if (framer_.ProcessPacket(*packet)) {
      processed = true;
      ++stats_.num_coalesced_packets_processed;
    } else {
      // If we are unable to decrypt this packet, it might be
      // because the CHLO or SHLO packet was lost.
    }
  }
  if (processed) {
    MaybeProcessUndecryptablePackets();
    MaybeSendInResponseToPacket();
  }
  return processed;
}
// Convenience overload: closes with no IETF transport error code.
void QuicConnection::CloseConnection(
    QuicErrorCode error, const std::string& details,
    ConnectionCloseBehavior connection_close_behavior) {
  CloseConnection(error, NO_IETF_QUIC_ERROR, details,
                  connection_close_behavior);
}
// Closes the connection: optionally sends a CONNECTION_CLOSE packet (unless
// SILENT_CLOSE), then tears down local state. Idempotent if already closed.
void QuicConnection::CloseConnection(
    QuicErrorCode error, QuicIetfTransportErrorCodes ietf_error,
    const std::string& error_details,
    ConnectionCloseBehavior connection_close_behavior) {
  QUICHE_DCHECK(!error_details.empty());
  if (!connected_) {
    QUIC_DLOG(INFO) << "Connection is already closed.";
    return;
  }
  if (ietf_error != NO_IETF_QUIC_ERROR) {
    QUIC_DLOG(INFO) << ENDPOINT << "Closing connection: " << connection_id()
                    << ", with wire error: " << ietf_error
                    << ", error: " << QuicErrorCodeToString(error)
                    << ", and details: " << error_details;
  } else {
    QUIC_DLOG(INFO) << ENDPOINT << "Closing connection: " << connection_id()
                    << ", with error: " << QuicErrorCodeToString(error) << " ("
                    << error << "), and details: " << error_details;
  }
  if (connection_close_behavior != ConnectionCloseBehavior::SILENT_CLOSE) {
    SendConnectionClosePacket(error, ietf_error, error_details);
  }
  TearDownLocalConnectionState(error, ietf_error, error_details,
                               ConnectionCloseSource::FROM_SELF);
}
// Sends CONNECTION_CLOSE. With a single packet number space, one close
// packet is sent at the chosen close encryption level; with multiple spaces,
// a close packet is sent at every level for which an encrypter exists so
// the peer can read at least one of them. Queued/coalesced data is cleared
// so the close is not mixed with stale packets.
void QuicConnection::SendConnectionClosePacket(
    QuicErrorCode error, QuicIetfTransportErrorCodes ietf_error,
    const std::string& details) {
  // Always use the default path to send CONNECTION_CLOSE.
  QuicPacketCreator::ScopedPeerAddressContext peer_address_context(
      &packet_creator_, peer_address(), default_path_.client_connection_id,
      default_path_.server_connection_id);
  if (!SupportsMultiplePacketNumberSpaces()) {
    QUIC_DLOG(INFO) << ENDPOINT << "Sending connection close packet.";
    ScopedEncryptionLevelContext encryption_level_context(
        this, GetConnectionCloseEncryptionLevel());
    if (version().CanSendCoalescedPackets()) {
      coalesced_packet_.Clear();
    }
    ClearQueuedPackets();
    ScopedPacketFlusher flusher(this);
    // Bundle an ACK with the close, except after a write error (the ACK could
    // trigger another write) or when one is already pending.
    if (error != QUIC_PACKET_WRITE_ERROR &&
        !uber_received_packet_manager_.IsAckFrameEmpty(
            QuicUtils::GetPacketNumberSpace(encryption_level_)) &&
        !packet_creator_.has_ack()) {
      SendAck();
    }
    QuicConnectionCloseFrame* const frame = new QuicConnectionCloseFrame(
        transport_version(), error, ietf_error, details,
        framer_.current_received_frame_type());
    packet_creator_.ConsumeRetransmittableControlFrame(QuicFrame(frame));
    packet_creator_.FlushCurrentPacket();
    if (version().CanSendCoalescedPackets()) {
      FlushCoalescedPacket();
    }
    ClearQueuedPackets();
    return;
  }
  ScopedPacketFlusher flusher(this);
  if (version().CanSendCoalescedPackets()) {
    coalesced_packet_.Clear();
  }
  ClearQueuedPackets();
  for (EncryptionLevel level :
       {ENCRYPTION_INITIAL, ENCRYPTION_HANDSHAKE, ENCRYPTION_ZERO_RTT,
        ENCRYPTION_FORWARD_SECURE}) {
    if (!framer_.HasEncrypterOfEncryptionLevel(level)) {
      continue;
    }
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Sending connection close packet at level: " << level;
    ScopedEncryptionLevelContext context(this, level);
    if (error != QUIC_PACKET_WRITE_ERROR &&
        !uber_received_packet_manager_.IsAckFrameEmpty(
            QuicUtils::GetPacketNumberSpace(encryption_level_)) &&
        !packet_creator_.has_ack()) {
      QuicFrames frames;
      frames.push_back(GetUpdatedAckFrame());
      packet_creator_.FlushAckFrame(frames);
    }
    if (level == ENCRYPTION_FORWARD_SECURE &&
        perspective_ == Perspective::IS_SERVER) {
      visitor_->BeforeConnectionCloseSent();
    }
    auto* frame = new QuicConnectionCloseFrame(
        transport_version(), error, ietf_error, details,
        framer_.current_received_frame_type());
    packet_creator_.ConsumeRetransmittableControlFrame(QuicFrame(frame));
    packet_creator_.FlushCurrentPacket();
  }
  if (version().CanSendCoalescedPackets()) {
    FlushCoalescedPacket();
  }
  ClearQueuedPackets();
}
// Convenience overload that builds a close frame from an error code.
void QuicConnection::TearDownLocalConnectionState(
    QuicErrorCode error, QuicIetfTransportErrorCodes ietf_error,
    const std::string& error_details, ConnectionCloseSource source) {
  QuicConnectionCloseFrame frame(transport_version(), error, ietf_error,
                                 error_details,
                                 framer_.current_received_frame_type());
  return TearDownLocalConnectionState(frame, source);
}
// Flushes pending writes, marks the connection closed, notifies visitors,
// and cancels all alarms/validations. Idempotent if already closed.
void QuicConnection::TearDownLocalConnectionState(
    const QuicConnectionCloseFrame& frame, ConnectionCloseSource source) {
  if (!connected_) {
    QUIC_DLOG(INFO) << "Connection is already closed.";
    return;
  }
  // If we are using a batch writer, flush here to ensure all queued writes
  // are sent before the connection state is torn down.
  FlushPackets();
  connected_ = false;
  QUICHE_DCHECK(visitor_ != nullptr);
  visitor_->OnConnectionClosed(frame, source);
  sent_packet_manager_.OnConnectionClosed();
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnConnectionClosed(frame, source);
  }
  CancelAllAlarms();
  CancelPathValidation();
  peer_issued_cid_manager_.reset();
  self_issued_cid_manager_.reset();
}
// Permanently cancels every connection alarm and stops the blackhole and
// idle-network detectors. Used during connection teardown.
void QuicConnection::CancelAllAlarms() {
  QUIC_DVLOG(1) << "Cancelling all QuicConnection alarms.";
  ack_alarm().PermanentCancel();
  ping_manager_.Stop();
  retransmission_alarm().PermanentCancel();
  send_alarm().PermanentCancel();
  mtu_discovery_alarm().PermanentCancel();
  process_undecryptable_packets_alarm().PermanentCancel();
  discard_previous_one_rtt_keys_alarm().PermanentCancel();
  discard_zero_rtt_decryption_keys_alarm().PermanentCancel();
  multi_port_probing_alarm().PermanentCancel();
  blackhole_detector_.StopDetection(/*is_path_degrading=*/true);
  idle_network_detector_.StopDetection();
}
// Current maximum outgoing packet length, as held by the packet creator.
QuicByteCount QuicConnection::max_packet_length() const {
  return packet_creator_.max_packet_length();
}
// Records the requested long-term MTU and applies the writer/peer-limited
// value to the packet creator.
void QuicConnection::SetMaxPacketLength(QuicByteCount length) {
  long_term_mtu_ = length;
  stats_.max_egress_mtu = std::max(stats_.max_egress_mtu, long_term_mtu_);
  packet_creator_.SetMaxPacketLength(GetLimitedMaxPacketSize(length));
}
// True when frames are pending in the creator or packets are buffered.
bool QuicConnection::HasQueuedData() const {
  return packet_creator_.HasPendingFrames() || !buffered_packets_.empty();
}
// Configures handshake and idle timeouts. The server pads the idle timeout
// by 3 seconds and the client shaves off 1 second (when it exceeds one
// second) so the client times out before the server does.
void QuicConnection::SetNetworkTimeouts(QuicTime::Delta handshake_timeout,
                                        QuicTime::Delta idle_timeout) {
  QUIC_BUG_IF(quic_bug_12714_29, idle_timeout > handshake_timeout)
      << "idle_timeout:" << idle_timeout.ToMilliseconds()
      << " handshake_timeout:" << handshake_timeout.ToMilliseconds();
  QUIC_DVLOG(1) << ENDPOINT << "Setting network timeouts: "
                << "handshake_timeout:" << handshake_timeout.ToMilliseconds()
                << " idle_timeout:" << idle_timeout.ToMilliseconds();
  if (perspective_ == Perspective::IS_SERVER) {
    idle_timeout = idle_timeout + QuicTime::Delta::FromSeconds(3);
  } else if (idle_timeout > QuicTime::Delta::FromSeconds(1)) {
    idle_timeout = idle_timeout - QuicTime::Delta::FromSeconds(1);
  }
  idle_network_detector_.SetTimeouts(handshake_timeout, idle_timeout);
}
// (Re)arms the keep-alive ping alarm based on whether the visitor wants the
// connection kept alive and whether packets are in flight.
void QuicConnection::SetPingAlarm() {
  if (!connected_) {
    return;
  }
  ping_manager_.SetAlarm(clock_->ApproximateNow(),
                         visitor_->ShouldKeepConnectionAlive(),
                         sent_packet_manager_.HasInFlightPackets());
}
// (Re)arms the retransmission alarm. Deferred while a packet flusher is
// attached (re-armed when the flusher is destroyed), cancelled while the
// server is amplification-limited, and handled specially before handshake
// confirmation when no PTO-eligible packet time exists yet.
void QuicConnection::SetRetransmissionAlarm() {
  if (!connected_) {
    if (retransmission_alarm().IsSet()) {
      QUIC_BUG(quic_bug_10511_29)
          << ENDPOINT << "Retransmission alarm is set while disconnected";
      retransmission_alarm().Cancel();
    }
    return;
  }
  if (packet_creator_.PacketFlusherAttached()) {
    // Defer: the flusher's destructor will re-arm the alarm.
    pending_retransmission_alarm_ = true;
    return;
  }
  if (LimitedByAmplificationFactor(packet_creator_.max_packet_length())) {
    // Do not set retransmission timer if connection is anti-amplification limit
    // throttled. Otherwise, nothing can be sent when timer fires.
    retransmission_alarm().Cancel();
    return;
  }
  PacketNumberSpace packet_number_space;
  if (SupportsMultiplePacketNumberSpaces() && !IsHandshakeConfirmed() &&
      !sent_packet_manager_
           .GetEarliestPacketSentTimeForPto(&packet_number_space)
           .IsInitialized()) {
    // No in-flight packet eligible for PTO before handshake confirmation.
    if (perspective_ == Perspective::IS_SERVER) {
      // No need to arm PTO on server side.
      retransmission_alarm().Cancel();
      return;
    }
    if (retransmission_alarm().IsSet() &&
        GetRetransmissionDeadline() > retransmission_alarm().deadline()) {
      // Do not postpone armed PTO on the client side.
      return;
    }
  }
  retransmission_alarm().Update(GetRetransmissionDeadline(), kAlarmGranularity);
}
// Arms the MTU discovery alarm if it is not already set and the discoverer
// wants to probe after |sent_packet_number|.
void QuicConnection::MaybeSetMtuAlarm(QuicPacketNumber sent_packet_number) {
  if (mtu_discovery_alarm().IsSet() ||
      !mtu_discoverer_.ShouldProbeMtu(sent_packet_number)) {
    return;
  }
  mtu_discovery_alarm().Set(clock_->ApproximateNow());
}
// RAII packet flusher. The outermost flusher (the one that actually attaches
// the creator's flusher) is responsible for flushing and re-arming alarms on
// destruction; nested flushers are no-ops.
QuicConnection::ScopedPacketFlusher::ScopedPacketFlusher(
    QuicConnection* connection)
    : connection_(connection),
      flush_and_set_pending_retransmission_alarm_on_delete_(false),
      handshake_packet_sent_(connection != nullptr &&
                             connection->handshake_packet_sent_) {
  if (connection_ == nullptr) {
    return;
  }
  if (!connection_->packet_creator_.PacketFlusherAttached()) {
    // We are the outermost flusher; take responsibility for the final flush.
    flush_and_set_pending_retransmission_alarm_on_delete_ = true;
    connection->packet_creator_.AttachPacketFlusher();
  }
}
// Outermost-flusher destructor: settles the ack alarm, sends due ACKs,
// flushes the creator and any coalesced packet, notifies about the first
// handshake packet sent, and re-arms a deferred retransmission alarm.
QuicConnection::ScopedPacketFlusher::~ScopedPacketFlusher() {
  if (connection_ == nullptr || !connection_->connected()) {
    return;
  }
  if (flush_and_set_pending_retransmission_alarm_on_delete_) {
    const QuicTime ack_timeout =
        connection_->uber_received_packet_manager_.GetEarliestAckTimeout();
    if (ack_timeout.IsInitialized()) {
      if (ack_timeout <= connection_->clock_->ApproximateNow() &&
          !connection_->CanWrite(NO_RETRANSMITTABLE_DATA)) {
        // Cancel ACK alarm if connection is write blocked, and ACK will be
        // sent when connection gets unblocked.
        connection_->ack_alarm().Cancel();
      } else if (!connection_->ack_alarm().IsSet() ||
                 connection_->ack_alarm().deadline() > ack_timeout) {
        connection_->ack_alarm().Update(ack_timeout, QuicTime::Delta::Zero());
      }
    }
    if (connection_->ack_alarm().IsSet() &&
        connection_->ack_alarm().deadline() <=
            connection_->clock_->ApproximateNow()) {
      // An ACK is due now.
      if (connection_->send_alarm().IsSet() &&
          connection_->send_alarm().deadline() <=
              connection_->clock_->ApproximateNow()) {
        // If send alarm will go off soon, let send alarm send the ACK.
        connection_->ack_alarm().Cancel();
      } else if (connection_->SupportsMultiplePacketNumberSpaces()) {
        connection_->SendAllPendingAcks();
      } else {
        connection_->SendAck();
      }
    }
    if (connection_->version().CanSendCoalescedPackets()) {
      connection_->MaybeCoalescePacketOfHigherSpace();
    }
    connection_->packet_creator_.Flush();
    if (connection_->version().CanSendCoalescedPackets()) {
      connection_->FlushCoalescedPacket();
    }
    connection_->FlushPackets();
    // Flushing may have closed the connection.
    if (!connection_->connected()) {
      return;
    }
    if (!handshake_packet_sent_ && connection_->handshake_packet_sent_) {
      // This flusher sent the first handshake packet.
      connection_->visitor_->OnHandshakePacketSent();
    }
    connection_->SetTransmissionType(NOT_RETRANSMISSION);
    connection_->CheckIfApplicationLimited();
    if (connection_->pending_retransmission_alarm_) {
      // Re-arm the alarm that was deferred while the flusher was attached.
      connection_->SetRetransmissionAlarm();
      connection_->pending_retransmission_alarm_ = false;
    }
  }
  QUICHE_DCHECK_EQ(flush_and_set_pending_retransmission_alarm_on_delete_,
                   !connection_->packet_creator_.PacketFlusherAttached());
}
// RAII helper that temporarily switches the default encryption level and
// restores the previous level on destruction (if still connected).
QuicConnection::ScopedEncryptionLevelContext::ScopedEncryptionLevelContext(
    QuicConnection* connection, EncryptionLevel encryption_level)
    : connection_(connection), latched_encryption_level_(ENCRYPTION_INITIAL) {
  if (connection_ == nullptr) {
    return;
  }
  latched_encryption_level_ = connection_->encryption_level_;
  connection_->SetDefaultEncryptionLevel(encryption_level);
}
QuicConnection::ScopedEncryptionLevelContext::~ScopedEncryptionLevelContext() {
  if (connection_ == nullptr || !connection_->connected_) {
    return;
  }
  connection_->SetDefaultEncryptionLevel(latched_encryption_level_);
}
// Buffers an already-serialized packet (copies its encrypted bytes).
QuicConnection::BufferedPacket::BufferedPacket(
    const SerializedPacket& packet, const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address, const QuicEcnCodepoint ecn_codepoint)
    : BufferedPacket(packet.encrypted_buffer, packet.encrypted_length,
                     self_address, peer_address, ecn_codepoint) {}
// Buffers a raw encrypted buffer by copying it into owned storage.
QuicConnection::BufferedPacket::BufferedPacket(
    const char* encrypted_buffer, QuicPacketLength encrypted_length,
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address, const QuicEcnCodepoint ecn_codepoint)
    : length(encrypted_length),
      self_address(self_address),
      peer_address(peer_address),
      ecn_codepoint(ecn_codepoint) {
  data = std::make_unique<char[]>(encrypted_length);
  memcpy(data.get(), encrypted_buffer, encrypted_length);
}
// Buffers a packet filled with random bytes of the given length.
QuicConnection::BufferedPacket::BufferedPacket(
    QuicRandom& random, QuicPacketLength encrypted_length,
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address)
    : length(encrypted_length),
      self_address(self_address),
      peer_address(peer_address) {
  data = std::make_unique<char[]>(encrypted_length);
  random.RandBytes(data.get(), encrypted_length);
}
// Per-packet receive metadata; the short form records only receipt time.
QuicConnection::ReceivedPacketInfo::ReceivedPacketInfo(QuicTime receipt_time)
    : receipt_time(receipt_time) {}
QuicConnection::ReceivedPacketInfo::ReceivedPacketInfo(
    const QuicSocketAddress& destination_address,
    const QuicSocketAddress& source_address, QuicTime receipt_time,
    QuicByteCount length, QuicEcnCodepoint ecn_codepoint)
    : destination_address(destination_address),
      source_address(source_address),
      receipt_time(receipt_time),
      length(length),
      ecn_codepoint(ecn_codepoint) {}
// Debug-prints a ReceivedPacketInfo. Decryption details (level, header,
// frames) are only printed when the packet has been decrypted.
std::ostream& operator<<(std::ostream& os,
                         const QuicConnection::ReceivedPacketInfo& info) {
  os << " { destination_address: " << info.destination_address.ToString()
     << ", source_address: " << info.source_address.ToString()
     << ", received_bytes_counted: " << info.received_bytes_counted
     << ", length: " << info.length
     << ", destination_connection_id: " << info.destination_connection_id;
  if (!info.decrypted) {
    os << " }\n";
    return os;
  }
  os << ", decrypted: " << info.decrypted
     << ", decrypted_level: " << EncryptionLevelToString(info.decrypted_level)
     << ", header: " << info.header << ", frames: ";
  // Iterate by const reference: the original `const auto frame` copied each
  // QuicFrame per iteration (clang-tidy: performance-for-range-copy).
  for (const auto& frame : info.frames) {
    os << frame;
  }
  os << " }\n";
  return os;
}
// A packet counts as retransmittable when it is itself a retransmission or
// carries at least one retransmittable frame.
HasRetransmittableData QuicConnection::IsRetransmittable(
    const SerializedPacket& packet) {
  const bool retransmittable =
      packet.transmission_type != NOT_RETRANSMISSION ||
      !packet.retransmittable_frames.empty();
  return retransmittable ? HAS_RETRANSMITTABLE_DATA : NO_RETRANSMITTABLE_DATA;
}
bool QuicConnection::IsTerminationPacket(const SerializedPacket& packet,
QuicErrorCode* error_code) {
if (packet.retransmittable_frames.empty()) {
return false;
}
for (const QuicFrame& frame : packet.retransmittable_frames) {
if (frame.type == CONNECTION_CLOSE_FRAME) {
*error_code = frame.connection_close_frame->quic_error_code;
return true;
}
}
return false;
}
// Restarts MTU discovery toward |target|, clamped by writer/peer limits.
void QuicConnection::SetMtuDiscoveryTarget(QuicByteCount target) {
  QUIC_DVLOG(2) << ENDPOINT << "SetMtuDiscoveryTarget: " << target;
  mtu_discoverer_.Disable();
  mtu_discoverer_.Enable(max_packet_length(), GetLimitedMaxPacketSize(target));
}
// Clamps |suggested_max_packet_size| to the writer's limit for the current
// peer address, the peer's advertised maximum, and the absolute outgoing
// packet size cap.
QuicByteCount QuicConnection::GetLimitedMaxPacketSize(
    QuicByteCount suggested_max_packet_size) {
  if (!peer_address().IsInitialized()) {
    QUIC_BUG(quic_bug_10511_30)
        << "Attempted to use a connection without a valid peer address";
    return suggested_max_packet_size;
  }
  const QuicByteCount writer_limit = writer_->GetMaxPacketSize(peer_address());
  QuicByteCount limited_size =
      std::min(suggested_max_packet_size, writer_limit);
  limited_size = std::min(limited_size, peer_max_packet_size_);
  limited_size = std::min<QuicByteCount>(limited_size, kMaxOutgoingPacketSize);
  return limited_size;
}
// Sends a padded MTU probe of |target_mtu| bytes (caller must have already
// clamped the target, per the DCHECK).
void QuicConnection::SendMtuDiscoveryPacket(QuicByteCount target_mtu) {
  QUICHE_DCHECK_EQ(target_mtu, GetLimitedMaxPacketSize(target_mtu));
  packet_creator_.GenerateMtuDiscoveryPacket(target_mtu);
}
// Sends a connectivity/path probe to |peer_address| via |probing_writer|
// (servers fall back to the default writer). For IETF versions the probe is
// a PATH_CHALLENGE with a random payload; otherwise a padded PING. Returns
// false only on hard failure; a blocked writer still returns true.
bool QuicConnection::SendConnectivityProbingPacket(
    QuicPacketWriter* probing_writer, const QuicSocketAddress& peer_address) {
  QUICHE_DCHECK(peer_address.IsInitialized());
  if (!connected_) {
    QUIC_BUG(quic_bug_10511_31)
        << "Not sending connectivity probing packet as connection is "
        << "disconnected.";
    return false;
  }
  if (perspective_ == Perspective::IS_SERVER && probing_writer == nullptr) {
    // Server can use default packet writer to write packet.
    probing_writer = writer_;
  }
  QUICHE_DCHECK(probing_writer);
  if (probing_writer->IsWriteBlocked()) {
    QUIC_DLOG(INFO)
        << ENDPOINT
        << "Writer blocked when sending connectivity probing packet.";
    if (probing_writer == writer_) {
      // Visitor should not be write blocked if the probing writer is not the
      // default packet writer.
      visitor_->OnWriteBlocked();
    }
    return true;
  }
  QUIC_DLOG(INFO) << ENDPOINT
                  << "Sending path probe packet for connection_id = "
                  << default_path_.server_connection_id;
  std::unique_ptr<SerializedPacket> probing_packet;
  if (!version().HasIetfQuicFrames()) {
    // Non-IETF: padded PING probe.
    probing_packet = packet_creator_.SerializeConnectivityProbingPacket();
  } else {
    // IETF: PATH_CHALLENGE probe with a random payload.
    QuicPathFrameBuffer transmitted_connectivity_probe_payload;
    random_generator_->RandBytes(&transmitted_connectivity_probe_payload,
                                 sizeof(QuicPathFrameBuffer));
    probing_packet =
        packet_creator_.SerializePathChallengeConnectivityProbingPacket(
            transmitted_connectivity_probe_payload);
  }
  QUICHE_DCHECK_EQ(IsRetransmittable(*probing_packet), NO_RETRANSMITTABLE_DATA);
  return WritePacketUsingWriter(std::move(probing_packet), probing_writer,
                                self_address(), peer_address,
                                /*measure_rtt=*/true);
}
// Writes a single (non-retransmittable) probe packet through |writer| to
// |peer_address|, records it with the sent-packet manager, and notifies the
// debug visitor. Returns false on a write error; a blocked write still
// returns true.
bool QuicConnection::WritePacketUsingWriter(
    std::unique_ptr<SerializedPacket> packet, QuicPacketWriter* writer,
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address, bool measure_rtt) {
  const QuicTime packet_send_time = clock_->Now();
  QUIC_BUG_IF(write using blocked writer, writer->IsWriteBlocked());
  QUIC_DVLOG(2) << ENDPOINT
                << "Sending path probe packet for server connection ID "
                << default_path_.server_connection_id << std::endl
                << quiche::QuicheTextUtils::HexDump(absl::string_view(
                       packet->encrypted_buffer, packet->encrypted_length));
  WriteResult result = SendPacketToWriter(
      packet->encrypted_buffer, packet->encrypted_length, self_address.host(),
      peer_address, writer, GetEcnCodepointToSend(peer_address));
  const uint32_t writer_batch_id = result.batch_id;
  // If using a batch writer and the probing packet is buffered, flush it.
  if (writer->IsBatchMode() && result.status == WRITE_STATUS_OK &&
      result.bytes_written == 0) {
    result = writer->Flush();
  }
  if (IsWriteError(result.status)) {
    QUIC_DLOG(INFO) << ENDPOINT << "Write probing packet failed with error = "
                    << result.error_code;
    return false;
  }
  sent_packet_manager_.OnPacketSent(
      packet.get(), packet_send_time, packet->transmission_type,
      NO_RETRANSMITTABLE_DATA, measure_rtt, last_ecn_codepoint_sent_);
  if (debug_visitor_ != nullptr) {
    if (sent_packet_manager_.unacked_packets().empty()) {
      QUIC_BUG(quic_bug_10511_32)
          << "Unacked map is empty right after packet is sent";
    } else {
      debug_visitor_->OnPacketSent(
          packet->packet_number, packet->encrypted_length,
          packet->has_crypto_handshake, packet->transmission_type,
          packet->encryption_level,
          sent_packet_manager_.unacked_packets()
              .rbegin()
              ->retransmittable_frames,
          packet->nonretransmittable_frames, packet_send_time, writer_batch_id);
    }
  }
  if (IsWriteBlockedStatus(result.status)) {
    if (writer == writer_) {
      // Visitor should not be write blocked if the probing writer is not the
      // default packet writer.
      visitor_->OnWriteBlocked();
    }
    if (result.status == WRITE_STATUS_BLOCKED_DATA_BUFFERED) {
      QUIC_DLOG(INFO) << ENDPOINT << "Write probing packet blocked";
    }
  }
  return true;
}
// Permanently disables MTU probing and cancels any pending discovery alarm.
void QuicConnection::DisableMtuDiscovery() {
  mtu_discoverer_.Disable();
  mtu_discovery_alarm().Cancel();
}
// MTU discovery alarm callback: asks the discoverer whether a probe is due
// (based on the largest packet number sent so far) and, if so, sends a probe
// of the next candidate size.
void QuicConnection::OnMtuDiscoveryAlarm() {
  QUICHE_DCHECK(connected());
  QUICHE_DCHECK(!mtu_discovery_alarm().IsSet());
  const QuicPacketNumber largest_sent_packet =
      sent_packet_manager_.GetLargestSentPacket();
  if (mtu_discoverer_.ShouldProbeMtu(largest_sent_packet)) {
    ++mtu_probe_count_;
    SendMtuDiscoveryPacket(
        mtu_discoverer_.GetUpdatedMtuProbeSize(largest_sent_packet));
  }
  // Sending the probe must not have re-armed this alarm.
  QUICHE_DCHECK(!mtu_discovery_alarm().IsSet());
}
// Finalizes a peer migration once the new peer address is validated: clears
// migration bookkeeping, marks the default path validated, clears the
// alternative path, and (on IETF versions, for non-port-only changes) asks
// the visitor to send a fresh address token. The unnamed bool parameter is
// ignored here; the caller passes whether the server connection ID stayed
// the same across the migration — TODO confirm the intended parameter name.
void QuicConnection::OnEffectivePeerMigrationValidated(
    bool ) {
  if (active_effective_peer_migration_type_ == NO_CHANGE) {
    QUIC_BUG(quic_bug_10511_33) << "No migration underway.";
    return;
  }
  highest_packet_sent_before_effective_peer_migration_.Clear();
  // A pure port change does not warrant a new address token.
  const bool send_address_token =
      active_effective_peer_migration_type_ != PORT_CHANGE;
  active_effective_peer_migration_type_ = NO_CHANGE;
  ++stats_.num_validated_peer_migration;
  if (!framer_.version().HasIetfQuicFrames()) {
    return;
  }
  if (debug_visitor_ != nullptr) {
    const QuicTime now = clock_->ApproximateNow();
    if (now >= stats_.handshake_completion_time) {
      debug_visitor_->OnPeerMigrationValidated(
          now - stats_.handshake_completion_time);
    } else {
      QUIC_BUG(quic_bug_10511_34)
          << "Handshake completion time is larger than current time.";
    }
  }
  default_path_.validated = true;
  alternative_path_.Clear();
  if (send_address_token) {
    visitor_->MaybeSendAddressToken();
  }
}
// Migrates the connection to the effective peer address seen on the current
// packet. For pre-IETF (gQUIC) versions the peer address is simply switched
// without validation. For IETF versions this swaps in (or constructs) a new
// default path, transfers or resets congestion/RTT state, and either declares
// the migration validated immediately (when the new path was already
// validated) or starts reverse path validation under the anti-amplification
// limit.
void QuicConnection::StartEffectivePeerMigration(AddressChangeType type) {
  if (!framer_.version().HasIetfQuicFrames()) {
    if (type == NO_CHANGE) {
      QUIC_BUG(quic_bug_10511_35)
          << "EffectivePeerMigration started without address change.";
      return;
    }
    QUIC_DLOG(INFO)
        << ENDPOINT << "Effective peer's ip:port changed from "
        << default_path_.peer_address.ToString() << " to "
        << GetEffectivePeerAddressFromCurrentPacket().ToString()
        << ", address change type is " << type
        << ", migrating connection without validating new client address.";
    highest_packet_sent_before_effective_peer_migration_ =
        sent_packet_manager_.GetLargestSentPacket();
    default_path_.peer_address = GetEffectivePeerAddressFromCurrentPacket();
    active_effective_peer_migration_type_ = type;
    OnConnectionMigration();
    return;
  }
  if (type == NO_CHANGE) {
    UpdatePeerAddress(last_received_packet_info_.source_address);
    QUIC_BUG(quic_bug_10511_36)
        << "EffectivePeerMigration started without address change.";
    return;
  }
  // Flush outstanding data before any path state changes; this may close the
  // connection, hence the connected_ re-check below.
  packet_creator_.FlushCurrentPacket();
  packet_creator_.SendRemainingPendingPadding();
  if (!connected_) {
    return;
  }
  const QuicSocketAddress current_effective_peer_address =
      GetEffectivePeerAddressFromCurrentPacket();
  QUIC_DLOG(INFO) << ENDPOINT << "Effective peer's ip:port changed from "
                  << default_path_.peer_address.ToString() << " to "
                  << current_effective_peer_address.ToString()
                  << ", address change type is " << type
                  << ", migrating connection.";
  const QuicSocketAddress previous_direct_peer_address = direct_peer_address_;
  PathState previous_default_path = std::move(default_path_);
  active_effective_peer_migration_type_ = type;
  MaybeClearQueuedPacketsOnPathChange();
  OnConnectionMigration();
  if (type == PORT_CHANGE) {
    QUICHE_DCHECK(previous_default_path.validated ||
                  (alternative_path_.validated &&
                   alternative_path_.send_algorithm != nullptr));
  } else {
    // IP changed: snapshot the old path's RTT/congestion state, reset the
    // connection's, and — if the alternative path already has state for the
    // new peer IP — adopt that state instead.
    previous_default_path.rtt_stats.emplace();
    previous_default_path.rtt_stats->CloneFrom(
        *sent_packet_manager_.GetRttStats());
    previous_default_path.send_algorithm = OnPeerIpAddressChanged();
    if (alternative_path_.peer_address.host() ==
            current_effective_peer_address.host() &&
        alternative_path_.send_algorithm != nullptr &&
        alternative_path_.rtt_stats.has_value()) {
      sent_packet_manager_.SetSendAlgorithm(
          alternative_path_.send_algorithm.release());
      sent_packet_manager_.SetRttStats(*alternative_path_.rtt_stats);
      alternative_path_.rtt_stats = std::nullopt;
    }
  }
  UpdatePeerAddress(last_received_packet_info_.source_address);
  if (IsAlternativePath(last_received_packet_info_.destination_address,
                        current_effective_peer_address)) {
    // The packet arrived on the known alternative path; promote it.
    SetDefaultPathState(std::move(alternative_path_));
  } else {
    // Unknown path: build a fresh default path, reusing a connection ID and
    // stateless reset token where possible.
    QuicConnectionId client_connection_id;
    std::optional<StatelessResetToken> stateless_reset_token;
    FindMatchingOrNewClientConnectionIdOrToken(
        previous_default_path, alternative_path_,
        last_received_packet_info_.destination_connection_id,
        &client_connection_id, &stateless_reset_token);
    SetDefaultPathState(
        PathState(last_received_packet_info_.destination_address,
                  current_effective_peer_address, client_connection_id,
                  last_received_packet_info_.destination_connection_id,
                  stateless_reset_token));
    default_path_.validated =
        (alternative_path_.peer_address.host() ==
             current_effective_peer_address.host() &&
         alternative_path_.validated) ||
        (previous_default_path.validated && type == PORT_CHANGE);
  }
  if (!last_received_packet_info_.received_bytes_counted) {
    // Credit this packet's bytes toward the anti-amplification budget of the
    // (possibly new) default path.
    default_path_.bytes_received_before_address_validation +=
        last_received_packet_info_.length;
    last_received_packet_info_.received_bytes_counted = true;
  }
  if (!previous_default_path.validated) {
    QUIC_DVLOG(1) << "Cancel validation of previous peer address change to "
                  << previous_default_path.peer_address
                  << " upon peer migration to " << default_path_.peer_address;
    path_validator_.CancelPathValidation();
    ++stats_.num_peer_migration_while_validating_default_path;
  }
  // The alternative path is no longer useful if it shares the new peer host.
  if (alternative_path_.peer_address.host() ==
      default_path_.peer_address.host()) {
    alternative_path_.Clear();
  }
  if (default_path_.validated) {
    QUIC_DVLOG(1) << "Peer migrated to a validated address.";
    if (!(previous_default_path.validated && type == PORT_CHANGE)) {
      ++stats_.num_peer_migration_to_proactively_validated_address;
    }
    OnEffectivePeerMigrationValidated(
        default_path_.server_connection_id ==
        previous_default_path.server_connection_id);
    return;
  }
  // New path is unvalidated: sending is throttled by the anti-amplification
  // limit until reverse path validation succeeds.
  QUICHE_DCHECK(EnforceAntiAmplificationLimit());
  QUIC_DVLOG(1) << "Apply anti-amplification limit to effective peer address "
                << default_path_.peer_address << " with "
                << default_path_.bytes_sent_before_address_validation
                << " bytes sent and "
                << default_path_.bytes_received_before_address_validation
                << " bytes received.";
  QUICHE_DCHECK(!alternative_path_.peer_address.IsInitialized() ||
                alternative_path_.peer_address.host() !=
                    default_path_.peer_address.host());
  if (previous_default_path.validated) {
    // Keep the old validated path around as a fallback.
    alternative_path_ = std::move(previous_default_path);
    QUICHE_DCHECK(alternative_path_.send_algorithm != nullptr);
  }
  if (!path_validator_.IsValidatingPeerAddress(
          current_effective_peer_address)) {
    ++stats_.num_reverse_path_validtion_upon_migration;
    ValidatePath(std::make_unique<ReversePathValidationContext>(
                     default_path_.self_address, peer_address(),
                     default_path_.peer_address, this),
                 std::make_unique<ReversePathValidationResultDelegate>(
                     this, previous_direct_peer_address),
                 PathValidationReason::kReversePathValidation);
  } else {
    QUIC_DVLOG(1) << "Peer address " << default_path_.peer_address
                  << " is already under validation, wait for result.";
    ++stats_.num_peer_migration_to_proactively_validated_address;
  }
}
// Notifies the debug visitor (with time since handshake completion) and the
// session visitor that the effective peer address changed. For pre-IETF
// versions, changes larger than a port or IPv4-subnet change also reset the
// sent packet manager's migration state.
void QuicConnection::OnConnectionMigration() {
  if (debug_visitor_ != nullptr) {
    const QuicTime now = clock_->ApproximateNow();
    if (now >= stats_.handshake_completion_time) {
      debug_visitor_->OnPeerAddressChange(
          active_effective_peer_migration_type_,
          now - stats_.handshake_completion_time);
    }
  }
  visitor_->OnConnectionMigration(active_effective_peer_migration_type_);
  if (active_effective_peer_migration_type_ != PORT_CHANGE &&
      active_effective_peer_migration_type_ != IPV4_SUBNET_CHANGE &&
      !framer_.version().HasIetfQuicFrames()) {
    sent_packet_manager_.OnConnectionMigration(false);
  }
}
// Whether the packet being processed was classified as a connectivity probe.
bool QuicConnection::IsCurrentPacketConnectivityProbing() const {
  return is_current_packet_connectivity_probing_;
}
// Whether the outgoing ACK frame has been updated since it was last sent.
bool QuicConnection::ack_frame_updated() const {
  return uber_received_packet_manager_.IsAckFrameUpdated();
}
// Returns a view over the packet currently being processed, or an empty view
// when no packet data is in scope.
absl::string_view QuicConnection::GetCurrentPacket() {
  return current_packet_data_ == nullptr
             ? absl::string_view()
             : absl::string_view(current_packet_data_,
                                 last_received_packet_info_.length);
}
// Memory-corruption heuristic: a stream frame at ENCRYPTION_INITIAL on a
// non-crypto stream whose payload begins with the handshake tag kCHLO (seen
// by a server) or kREJ (seen by a client) is treated as likely corruption,
// since that data belongs on the crypto stream.
bool QuicConnection::MaybeConsiderAsMemoryCorruption(
    const QuicStreamFrame& frame) {
  if (QuicUtils::IsCryptoStreamId(transport_version(), frame.stream_id) ||
      last_received_packet_info_.decrypted_level != ENCRYPTION_INITIAL) {
    return false;
  }
  if (perspective_ == Perspective::IS_SERVER &&
      frame.data_length >= sizeof(kCHLO) &&
      strncmp(frame.data_buffer, reinterpret_cast<const char*>(&kCHLO),
              sizeof(kCHLO)) == 0) {
    return true;
  }
  if (perspective_ == Perspective::IS_CLIENT &&
      frame.data_length >= sizeof(kREJ) &&
      strncmp(frame.data_buffer, reinterpret_cast<const char*>(&kREJ),
              sizeof(kREJ)) == 0) {
    return true;
  }
  return false;
}
// Tells the sent packet manager when the connection has nothing to send
// (no buffered packets and the visitor has no pending data), so congestion
// control can account for application-limited periods.
void QuicConnection::CheckIfApplicationLimited() {
  if (!connected_) {
    return;
  }
  const bool nothing_to_send =
      buffered_packets_.empty() && !visitor_->WillingAndAbleToWrite();
  if (nothing_to_send) {
    sent_packet_manager_.OnApplicationLimited();
  }
}
// Records |type| as part of the current packet and updates probing/migration
// bookkeeping. Returns the connection's connectedness so the caller can stop
// processing a packet that closed the connection.
bool QuicConnection::UpdatePacketContent(QuicFrameType type) {
  last_received_packet_info_.frames.push_back(type);
  if (version().HasIetfQuicFrames()) {
    // IETF QUIC: clients do no probing bookkeeping; servers track new paths
    // probed via PATH_CHALLENGE and start migration on non-probing frames.
    if (perspective_ == Perspective::IS_CLIENT) {
      return connected_;
    }
    if (!QuicUtils::IsProbingFrame(type)) {
      MaybeStartIetfPeerMigration();
      return connected_;
    }
    QuicSocketAddress current_effective_peer_address =
        GetEffectivePeerAddressFromCurrentPacket();
    if (IsDefaultPath(last_received_packet_info_.destination_address,
                      last_received_packet_info_.source_address)) {
      return connected_;
    }
    if (type == PATH_CHALLENGE_FRAME &&
        !IsAlternativePath(last_received_packet_info_.destination_address,
                           current_effective_peer_address)) {
      QUIC_DVLOG(1)
          << "The peer is probing a new path with effective peer address "
          << current_effective_peer_address << ", self address "
          << last_received_packet_info_.destination_address;
      if (!default_path_.validated) {
        QUIC_DVLOG(1) << "The connection hasn't finished handshake or is "
                         "validating a recent peer address change.";
        QUIC_BUG_IF(quic_bug_12714_30,
                    IsHandshakeConfirmed() && !alternative_path_.validated)
            << "No validated peer address to send after handshake comfirmed.";
      } else if (!IsReceivedPeerAddressValidated()) {
        // Remember the probed path as the alternative path and flag it for
        // proactive validation when the PATH_CHALLENGE is handled.
        QuicConnectionId client_connection_id;
        std::optional<StatelessResetToken> stateless_reset_token;
        FindMatchingOrNewClientConnectionIdOrToken(
            default_path_, alternative_path_,
            last_received_packet_info_.destination_connection_id,
            &client_connection_id, &stateless_reset_token);
        alternative_path_ =
            PathState(last_received_packet_info_.destination_address,
                      current_effective_peer_address, client_connection_id,
                      last_received_packet_info_.destination_connection_id,
                      stateless_reset_token);
        should_proactively_validate_peer_address_on_path_challenge_ = true;
      }
    }
    MaybeUpdateBytesReceivedFromAlternativeAddress(
        last_received_packet_info_.length);
    return connected_;
  }
  // gQUIC: a packet whose first frame is PING and second is PADDING is
  // treated as a connectivity probe.
  if (!ignore_gquic_probing_) {
    if (current_packet_content_ == NOT_PADDED_PING) {
      // Already classified as a regular (non-probing) packet.
      return connected_;
    }
    if (type == PING_FRAME) {
      if (current_packet_content_ == NO_FRAMES_RECEIVED) {
        current_packet_content_ = FIRST_FRAME_IS_PING;
        return connected_;
      }
    }
    if (type == PADDING_FRAME &&
        current_packet_content_ == FIRST_FRAME_IS_PING) {
      current_packet_content_ = SECOND_FRAME_IS_PADDING;
      QUIC_CODE_COUNT_N(gquic_padded_ping_received, 1, 2);
      if (perspective_ == Perspective::IS_SERVER) {
        // Server: probe iff the effective peer address changed.
        is_current_packet_connectivity_probing_ =
            current_effective_peer_migration_type_ != NO_CHANGE;
        if (is_current_packet_connectivity_probing_) {
          QUIC_CODE_COUNT_N(gquic_padded_ping_received, 2, 2);
        }
        QUIC_DLOG_IF(INFO, is_current_packet_connectivity_probing_)
            << ENDPOINT
            << "Detected connectivity probing packet. "
               "current_effective_peer_migration_type_:"
            << current_effective_peer_migration_type_;
      } else {
        // Client: probe iff the packet used a non-default 4-tuple.
        is_current_packet_connectivity_probing_ =
            (last_received_packet_info_.source_address != peer_address()) ||
            (last_received_packet_info_.destination_address !=
             default_path_.self_address);
        QUIC_DLOG_IF(INFO, is_current_packet_connectivity_probing_)
            << ENDPOINT
            << "Detected connectivity probing packet. "
               "last_packet_source_address:"
            << last_received_packet_info_.source_address
            << ", peer_address_:" << peer_address()
            << ", last_packet_destination_address:"
            << last_received_packet_info_.destination_address
            << ", default path self_address :" << default_path_.self_address;
      }
      return connected_;
    }
    current_packet_content_ = NOT_PADDED_PING;
  } else {
    QUIC_RELOADABLE_FLAG_COUNT(quic_ignore_gquic_probing);
    QUICHE_DCHECK_EQ(current_packet_content_, NO_FRAMES_RECEIVED);
  }
  // Only adopt the new peer address (and possibly start migration) when this
  // packet carries the largest packet number seen so far.
  if (GetLargestReceivedPacket().IsInitialized() &&
      last_received_packet_info_.header.packet_number ==
          GetLargestReceivedPacket()) {
    UpdatePeerAddress(last_received_packet_info_.source_address);
    if (current_effective_peer_migration_type_ != NO_CHANGE) {
      StartEffectivePeerMigration(current_effective_peer_migration_type_);
    }
  }
  current_effective_peer_migration_type_ = NO_CHANGE;
  return connected_;
}
// IETF QUIC only. Called for non-probing frames: closes the connection if the
// effective peer address changed before the handshake was confirmed;
// otherwise starts migration (or just updates the peer address) once the
// packet carrying the largest packet number arrives from the new address.
void QuicConnection::MaybeStartIetfPeerMigration() {
  QUICHE_DCHECK(version().HasIetfQuicFrames());
  if (current_effective_peer_migration_type_ != NO_CHANGE &&
      !IsHandshakeConfirmed()) {
    QUIC_LOG_EVERY_N_SEC(INFO, 60)
        << ENDPOINT << "Effective peer's ip:port changed from "
        << default_path_.peer_address.ToString() << " to "
        << GetEffectivePeerAddressFromCurrentPacket().ToString()
        << " before handshake confirmed, "
           "current_effective_peer_migration_type_: "
        << current_effective_peer_migration_type_;
    // Peer address change before handshake confirmation is treated as a
    // connection error.
    CloseConnection(
        (current_effective_peer_migration_type_ == PORT_CHANGE
             ? QUIC_PEER_PORT_CHANGE_HANDSHAKE_UNCONFIRMED
             : QUIC_CONNECTION_MIGRATION_HANDSHAKE_UNCONFIRMED),
        absl::StrFormat(
            "Peer address changed from %s to %s before handshake is confirmed.",
            default_path_.peer_address.ToString(),
            GetEffectivePeerAddressFromCurrentPacket().ToString()),
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  if (GetLargestReceivedPacket().IsInitialized() &&
      last_received_packet_info_.header.packet_number ==
          GetLargestReceivedPacket()) {
    if (current_effective_peer_migration_type_ != NO_CHANGE) {
      StartEffectivePeerMigration(current_effective_peer_migration_type_);
    } else {
      UpdatePeerAddress(last_received_packet_info_.source_address);
    }
  }
  current_effective_peer_migration_type_ = NO_CHANGE;
}
// Housekeeping after an incoming ACK frame has been fully processed.
// |acked_new_packet| is true when the ACK newly acknowledged a packet.
void QuicConnection::PostProcessAfterAckFrame(bool acked_new_packet) {
  if (!packet_creator_.has_ack()) {
    // Drop tracking for ranges the peer already knows we acked, so future
    // outgoing ACK frames can shrink.
    uber_received_packet_manager_.DontWaitForPacketsBefore(
        last_received_packet_info_.decrypted_level,
        SupportsMultiplePacketNumberSpaces()
            ? sent_packet_manager_.GetLargestPacketPeerKnowsIsAcked(
                  last_received_packet_info_.decrypted_level)
            : sent_packet_manager_.largest_packet_peer_knows_is_acked());
  }
  // Re-arm the retransmission alarm with post-ACK state.
  SetRetransmissionAlarm();
  if (acked_new_packet) {
    OnForwardProgressMade();
  } else if (default_enable_5rto_blackhole_detection_ &&
             !sent_packet_manager_.HasInFlightPackets() &&
             blackhole_detector_.IsDetectionInProgress()) {
    // Nothing left in flight; blackhole detection is no longer meaningful.
    blackhole_detector_.StopDetection(false);
  }
}
// Forwards the session notifier to the sent packet manager.
void QuicConnection::SetSessionNotifier(
    SessionNotifierInterface* session_notifier) {
  sent_packet_manager_.SetSessionNotifier(session_notifier);
}
// Forwards the stream frame data producer to the framer.
void QuicConnection::SetDataProducer(
    QuicStreamFrameDataProducer* data_producer) {
  framer_.set_data_producer(data_producer);
}
// Forwards the transmission type to the packet creator.
void QuicConnection::SetTransmissionType(TransmissionType type) {
  packet_creator_.SetTransmissionType(type);
}
// Recomputes how far into the future packets may be scheduled by a
// release-time-aware writer: a flag-controlled fraction of smoothed RTT,
// clamped below by kMinReleaseTimeIntoFutureMs and above by the
// quic_max_pace_time_into_future_ms flag.
void QuicConnection::UpdateReleaseTimeIntoFuture() {
  QUICHE_DCHECK(supports_release_time_);
  const QuicTime::Delta prior_max_release_time = release_time_into_future_;
  release_time_into_future_ = std::max(
      QuicTime::Delta::FromMilliseconds(kMinReleaseTimeIntoFutureMs),
      std::min(QuicTime::Delta::FromMilliseconds(
                   GetQuicFlag(quic_max_pace_time_into_future_ms)),
               sent_packet_manager_.GetRttStats()->SmoothedOrInitialRtt() *
                   GetQuicFlag(quic_pace_time_into_future_srtt_fraction)));
  QUIC_DVLOG(3) << "Updated max release time delay from "
                << prior_max_release_time << " to "
                << release_time_into_future_;
}
// Cancels a pending ACK alarm and clears ACK state for the current
// encryption level.
void QuicConnection::ResetAckStates() {
  ack_alarm().Cancel();
  uber_received_packet_manager_.ResetAckStates(encryption_level_);
}
// Attempts to send an unreliable message (MESSAGE/DATAGRAM frame). Returns
// MESSAGE_STATUS_TOO_LARGE when |message| cannot fit in the current largest
// payload, MESSAGE_STATUS_BLOCKED when disconnected or unable to write
// (unless |flush| forces it), otherwise the packet creator's status.
MessageStatus QuicConnection::SendMessage(
    QuicMessageId message_id, absl::Span<quiche::QuicheMemSlice> message,
    bool flush) {
  if (MemSliceSpanTotalSize(message) > GetCurrentLargestMessagePayload()) {
    return MESSAGE_STATUS_TOO_LARGE;
  }
  if (!connected_ || (!flush && !CanWrite(HAS_RETRANSMITTABLE_DATA))) {
    return MESSAGE_STATUS_BLOCKED;
  }
  ScopedPacketFlusher flusher(this);
  return packet_creator_.AddMessageFrame(message_id, message);
}
// Delegates to the packet creator's current largest message payload.
QuicPacketLength QuicConnection::GetCurrentLargestMessagePayload() const {
  return packet_creator_.GetCurrentLargestMessagePayload();
}
// Delegates to the packet creator's guaranteed largest message payload.
QuicPacketLength QuicConnection::GetGuaranteedLargestMessagePayload() const {
  return packet_creator_.GetGuaranteedLargestMessagePayload();
}
// Returns the cipher suite ID of the decrypter in use. When the version
// knows which decrypter to use, either scans encryption levels from highest
// to lowest (behind the quic_limit_new_streams_per_loop_2 flag) or uses the
// decrypter of the last decrypted level.
uint32_t QuicConnection::cipher_id() const {
  if (version().KnowsWhichDecrypterToUse()) {
    if (quic_limit_new_streams_per_loop_2_) {
      QUIC_RELOADABLE_FLAG_COUNT_N(quic_limit_new_streams_per_loop_2, 4, 4);
      // Prefer the most forward-secure decrypter that is installed.
      for (auto decryption_level :
           {ENCRYPTION_FORWARD_SECURE, ENCRYPTION_HANDSHAKE,
            ENCRYPTION_ZERO_RTT, ENCRYPTION_INITIAL}) {
        const QuicDecrypter* decrypter = framer_.GetDecrypter(decryption_level);
        if (decrypter != nullptr) {
          return decrypter->cipher_id();
        }
      }
      QUICHE_BUG(no_decrypter_found)
          << ENDPOINT << "No decrypter found at all encryption levels";
      return 0;
    } else {
      return framer_.GetDecrypter(last_received_packet_info_.decrypted_level)
          ->cipher_id();
    }
  }
  return framer_.decrypter()->cipher_id();
}
// Picks the encryption level for an outgoing CONNECTION_CLOSE. Clients use
// the current level. Servers use FORWARD_SECURE once the handshake is
// complete, otherwise ZERO_RTT if that encrypter exists, else INITIAL.
EncryptionLevel QuicConnection::GetConnectionCloseEncryptionLevel() const {
  if (perspective_ == Perspective::IS_CLIENT) {
    return encryption_level_;
  }
  if (IsHandshakeComplete()) {
    QUIC_BUG_IF(quic_bug_12714_31,
                encryption_level_ != ENCRYPTION_FORWARD_SECURE)
        << ENDPOINT << "Unexpected connection close encryption level "
        << encryption_level_;
    return ENCRYPTION_FORWARD_SECURE;
  }
  if (framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_ZERO_RTT)) {
    if (encryption_level_ != ENCRYPTION_ZERO_RTT) {
      QUIC_CODE_COUNT(quic_wrong_encryption_level_connection_close_ietf);
    }
    return ENCRYPTION_ZERO_RTT;
  }
  return ENCRYPTION_INITIAL;
}
// Before the handshake is confirmed, opportunistically retransmits crypto
// data of the space whose ACK is about to be sent, so the ACK and crypto
// data travel together.
void QuicConnection::MaybeBundleCryptoDataWithAcks() {
  QUICHE_DCHECK(SupportsMultiplePacketNumberSpaces());
  if (IsHandshakeConfirmed()) {
    return;
  }
  // A server still holding the INITIAL encrypter bundles INITIAL data;
  // otherwise HANDSHAKE data is bundled.
  PacketNumberSpace space = HANDSHAKE_DATA;
  if (perspective() == Perspective::IS_SERVER &&
      framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_INITIAL)) {
    space = INITIAL_DATA;
  }
  const QuicTime ack_timeout =
      uber_received_packet_manager_.GetAckTimeout(space);
  if (!ack_timeout.IsInitialized() ||
      (ack_timeout > clock_->ApproximateNow() &&
       ack_timeout > uber_received_packet_manager_.GetEarliestAckTimeout())) {
    // No ACK of this space is due yet.
    return;
  }
  if (coalesced_packet_.length() > 0) {
    // The coalescer already holds data that will accompany the ACK.
    return;
  }
  if (!framer_.HasAnEncrypterForSpace(space)) {
    QUIC_BUG(quic_bug_10511_39)
        << ENDPOINT
        << "Try to bundle crypto with ACK with missing key of space "
        << PacketNumberSpaceToString(space);
    return;
  }
  sent_packet_manager_.RetransmitDataOfSpaceIfAny(space);
}
// Flushes a pending ACK for every packet number space whose ACK timer is due,
// possibly bundling crypto data first. Re-arms the ACK alarm for any space
// that still has a future timeout, and may ask the visitor for a
// retransmittable frame to bundle after too many ack-only packets.
void QuicConnection::SendAllPendingAcks() {
  QUICHE_DCHECK(SupportsMultiplePacketNumberSpaces());
  QUIC_DVLOG(1) << ENDPOINT << "Trying to send all pending ACKs";
  ack_alarm().Cancel();
  QuicTime earliest_ack_timeout =
      uber_received_packet_manager_.GetEarliestAckTimeout();
  QUIC_BUG_IF(quic_bug_12714_32, !earliest_ack_timeout.IsInitialized());
  MaybeBundleCryptoDataWithAcks();
  visitor_->MaybeBundleOpportunistically();
  // Bundling above can change ACK state; re-read the earliest timeout.
  earliest_ack_timeout = uber_received_packet_manager_.GetEarliestAckTimeout();
  if (!earliest_ack_timeout.IsInitialized()) {
    return;
  }
  for (int8_t i = INITIAL_DATA; i <= APPLICATION_DATA; ++i) {
    const QuicTime ack_timeout = uber_received_packet_manager_.GetAckTimeout(
        static_cast<PacketNumberSpace>(i));
    if (!ack_timeout.IsInitialized()) {
      continue;
    }
    if (!framer_.HasAnEncrypterForSpace(static_cast<PacketNumberSpace>(i))) {
      // Key for this space is unavailable (not yet derived or dropped).
      continue;
    }
    if (ack_timeout > clock_->ApproximateNow() &&
        ack_timeout > earliest_ack_timeout) {
      // This space's ACK is not due yet.
      continue;
    }
    QUIC_DVLOG(1) << ENDPOINT << "Sending ACK of packet number space "
                  << PacketNumberSpaceToString(
                         static_cast<PacketNumberSpace>(i));
    // Switch to the level appropriate for acking this space.
    ScopedEncryptionLevelContext context(
        this, QuicUtils::GetEncryptionLevelToSendAckofSpace(
                  static_cast<PacketNumberSpace>(i)));
    QuicFrames frames;
    frames.push_back(uber_received_packet_manager_.GetUpdatedAckFrame(
        static_cast<PacketNumberSpace>(i), clock_->ApproximateNow()));
    const bool flushed = packet_creator_.FlushAckFrame(frames);
    if (!flushed) {
      QUIC_BUG_IF(quic_bug_12714_33,
                  connected_ && !writer_->IsWriteBlocked() &&
                      !LimitedByAmplificationFactor(
                          packet_creator_.max_packet_length()) &&
                      !IsMissingDestinationConnectionID())
          << "Writer not blocked and not throttled by amplification factor, "
             "but ACK not flushed for packet space:"
          << PacketNumberSpaceToString(static_cast<PacketNumberSpace>(i))
          << ", fill_coalesced_packet: " << fill_coalesced_packet_
          << ", blocked_by_no_connection_id: "
          << (peer_issued_cid_manager_ != nullptr &&
              packet_creator_.GetDestinationConnectionId().IsEmpty())
          << ", has_soft_max_packet_length: "
          << packet_creator_.HasSoftMaxPacketLength()
          << ", max_packet_length: " << packet_creator_.max_packet_length()
          << ", pending frames: " << packet_creator_.GetPendingFramesInfo();
      break;
    }
    ResetAckStates();
  }
  // Re-arm the alarm for any space with a remaining ACK timeout.
  const QuicTime timeout =
      uber_received_packet_manager_.GetEarliestAckTimeout();
  if (timeout.IsInitialized()) {
    ack_alarm().Update(timeout, kAlarmGranularity);
  }
  if (encryption_level_ != ENCRYPTION_FORWARD_SECURE ||
      !ShouldBundleRetransmittableFrameWithAck()) {
    return;
  }
  consecutive_num_packets_with_no_retransmittable_frames_ = 0;
  if (packet_creator_.HasPendingRetransmittableFrames() ||
      visitor_->WillingAndAbleToWrite()) {
    // Retransmittable data will go out anyway; no need to ask for more.
    return;
  }
  visitor_->OnAckNeedsRetransmittableFrame();
}
// Decides whether the next ACK should be bundled with a retransmittable
// frame: either too many consecutive packets went out without one, or a PTO
// fired while the corresponding bundling option is enabled.
bool QuicConnection::ShouldBundleRetransmittableFrameWithAck() const {
  return consecutive_num_packets_with_no_retransmittable_frames_ >=
             max_consecutive_num_packets_with_no_retransmittable_frames_ ||
         (bundle_retransmittable_with_pto_ack_ &&
          sent_packet_manager_.GetConsecutivePtoCount() > 0);
}
// When the coalescer holds a retransmitted INITIAL or HANDSHAKE packet,
// tries to also coalesce a packet of the next-higher encryption level
// (HANDSHAKE or FORWARD_SECURE respectively) by retransmitting data of that
// space, guarded against re-entrancy via fill_coalesced_packet_.
void QuicConnection::MaybeCoalescePacketOfHigherSpace() {
  if (!connected() || !packet_creator_.HasSoftMaxPacketLength()) {
    return;
  }
  if (fill_coalesced_packet_) {
    // Re-entered while already filling the coalesced packet.
    QUIC_BUG(quic_coalesce_packet_reentrant);
    return;
  }
  for (EncryptionLevel retransmission_level :
       {ENCRYPTION_INITIAL, ENCRYPTION_HANDSHAKE}) {
    // Coalesce the level one step above the retransmitted one.
    const EncryptionLevel coalesced_level =
        retransmission_level == ENCRYPTION_INITIAL ? ENCRYPTION_HANDSHAKE
                                                   : ENCRYPTION_FORWARD_SECURE;
    if (coalesced_packet_.ContainsPacketOfEncryptionLevel(
            retransmission_level) &&
        coalesced_packet_.TransmissionTypeOfPacket(retransmission_level) !=
            NOT_RETRANSMISSION &&
        framer_.HasEncrypterOfEncryptionLevel(coalesced_level) &&
        !coalesced_packet_.ContainsPacketOfEncryptionLevel(coalesced_level)) {
      QUIC_DVLOG(1) << ENDPOINT
                    << "Trying to coalesce packet of encryption level: "
                    << EncryptionLevelToString(coalesced_level);
      fill_coalesced_packet_ = true;
      sent_packet_manager_.RetransmitDataOfSpaceIfAny(
          QuicUtils::GetPacketNumberSpace(coalesced_level));
      fill_coalesced_packet_ = false;
    }
  }
}
// Serializes the coalesced packet and either writes it or buffers it
// (write-blocked, pending buffered packets, or strict anti-amplification).
// Returns false on serialization failure or a hard write error. The clearer
// empties the coalescer on every exit path.
bool QuicConnection::FlushCoalescedPacket() {
  ScopedCoalescedPacketClearer clearer(&coalesced_packet_);
  if (!connected_) {
    return false;
  }
  if (!version().CanSendCoalescedPackets()) {
    QUIC_BUG_IF(quic_bug_12714_34, coalesced_packet_.length() > 0);
    return true;
  }
  if (coalesced_packet_.ContainsPacketOfEncryptionLevel(ENCRYPTION_INITIAL) &&
      !framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_INITIAL)) {
    // Initial keys were dropped after the packet was coalesced; it can no
    // longer be serialized.
    QUIC_BUG(quic_bug_10511_40)
        << ENDPOINT
        << "Coalescer contains initial packet while initial key has "
           "been dropped.";
    coalesced_packet_.NeuterInitialPacket();
  }
  if (coalesced_packet_.length() == 0) {
    return true;
  }
  char buffer[kMaxOutgoingPacketSize];
  const size_t length = packet_creator_.SerializeCoalescedPacket(
      coalesced_packet_, buffer, coalesced_packet_.max_packet_length());
  if (length == 0) {
    if (connected_) {
      CloseConnection(QUIC_FAILED_TO_SERIALIZE_PACKET,
                      "Failed to serialize coalesced packet.",
                      ConnectionCloseBehavior::SILENT_CLOSE);
    }
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnCoalescedPacketSent(coalesced_packet_, length);
  }
  QUIC_DVLOG(1) << ENDPOINT << "Sending coalesced packet "
                << coalesced_packet_.ToString(length);
  // Serialization may pad beyond the coalesced payload length.
  const size_t padding_size =
      length - std::min<size_t>(length, coalesced_packet_.length());
  if (!buffered_packets_.empty() || HandleWriteBlocked() ||
      (enforce_strict_amplification_factor_ &&
       LimitedByAmplificationFactor(padding_size))) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Buffering coalesced packet of len: " << length;
    buffered_packets_.emplace_back(
        buffer, static_cast<QuicPacketLength>(length),
        coalesced_packet_.self_address(), coalesced_packet_.peer_address(),
        coalesced_packet_.ecn_codepoint());
  } else {
    WriteResult result = SendPacketToWriter(
        buffer, length, coalesced_packet_.self_address().host(),
        coalesced_packet_.peer_address(), writer_,
        coalesced_packet_.ecn_codepoint());
    if (IsWriteError(result.status)) {
      OnWriteError(result.error_code);
      return false;
    }
    if (IsWriteBlockedStatus(result.status)) {
      visitor_->OnWriteBlocked();
      if (result.status != WRITE_STATUS_BLOCKED_DATA_BUFFERED) {
        // Writer did not take ownership of the data; keep a copy.
        QUIC_DVLOG(1) << ENDPOINT
                      << "Buffering coalesced packet of len: " << length;
        buffered_packets_.emplace_back(
            buffer, static_cast<QuicPacketLength>(length),
            coalesced_packet_.self_address(), coalesced_packet_.peer_address(),
            coalesced_packet_.ecn_codepoint());
      }
    }
  }
  // Optionally duplicate the packet to the server's preferred address, up to
  // a fixed cap.
  if (accelerated_server_preferred_address_ &&
      stats_.num_duplicated_packets_sent_to_server_preferred_address <
          kMaxDuplicatedPacketsSentToServerPreferredAddress) {
    QUICHE_DCHECK(received_server_preferred_address_.IsInitialized());
    path_validator_.MaybeWritePacketToAddress(
        buffer, length, received_server_preferred_address_);
    ++stats_.num_duplicated_packets_sent_to_server_preferred_address;
  }
  if (length > coalesced_packet_.length()) {
    // Account the padding bytes toward byte counters and, on the default
    // unvalidated path, the anti-amplification budget.
    if (IsDefaultPath(coalesced_packet_.self_address(),
                      coalesced_packet_.peer_address())) {
      if (EnforceAntiAmplificationLimit()) {
        default_path_.bytes_sent_before_address_validation += padding_size;
      }
    } else {
      MaybeUpdateBytesSentToAlternativeAddress(coalesced_packet_.peer_address(),
                                               padding_size);
    }
    stats_.bytes_sent += padding_size;
    if (coalesced_packet_.initial_packet() != nullptr &&
        coalesced_packet_.initial_packet()->transmission_type !=
            NOT_RETRANSMISSION) {
      stats_.bytes_retransmitted += padding_size;
    }
  }
  return true;
}
// For TLS-based versions, enables per-encryption-context packet number
// spaces in the framer and both packet managers.
void QuicConnection::MaybeEnableMultiplePacketNumberSpacesSupport() {
  if (version().handshake_protocol != PROTOCOL_TLS1_3) {
    return;
  }
  QUIC_DVLOG(1) << ENDPOINT << "connection " << connection_id()
                << " supports multiple packet number spaces";
  framer_.EnableMultiplePacketNumberSpacesSupport();
  sent_packet_manager_.EnableMultiplePacketNumberSpacesSupport();
  uber_received_packet_manager_.EnableMultiplePacketNumberSpacesSupport(
      perspective_);
}
// Whether separate packet number spaces are in use (TLS-based versions).
bool QuicConnection::SupportsMultiplePacketNumberSpaces() const {
  return sent_packet_manager_.supports_multiple_packet_number_spaces();
}
// Records the largest received packet number that carried an ACK, keyed by
// packet number space when multiple spaces are enabled.
void QuicConnection::SetLargestReceivedPacketWithAck(
    QuicPacketNumber new_value) {
  if (SupportsMultiplePacketNumberSpaces()) {
    largest_seen_packets_with_ack_[QuicUtils::GetPacketNumberSpace(
        last_received_packet_info_.decrypted_level)] = new_value;
  } else {
    largest_seen_packet_with_ack_ = new_value;
  }
}
// Called when an ACK newly acknowledged a packet: leaves the path-degrading
// state (notifying the visitor) and restarts blackhole detection if packets
// remain in flight, or stops it otherwise.
void QuicConnection::OnForwardProgressMade() {
  if (!connected_) {
    return;
  }
  if (is_path_degrading_) {
    visitor_->OnForwardProgressMadeAfterPathDegrading();
    stats_.num_forward_progress_after_path_degrading++;
    is_path_degrading_ = false;
  }
  if (sent_packet_manager_.HasInFlightPackets()) {
    // Restart detection with fresh deadlines based on current RTT state.
    blackhole_detector_.RestartDetection(GetPathDegradingDeadline(),
                                         GetNetworkBlackholeDeadline(),
                                         GetPathMtuReductionDeadline());
  } else {
    blackhole_detector_.StopDetection(false);
  }
  QUIC_BUG_IF(quic_bug_12714_35,
              perspective_ == Perspective::IS_SERVER &&
                  default_enable_5rto_blackhole_detection_ &&
                  blackhole_detector_.IsDetectionInProgress() &&
                  !sent_packet_manager_.HasInFlightPackets())
      << ENDPOINT
      << "Trying to start blackhole detection without no bytes in flight";
}
// Largest received packet number that carried an ACK, per packet number
// space when multiple spaces are enabled.
QuicPacketNumber QuicConnection::GetLargestReceivedPacketWithAck() const {
  if (SupportsMultiplePacketNumberSpaces()) {
    return largest_seen_packets_with_ack_[QuicUtils::GetPacketNumberSpace(
        last_received_packet_info_.decrypted_level)];
  }
  return largest_seen_packet_with_ack_;
}
// Largest packet number the peer has acknowledged; tracked per encryption
// level when multiple packet number spaces are enabled.
QuicPacketNumber QuicConnection::GetLargestAckedPacket() const {
  return SupportsMultiplePacketNumberSpaces()
             ? sent_packet_manager_.GetLargestAckedPacket(
                   last_received_packet_info_.decrypted_level)
             : sent_packet_manager_.GetLargestObserved();
}
// Largest packet number received at the current decrypted level's space.
QuicPacketNumber QuicConnection::GetLargestReceivedPacket() const {
  return uber_received_packet_manager_.GetLargestObserved(
      last_received_packet_info_.decrypted_level);
}
// The anti-amplification limit applies only to servers, on versions that
// support it, and only while the default path's address is unvalidated.
bool QuicConnection::EnforceAntiAmplificationLimit() const {
  if (!version().SupportsAntiAmplificationLimit()) {
    return false;
  }
  if (perspective_ != Perspective::IS_SERVER) {
    return false;
  }
  return !default_path_.validated;
}
// True when the quic_fix_timeouts behavior is enabled: flag set, TLS-based
// version, and the client sent the FTOE connection option.
bool QuicConnection::ShouldFixTimeouts(const QuicConfig& config) const {
  return quic_fix_timeouts_ && version().UsesTls() &&
         config.HasClientSentConnectionOption(kFTOE, perspective_);
}
// Whether the server has exhausted its pre-validation send budget (a
// multiple of bytes received on the path). With strict enforcement, the
// |bytes| about to be sent are counted toward the budget as well.
bool QuicConnection::LimitedByAmplificationFactor(QuicByteCount bytes) const {
  return EnforceAntiAmplificationLimit() &&
         (default_path_.bytes_sent_before_address_validation +
          (enforce_strict_amplification_factor_ ? bytes : 0)) >=
             anti_amplification_factor_ *
                 default_path_.bytes_received_before_address_validation;
}
// Decides what to do with a freshly serialized packet: discard it, add it to
// the coalescer, buffer it, or hand it straight to the writer. MTU probes
// are never coalesced.
SerializedPacketFate QuicConnection::GetSerializedPacketFate(
    bool is_mtu_discovery, EncryptionLevel encryption_level) {
  if (ShouldDiscardPacket(encryption_level)) {
    return DISCARD;
  }
  const bool coalescing_allowed = version().CanSendCoalescedPackets() &&
                                  !coalescing_done_ && !is_mtu_discovery;
  if (coalescing_allowed &&
      (!IsHandshakeConfirmed() || coalesced_packet_.length() > 0)) {
    return COALESCE;
  }
  if (!buffered_packets_.empty() || HandleWriteBlocked()) {
    return BUFFER;
  }
  return SEND_TO_WRITER;
}
// True once the visitor reports HANDSHAKE_COMPLETE or a later state.
bool QuicConnection::IsHandshakeComplete() const {
  return visitor_->GetHandshakeState() >= HANDSHAKE_COMPLETE;
}
// TLS-based versions only: true once the visitor reports HANDSHAKE_CONFIRMED.
bool QuicConnection::IsHandshakeConfirmed() const {
  QUICHE_DCHECK_EQ(PROTOCOL_TLS1_3, version().handshake_protocol);
  return visitor_->GetHandshakeState() == HANDSHAKE_CONFIRMED;
}
// Delegates to the received packet manager's ack-decimation threshold.
size_t QuicConnection::min_received_before_ack_decimation() const {
  return uber_received_packet_manager_.min_received_before_ack_decimation();
}
// Forwards the ack-decimation threshold to the received packet manager.
void QuicConnection::set_min_received_before_ack_decimation(size_t new_value) {
  uber_received_packet_manager_.set_min_received_before_ack_decimation(
      new_value);
}
// The ACK frame being built for the current decrypted level's packet number
// space (or the single shared frame when spaces are not separated).
const QuicAckFrame& QuicConnection::ack_frame() const {
  if (SupportsMultiplePacketNumberSpaces()) {
    return uber_received_packet_manager_.GetAckFrame(
        QuicUtils::GetPacketNumberSpace(
            last_received_packet_info_.decrypted_level));
  }
  return uber_received_packet_manager_.ack_frame();
}
// Records the client connection ID on the default path and, for IETF frames,
// creates the matching connection-ID manager: servers track the peer-issued
// (client) IDs, clients issue their own. Finally propagates the ID to the
// packet creator and framer.
void QuicConnection::set_client_connection_id(
    QuicConnectionId client_connection_id) {
  if (!version().SupportsClientConnectionIds()) {
    // Silently ignore (with a QUIC_BUG on non-empty IDs) on versions that
    // cannot carry client connection IDs.
    QUIC_BUG_IF(quic_bug_12714_36, !client_connection_id.IsEmpty())
        << ENDPOINT << "Attempted to use client connection ID "
        << client_connection_id << " with unsupported version " << version();
    return;
  }
  default_path_.client_connection_id = client_connection_id;
  client_connection_id_is_set_ = true;
  if (version().HasIetfQuicFrames() && !client_connection_id.IsEmpty()) {
    if (perspective_ == Perspective::IS_SERVER) {
      // The server consumes IDs the client issues.
      QUICHE_DCHECK(peer_issued_cid_manager_ == nullptr);
      peer_issued_cid_manager_ =
          std::make_unique<QuicPeerIssuedConnectionIdManager>(
              kMinNumOfActiveConnectionIds, client_connection_id, clock_,
              alarm_factory_, this, context());
    } else {
      // Test hook: allows tests to suppress creation of the self-issued
      // manager on the client.
      bool create_client_self_issued_cid_manager = true;
      quiche::AdjustTestValue(
          "quic::QuicConnection::create_cid_manager_when_set_client_cid",
          &create_client_self_issued_cid_manager);
      if (create_client_self_issued_cid_manager) {
        self_issued_cid_manager_ = MakeSelfIssuedConnectionIdManager();
      }
    }
  }
  QUIC_DLOG(INFO) << ENDPOINT << "setting client connection ID to "
                  << default_path_.client_connection_id
                  << " for connection with server connection ID "
                  << default_path_.server_connection_id;
  packet_creator_.SetClientConnectionId(default_path_.client_connection_id);
  framer_.SetExpectedClientConnectionIdLength(
      default_path_.client_connection_id.length());
}
void QuicConnection::OnPathDegradingDetected() {
is_path_degrading_ = true;
visitor_->OnPathDegrading();
stats_.num_path_degrading++;
if (multi_port_stats_ && multi_port_migration_enabled_) {
MaybeMigrateToMultiPortPath();
}
}
// Attempts to migrate to the validated multi-port alternative path. Records a
// client histogram for each possible state (not validated / idle / mid
// revalidation) and, when a usable path context exists, hands it to the
// visitor to perform the migration.
void QuicConnection::MaybeMigrateToMultiPortPath() {
  if (!alternative_path_.validated) {
    QUIC_CLIENT_HISTOGRAM_ENUM(
        "QuicConnection.MultiPortPathStatusWhenMigrating",
        MultiPortStatusOnMigration::kNotValidated,
        MultiPortStatusOnMigration::kMaxValue,
        "Status of the multi port path upon migration");
    return;
  }
  std::unique_ptr<QuicPathValidationContext> context;
  const bool has_pending_validation =
      path_validator_.HasPendingPathValidation();
  if (!has_pending_validation) {
    // Not currently revalidating: take the stored context and stop the
    // periodic probing alarm.
    context = std::move(multi_port_path_context_);
    multi_port_probing_alarm().Cancel();
    QUIC_CLIENT_HISTOGRAM_ENUM(
        "QuicConnection.MultiPortPathStatusWhenMigrating",
        MultiPortStatusOnMigration::kWaitingForRefreshValidation,
        MultiPortStatusOnMigration::kMaxValue,
        "Status of the multi port path upon migration");
  } else {
    // A refresh validation is in flight; steal its context.
    context = path_validator_.ReleaseContext();
    QUIC_CLIENT_HISTOGRAM_ENUM(
        "QuicConnection.MultiPortPathStatusWhenMigrating",
        MultiPortStatusOnMigration::kPendingRefreshValidation,
        MultiPortStatusOnMigration::kMaxValue,
        "Status of the multi port path upon migration");
  }
  if (context == nullptr) {
    QUICHE_BUG(quic_bug_12714_90) << "No multi-port context to migrate to";
    return;
  }
  visitor_->MigrateToMultiPortPath(std::move(context));
}
// Blackhole-detector callback: closes the connection, unless 5RTO-based
// detection fired with nothing in flight — that indicates a detector bug, so
// it is reported via QUIC_BUG instead of closing.
void QuicConnection::OnBlackholeDetected() {
  if (!default_enable_5rto_blackhole_detection_ ||
      sent_packet_manager_.HasInFlightPackets()) {
    CloseConnection(QUIC_TOO_MANY_RTOS, "Network blackhole detected",
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  QUIC_BUG(quic_bug_10511_41)
      << ENDPOINT
      << "Blackhole detected, but there is no bytes in flight, version: "
      << version();
}
// MTU-reduction callback: fall back to the previous (smaller) MTU if needed.
void QuicConnection::OnPathMtuReductionDetected() {
  MaybeRevertToPreviousMtu();
}
// Handshake-timeout callback: builds a diagnostic string (elapsed time since
// connection creation plus the configured timeout, and for TLS clients any
// undecryptable-packet info) and closes the connection with it.
void QuicConnection::OnHandshakeTimeout() {
  const QuicTime::Delta duration =
      clock_->ApproximateNow() - stats_.connection_creation_time;
  std::string error_details = absl::StrCat(
      "Handshake timeout expired after ", duration.ToDebuggingValue(),
      ". Timeout:",
      idle_network_detector_.handshake_timeout().ToDebuggingValue());
  if (perspective() == Perspective::IS_CLIENT && version().UsesTls()) {
    // Extra debugging info: packets we could not yet decrypt may explain why
    // the handshake stalled.
    absl::StrAppend(&error_details, " ", UndecryptablePacketsInfo());
  }
  QUIC_DVLOG(1) << ENDPOINT << error_details;
  CloseConnection(QUIC_HANDSHAKE_TIMEOUT, error_details,
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Idle-timeout callback: closes the connection with a descriptive detail
// string. Connections that had consecutive PTOs or that the visitor wants
// kept alive always send a close packet; otherwise the configured idle-close
// behavior (possibly silent) is used.
void QuicConnection::OnIdleNetworkDetected() {
  const QuicTime::Delta duration =
      clock_->ApproximateNow() -
      idle_network_detector_.last_network_activity_time();
  std::string error_details = absl::StrCat(
      "No recent network activity after ", duration.ToDebuggingValue(),
      ". Timeout:",
      idle_network_detector_.idle_network_timeout().ToDebuggingValue());
  if (perspective() == Perspective::IS_CLIENT && version().UsesTls() &&
      !IsHandshakeComplete()) {
    // Pre-handshake idle timeouts may be caused by undecryptable packets.
    absl::StrAppend(&error_details, " ", UndecryptablePacketsInfo());
  }
  QUIC_DVLOG(1) << ENDPOINT << error_details;
  const bool has_consecutive_pto =
      sent_packet_manager_.GetConsecutivePtoCount() > 0;
  if (has_consecutive_pto || visitor_->ShouldKeepConnectionAlive()) {
    if (GetQuicReloadableFlag(quic_add_stream_info_to_idle_close_detail) &&
        !has_consecutive_pto) {
      // Flag-gated: append per-stream info when the visitor asked to keep the
      // connection alive without outstanding PTOs.
      QUIC_RELOADABLE_FLAG_COUNT(quic_add_stream_info_to_idle_close_detail);
      absl::StrAppend(&error_details, ", ",
                      visitor_->GetStreamsInfoForLogging());
    }
    CloseConnection(QUIC_NETWORK_IDLE_TIMEOUT, error_details,
                    ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  QuicErrorCode error_code = QUIC_NETWORK_IDLE_TIMEOUT;
  if (idle_timeout_connection_close_behavior_ ==
      ConnectionCloseBehavior::
          SILENT_CLOSE_WITH_CONNECTION_CLOSE_PACKET_SERIALIZED) {
    error_code = QUIC_SILENT_IDLE_TIMEOUT;
  }
  CloseConnection(error_code, error_details,
                  idle_timeout_connection_close_behavior_);
}
// Keep-alive timer: send a PING at the application-data encryption level,
// but only when no retransmission is already pending and the visitor still
// wants the connection kept alive.
void QuicConnection::OnKeepAliveTimeout() {
  if (retransmission_alarm().IsSet()) {
    return;
  }
  if (!visitor_->ShouldKeepConnectionAlive()) {
    return;
  }
  SendPingAtLevel(framer().GetEncryptionLevelToSendApplicationData());
}
// Retransmittable-on-wire timer: keeps the path "warm" by either resending a
// buffered copy of the first 1-RTT packet, sending random bytes (longer than
// a stateless reset, so it cannot be mistaken for one), or — by default —
// sending a PING.
void QuicConnection::OnRetransmittableOnWireTimeout() {
  if (retransmission_alarm().IsSet() ||
      !visitor_->ShouldKeepConnectionAlive()) {
    return;
  }
  bool packet_buffered = false;
  switch (retransmittable_on_wire_behavior_) {
    case DEFAULT:
      break;
    case SEND_FIRST_FORWARD_SECURE_PACKET:
      if (first_serialized_one_rtt_packet_ != nullptr) {
        buffered_packets_.emplace_back(
            first_serialized_one_rtt_packet_->data.get(),
            first_serialized_one_rtt_packet_->length, self_address(),
            peer_address(), first_serialized_one_rtt_packet_->ecn_codepoint);
        packet_buffered = true;
      }
      break;
    case SEND_RANDOM_BYTES:
      // Length is strictly greater than the minimum stateless reset so the
      // peer cannot interpret the padding as a reset.
      const QuicPacketLength random_bytes_length = std::max<QuicPacketLength>(
          QuicFramer::GetMinStatelessResetPacketLength() + 1,
          random_generator_->RandUint64() %
              packet_creator_.max_packet_length());
      buffered_packets_.emplace_back(*random_generator_, random_bytes_length,
                                     self_address(), peer_address());
      packet_buffered = true;
      break;
  }
  if (packet_buffered) {
    if (!writer_->IsWriteBlocked()) {
      WriteQueuedPackets();
    }
    if (connected_) {
      // Re-arm the ping manager; writing above may have closed the connection.
      ping_manager_.SetAlarm(clock_->ApproximateNow(),
                             visitor_->ShouldKeepConnectionAlive(),
                             true);
    }
    return;
  }
  SendPingAtLevel(framer().GetEncryptionLevelToSendApplicationData());
}
// Called when peer-issued connection IDs are retired. Replaces any retired ID
// still in use on the default/alternative paths with fresh unused IDs, keeps
// the two paths consistent when they shared an ID, and sends
// RETIRE_CONNECTION_ID frames for every retired sequence number.
void QuicConnection::OnPeerIssuedConnectionIdRetired() {
  QUICHE_DCHECK(peer_issued_cid_manager_ != nullptr);
  // A client uses peer-issued *server* IDs; a server uses peer-issued
  // *client* IDs.
  QuicConnectionId* default_path_cid =
      perspective_ == Perspective::IS_CLIENT
          ? &default_path_.server_connection_id
          : &default_path_.client_connection_id;
  QuicConnectionId* alternative_path_cid =
      perspective_ == Perspective::IS_CLIENT
          ? &alternative_path_.server_connection_id
          : &alternative_path_.client_connection_id;
  bool default_path_and_alternative_path_use_the_same_peer_connection_id =
      *default_path_cid == *alternative_path_cid;
  if (!default_path_cid->IsEmpty() &&
      !peer_issued_cid_manager_->IsConnectionIdActive(*default_path_cid)) {
    // The default path's ID was retired; drop it.
    *default_path_cid = QuicConnectionId();
  }
  if (default_path_cid->IsEmpty()) {
    // Try to replace it with an unused peer-issued ID.
    const QuicConnectionIdData* unused_connection_id_data =
        peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
    if (unused_connection_id_data != nullptr) {
      *default_path_cid = unused_connection_id_data->connection_id;
      default_path_.stateless_reset_token =
          unused_connection_id_data->stateless_reset_token;
      if (perspective_ == Perspective::IS_CLIENT) {
        packet_creator_.SetServerConnectionId(
            unused_connection_id_data->connection_id);
      } else {
        packet_creator_.SetClientConnectionId(
            unused_connection_id_data->connection_id);
      }
    }
  }
  if (default_path_and_alternative_path_use_the_same_peer_connection_id) {
    // Keep the paths in lockstep when they previously shared an ID.
    *alternative_path_cid = *default_path_cid;
    alternative_path_.stateless_reset_token =
        default_path_.stateless_reset_token;
  } else if (!alternative_path_cid->IsEmpty() &&
             !peer_issued_cid_manager_->IsConnectionIdActive(
                 *alternative_path_cid)) {
    // The alternative path's own ID was retired; try to replace it too.
    *alternative_path_cid = EmptyQuicConnectionId();
    const QuicConnectionIdData* unused_connection_id_data =
        peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
    if (unused_connection_id_data != nullptr) {
      *alternative_path_cid = unused_connection_id_data->connection_id;
      alternative_path_.stateless_reset_token =
          unused_connection_id_data->stateless_reset_token;
    }
  }
  // Tell the peer which sequence numbers were retired.
  std::vector<uint64_t> retired_cid_sequence_numbers =
      peer_issued_cid_manager_->ConsumeToBeRetiredConnectionIdSequenceNumbers();
  QUICHE_DCHECK(!retired_cid_sequence_numbers.empty());
  for (const auto& sequence_number : retired_cid_sequence_numbers) {
    ++stats_.num_retire_connection_id_sent;
    visitor_->SendRetireConnectionId(sequence_number);
  }
}
// Asks the visitor to send a NEW_CONNECTION_ID frame and bumps the stat.
// Returns the connected state afterwards — the visitor may close the
// connection while processing the frame.
bool QuicConnection::SendNewConnectionId(
    const QuicNewConnectionIdFrame& frame) {
  visitor_->SendNewConnectionId(frame);
  ++stats_.num_new_connection_id_sent;
  return connected_;
}
// Servers ask the visitor (e.g. the dispatcher) to reserve a newly issued
// connection ID; clients have nothing to reserve and always succeed.
bool QuicConnection::MaybeReserveConnectionId(
    const QuicConnectionId& connection_id) {
  if (perspective_ != Perspective::IS_SERVER) {
    return true;
  }
  return visitor_->MaybeReserveConnectionId(connection_id);
}
// Notifies the visitor when a self-issued (server) connection ID is retired;
// a no-op on clients.
void QuicConnection::OnSelfIssuedConnectionIdRetired(
    const QuicConnectionId& connection_id) {
  if (perspective_ != Perspective::IS_SERVER) {
    return;
  }
  visitor_->OnServerConnectionIdRetired(connection_id);
}
// Updates the ack timeout for the last received packet, at most once per
// packet (guarded by should_last_packet_instigate_acks_).
void QuicConnection::MaybeUpdateAckTimeout() {
  if (should_last_packet_instigate_acks_) {
    // Already updated for this packet.
    return;
  }
  should_last_packet_instigate_acks_ = true;
  uber_received_packet_manager_.MaybeUpdateAckTimeout(
      true,
      last_received_packet_info_.decrypted_level,
      last_received_packet_info_.header.packet_number,
      last_received_packet_info_.receipt_time, clock_->ApproximateNow(),
      sent_packet_manager_.GetRttStats());
}
// Deadline for declaring the path degrading, or Zero when detection is off.
QuicTime QuicConnection::GetPathDegradingDeadline() const {
  if (!ShouldDetectPathDegrading()) {
    return QuicTime::Zero();
  }
  const QuicTime::Delta degrading_delay =
      sent_packet_manager_.GetPathDegradingDelay();
  return clock_->ApproximateNow() + degrading_delay;
}
// Path-degrading detection is a client-side feature, active only while the
// path has not already been flagged as degrading. With the reloadable flag
// enabled (and multiple packet number spaces), it additionally waits for
// handshake confirmation; otherwise it requires the handshake timeout to be
// disarmed (infinite).
bool QuicConnection::ShouldDetectPathDegrading() const {
  if (!connected_) {
    return false;
  }
  if (GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed) &&
      SupportsMultiplePacketNumberSpaces()) {
    QUIC_RELOADABLE_FLAG_COUNT_N(
        quic_no_path_degrading_before_handshake_confirmed, 1, 2);
    // Flag on: defer detection until the handshake is confirmed.
    return perspective_ == Perspective::IS_CLIENT && IsHandshakeConfirmed() &&
           !is_path_degrading_;
  }
  if (!idle_network_detector_.handshake_timeout().IsInfinite()) {
    // Legacy behavior: no detection while a handshake timeout is armed.
    return false;
  }
  return perspective_ == Perspective::IS_CLIENT && !is_path_degrading_;
}
// Deadline for declaring a network blackhole, or Zero when detection is off.
// When path-degrading detection is also active, the blackhole delay is
// stretched so it never fires before degrading detection plus two PTOs.
QuicTime QuicConnection::GetNetworkBlackholeDeadline() const {
  if (!ShouldDetectBlackhole()) {
    return QuicTime::Zero();
  }
  QUICHE_DCHECK_LT(0u, num_rtos_for_blackhole_detection_);
  const QuicTime::Delta blackhole_delay =
      sent_packet_manager_.GetNetworkBlackholeDelay(
          num_rtos_for_blackhole_detection_);
  if (!ShouldDetectPathDegrading()) {
    return clock_->ApproximateNow() + blackhole_delay;
  }
  const QuicTime::Delta adjusted_delay = CalculateNetworkBlackholeDelay(
      blackhole_delay, sent_packet_manager_.GetPathDegradingDelay(),
      sent_packet_manager_.GetPtoDelay());
  return clock_->ApproximateNow() + adjusted_delay;
}
// Ensures the blackhole delay is never shorter than the path-degrading delay
// plus two PTOs; counts the cases where the raw delay had to be extended.
QuicTime::Delta QuicConnection::CalculateNetworkBlackholeDelay(
    QuicTime::Delta blackhole_delay, QuicTime::Delta path_degrading_delay,
    QuicTime::Delta pto_delay) {
  const QuicTime::Delta lower_bound = path_degrading_delay + pto_delay * 2;
  if (blackhole_delay < lower_bound) {
    QUIC_CODE_COUNT(quic_extending_short_blackhole_delay);
  }
  return std::max(lower_bound, blackhole_delay);
}
// Remembers a server address (client only); uninitialized and duplicate
// addresses are ignored.
void QuicConnection::AddKnownServerAddress(const QuicSocketAddress& address) {
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT);
  if (address.IsInitialized() && !IsKnownServerAddress(address)) {
    known_server_addresses_.push_back(address);
  }
}
// Issues a NEW_CONNECTION_ID frame for the server preferred address, when a
// self-issued CID manager exists; otherwise there is nothing to issue.
std::optional<QuicNewConnectionIdFrame>
QuicConnection::MaybeIssueNewConnectionIdForPreferredAddress() {
  if (self_issued_cid_manager_ == nullptr) {
    return std::nullopt;
  }
  return self_issued_cid_manager_->MaybeIssueNewConnectionIdForPreferredAddress();
}
// Whether blackhole detection should be armed. Disabled while disconnected or
// explicitly turned off; the reloadable flag additionally suppresses it until
// handshake confirmation. With 5RTO-based detection it requires a complete
// handshake; the legacy path requires the handshake timeout to be disarmed
// and a positive RTO threshold.
bool QuicConnection::ShouldDetectBlackhole() const {
  if (!connected_ || blackhole_detection_disabled_) {
    return false;
  }
  if (GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed) &&
      SupportsMultiplePacketNumberSpaces() && !IsHandshakeConfirmed()) {
    QUIC_RELOADABLE_FLAG_COUNT_N(
        quic_no_path_degrading_before_handshake_confirmed, 2, 2);
    return false;
  }
  if (default_enable_5rto_blackhole_detection_) {
    QUIC_RELOADABLE_FLAG_COUNT_N(quic_default_enable_5rto_blackhole_detection2,
                                 3, 3);
    return IsHandshakeComplete();
  }
  if (!idle_network_detector_.handshake_timeout().IsInfinite()) {
    return false;
  }
  return num_rtos_for_blackhole_detection_ > 0;
}
// Returns the retransmission deadline. Special case: a client that has
// buffered undecryptable packets before its first PTO, with no handshake
// decrypter yet, retries on a short (alarm-granularity) timer instead of the
// sent packet manager's schedule.
QuicTime QuicConnection::GetRetransmissionDeadline() const {
  if (perspective_ == Perspective::IS_CLIENT &&
      SupportsMultiplePacketNumberSpaces() && !IsHandshakeConfirmed() &&
      stats_.pto_count == 0 &&
      !framer_.HasDecrypterOfEncryptionLevel(ENCRYPTION_HANDSHAKE) &&
      !undecryptable_packets_.empty()) {
    return clock_->ApproximateNow() + kAlarmGranularity;
  }
  return sent_packet_manager_.GetRetransmissionTime();
}
// Sends a PATH_CHALLENGE carrying `data_buffer`. On the default writer the
// frame is queued through the packet creator (with a flusher); on an
// alternative writer a standalone probing packet is serialized and written
// directly. Requires forward-secure keys. Returns the connected state.
bool QuicConnection::SendPathChallenge(
    const QuicPathFrameBuffer& data_buffer,
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address,
    const QuicSocketAddress& effective_peer_address, QuicPacketWriter* writer) {
  if (!framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_FORWARD_SECURE)) {
    // Cannot probe before 1-RTT keys exist.
    return connected_;
  }
  QuicConnectionId client_cid, server_cid;
  FindOnPathConnectionIds(self_address, effective_peer_address, &client_cid,
                          &server_cid);
  if (writer == writer_) {
    ScopedPacketFlusher flusher(this);
    {
      // Scope the peer-address override to just the frame addition.
      QuicPacketCreator::ScopedPeerAddressContext context(
          &packet_creator_, peer_address, client_cid, server_cid);
      packet_creator_.AddPathChallengeFrame(data_buffer);
    }
  } else if (!writer->IsWriteBlocked()) {
    QuicPacketCreator::ScopedPeerAddressContext context(
        &packet_creator_, peer_address, client_cid, server_cid);
    std::unique_ptr<SerializedPacket> probing_packet =
        packet_creator_.SerializePathChallengeConnectivityProbingPacket(
            data_buffer);
    QUICHE_DCHECK_EQ(IsRetransmittable(*probing_packet),
                     NO_RETRANSMITTABLE_DATA)
        << ENDPOINT << "Probing Packet contains retransmittable frames";
    QUICHE_DCHECK_EQ(self_address, alternative_path_.self_address)
        << ENDPOINT
        << "Send PATH_CHALLENGE from self_address: " << self_address.ToString()
        << " which is different from alt_path self address: "
        << alternative_path_.self_address.ToString();
    WritePacketUsingWriter(std::move(probing_packet), writer, self_address,
                           peer_address, false);
  } else {
    // Alternative writer is blocked; silently skip this probe attempt.
    QUIC_DLOG(INFO) << ENDPOINT
                    << "Writer blocked when sending PATH_CHALLENGE.";
  }
  return connected_;
}
// Retry deadline for a path probe: PTO-based on the default path, a fixed
// 3x-initial-RTT delay when probing via a different writer/address.
QuicTime QuicConnection::GetRetryTimeout(
    const QuicSocketAddress& peer_address_to_use,
    QuicPacketWriter* writer_to_use) const {
  const bool probing_default_path =
      writer_to_use == writer_ && peer_address_to_use == peer_address();
  if (probing_default_path) {
    return clock_->ApproximateNow() + sent_packet_manager_.GetPtoDelay();
  }
  return clock_->ApproximateNow() +
         QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs);
}
// Starts validating the path described by `context`, cancelling any validation
// already in flight. A client probing a non-default path first provisions
// fresh connection IDs for the alternative path; if none are available the
// validation fails immediately via `result_delegate`.
// Fix: the "no connection ID" log message was garbled
// ("...as there is no requried connection ID is available.").
void QuicConnection::ValidatePath(
    std::unique_ptr<QuicPathValidationContext> context,
    std::unique_ptr<QuicPathValidator::ResultDelegate> result_delegate,
    PathValidationReason reason) {
  QUICHE_DCHECK(version().HasIetfQuicFrames());
  if (path_validator_.HasPendingPathValidation()) {
    if (perspective_ == Perspective::IS_CLIENT &&
        IsValidatingServerPreferredAddress()) {
      QUIC_CLIENT_HISTOGRAM_BOOL(
          "QuicSession.ServerPreferredAddressValidationCancelled", true,
          "How often the caller kicked off another validation while there is "
          "an on-going server preferred address validation.");
    }
    // Only one validation may run at a time; the new request wins.
    path_validator_.CancelPathValidation();
  }
  if (perspective_ == Perspective::IS_CLIENT &&
      !IsDefaultPath(context->self_address(), context->peer_address())) {
    if (self_issued_cid_manager_ != nullptr) {
      self_issued_cid_manager_->MaybeSendNewConnectionIds();
      if (!connected_) {
        // Sending NEW_CONNECTION_ID may have closed the connection.
        return;
      }
    }
    if ((self_issued_cid_manager_ != nullptr &&
         !self_issued_cid_manager_->HasConnectionIdToConsume()) ||
        (peer_issued_cid_manager_ != nullptr &&
         !peer_issued_cid_manager_->HasUnusedConnectionId())) {
      QUIC_DVLOG(1) << "Client cannot start new path validation as there is "
                       "no required connection ID available.";
      result_delegate->OnPathValidationFailure(std::move(context));
      return;
    }
    QuicConnectionId client_connection_id, server_connection_id;
    std::optional<StatelessResetToken> stateless_reset_token;
    if (self_issued_cid_manager_ != nullptr) {
      client_connection_id =
          *self_issued_cid_manager_->ConsumeOneConnectionId();
    }
    if (peer_issued_cid_manager_ != nullptr) {
      const auto* connection_id_data =
          peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
      server_connection_id = connection_id_data->connection_id;
      stateless_reset_token = connection_id_data->stateless_reset_token;
    }
    // The probed path becomes the alternative path with its own CIDs.
    alternative_path_ = PathState(context->self_address(),
                                  context->peer_address(), client_connection_id,
                                  server_connection_id, stateless_reset_token);
  }
  if (multi_port_stats_ != nullptr &&
      reason == PathValidationReason::kMultiPort) {
    multi_port_stats_->num_client_probing_attempts++;
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    stats_.num_client_probing_attempts++;
  }
  path_validator_.StartPathValidation(std::move(context),
                                      std::move(result_delegate), reason);
  if (perspective_ == Perspective::IS_CLIENT &&
      IsValidatingServerPreferredAddress()) {
    AddKnownServerAddress(received_server_preferred_address_);
  }
}
// Sends a PATH_RESPONSE echoing `data_buffer`. If the challenge arrived on
// the default path the frame is queued normally; otherwise (client only) a
// standalone probing packet is written through the path validator's writer.
// Returns false only when forward-secure keys are missing.
bool QuicConnection::SendPathResponse(
    const QuicPathFrameBuffer& data_buffer,
    const QuicSocketAddress& peer_address_to_send,
    const QuicSocketAddress& effective_peer_address) {
  if (!framer_.HasEncrypterOfEncryptionLevel(ENCRYPTION_FORWARD_SECURE)) {
    return false;
  }
  QuicConnectionId client_cid, server_cid;
  FindOnPathConnectionIds(last_received_packet_info_.destination_address,
                          effective_peer_address, &client_cid, &server_cid);
  QuicPacketCreator::ScopedPeerAddressContext context(
      &packet_creator_, peer_address_to_send, client_cid, server_cid);
  QUIC_DVLOG(1) << ENDPOINT << "Send PATH_RESPONSE to " << peer_address_to_send;
  if (default_path_.self_address ==
      last_received_packet_info_.destination_address) {
    // Challenge was received on the default path; respond in-band.
    return packet_creator_.AddPathResponseFrame(data_buffer);
  }
  QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective_);
  if (!path_validator_.HasPendingPathValidation() ||
      path_validator_.GetContext()->self_address() !=
          last_received_packet_info_.destination_address) {
    // No matching probing socket to respond from; drop the response.
    return true;
  }
  QuicPacketWriter* writer = path_validator_.GetContext()->WriterToUse();
  if (writer->IsWriteBlocked()) {
    QUIC_DLOG(INFO) << ENDPOINT << "Writer blocked when sending PATH_RESPONSE.";
    return true;
  }
  std::unique_ptr<SerializedPacket> probing_packet =
      packet_creator_.SerializePathResponseConnectivityProbingPacket(
          {data_buffer}, true);
  QUICHE_DCHECK_EQ(IsRetransmittable(*probing_packet), NO_RETRANSMITTABLE_DATA);
  QUIC_DVLOG(1) << ENDPOINT
                << "Send PATH_RESPONSE from alternative socket with address "
                << last_received_packet_info_.destination_address;
  WritePacketUsingWriter(std::move(probing_packet), writer,
                         last_received_packet_info_.destination_address,
                         peer_address_to_send,
                         false);
  return true;
}
// Updates the cached direct peer address and keeps the packet creator's
// default destination in sync with it.
void QuicConnection::UpdatePeerAddress(QuicSocketAddress peer_address) {
  direct_peer_address_ = peer_address;
  packet_creator_.SetDefaultPeerAddress(peer_address);
}
// Sends a PING frame at the given encryption level; the RAII context restores
// the previous level when this function returns.
void QuicConnection::SendPingAtLevel(EncryptionLevel level) {
  ScopedEncryptionLevelContext level_context(this, level);
  SendControlFrame(QuicFrame(QuicPingFrame()));
}
// True while a path validation is in flight.
bool QuicConnection::HasPendingPathValidation() const {
  return path_validator_.HasPendingPathValidation();
}
// Returns the context of the in-flight path validation (ownership retained by
// the validator).
QuicPathValidationContext* QuicConnection::GetPathValidationContext() const {
  return path_validator_.GetContext();
}
// Aborts any in-flight path validation.
void QuicConnection::CancelPathValidation() {
  path_validator_.CancelPathValidation();
}
// Client-side: installs the connection IDs to use on the default path when
// migrating to (self_address, peer_address). Reuses the alternative path's
// IDs when migrating onto it; otherwise consumes fresh IDs from the CID
// managers. Returns false when no usable IDs are available or the connection
// closed while issuing new ones.
bool QuicConnection::UpdateConnectionIdsOnMigration(
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address) {
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT);
  if (IsAlternativePath(self_address, peer_address)) {
    // Migrating to the already-provisioned alternative path: adopt its IDs.
    default_path_.client_connection_id = alternative_path_.client_connection_id;
    default_path_.server_connection_id = alternative_path_.server_connection_id;
    default_path_.stateless_reset_token =
        alternative_path_.stateless_reset_token;
    return true;
  }
  if (self_issued_cid_manager_ != nullptr) {
    self_issued_cid_manager_->MaybeSendNewConnectionIds();
    if (!connected_) {
      // Sending NEW_CONNECTION_ID frames may have closed the connection.
      return false;
    }
  }
  if ((self_issued_cid_manager_ != nullptr &&
       !self_issued_cid_manager_->HasConnectionIdToConsume()) ||
      (peer_issued_cid_manager_ != nullptr &&
       !peer_issued_cid_manager_->HasUnusedConnectionId())) {
    // Not enough connection IDs to migrate with.
    return false;
  }
  if (self_issued_cid_manager_ != nullptr) {
    default_path_.client_connection_id =
        *self_issued_cid_manager_->ConsumeOneConnectionId();
  }
  if (peer_issued_cid_manager_ != nullptr) {
    const auto* connection_id_data =
        peer_issued_cid_manager_->ConsumeOneUnusedConnectionId();
    default_path_.server_connection_id = connection_id_data->connection_id;
    default_path_.stateless_reset_token =
        connection_id_data->stateless_reset_token;
  }
  return true;
}
// Retires peer-issued connection IDs that are no longer bound to either the
// default or the alternative path. Clients consume server-issued IDs;
// servers consume client-issued IDs. No-op without IETF frames or a manager.
void QuicConnection::RetirePeerIssuedConnectionIdsNoLongerOnPath() {
  if (!version().HasIetfQuicFrames() || peer_issued_cid_manager_ == nullptr) {
    return;
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    peer_issued_cid_manager_->MaybeRetireUnusedConnectionIds(
        {default_path_.server_connection_id,
         alternative_path_.server_connection_id});
  } else {
    peer_issued_cid_manager_->MaybeRetireUnusedConnectionIds(
        {default_path_.client_connection_id,
         alternative_path_.client_connection_id});
  }
}
// Client-side path migration to (self_address, peer_address) using `writer`.
// Takes ownership of `writer` when owns_writer is true (and deletes it on
// every failure path). For HTTP/3, first provisions connection IDs for the
// new path. Returns true on success.
bool QuicConnection::MigratePath(const QuicSocketAddress& self_address,
                                 const QuicSocketAddress& peer_address,
                                 QuicPacketWriter* writer, bool owns_writer) {
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT);
  if (!connected_) {
    if (owns_writer) {
      // We committed to owning the writer even on failure.
      delete writer;
    }
    return false;
  }
  QUICHE_DCHECK(!version().UsesHttp3() || IsHandshakeConfirmed() ||
                accelerated_server_preferred_address_);
  if (version().UsesHttp3()) {
    if (!UpdateConnectionIdsOnMigration(self_address, peer_address)) {
      if (owns_writer) {
        delete writer;
      }
      return false;
    }
    if (packet_creator_.GetServerConnectionId().length() !=
        default_path_.server_connection_id.length()) {
      // A different CID length changes the packet layout; flush first.
      packet_creator_.FlushCurrentPacket();
    }
    packet_creator_.SetClientConnectionId(default_path_.client_connection_id);
    packet_creator_.SetServerConnectionId(default_path_.server_connection_id);
  }
  const auto self_address_change_type = QuicUtils::DetermineAddressChangeType(
      default_path_.self_address, self_address);
  const auto peer_address_change_type = QuicUtils::DetermineAddressChangeType(
      default_path_.peer_address, peer_address);
  QUICHE_DCHECK(self_address_change_type != NO_CHANGE ||
                peer_address_change_type != NO_CHANGE);
  // A migration that at most changes ports on both ends is a "port change".
  const bool is_port_change = (self_address_change_type == PORT_CHANGE ||
                               self_address_change_type == NO_CHANGE) &&
                              (peer_address_change_type == PORT_CHANGE ||
                               peer_address_change_type == NO_CHANGE);
  SetSelfAddress(self_address);
  UpdatePeerAddress(peer_address);
  default_path_.peer_address = peer_address;
  if (writer_ != writer) {
    SetQuicPacketWriter(writer, owns_writer);
  }
  MaybeClearQueuedPacketsOnPathChange();
  OnSuccessfulMigration(is_port_change);
  return true;
}
// Client-side handler for a failed path validation: clears the alternative
// path, records multi-port probe-failure stats, flags a failed server
// preferred address validation, and retires connection IDs no longer in use.
void QuicConnection::OnPathValidationFailureAtClient(
    bool is_multi_port, const QuicPathValidationContext& context) {
  QUICHE_DCHECK(perspective_ == Perspective::IS_CLIENT &&
                version().HasIetfQuicFrames());
  alternative_path_.Clear();
  if (is_multi_port && multi_port_stats_ != nullptr) {
    // Split the failure stat by whether the default path was degrading.
    if (is_path_degrading_) {
      multi_port_stats_->num_multi_port_probe_failures_when_path_degrading++;
    } else {
      multi_port_stats_
          ->num_multi_port_probe_failures_when_path_not_degrading++;
    }
  }
  if (context.peer_address() == received_server_preferred_address_ &&
      received_server_preferred_address_ != default_path_.peer_address) {
    QUIC_DLOG(INFO) << "Failed to validate server preferred address : "
                    << received_server_preferred_address_;
    mutable_stats().failed_to_validate_server_preferred_address = true;
  }
  RetirePeerIssuedConnectionIdsNoLongerOnPath();
}
// Returns a server connection ID that is still active: the default-path ID
// when it is active (or on clients / without a CID manager), otherwise any
// active ID from the self-issued manager.
QuicConnectionId QuicConnection::GetOneActiveServerConnectionId() const {
  if (perspective_ == Perspective::IS_CLIENT ||
      self_issued_cid_manager_ == nullptr) {
    return connection_id();
  }
  auto active_connection_ids = GetActiveServerConnectionIds();
  QUIC_BUG_IF(quic_bug_6944, active_connection_ids.empty());
  if (active_connection_ids.empty() ||
      std::find(active_connection_ids.begin(), active_connection_ids.end(),
                connection_id()) != active_connection_ids.end()) {
    return connection_id();
  }
  // The default path's ID has been retired; pick another active one.
  QUICHE_CODE_COUNT(connection_id_on_default_path_has_been_retired);
  auto active_connection_id =
      self_issued_cid_manager_->GetOneActiveConnectionId();
  return active_connection_id;
}
// Server-side: returns all unretired server connection IDs, plus the original
// destination connection ID when one is recorded (it must never already be in
// the active list — that would indicate a bookkeeping bug).
std::vector<QuicConnectionId> QuicConnection::GetActiveServerConnectionIds()
    const {
  QUICHE_DCHECK_EQ(Perspective::IS_SERVER, perspective_);
  std::vector<QuicConnectionId> result;
  if (self_issued_cid_manager_ == nullptr) {
    // Without a CID manager the default path's ID is the only one.
    result.push_back(default_path_.server_connection_id);
  } else {
    QUICHE_DCHECK(version().HasIetfQuicFrames());
    result = self_issued_cid_manager_->GetUnretiredConnectionIds();
  }
  if (!original_destination_connection_id_.has_value()) {
    return result;
  }
  if (std::find(result.begin(), result.end(),
                *original_destination_connection_id_) != result.end()) {
    QUIC_BUG(quic_unexpected_original_destination_connection_id)
        << "original_destination_connection_id: "
        << *original_destination_connection_id_
        << " is unexpectedly in active list";
  } else {
    result.insert(result.end(), *original_destination_connection_id_);
  }
  return result;
}
// Creates the connection-ID manager appropriate for this endpoint (IETF
// frames only): clients track the peer-issued server IDs, servers issue
// their own. Nothing is created until a server connection ID exists.
void QuicConnection::CreateConnectionIdManager() {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  if (default_path_.server_connection_id.IsEmpty()) {
    return;
  }
  if (perspective_ == Perspective::IS_CLIENT) {
    peer_issued_cid_manager_ =
        std::make_unique<QuicPeerIssuedConnectionIdManager>(
            kMinNumOfActiveConnectionIds, default_path_.server_connection_id,
            clock_, alarm_factory_, this, context());
  } else {
    self_issued_cid_manager_ = MakeSelfIssuedConnectionIdManager();
  }
}
// Debug check: fires a QUIC_BUG if stream `id` still has frames pending in
// the packet creator while the connection is alive.
void QuicConnection::QuicBugIfHasPendingFrames(QuicStreamId id) const {
  QUIC_BUG_IF(quic_has_pending_frames_unexpectedly,
              connected_ && packet_creator_.HasPendingStreamFramesOfStream(id))
      << "Stream " << id
      << " has pending frames unexpectedly. Received packet info: "
      << last_received_packet_info_;
}
// Pre-sizes the sent packet manager's unacked-packet map to avoid early
// rehash/regrowth.
void QuicConnection::SetUnackedMapInitialCapacity() {
  const auto initial_capacity = GetUnackedMapInitialCapacity();
  sent_packet_manager_.ReserveUnackedPacketsInitialCapacity(initial_capacity);
}
// Client-side: installs an address token to send as the retry token, unless
// one is already pending (an existing token is never overwritten).
void QuicConnection::SetSourceAddressTokenToSend(absl::string_view token) {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (packet_creator_.HasRetryToken()) {
    return;
  }
  packet_creator_.SetRetryToken(std::string(token.data(), token.length()));
}
// Server-side anti-amplification accounting for the alternative path: adds
// `sent_packet_size` to the bytes sent before validation and warns (without
// blocking) when the amplification budget has been exceeded.
void QuicConnection::MaybeUpdateBytesSentToAlternativeAddress(
    const QuicSocketAddress& peer_address, QuicByteCount sent_packet_size) {
  if (!version().SupportsAntiAmplificationLimit() ||
      perspective_ != Perspective::IS_SERVER) {
    return;
  }
  QUICHE_DCHECK(!IsDefaultPath(default_path_.self_address, peer_address));
  if (!IsAlternativePath(default_path_.self_address, peer_address)) {
    // Writes to an address that is neither default nor alternative are not
    // tracked.
    QUIC_DLOG(INFO) << "Wrote to uninteresting peer address: " << peer_address
                    << " default direct_peer_address_ " << direct_peer_address_
                    << " alternative path peer address "
                    << alternative_path_.peer_address;
    return;
  }
  if (alternative_path_.validated) {
    // No accounting needed once the path is validated.
    return;
  }
  if (alternative_path_.bytes_sent_before_address_validation >=
      anti_amplification_factor_ *
          alternative_path_.bytes_received_before_address_validation) {
    QUIC_LOG_FIRST_N(WARNING, 100)
        << "Server sent more data than allowed to unverified alternative "
           "peer address "
        << peer_address << " bytes sent "
        << alternative_path_.bytes_sent_before_address_validation
        << ", bytes received "
        << alternative_path_.bytes_received_before_address_validation;
  }
  alternative_path_.bytes_sent_before_address_validation += sent_packet_size;
}
// Server-side: credits bytes received on the (unvalidated) alternative path
// toward its anti-amplification budget, at most once per received packet
// (guarded by received_bytes_counted).
void QuicConnection::MaybeUpdateBytesReceivedFromAlternativeAddress(
    QuicByteCount received_packet_size) {
  if (!version().SupportsAntiAmplificationLimit() ||
      perspective_ != Perspective::IS_SERVER ||
      !IsAlternativePath(last_received_packet_info_.destination_address,
                         GetEffectivePeerAddressFromCurrentPacket()) ||
      last_received_packet_info_.received_bytes_counted) {
    return;
  }
  QUICHE_DCHECK(!IsDefaultPath(last_received_packet_info_.destination_address,
                               GetEffectivePeerAddressFromCurrentPacket()));
  if (!alternative_path_.validated) {
    alternative_path_.bytes_received_before_address_validation +=
        received_packet_size;
  }
  last_received_packet_info_.received_bytes_counted = true;
}
// A (self, peer) pair is the default path when the self address matches the
// default path and the peer address matches the current direct peer.
bool QuicConnection::IsDefaultPath(
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address) const {
  return default_path_.self_address == self_address &&
         direct_peer_address_ == peer_address;
}
// A (self, peer) pair is the alternative path when both addresses match the
// alternative path state.
bool QuicConnection::IsAlternativePath(
    const QuicSocketAddress& self_address,
    const QuicSocketAddress& peer_address) const {
  return alternative_path_.self_address == self_address &&
         alternative_path_.peer_address == peer_address;
}
// Resets every field of the path state to its empty/initial value, including
// the per-path congestion controller, RTT stats, and ECN bookkeeping.
void QuicConnection::PathState::Clear() {
  self_address = QuicSocketAddress();
  peer_address = QuicSocketAddress();
  client_connection_id = {};
  server_connection_id = {};
  validated = false;
  bytes_received_before_address_validation = 0;
  bytes_sent_before_address_validation = 0;
  send_algorithm = nullptr;
  rtt_stats = std::nullopt;
  stateless_reset_token.reset();
  ecn_marked_packet_acked = false;
  ecn_pto_count = 0;
}
// Move construction delegates to the move-assignment operator, which also
// clears the moved-from state.
QuicConnection::PathState::PathState(PathState&& other) {
  *this = std::move(other);
}
// Move assignment: copies/moves every field, deep-clones the RTT stats (they
// are cloned rather than moved), then clears `other` so the moved-from path
// state is back to its empty value.
QuicConnection::PathState& QuicConnection::PathState::operator=(
    QuicConnection::PathState&& other) {
  if (this != &other) {
    self_address = other.self_address;
    peer_address = other.peer_address;
    client_connection_id = other.client_connection_id;
    server_connection_id = other.server_connection_id;
    stateless_reset_token = other.stateless_reset_token;
    validated = other.validated;
    bytes_received_before_address_validation =
        other.bytes_received_before_address_validation;
    bytes_sent_before_address_validation =
        other.bytes_sent_before_address_validation;
    send_algorithm = std::move(other.send_algorithm);
    if (other.rtt_stats.has_value()) {
      rtt_stats.emplace();
      rtt_stats->CloneFrom(*other.rtt_stats);
    } else {
      rtt_stats.reset();
    }
    // Leave the source in a well-defined empty state.
    other.Clear();
  }
  return *this;
}
// True when the effective peer host of the current packet matches a path
// (default or alternative) that has already been validated.
bool QuicConnection::IsReceivedPeerAddressValidated() const {
  QuicSocketAddress current_effective_peer_address =
      GetEffectivePeerAddressFromCurrentPacket();
  QUICHE_DCHECK(current_effective_peer_address.IsInitialized());
  const auto peer_host = current_effective_peer_address.host();
  if (alternative_path_.validated &&
      alternative_path_.peer_address.host() == peer_host) {
    return true;
  }
  return default_path_.validated &&
         default_path_.peer_address.host() == peer_host;
}
// Client-side: a multi-port probe succeeded. Marks the alternative path
// validated, stores the context for future probes, arms the periodic probing
// alarm, and records probe RTT stats (split by whether the default path was
// degrading at the time).
void QuicConnection::OnMultiPortPathProbingSuccess(
    std::unique_ptr<QuicPathValidationContext> context, QuicTime start_time) {
  QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective());
  alternative_path_.validated = true;
  multi_port_path_context_ = std::move(context);
  multi_port_probing_alarm().Set(clock_->ApproximateNow() +
                                 multi_port_probing_interval_);
  if (multi_port_stats_ != nullptr) {
    multi_port_stats_->num_successful_probes++;
    auto now = clock_->Now();
    // Probe round-trip time measured from when the probe started.
    auto time_delta = now - start_time;
    multi_port_stats_->rtt_stats.UpdateRtt(time_delta, QuicTime::Delta::Zero(),
                                           now);
    if (is_path_degrading_) {
      multi_port_stats_->rtt_stats_when_default_path_degrading.UpdateRtt(
          time_delta, QuicTime::Delta::Zero(), now);
    }
  }
}
// Kicks off a refresh probe of the stored multi-port path, but only when the
// connection is idle to probe: connected, no validation pending, the stored
// context still matches the alternative path, the visitor wants the
// connection alive, and the probing alarm is not already armed.
void QuicConnection::MaybeProbeMultiPortPath() {
  if (!connected_ || path_validator_.HasPendingPathValidation() ||
      !multi_port_path_context_ ||
      alternative_path_.self_address !=
          multi_port_path_context_->self_address() ||
      alternative_path_.peer_address !=
          multi_port_path_context_->peer_address() ||
      !visitor_->ShouldKeepConnectionAlive() ||
      multi_port_probing_alarm().IsSet()) {
    return;
  }
  if (multi_port_stats_ != nullptr) {
    multi_port_stats_->num_client_probing_attempts++;
  }
  auto multi_port_validation_result_delegate =
      std::make_unique<MultiPortPathValidationResultDelegate>(this);
  // StartPathValidation consumes the stored context; it is repopulated on
  // probing success.
  path_validator_.StartPathValidation(
      std::move(multi_port_path_context_),
      std::move(multi_port_validation_result_delegate),
      PathValidationReason::kMultiPort);
}
// Called when the session produces a context for a new multi-port path.
// Cancels any scheduled periodic probe, drops the previously stored context,
// and starts validating the new path.
void QuicConnection::ContextObserver::OnMultiPortPathContextAvailable(
    std::unique_ptr<QuicPathValidationContext> path_context) {
  if (!path_context) {
    return;
  }
  auto multi_port_validation_result_delegate =
      std::make_unique<MultiPortPathValidationResultDelegate>(connection_);
  connection_->multi_port_probing_alarm().Cancel();
  connection_->multi_port_path_context_ = nullptr;
  // Guard the stats pointer: every other access in this file checks
  // multi_port_stats_ for null before dereferencing, and this path must not
  // crash if stats collection is disabled.
  if (connection_->multi_port_stats_ != nullptr) {
    connection_->multi_port_stats_->num_multi_port_paths_created++;
  }
  connection_->ValidatePath(std::move(path_context),
                            std::move(multi_port_validation_result_delegate),
                            PathValidationReason::kMultiPort);
}
// Delegate that reports the outcome of a client-initiated multi-port path
// probe back to the owning connection. Multi-port probing is client-only.
QuicConnection::MultiPortPathValidationResultDelegate::
    MultiPortPathValidationResultDelegate(QuicConnection* connection)
    : connection_(connection) {
  QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, connection->perspective());
}
// On probe success, forward the validated context plus the probe start time
// (used by the connection to derive an RTT sample) to the connection.
void QuicConnection::MultiPortPathValidationResultDelegate::
    OnPathValidationSuccess(std::unique_ptr<QuicPathValidationContext> context,
                            QuicTime start_time) {
  connection_->OnMultiPortPathProbingSuccess(std::move(context), start_time);
}
// On probe failure, notify the connection's client-side failure handler.
void QuicConnection::MultiPortPathValidationResultDelegate::
    OnPathValidationFailure(
        std::unique_ptr<QuicPathValidationContext> context) {
  // NOTE(review): the leading `true` presumably flags this as a multi-port
  // failure -- confirm against OnPathValidationFailureAtClient's signature.
  connection_->OnPathValidationFailureAtClient(true,
                                               *context);
}
// Delegate for reverse path validation started when the peer's address
// changes. Snapshots the relevant addresses and the active migration type at
// construction time so they can be reported in diagnostics if the validation
// result arrives after the connection state has moved on.
QuicConnection::ReversePathValidationResultDelegate::
    ReversePathValidationResultDelegate(
        QuicConnection* connection,
        const QuicSocketAddress& direct_peer_address)
    : QuicPathValidator::ResultDelegate(),
      connection_(connection),
      original_direct_peer_address_(direct_peer_address),
      peer_address_default_path_(connection->direct_peer_address_),
      peer_address_alternative_path_(
          connection_->alternative_path_.peer_address),
      active_effective_peer_migration_type_(
          connection_->active_effective_peer_migration_type_) {}
// Handles a successful reverse path validation. If the validated path is the
// current default path, the pending effective peer migration is finalized
// (and a QUIC_BUG fires if no migration was actually in progress); otherwise
// the matching alternative path is simply marked validated.
void QuicConnection::ReversePathValidationResultDelegate::
    OnPathValidationSuccess(std::unique_ptr<QuicPathValidationContext> context,
                            QuicTime start_time) {
  QUIC_DLOG(INFO) << "Successfully validated new path " << *context
                  << ", validation started at " << start_time;
  if (connection_->IsDefaultPath(context->self_address(),
                                 context->peer_address())) {
    QUIC_CODE_COUNT_N(quic_kick_off_client_address_validation, 3, 6);
    if (connection_->active_effective_peer_migration_type_ == NO_CHANGE) {
      // Validation on the default path should only complete while a peer
      // migration is active; build a detailed diagnostic for the bug report.
      std::string error_detail = absl::StrCat(
          "Reverse path validation on default path from ",
          context->self_address().ToString(), " to ",
          context->peer_address().ToString(),
          " completed without active peer address change: current "
          "peer address on default path ",
          connection_->direct_peer_address_.ToString(),
          ", peer address on default path when the reverse path "
          "validation was kicked off ",
          peer_address_default_path_.ToString(),
          ", peer address on alternative path when the reverse "
          "path validation was kicked off ",
          peer_address_alternative_path_.ToString(),
          ", with active_effective_peer_migration_type_ = ",
          AddressChangeTypeToString(active_effective_peer_migration_type_),
          ". The last received packet number ",
          connection_->last_received_packet_info_.header.packet_number
              .ToString(),
          " Connection is connected: ", connection_->connected_);
      QUIC_BUG(quic_bug_10511_43) << error_detail;
    }
    // The migration is "linkable" when the server connection ID did not
    // change across the path switch.
    connection_->OnEffectivePeerMigrationValidated(
        connection_->alternative_path_.server_connection_id ==
        connection_->default_path_.server_connection_id);
  } else {
    QUICHE_DCHECK(connection_->IsAlternativePath(
        context->self_address(), context->effective_peer_address()));
    QUIC_CODE_COUNT_N(quic_kick_off_client_address_validation, 4, 6);
    QUIC_DVLOG(1) << "Mark alternative peer address "
                  << context->effective_peer_address() << " validated.";
    connection_->alternative_path_.validated = true;
  }
}
// Handles a failed reverse path validation. A failure on the default path
// reverts the connection to the last validated path; a failure on an
// alternative path just discards that path. Either way, connection IDs that
// are no longer used by any path are retired.
void QuicConnection::ReversePathValidationResultDelegate::
    OnPathValidationFailure(
        std::unique_ptr<QuicPathValidationContext> context) {
  if (!connection_->connected()) {
    return;
  }
  QUIC_DLOG(INFO) << "Fail to validate new path " << *context;
  const bool failed_on_default_path = connection_->IsDefaultPath(
      context->self_address(), context->peer_address());
  if (failed_on_default_path) {
    QUIC_CODE_COUNT_N(quic_kick_off_client_address_validation, 5, 6);
    connection_->RestoreToLastValidatedPath(original_direct_peer_address_);
  } else if (connection_->IsAlternativePath(
                 context->self_address(), context->effective_peer_address())) {
    QUIC_CODE_COUNT_N(quic_kick_off_client_address_validation, 6, 6);
    connection_->alternative_path_.Clear();
  }
  connection_->RetirePeerIssuedConnectionIdsNoLongerOnPath();
}
// RAII marker that flags the connection as being inside a probe-timeout
// (PTO) handling scope. Nesting is disallowed by the DCHECK.
QuicConnection::ScopedRetransmissionTimeoutIndicator::
    ScopedRetransmissionTimeoutIndicator(QuicConnection* connection)
    : connection_(connection) {
  QUICHE_DCHECK(!connection_->in_probe_time_out_)
      << "ScopedRetransmissionTimeoutIndicator is not supposed to be nested";
  connection_->in_probe_time_out_ = true;
}
// Clears the probe-timeout flag set by the constructor.
QuicConnection::ScopedRetransmissionTimeoutIndicator::
    ~ScopedRetransmissionTimeoutIndicator() {
  QUICHE_DCHECK(connection_->in_probe_time_out_);
  connection_->in_probe_time_out_ = false;
}
// Reverts the connection to the previously validated (alternative) path
// after a failed reverse path validation on the default path. Restores the
// congestion controller and RTT stats saved at migration time, then promotes
// the alternative path to be the default path. Closes the connection if no
// validated path is available to fall back to.
void QuicConnection::RestoreToLastValidatedPath(
    QuicSocketAddress original_direct_peer_address) {
  QUIC_DLOG(INFO) << "Switch back to use the old peer address "
                  << alternative_path_.peer_address;
  if (!alternative_path_.validated) {
    // Close the connection silently: an attacker may have triggered the
    // failed migration, and we have no validated address left to use.
    CloseConnection(QUIC_INTERNAL_ERROR,
                    "No validated peer address to use after reverse path "
                    "validation failure.",
                    ConnectionCloseBehavior::SILENT_CLOSE);
    return;
  }
  MaybeClearQueuedPacketsOnPathChange();
  OnPeerIpAddressChanged();
  // Restore the congestion controller that was stashed when the (now
  // failed) migration started; its absence indicates a bookkeeping bug.
  if (alternative_path_.send_algorithm != nullptr) {
    sent_packet_manager_.SetSendAlgorithm(
        alternative_path_.send_algorithm.release());
  } else {
    QUIC_BUG(quic_bug_10511_42)
        << "Fail to store congestion controller before migration.";
  }
  if (alternative_path_.rtt_stats.has_value()) {
    sent_packet_manager_.SetRttStats(*alternative_path_.rtt_stats);
  }
  UpdatePeerAddress(original_direct_peer_address);
  SetDefaultPathState(std::move(alternative_path_));
  active_effective_peer_migration_type_ = NO_CHANGE;
  ++stats_.num_invalid_peer_migration;
  // The connection should now be unblocked to send on the restored path.
  WriteIfNotBlocked();
}
// Resets send-side state after the peer's IP address changed: asks the sent
// packet manager to handle the migration (the `true` argument's exact
// meaning is defined by QuicSentPacketManager::OnConnectionMigration --
// confirm there), re-arms the retransmission alarm, and stops blackhole
// detection. Returns the send algorithm that was previously in use so the
// caller can stash it for a possible revert.
std::unique_ptr<SendAlgorithmInterface>
QuicConnection::OnPeerIpAddressChanged() {
  QUICHE_DCHECK(framer_.version().HasIetfQuicFrames());
  std::unique_ptr<SendAlgorithmInterface> old_send_algorithm =
      sent_packet_manager_.OnConnectionMigration(
          true);
  // Migration is expected to clear in-flight tracking.
  QUICHE_DCHECK(!sent_packet_manager_.HasInFlightPackets());
  SetRetransmissionAlarm();
  blackhole_detector_.StopDetection(false);
  return old_send_algorithm;
}
// Forwards the keep-alive ping timeout to the ping manager.
void QuicConnection::set_keep_alive_ping_timeout(
    QuicTime::Delta keep_alive_ping_timeout) {
  ping_manager_.set_keep_alive_timeout(keep_alive_ping_timeout);
}
// Forwards the initial retransmittable-on-wire timeout to the ping manager.
void QuicConnection::set_initial_retransmittable_on_wire_timeout(
    QuicTime::Delta retransmittable_on_wire_timeout) {
  ping_manager_.set_initial_retransmittable_on_wire_timeout(
      retransmittable_on_wire_timeout);
}
// Returns true while the client has an in-flight path validation whose
// target is the server's advertised preferred address (and that address
// differs from the current default-path peer address).
bool QuicConnection::IsValidatingServerPreferredAddress() const {
  QUICHE_DCHECK_EQ(perspective_, Perspective::IS_CLIENT);
  if (!received_server_preferred_address_.IsInitialized() ||
      received_server_preferred_address_ == default_path_.peer_address) {
    return false;
  }
  return path_validator_.HasPendingPathValidation() &&
         path_validator_.GetContext()->peer_address() ==
             received_server_preferred_address_;
}
// Called when path validation of the server's preferred address succeeds on
// the client: records the stat and migrates the connection onto the freshly
// validated path.
void QuicConnection::OnServerPreferredAddressValidated(
    QuicPathValidationContext& context, bool owns_writer) {
  QUIC_DLOG(INFO) << "Server preferred address: " << context.peer_address()
                  << " validated. Migrating path, self_address: "
                  << context.self_address()
                  << ", peer_address: " << context.peer_address();
  mutable_stats().server_preferred_address_validated = true;
  const bool success =
      MigratePath(context.self_address(), context.peer_address(),
                  context.WriterToUse(), owns_writer);
  // Migration onto a just-validated path is expected to always succeed.
  QUIC_BUG_IF(failed to migrate to server preferred address, !success)
      << "Failed to migrate to server preferred address: "
      << context.peer_address() << " after successful validation";
}
// Attempts to configure the ECN codepoint used for outgoing packets.
// Returns false when the restart flag is off, the writer cannot mark ECN,
// the sent packet manager rejects the requested codepoint, or ECN_CE is
// requested (never a valid send codepoint). Validation is skipped when
// explicitly disabled or when clearing the codepoint (ECN_NOT_ECT).
bool QuicConnection::set_ecn_codepoint(QuicEcnCodepoint ecn_codepoint) {
  if (!GetQuicRestartFlag(quic_support_ect1)) {
    return false;
  }
  QUIC_RESTART_FLAG_COUNT_N(quic_support_ect1, 3, 9);
  const bool skip_validation =
      disable_ecn_codepoint_validation_ || ecn_codepoint == ECN_NOT_ECT;
  if (!skip_validation) {
    if (!writer_->SupportsEcn()) {
      return false;
    }
    switch (ecn_codepoint) {
      case ECN_NOT_ECT:
        QUICHE_DCHECK(false);  // Handled by skip_validation above.
        break;
      case ECN_ECT0:
        if (!sent_packet_manager_.EnableECT0()) {
          return false;
        }
        break;
      case ECN_ECT1:
        if (!sent_packet_manager_.EnableECT1()) {
          return false;
        }
        break;
      case ECN_CE:
        return false;
    }
  }
  packet_writer_params_.ecn_codepoint = ecn_codepoint;
  return true;
}
// Alarm trampoline: forwards the idle-timeout alarm to the detector.
void QuicConnection::OnIdleDetectorAlarm() { idle_network_detector_.OnAlarm(); }
// Alarm trampoline: forwards the ping alarm to the ping manager.
void QuicConnection::OnPingAlarm() { ping_manager_.OnAlarm(); }
// Alarm trampoline: forwards the blackhole-detection alarm to the detector.
void QuicConnection::OnNetworkBlackholeDetectorAlarm() {
  blackhole_detector_.OnAlarm();
}
// Serializes a CONNECTION_CLOSE packet whose packet number is derived from
// the largest acked packet. Only valid once the handshake is confirmed;
// returns nullptr otherwise (the DCHECKs catch misuse in debug builds while
// the runtime guard keeps release builds safe).
std::unique_ptr<SerializedPacket>
QuicConnection::SerializeLargePacketNumberConnectionClosePacket(
    QuicErrorCode error, const std::string& error_details) {
  QUICHE_DCHECK(IsHandshakeConfirmed());
  QUICHE_DCHECK(!error_details.empty());
  if (!IsHandshakeConfirmed()) {
    return nullptr;
  }
  return packet_creator_.SerializeLargePacketNumberConnectionClosePacket(
      GetLargestAckedPacket(), error, error_details);
}
#undef ENDPOINT
} | #include "quiche/quic/core/quic_connection.h"
#include <errno.h>
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/congestion_control/loss_detection_interface.h"
#include "quiche/quic/core/congestion_control/send_algorithm_interface.h"
#include "quiche/quic/core/crypto/null_decrypter.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/frames/quic_connection_close_frame.h"
#include "quiche/quic/core/frames/quic_new_connection_id_frame.h"
#include "quiche/quic/core/frames/quic_path_response_frame.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/frames/quic_rst_stream_frame.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_packet_creator.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_path_validator.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_ip_address_family.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
#include "quiche/quic/test_tools/mock_connection_id_generator.h"
#include "quiche/quic/test_tools/mock_random.h"
#include "quiche/quic/test_tools/quic_coalesced_packet_peer.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_framer_peer.h"
#include "quiche/quic/test_tools/quic_packet_creator_peer.h"
#include "quiche/quic/test_tools/quic_path_validator_peer.h"
#include "quiche/quic/test_tools/quic_sent_packet_manager_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_data_producer.h"
#include "quiche/quic/test_tools/simple_session_notifier.h"
#include "quiche/common/simple_buffer_allocator.h"
using testing::_;
using testing::AnyNumber;
using testing::AtLeast;
using testing::DoAll;
using testing::DoDefault;
using testing::ElementsAre;
using testing::Ge;
using testing::IgnoreResult;
using testing::InSequence;
using testing::Invoke;
using testing::InvokeWithoutArgs;
using testing::Lt;
using testing::Ref;
using testing::Return;
using testing::SaveArg;
using testing::SetArgPointee;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
// Stream payloads used throughout the tests.
const char data1[] = "foo data";
const char data2[] = "bar data";
const bool kHasStopWaiting = true;
const int kDefaultRetransmissionTimeMs = 500;
// Fixed 32-byte diversification nonce (alternating 'a'/'b' pattern).
DiversificationNonce kTestDiversificationNonce = {
    'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a',
    'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b',
    'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b', 'a', 'b',
};
// Fixed 16-byte stateless reset token (0x50..0x5f).
const StatelessResetToken kTestStatelessResetToken{
    0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
    0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f};
// Default peer/self endpoints: IPv6 loopback with distinct ports.
const QuicSocketAddress kPeerAddress =
    QuicSocketAddress(QuicIpAddress::Loopback6(),
                      12345);
const QuicSocketAddress kSelfAddress =
    QuicSocketAddress(QuicIpAddress::Loopback6(),
                      443);
// A non-loopback address used as the server's preferred address in tests.
const QuicSocketAddress kServerPreferredAddress = QuicSocketAddress(
    []() {
      QuicIpAddress address;
      address.FromString("2604:31c0::");
      return address;
    }(),
    443);
// Returns the ID of the n-th client-initiated bidirectional stream for the
// given transport version; consecutive streams are spaced two IDs apart.
QuicStreamId GetNthClientInitiatedStreamId(int n,
                                           QuicTransportVersion version) {
  const QuicStreamId first_stream_id =
      QuicUtils::GetFirstBidirectionalStreamId(version,
                                               Perspective::IS_CLIENT);
  return first_stream_id + 2 * n;
}
// Maps an encryption level to the corresponding long-header packet type.
// ENCRYPTION_FORWARD_SECURE (and anything unexpected) has no long-header
// form: it trips a DCHECK and yields INVALID_PACKET_TYPE.
QuicLongHeaderType EncryptionlevelToLongHeaderType(EncryptionLevel level) {
  switch (level) {
    case ENCRYPTION_INITIAL:
      return INITIAL;
    case ENCRYPTION_HANDSHAKE:
      return HANDSHAKE;
    case ENCRYPTION_ZERO_RTT:
      return ZERO_RTT_PROTECTED;
    default:
      break;
  }
  QUICHE_DCHECK(false);
  return INVALID_PACKET_TYPE;
}
// TaggingEncrypter variant whose confidentiality limit (max packets that may
// be encrypted with one key) is configurable, for key-update tests.
class TaggingEncrypterWithConfidentialityLimit : public TaggingEncrypter {
 public:
  TaggingEncrypterWithConfidentialityLimit(
      uint8_t tag, QuicPacketCount confidentiality_limit)
      : TaggingEncrypter(tag), confidentiality_limit_(confidentiality_limit) {}
  QuicPacketCount GetConfidentialityLimit() const override {
    return confidentiality_limit_;
  }
 private:
  QuicPacketCount confidentiality_limit_;
};
// StrictTaggingDecrypter variant whose integrity limit (max failed
// decryptions tolerated) is configurable, for AEAD-limit tests.
class StrictTaggingDecrypterWithIntegrityLimit : public StrictTaggingDecrypter {
 public:
  StrictTaggingDecrypterWithIntegrityLimit(uint8_t tag,
                                           QuicPacketCount integrity_limit)
      : StrictTaggingDecrypter(tag), integrity_limit_(integrity_limit) {}
  QuicPacketCount GetIntegrityLimit() const override {
    return integrity_limit_;
  }
 private:
  QuicPacketCount integrity_limit_;
};
// Helper that backs the connection under test with a mock clock and mock
// RNG. Advances the clock by one second at construction so test timestamps
// start away from QuicTime::Zero().
class TestConnectionHelper : public QuicConnectionHelperInterface {
 public:
  TestConnectionHelper(MockClock* clock, MockRandom* random_generator)
      : clock_(clock), random_generator_(random_generator) {
    clock_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
  }
  TestConnectionHelper(const TestConnectionHelper&) = delete;
  TestConnectionHelper& operator=(const TestConnectionHelper&) = delete;
  const QuicClock* GetClock() const override { return clock_; }
  QuicRandom* GetRandomGenerator() override { return random_generator_; }
  quiche::QuicheBufferAllocator* GetStreamSendBufferAllocator() override {
    return &buffer_allocator_;
  }
 private:
  MockClock* clock_;          // Not owned.
  MockRandom* random_generator_;  // Not owned.
  quiche::SimpleBufferAllocator buffer_allocator_;
};
// QuicConnection subclass for tests: wires in a tagging encrypter and a
// SimpleDataProducer, mocks OnSerializedPacket so tests can intercept
// serialized packets, and exposes internal alarms/state through
// QuicConnectionPeer.
class TestConnection : public QuicConnection {
 public:
  TestConnection(QuicConnectionId connection_id,
                 QuicSocketAddress initial_self_address,
                 QuicSocketAddress initial_peer_address,
                 TestConnectionHelper* helper, TestAlarmFactory* alarm_factory,
                 TestPacketWriter* writer, Perspective perspective,
                 ParsedQuicVersion version,
                 ConnectionIdGeneratorInterface& generator)
      : QuicConnection(connection_id, initial_self_address,
                       initial_peer_address, helper, alarm_factory, writer,
                       false, perspective,
                       SupportedVersions(version), generator),
        notifier_(nullptr) {
    writer->set_perspective(perspective);
    SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                 std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
    SetDataProducer(&producer_);
    // By default serialized packets flow through to the real connection;
    // individual tests can override the mock to intercept them.
    ON_CALL(*this, OnSerializedPacket(_))
        .WillByDefault([this](SerializedPacket packet) {
          QuicConnection::OnSerializedPacket(std::move(packet));
        });
  }
  TestConnection(const TestConnection&) = delete;
  TestConnection& operator=(const TestConnection&) = delete;
  MOCK_METHOD(void, OnSerializedPacket, (SerializedPacket packet), (override));
  // Counts linkable vs. unlinkable validated client migrations in addition
  // to the base-class behavior.
  void OnEffectivePeerMigrationValidated(bool is_migration_linkable) override {
    QuicConnection::OnEffectivePeerMigrationValidated(is_migration_linkable);
    if (is_migration_linkable) {
      num_linkable_client_migration_++;
    } else {
      num_unlinkable_client_migration_++;
    }
  }
  uint32_t num_unlinkable_client_migration() const {
    return num_unlinkable_client_migration_;
  }
  uint32_t num_linkable_client_migration() const {
    return num_linkable_client_migration_;
  }
  void SetSendAlgorithm(SendAlgorithmInterface* send_algorithm) {
    QuicConnectionPeer::SetSendAlgorithm(this, send_algorithm);
  }
  void SetLossAlgorithm(LossDetectionInterface* loss_algorithm) {
    QuicConnectionPeer::SetLossAlgorithm(this, loss_algorithm);
  }
  // Encrypts `packet` at ENCRYPTION_INITIAL and injects the resulting
  // SerializedPacket into the connection as if it were being sent.
  void SendPacket(EncryptionLevel , uint64_t packet_number,
                  std::unique_ptr<QuicPacket> packet,
                  HasRetransmittableData retransmittable, bool has_ack,
                  bool has_pending_frames) {
    ScopedPacketFlusher flusher(this);
    char buffer[kMaxOutgoingPacketSize];
    size_t encrypted_length =
        QuicConnectionPeer::GetFramer(this)->EncryptPayload(
            ENCRYPTION_INITIAL, QuicPacketNumber(packet_number), *packet,
            buffer, kMaxOutgoingPacketSize);
    SerializedPacket serialized_packet(
        QuicPacketNumber(packet_number), PACKET_4BYTE_PACKET_NUMBER, buffer,
        encrypted_length, has_ack, has_pending_frames);
    serialized_packet.peer_address = kPeerAddress;
    if (retransmittable == HAS_RETRANSMITTABLE_DATA) {
      serialized_packet.retransmittable_frames.push_back(
          QuicFrame(QuicPingFrame()));
    }
    OnSerializedPacket(std::move(serialized_packet));
  }
  QuicConsumedData SaveAndSendStreamData(QuicStreamId id,
                                         absl::string_view data,
                                         QuicStreamOffset offset,
                                         StreamSendingState state) {
    return SaveAndSendStreamData(id, data, offset, state, NOT_RETRANSMISSION);
  }
  // Records `data` in the producer, then sends it either through the
  // session notifier (when one is attached) or directly.
  QuicConsumedData SaveAndSendStreamData(QuicStreamId id,
                                         absl::string_view data,
                                         QuicStreamOffset offset,
                                         StreamSendingState state,
                                         TransmissionType transmission_type) {
    ScopedPacketFlusher flusher(this);
    producer_.SaveStreamData(id, data);
    if (notifier_ != nullptr) {
      return notifier_->WriteOrBufferData(id, data.length(), state,
                                          transmission_type);
    }
    return QuicConnection::SendStreamData(id, data.length(), offset, state);
  }
  // Like SaveAndSendStreamData, but first bumps the encryption level to
  // forward-secure (completing the handshake) for non-crypto streams.
  QuicConsumedData SendStreamDataWithString(QuicStreamId id,
                                            absl::string_view data,
                                            QuicStreamOffset offset,
                                            StreamSendingState state) {
    ScopedPacketFlusher flusher(this);
    if (!QuicUtils::IsCryptoStreamId(transport_version(), id) &&
        this->encryption_level() == ENCRYPTION_INITIAL) {
      this->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
      if (perspective() == Perspective::IS_CLIENT && !IsHandshakeComplete()) {
        OnHandshakeComplete();
      }
      if (version().SupportsAntiAmplificationLimit()) {
        QuicConnectionPeer::SetAddressValidated(this);
      }
    }
    return SaveAndSendStreamData(id, data, offset, state);
  }
  // Sends stream data at an explicit (>= 0-RTT) encryption level.
  QuicConsumedData SendApplicationDataAtLevel(EncryptionLevel encryption_level,
                                              QuicStreamId id,
                                              absl::string_view data,
                                              QuicStreamOffset offset,
                                              StreamSendingState state) {
    ScopedPacketFlusher flusher(this);
    QUICHE_DCHECK(encryption_level >= ENCRYPTION_ZERO_RTT);
    SetEncrypter(encryption_level,
                 std::make_unique<TaggingEncrypter>(encryption_level));
    SetDefaultEncryptionLevel(encryption_level);
    return SaveAndSendStreamData(id, data, offset, state);
  }
  QuicConsumedData SendStreamData3() {
    return SendStreamDataWithString(
        GetNthClientInitiatedStreamId(1, transport_version()), "food", 0,
        NO_FIN);
  }
  QuicConsumedData SendStreamData5() {
    return SendStreamDataWithString(
        GetNthClientInitiatedStreamId(2, transport_version()), "food2", 0,
        NO_FIN);
  }
  QuicConsumedData EnsureWritableAndSendStreamData5() {
    EXPECT_TRUE(CanWrite(HAS_RETRANSMITTABLE_DATA));
    return SendStreamData5();
  }
  // Sends a short CHLO-like crypto payload at ENCRYPTION_INITIAL.
  QuicConsumedData SendCryptoStreamData() {
    QuicStreamOffset offset = 0;
    absl::string_view data("chlo");
    if (!QuicVersionUsesCryptoFrames(transport_version())) {
      return SendCryptoDataWithString(data, offset);
    }
    producer_.SaveCryptoData(ENCRYPTION_INITIAL, offset, data);
    size_t bytes_written;
    if (notifier_) {
      bytes_written =
          notifier_->WriteCryptoData(ENCRYPTION_INITIAL, data.length(), offset);
    } else {
      bytes_written = QuicConnection::SendCryptoData(ENCRYPTION_INITIAL,
                                                     data.length(), offset);
    }
    return QuicConsumedData(bytes_written, false);
  }
  QuicConsumedData SendCryptoDataWithString(absl::string_view data,
                                            QuicStreamOffset offset) {
    return SendCryptoDataWithString(data, offset, ENCRYPTION_INITIAL);
  }
  // Sends crypto data, using CRYPTO frames or the crypto stream depending
  // on the transport version.
  QuicConsumedData SendCryptoDataWithString(absl::string_view data,
                                            QuicStreamOffset offset,
                                            EncryptionLevel encryption_level) {
    if (!QuicVersionUsesCryptoFrames(transport_version())) {
      return SendStreamDataWithString(
          QuicUtils::GetCryptoStreamId(transport_version()), data, offset,
          NO_FIN);
    }
    producer_.SaveCryptoData(encryption_level, offset, data);
    size_t bytes_written;
    if (notifier_) {
      bytes_written =
          notifier_->WriteCryptoData(encryption_level, data.length(), offset);
    } else {
      bytes_written = QuicConnection::SendCryptoData(encryption_level,
                                                     data.length(), offset);
    }
    return QuicConsumedData(bytes_written, false);
  }
  void set_version(ParsedQuicVersion version) {
    QuicConnectionPeer::GetFramer(this)->set_version(version);
  }
  void SetSupportedVersions(const ParsedQuicVersionVector& versions) {
    QuicConnectionPeer::GetFramer(this)->SetSupportedVersions(versions);
    writer()->SetSupportedVersions(versions);
  }
  // Flips the connection's perspective mid-test, resetting the state that
  // depends on it (connection ID managers, obfuscators, writer).
  void set_perspective(Perspective perspective) {
    writer()->set_perspective(perspective);
    QuicConnectionPeer::ResetPeerIssuedConnectionIdManager(this);
    QuicConnectionPeer::SetPerspective(this, perspective);
    QuicSentPacketManagerPeer::SetPerspective(
        QuicConnectionPeer::GetSentPacketManager(this), perspective);
    QuicConnectionPeer::GetFramer(this)->SetInitialObfuscators(
        TestConnectionId());
  }
  // Enables server-side path MTU discovery, via config negotiation or the
  // kMTUH connection option depending on the reloadable flag.
  void EnablePathMtuDiscovery(MockSendAlgorithm* send_algorithm) {
    ASSERT_EQ(Perspective::IS_SERVER, perspective());
    if (GetQuicReloadableFlag(quic_enable_mtu_discovery_at_server)) {
      OnConfigNegotiated();
    } else {
      QuicConfig config;
      QuicTagVector connection_options;
      connection_options.push_back(kMTUH);
      config.SetInitialReceivedConnectionOptions(connection_options);
      EXPECT_CALL(*send_algorithm, SetFromConfig(_, _));
      SetFromConfig(config);
    }
    EXPECT_CALL(*send_algorithm, PacingRate(_))
        .WillRepeatedly(Return(QuicBandwidth::Infinite()));
  }
  // Accessors for the connection's internal alarms (via QuicConnectionPeer),
  // downcast to TestAlarm so tests can Fire() them directly.
  TestAlarmFactory::TestAlarm* GetAckAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetAckAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetPingAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetPingAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetRetransmissionAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetRetransmissionAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetSendAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetSendAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetTimeoutAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetIdleNetworkDetectorAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetMtuDiscoveryAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetMtuDiscoveryAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetProcessUndecryptablePacketsAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetProcessUndecryptablePacketsAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetDiscardPreviousOneRttKeysAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetDiscardPreviousOneRttKeysAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetDiscardZeroRttDecryptionKeysAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetDiscardZeroRttDecryptionKeysAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetBlackholeDetectorAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetBlackholeDetectorAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetRetirePeerIssuedConnectionIdAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        QuicConnectionPeer::GetRetirePeerIssuedConnectionIdAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetRetireSelfIssuedConnectionIdAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        QuicConnectionPeer::GetRetireSelfIssuedConnectionIdAlarm(this));
  }
  TestAlarmFactory::TestAlarm* GetMultiPortProbingAlarm() {
    return reinterpret_cast<TestAlarmFactory::TestAlarm*>(
        &QuicConnectionPeer::GetMultiPortProbingAlarm(this));
  }
  // Simulates expiry of the path-degrading deadline.
  void PathDegradingTimeout() {
    QUICHE_DCHECK(PathDegradingDetectionInProgress());
    GetBlackholeDetectorAlarm()->Fire();
  }
  bool PathDegradingDetectionInProgress() {
    return QuicConnectionPeer::GetPathDegradingDeadline(this).IsInitialized();
  }
  bool BlackholeDetectionInProgress() {
    return QuicConnectionPeer::GetBlackholeDetectionDeadline(this)
        .IsInitialized();
  }
  bool PathMtuReductionDetectionInProgress() {
    return QuicConnectionPeer::GetPathMtuReductionDetectionDeadline(this)
        .IsInitialized();
  }
  QuicByteCount GetBytesInFlight() {
    return QuicConnectionPeer::GetSentPacketManager(this)->GetBytesInFlight();
  }
  void set_notifier(SimpleSessionNotifier* notifier) { notifier_ = notifier; }
  // Makes the next received packet report the given effective peer address
  // (consumed once), to simulate peer address changes.
  void ReturnEffectivePeerAddressForNextPacket(const QuicSocketAddress& addr) {
    next_effective_peer_addr_ = std::make_unique<QuicSocketAddress>(addr);
  }
  // Records the default-path self address at send time, so tests can verify
  // which path a packet went out on.
  void SendOrQueuePacket(SerializedPacket packet) override {
    QuicConnection::SendOrQueuePacket(std::move(packet));
    self_address_on_default_path_while_sending_packet_ = self_address();
  }
  QuicSocketAddress self_address_on_default_path_while_sending_packet() {
    return self_address_on_default_path_while_sending_packet_;
  }
  SimpleDataProducer* producer() { return &producer_; }
  using QuicConnection::active_effective_peer_migration_type;
  using QuicConnection::IsCurrentPacketConnectivityProbing;
  using QuicConnection::SelectMutualVersion;
  using QuicConnection::set_defer_send_in_response_to_packets;
 protected:
  QuicSocketAddress GetEffectivePeerAddressFromCurrentPacket() const override {
    if (next_effective_peer_addr_) {
      // Consume the injected address: it applies to exactly one packet.
      return *std::move(next_effective_peer_addr_);
    }
    return QuicConnection::GetEffectivePeerAddressFromCurrentPacket();
  }
 private:
  TestPacketWriter* writer() {
    return static_cast<TestPacketWriter*>(QuicConnection::writer());
  }
  SimpleDataProducer producer_;
  SimpleSessionNotifier* notifier_;  // Not owned; may be null.
  std::unique_ptr<QuicSocketAddress> next_effective_peer_addr_;
  QuicSocketAddress self_address_on_default_path_while_sending_packet_;
  uint32_t num_unlinkable_client_migration_ = 0;
  uint32_t num_linkable_client_migration_ = 0;
};
// Whether the connection under test defers ACKs in response to packets.
enum class AckResponse { kDefer, kImmediate };
// Parameterizes the test suite over QUIC version x ACK-response mode.
struct TestParams {
  TestParams(ParsedQuicVersion version, AckResponse ack_response)
      : version(version), ack_response(ack_response) {}
  ParsedQuicVersion version;
  AckResponse ack_response;
};
// Used by gtest to label parameterized test instances.
std::string PrintToString(const TestParams& p) {
  return absl::StrCat(
      ParsedQuicVersionToString(p.version), "_",
      (p.ack_response == AckResponse::kDefer ? "defer" : "immediate"));
}
std::vector<TestParams> GetTestParams() {
QuicFlagSaver flags;
std::vector<TestParams> params;
ParsedQuicVersionVector all_supported_versions = AllSupportedVersions();
for (size_t i = 0; i < all_supported_versions.size(); ++i) {
for (AckResponse ack_response :
{AckResponse::kDefer, AckResponse::kImmediate}) {
params.push_back(TestParams(all_supported_versions[i], ack_response));
}
}
return params;
}
class QuicConnectionTest : public QuicTestWithParam<TestParams> {
public:
void SaveConnectionCloseFrame(const QuicConnectionCloseFrame& frame,
ConnectionCloseSource ) {
saved_connection_close_frame_ = frame;
connection_close_frame_count_++;
}
protected:
QuicConnectionTest()
: connection_id_(TestConnectionId()),
framer_(SupportedVersions(version()), QuicTime::Zero(),
Perspective::IS_CLIENT, connection_id_.length()),
send_algorithm_(new StrictMock<MockSendAlgorithm>),
loss_algorithm_(new MockLossAlgorithm()),
helper_(new TestConnectionHelper(&clock_, &random_generator_)),
alarm_factory_(new TestAlarmFactory()),
peer_framer_(SupportedVersions(version()), QuicTime::Zero(),
Perspective::IS_SERVER, connection_id_.length()),
peer_creator_(connection_id_, &peer_framer_,
nullptr),
writer_(
new TestPacketWriter(version(), &clock_, Perspective::IS_CLIENT)),
connection_(connection_id_, kSelfAddress, kPeerAddress, helper_.get(),
alarm_factory_.get(), writer_.get(), Perspective::IS_CLIENT,
version(), connection_id_generator_),
creator_(QuicConnectionPeer::GetPacketCreator(&connection_)),
manager_(QuicConnectionPeer::GetSentPacketManager(&connection_)),
frame1_(0, false, 0, absl::string_view(data1)),
frame2_(0, false, 3, absl::string_view(data2)),
crypto_frame_(ENCRYPTION_INITIAL, 0, absl::string_view(data1)),
packet_number_length_(PACKET_4BYTE_PACKET_NUMBER),
connection_id_included_(CONNECTION_ID_PRESENT),
notifier_(&connection_),
connection_close_frame_count_(0) {
QUIC_DVLOG(2) << "QuicConnectionTest(" << PrintToString(GetParam()) << ")";
connection_.set_defer_send_in_response_to_packets(GetParam().ack_response ==
AckResponse::kDefer);
framer_.SetInitialObfuscators(TestConnectionId());
connection_.InstallInitialCrypters(TestConnectionId());
CrypterPair crypters;
CryptoUtils::CreateInitialObfuscators(Perspective::IS_SERVER, version(),
TestConnectionId(), &crypters);
peer_creator_.SetEncrypter(ENCRYPTION_INITIAL,
std::move(crypters.encrypter));
if (version().KnowsWhichDecrypterToUse()) {
peer_framer_.InstallDecrypter(ENCRYPTION_INITIAL,
std::move(crypters.decrypter));
} else {
peer_framer_.SetDecrypter(ENCRYPTION_INITIAL,
std::move(crypters.decrypter));
}
for (EncryptionLevel level :
{ENCRYPTION_ZERO_RTT, ENCRYPTION_FORWARD_SECURE}) {
peer_creator_.SetEncrypter(level,
std::make_unique<TaggingEncrypter>(level));
}
QuicFramerPeer::SetLastSerializedServerConnectionId(
QuicConnectionPeer::GetFramer(&connection_), connection_id_);
QuicFramerPeer::SetLastWrittenPacketNumberLength(
QuicConnectionPeer::GetFramer(&connection_), packet_number_length_);
QuicStreamId stream_id;
if (QuicVersionUsesCryptoFrames(version().transport_version)) {
stream_id = QuicUtils::GetFirstBidirectionalStreamId(
version().transport_version, Perspective::IS_CLIENT);
} else {
stream_id = QuicUtils::GetCryptoStreamId(version().transport_version);
}
frame1_.stream_id = stream_id;
frame2_.stream_id = stream_id;
connection_.set_visitor(&visitor_);
connection_.SetSessionNotifier(¬ifier_);
connection_.set_notifier(¬ifier_);
connection_.SetSendAlgorithm(send_algorithm_);
connection_.SetLossAlgorithm(loss_algorithm_.get());
EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
.Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, OnPacketNeutered(_)).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
.WillRepeatedly(Return(kDefaultTCPMSS));
EXPECT_CALL(*send_algorithm_, PacingRate(_))
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
.Times(AnyNumber())
.WillRepeatedly(Return(QuicBandwidth::Zero()));
EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_))
.Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, GetCongestionControlType())
.Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
EXPECT_CALL(*send_algorithm_, GetCongestionControlType())
.Times(AnyNumber());
EXPECT_CALL(visitor_, WillingAndAbleToWrite())
.WillRepeatedly(
Invoke(¬ifier_, &SimpleSessionNotifier::WillingToWrite));
EXPECT_CALL(visitor_, OnPacketDecrypted(_)).Times(AnyNumber());
EXPECT_CALL(visitor_, OnCanWrite())
.WillRepeatedly(Invoke(¬ifier_, &SimpleSessionNotifier::OnCanWrite));
EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
.WillRepeatedly(Return(false));
EXPECT_CALL(visitor_, OnCongestionWindowChange(_)).Times(AnyNumber());
EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(AnyNumber());
EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_)).Times(AnyNumber());
EXPECT_CALL(visitor_, MaybeBundleOpportunistically()).Times(AnyNumber());
EXPECT_CALL(visitor_, GetFlowControlSendWindowSize(_)).Times(AnyNumber());
EXPECT_CALL(visitor_, OnOneRttPacketAcknowledged())
.Times(testing::AtMost(1));
EXPECT_CALL(*loss_algorithm_, GetLossTimeout())
.WillRepeatedly(Return(QuicTime::Zero()));
EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
.Times(AnyNumber());
EXPECT_CALL(visitor_, GetHandshakeState())
.WillRepeatedly(Return(HANDSHAKE_START));
if (connection_.version().KnowsWhichDecrypterToUse()) {
connection_.InstallDecrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
} else {
connection_.SetAlternativeDecrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE),
false);
}
peer_creator_.SetDefaultPeerAddress(kSelfAddress);
}
  // Non-copyable: the fixture owns a live connection plus mock wiring that
  // must not be duplicated.
  QuicConnectionTest(const QuicConnectionTest&) = delete;
  QuicConnectionTest& operator=(const QuicConnectionTest&) = delete;
  // QUIC version under test, taken from the parameterized test instance.
  ParsedQuicVersion version() { return GetParam().version; }
  // Registers the client-chosen connection ID on the connection and tells the
  // test writer's framer the ID length so inbound packets parse correctly.
  void SetClientConnectionId(const QuicConnectionId& client_connection_id) {
    connection_.set_client_connection_id(client_connection_id);
    writer_->framer()->framer()->SetExpectedClientConnectionIdLength(
        client_connection_id.length());
  }
void SetDecrypter(EncryptionLevel level,
std::unique_ptr<QuicDecrypter> decrypter) {
if (connection_.version().KnowsWhichDecrypterToUse()) {
connection_.InstallDecrypter(level, std::move(decrypter));
} else {
connection_.SetAlternativeDecrypter(level, std::move(decrypter), false);
}
}
  // Delivers a forward-secure stream-data packet numbered `number` to the
  // connection, expecting exactly one OnStreamFrame callback, then fires the
  // send alarm so any deferred ack/response is flushed within the call.
  void ProcessPacket(uint64_t number) {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacket(number);
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
  }
  // Hands an already-encrypted packet to the connection with explicit
  // self/peer addresses, then flushes any pending send alarm so deferred
  // writes happen before the helper returns.
  void ProcessReceivedPacket(const QuicSocketAddress& self_address,
                             const QuicSocketAddress& peer_address,
                             const QuicReceivedPacket& packet) {
    connection_.ProcessUdpPacket(self_address, peer_address, packet);
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
  }
QuicFrame MakeCryptoFrame() const {
if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
return QuicFrame(new QuicCryptoFrame(crypto_frame_));
}
return QuicFrame(QuicStreamFrame(
QuicUtils::GetCryptoStreamId(connection_.transport_version()), false,
0u, absl::string_view()));
}
  // Convenience wrapper: delivers `frame` from the default peer to the
  // default self address at the forward-secure encryption level.
  void ProcessFramePacket(QuicFrame frame) {
    ProcessFramePacketWithAddresses(frame, kSelfAddress, kPeerAddress,
                                    ENCRYPTION_FORWARD_SECURE);
  }
void ProcessFramePacketWithAddresses(QuicFrame frame,
QuicSocketAddress self_address,
QuicSocketAddress peer_address,
EncryptionLevel level) {
QuicFrames frames;
frames.push_back(QuicFrame(frame));
return ProcessFramesPacketWithAddresses(frames, self_address, peer_address,
level);
}
  // Serializes `frames` into an encrypted QuicReceivedPacket using the peer
  // creator/framer, as if the remote endpoint had sent it. The caller owns
  // `buffer`, which must outlive the returned packet (the packet references
  // it rather than copying).
  std::unique_ptr<QuicReceivedPacket> ConstructPacket(QuicFrames frames,
                                                      EncryptionLevel level,
                                                      char* buffer,
                                                      size_t buffer_len) {
    QUICHE_DCHECK(peer_framer_.HasEncrypterOfEncryptionLevel(level));
    peer_creator_.set_encryption_level(level);
    // Servers include the version only on long-header (pre-forward-secure)
    // packets.
    QuicPacketCreatorPeer::SetSendVersionInPacket(
        &peer_creator_,
        level < ENCRYPTION_FORWARD_SECURE &&
            connection_.perspective() == Perspective::IS_SERVER);
    SerializedPacket serialized_packet =
        QuicPacketCreatorPeer::SerializeAllFrames(&peer_creator_, frames,
                                                  buffer, buffer_len);
    return std::make_unique<QuicReceivedPacket>(
        serialized_packet.encrypted_buffer, serialized_packet.encrypted_length,
        clock_.Now());
  }
  // Builds an encrypted packet containing `frames` (via ConstructPacket) and
  // feeds it to the connection with the given addresses, then flushes any
  // pending send alarm so deferred responses go out before returning.
  void ProcessFramesPacketWithAddresses(QuicFrames frames,
                                        QuicSocketAddress self_address,
                                        QuicSocketAddress peer_address,
                                        EncryptionLevel level) {
    char buffer[kMaxOutgoingPacketSize];
    connection_.ProcessUdpPacket(
        self_address, peer_address,
        *ConstructPacket(std::move(frames), level, buffer,
                         kMaxOutgoingPacketSize));
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
  }
  // Hand-builds and hand-encrypts an ENCRYPTION_INITIAL packet carrying
  // `frame` with the peer framer, bypassing the packet creator's normal
  // serialization path, and delivers it to the connection. Useful for
  // injecting packets the creator would refuse to build.
  void ForceProcessFramePacket(QuicFrame frame) {
    QuicFrames frames;
    frames.push_back(QuicFrame(frame));
    bool send_version = connection_.perspective() == Perspective::IS_SERVER;
    if (connection_.version().KnowsWhichDecrypterToUse()) {
      send_version = true;
    }
    QuicPacketCreatorPeer::SetSendVersionInPacket(&peer_creator_, send_version);
    QuicPacketHeader header;
    QuicPacketCreatorPeer::FillPacketHeader(&peer_creator_, &header);
    char encrypted_buffer[kMaxOutgoingPacketSize];
    // Serialize plaintext first, then encrypt in place starting at the
    // payload offset for this header.
    size_t length = peer_framer_.BuildDataPacket(
        header, frames, encrypted_buffer, kMaxOutgoingPacketSize,
        ENCRYPTION_INITIAL);
    QUICHE_DCHECK_GT(length, 0u);
    const size_t encrypted_length = peer_framer_.EncryptInPlace(
        ENCRYPTION_INITIAL, header.packet_number,
        GetStartOfEncryptedData(peer_framer_.version().transport_version,
                                header),
        length, kMaxOutgoingPacketSize, encrypted_buffer);
    QUICHE_DCHECK_GT(encrypted_length, 0u);
    connection_.ProcessUdpPacket(
        kSelfAddress, kPeerAddress,
        QuicReceivedPacket(encrypted_buffer, encrypted_length, clock_.Now()));
  }
  // Single-frame convenience: deliver at `level` without ECN marking.
  // Returns the encrypted packet length.
  size_t ProcessFramePacketAtLevel(uint64_t number, QuicFrame frame,
                                   EncryptionLevel level) {
    return ProcessFramePacketAtLevelWithEcn(number, frame, level, ECN_NOT_ECT);
  }
  // Single-frame convenience with an explicit ECN codepoint.
  size_t ProcessFramePacketAtLevelWithEcn(uint64_t number, QuicFrame frame,
                                          EncryptionLevel level,
                                          QuicEcnCodepoint ecn_codepoint) {
    QuicFrames frames;
    frames.push_back(frame);
    return ProcessFramesPacketAtLevelWithEcn(number, frames, level,
                                             ecn_codepoint);
  }
  // Multi-frame convenience: deliver at `level` without ECN marking.
  size_t ProcessFramesPacketAtLevel(uint64_t number, QuicFrames frames,
                                    EncryptionLevel level) {
    return ProcessFramesPacketAtLevelWithEcn(number, frames, level,
                                             ECN_NOT_ECT);
  }
  // Builds, encrypts, and delivers a packet with the given number, frames,
  // encryption level, and ECN codepoint. For post-initial levels this also
  // installs matching tagging encrypter/decrypter pairs on both endpoints so
  // the packet round-trips. Returns the encrypted length.
  size_t ProcessFramesPacketAtLevelWithEcn(uint64_t number,
                                           const QuicFrames& frames,
                                           EncryptionLevel level,
                                           QuicEcnCodepoint ecn_codepoint) {
    QuicPacketHeader header = ConstructPacketHeader(number, level);
    peer_creator_.set_encryption_level(level);
    if (level > ENCRYPTION_INITIAL) {
      peer_framer_.SetEncrypter(level,
                                std::make_unique<TaggingEncrypter>(level));
      if (connection_.version().KnowsWhichDecrypterToUse()) {
        connection_.InstallDecrypter(
            level, std::make_unique<StrictTaggingDecrypter>(level));
      } else {
        connection_.SetAlternativeDecrypter(
            level, std::make_unique<StrictTaggingDecrypter>(level), false);
      }
    }
    std::unique_ptr<QuicPacket> packet(ConstructPacket(header, frames));
    char buffer[kMaxOutgoingPacketSize];
    size_t encrypted_length =
        peer_framer_.EncryptPayload(level, QuicPacketNumber(number), *packet,
                                    buffer, kMaxOutgoingPacketSize);
    connection_.ProcessUdpPacket(
        kSelfAddress, kPeerAddress,
        QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false, 0,
                           true, nullptr, 0, false, ecn_codepoint));
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
    return encrypted_length;
  }
  // Describes one constituent packet of a coalesced datagram: its packet
  // number, payload frames, and encryption level.
  struct PacketInfo {
    PacketInfo(uint64_t packet_number, QuicFrames frames, EncryptionLevel level)
        : packet_number(packet_number), frames(frames), level(level) {}
    uint64_t packet_number;
    QuicFrames frames;
    EncryptionLevel level;
  };
  // Delivers a coalesced datagram built from `packets` with no ECN marking.
  size_t ProcessCoalescedPacket(std::vector<PacketInfo> packets) {
    return ProcessCoalescedPacket(packets, ECN_NOT_ECT);
  }
  // Encrypts each PacketInfo individually, concatenates the results into one
  // UDP datagram, and delivers it. If any constituent is at INITIAL level,
  // the datagram is padded out to the maximum size (INITIAL packets must
  // arrive in full-size datagrams). Returns the coalesced payload size
  // (excluding any trailing padding).
  size_t ProcessCoalescedPacket(std::vector<PacketInfo> packets,
                                QuicEcnCodepoint ecn_codepoint) {
    char coalesced_buffer[kMaxOutgoingPacketSize];
    size_t coalesced_size = 0;
    bool contains_initial = false;
    for (const auto& packet : packets) {
      QuicPacketHeader header =
          ConstructPacketHeader(packet.packet_number, packet.level);
      peer_creator_.set_encryption_level(packet.level);
      if (packet.level == ENCRYPTION_INITIAL) {
        contains_initial = true;
      }
      EncryptionLevel level =
          QuicPacketCreatorPeer::GetEncryptionLevel(&peer_creator_);
      if (level > ENCRYPTION_INITIAL) {
        // Install matching tagging crypters so the connection can decrypt
        // what the peer framer encrypts at this level.
        peer_framer_.SetEncrypter(level,
                                  std::make_unique<TaggingEncrypter>(level));
        if (connection_.version().KnowsWhichDecrypterToUse()) {
          connection_.InstallDecrypter(
              level, std::make_unique<StrictTaggingDecrypter>(level));
        } else {
          connection_.SetDecrypter(
              level, std::make_unique<StrictTaggingDecrypter>(level));
        }
      }
      std::unique_ptr<QuicPacket> constructed_packet(
          ConstructPacket(header, packet.frames));
      char buffer[kMaxOutgoingPacketSize];
      size_t encrypted_length = peer_framer_.EncryptPayload(
          packet.level, QuicPacketNumber(packet.packet_number),
          *constructed_packet, buffer, kMaxOutgoingPacketSize);
      QUICHE_DCHECK_LE(coalesced_size + encrypted_length,
                       kMaxOutgoingPacketSize);
      memcpy(coalesced_buffer + coalesced_size, buffer, encrypted_length);
      coalesced_size += encrypted_length;
    }
    if (contains_initial) {
      // Pad the datagram: INITIAL packets must be carried in full-size
      // datagrams per the anti-amplification rules.
      memset(coalesced_buffer + coalesced_size, '0',
             kMaxOutgoingPacketSize - coalesced_size);
    }
    connection_.ProcessUdpPacket(
        kSelfAddress, kPeerAddress,
        QuicReceivedPacket(coalesced_buffer, coalesced_size, clock_.Now(),
                           false, 0, true, nullptr, 0, false, ecn_codepoint));
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
    return coalesced_size;
  }
  // Convenience overloads that deliver a forward-secure data packet with no
  // stop-waiting frame; all funnel into ProcessDataPacketAtLevel(uint64_t).
  size_t ProcessDataPacket(uint64_t number) {
    return ProcessDataPacketAtLevel(number, false, ENCRYPTION_FORWARD_SECURE);
  }
  size_t ProcessDataPacket(QuicPacketNumber packet_number) {
    return ProcessDataPacketAtLevel(packet_number, false,
                                    ENCRYPTION_FORWARD_SECURE);
  }
  size_t ProcessDataPacketAtLevel(QuicPacketNumber packet_number,
                                  bool has_stop_waiting,
                                  EncryptionLevel level) {
    return ProcessDataPacketAtLevel(packet_number.ToUint64(), has_stop_waiting,
                                    level);
  }
  // Delivers a handshake packet at `level` containing crypto data (CRYPTO
  // frame or legacy crypto-stream frame depending on version). INITIAL
  // packets get full padding appended. Returns the encrypted length.
  size_t ProcessCryptoPacketAtLevel(uint64_t number, EncryptionLevel level) {
    QuicPacketHeader header = ConstructPacketHeader(number, level);
    QuicFrames frames;
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      frames.push_back(QuicFrame(&crypto_frame_));
    } else {
      frames.push_back(QuicFrame(frame1_));
    }
    if (level == ENCRYPTION_INITIAL) {
      // -1 pads the packet to its maximum length.
      frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
    }
    std::unique_ptr<QuicPacket> packet = ConstructPacket(header, frames);
    char buffer[kMaxOutgoingPacketSize];
    peer_creator_.set_encryption_level(level);
    size_t encrypted_length =
        peer_framer_.EncryptPayload(level, QuicPacketNumber(number), *packet,
                                    buffer, kMaxOutgoingPacketSize);
    connection_.ProcessUdpPacket(
        kSelfAddress, kPeerAddress,
        QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false));
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
    return encrypted_length;
  }
  // Builds a data packet (see ConstructDataPacket), encrypts it with the peer
  // framer at `level`, delivers it, and flushes the send alarm. Returns the
  // encrypted length.
  size_t ProcessDataPacketAtLevel(uint64_t number, bool has_stop_waiting,
                                  EncryptionLevel level) {
    std::unique_ptr<QuicPacket> packet(
        ConstructDataPacket(number, has_stop_waiting, level));
    char buffer[kMaxOutgoingPacketSize];
    peer_creator_.set_encryption_level(level);
    size_t encrypted_length =
        peer_framer_.EncryptPayload(level, QuicPacketNumber(number), *packet,
                                    buffer, kMaxOutgoingPacketSize);
    connection_.ProcessUdpPacket(
        kSelfAddress, kPeerAddress,
        QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false));
    if (connection_.GetSendAlarm()->IsSet()) {
      connection_.GetSendAlarm()->Fire();
    }
    return encrypted_length;
  }
  // Delivers a peer-initiated CONNECTION_CLOSE packet numbered `number` at
  // the forward-secure level. Note the receipt time is QuicTime::Zero(),
  // unlike the data-packet helpers which use clock_.Now().
  void ProcessClosePacket(uint64_t number) {
    std::unique_ptr<QuicPacket> packet(ConstructClosePacket(number));
    char buffer[kMaxOutgoingPacketSize];
    size_t encrypted_length = peer_framer_.EncryptPayload(
        ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(number), *packet, buffer,
        kMaxOutgoingPacketSize);
    connection_.ProcessUdpPacket(
        kSelfAddress, kPeerAddress,
        QuicReceivedPacket(buffer, encrypted_length, QuicTime::Zero(), false));
  }
  // Sends stream data from the connection under test and captures the size
  // of the last packet sent via a SaveArg on OnPacketSent. If `last_packet`
  // is non-null it receives the creator's current packet number. The trailing
  // EXPECT_CALL resets the expectation so later sends are unconstrained.
  QuicByteCount SendStreamDataToPeer(QuicStreamId id, absl::string_view data,
                                     QuicStreamOffset offset,
                                     StreamSendingState state,
                                     QuicPacketNumber* last_packet) {
    QuicByteCount packet_size = 0;
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .Times(AnyNumber())
        .WillRepeatedly(SaveArg<3>(&packet_size));
    connection_.SendStreamDataWithString(id, data, offset, state);
    if (last_packet != nullptr) {
      *last_packet = creator_->packet_number();
    }
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .Times(AnyNumber());
    return packet_size;
  }
  // Sends a standalone ACK, asserting exactly one packet goes out. The
  // flusher's scope forces the ack to be written when the flusher is
  // destroyed, before the expectation is relaxed for subsequent sends.
  void SendAckPacketToPeer() {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    {
      QuicConnection::ScopedPacketFlusher flusher(&connection_);
      connection_.SendAck();
    }
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .Times(AnyNumber());
  }
  // Buffers a RST_STREAM through the session notifier and informs the
  // connection of the reset.
  void SendRstStream(QuicStreamId id, QuicRstStreamErrorCode error,
                     QuicStreamOffset bytes_written) {
    notifier_.WriteOrBufferRstStream(id, error, bytes_written);
    connection_.OnStreamReset(id, error);
  }
  // Buffers a PING through the session notifier.
  void SendPing() { notifier_.WriteOrBufferPing(); }
  // Sends `message` as a DATAGRAM/MESSAGE frame at the forward-secure level
  // and returns the resulting send status.
  MessageStatus SendMessage(absl::string_view message) {
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    quiche::QuicheMemSlice slice(quiche::QuicheBuffer::Copy(
        connection_.helper()->GetStreamSendBufferAllocator(), message));
    return connection_.SendMessage(1, absl::MakeSpan(&slice, 1), false);
  }
  // Delivers an ACK frame as peer packet `packet_number`; the peer creator's
  // counter is rewound so the next serialized packet carries that number.
  void ProcessAckPacket(uint64_t packet_number, QuicAckFrame* frame) {
    if (packet_number > 1) {
      QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, packet_number - 1);
    } else {
      QuicPacketCreatorPeer::ClearPacketNumber(&peer_creator_);
    }
    ProcessFramePacket(QuicFrame(frame));
  }
  // Delivers an ACK frame using the peer creator's next packet number.
  void ProcessAckPacket(QuicAckFrame* frame) {
    ProcessFramePacket(QuicFrame(frame));
  }
  // Delivers a STOP_WAITING frame using the peer creator's next number.
  void ProcessStopWaitingPacket(QuicStopWaitingFrame frame) {
    ProcessFramePacket(QuicFrame(frame));
  }
  // Delivers a STOP_WAITING frame. NOTE: the EncryptionLevel parameter is
  // deliberately unnamed/ignored; the packet is always sent at
  // ENCRYPTION_ZERO_RTT regardless of the argument.
  size_t ProcessStopWaitingPacketAtLevel(uint64_t number,
                                         QuicStopWaitingFrame frame,
                                         EncryptionLevel ) {
    return ProcessFramePacketAtLevel(number, QuicFrame(frame),
                                     ENCRYPTION_ZERO_RTT);
  }
  // Delivers a GOAWAY frame from the peer.
  void ProcessGoAwayPacket(QuicGoAwayFrame* frame) {
    ProcessFramePacket(QuicFrame(frame));
  }
bool IsMissing(uint64_t number) {
return IsAwaitingPacket(connection_.ack_frame(), QuicPacketNumber(number),
QuicPacketNumber());
}
std::unique_ptr<QuicPacket> ConstructPacket(const QuicPacketHeader& header,
const QuicFrames& frames) {
auto packet = BuildUnsizedDataPacket(&peer_framer_, header, frames);
EXPECT_NE(nullptr, packet.get());
return packet;
}
  // Builds a packet header as the peer would for packet `number` at `level`:
  // long header with version for pre-forward-secure levels, and connection-ID
  // presence chosen by the peer framer's perspective.
  QuicPacketHeader ConstructPacketHeader(uint64_t number,
                                         EncryptionLevel level) {
    QuicPacketHeader header;
    if (level < ENCRYPTION_FORWARD_SECURE) {
      // Pre-forward-secure packets use IETF long headers carrying version
      // and type; INITIAL additionally carries a retry-token length.
      header.version_flag = true;
      header.form = IETF_QUIC_LONG_HEADER_PACKET;
      header.long_packet_type = EncryptionlevelToLongHeaderType(level);
      if (QuicVersionHasLongHeaderLengths(
              peer_framer_.version().transport_version)) {
        header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
        if (header.long_packet_type == INITIAL) {
          header.retry_token_length_length =
              quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
        }
      }
    }
    if (peer_framer_.perspective() == Perspective::IS_SERVER) {
      // Server-to-client: our connection ID is the source.
      header.source_connection_id = connection_id_;
      header.source_connection_id_included = connection_id_included_;
      header.destination_connection_id_included = CONNECTION_ID_ABSENT;
    } else {
      header.destination_connection_id = connection_id_;
      header.destination_connection_id_included = connection_id_included_;
    }
    if (peer_framer_.perspective() == Perspective::IS_SERVER) {
      // Second server pass: refine fields once the client connection ID and
      // long-header requirements are known (may override the defaults above).
      if (!connection_.client_connection_id().IsEmpty()) {
        header.destination_connection_id = connection_.client_connection_id();
        header.destination_connection_id_included = CONNECTION_ID_PRESENT;
      } else {
        header.destination_connection_id_included = CONNECTION_ID_ABSENT;
      }
      if (header.version_flag) {
        header.source_connection_id = connection_id_;
        header.source_connection_id_included = CONNECTION_ID_PRESENT;
        if (GetParam().version.handshake_protocol == PROTOCOL_QUIC_CRYPTO &&
            header.long_packet_type == ZERO_RTT_PROTECTED) {
          // QUIC-crypto 0-RTT packets carry a diversification nonce.
          header.nonce = &kTestDiversificationNonce;
        }
      }
    }
    header.packet_number_length = packet_number_length_;
    header.packet_number = QuicPacketNumber(number);
    return header;
  }
  // Builds a plaintext data packet for `number` at `level`. For IETF
  // versions at INITIAL/HANDSHAKE levels, stream frames are not allowed, so
  // a PING plus padding stands in; otherwise the test's stream frame (and an
  // optional stop-waiting frame) is used.
  std::unique_ptr<QuicPacket> ConstructDataPacket(uint64_t number,
                                                  bool has_stop_waiting,
                                                  EncryptionLevel level) {
    QuicPacketHeader header = ConstructPacketHeader(number, level);
    QuicFrames frames;
    if (VersionHasIetfQuicFrames(version().transport_version) &&
        (level == ENCRYPTION_INITIAL || level == ENCRYPTION_HANDSHAKE)) {
      frames.push_back(QuicFrame(QuicPingFrame()));
      frames.push_back(QuicFrame(QuicPaddingFrame(100)));
    } else {
      frames.push_back(QuicFrame(frame1_));
      if (has_stop_waiting) {
        frames.push_back(QuicFrame(stop_waiting_));
      }
    }
    return ConstructPacket(header, frames);
  }
  // Builds a connectivity-probing packet as the peer would send it: a
  // PATH_CHALLENGE probe for IETF versions, or a legacy gQUIC probing packet
  // otherwise (only valid while the gQUIC probing flag is off).
  std::unique_ptr<SerializedPacket> ConstructProbingPacket() {
    peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
    if (VersionHasIetfQuicFrames(version().transport_version)) {
      QuicPathFrameBuffer payload = {
          {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xfe}};
      return QuicPacketCreatorPeer::
          SerializePathChallengeConnectivityProbingPacket(&peer_creator_,
                                                          payload);
    }
    QUICHE_DCHECK(!GetQuicReloadableFlag(quic_ignore_gquic_probing));
    return QuicPacketCreatorPeer::SerializeConnectivityProbingPacket(
        &peer_creator_);
  }
  // Builds a plaintext CONNECTION_CLOSE packet (QUIC_PEER_GOING_AWAY) as the
  // peer would send it, numbered `number`.
  std::unique_ptr<QuicPacket> ConstructClosePacket(uint64_t number) {
    peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
    QuicPacketHeader header;
    if (peer_framer_.perspective() == Perspective::IS_SERVER) {
      header.source_connection_id = connection_id_;
      header.destination_connection_id_included = CONNECTION_ID_ABSENT;
    } else {
      header.destination_connection_id = connection_id_;
      header.destination_connection_id_included = CONNECTION_ID_ABSENT;
    }
    header.packet_number = QuicPacketNumber(number);
    QuicErrorCode kQuicErrorCode = QUIC_PEER_GOING_AWAY;
    QuicConnectionCloseFrame qccf(peer_framer_.transport_version(),
                                  kQuicErrorCode, NO_IETF_QUIC_ERROR, "",
                                  0);
    QuicFrames frames;
    frames.push_back(QuicFrame(&qccf));
    return ConstructPacket(header, frames);
  }
  // Default retransmission delay used by the sent-packet manager.
  QuicTime::Delta DefaultRetransmissionTime() {
    return QuicTime::Delta::FromMilliseconds(kDefaultRetransmissionTimeMs);
  }
  // Default delayed-ack timeout.
  QuicTime::Delta DefaultDelayedAckTime() {
    return QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
  }
const QuicStopWaitingFrame InitStopWaitingFrame(uint64_t least_unacked) {
QuicStopWaitingFrame frame;
frame.least_unacked = QuicPacketNumber(least_unacked);
return frame;
}
  // Builds an ACK frame acking [1, largest_acked] with `missing` left out.
  QuicAckFrame ConstructAckFrame(uint64_t largest_acked, uint64_t missing) {
    return ConstructAckFrame(QuicPacketNumber(largest_acked),
                             QuicPacketNumber(missing));
  }
  QuicAckFrame ConstructAckFrame(QuicPacketNumber largest_acked,
                                 QuicPacketNumber missing) {
    if (missing == QuicPacketNumber(1)) {
      // Nothing below the gap to ack; a single range above it suffices.
      return InitAckFrame({{missing + 1, largest_acked + 1}});
    }
    return InitAckFrame(
        {{QuicPacketNumber(1), missing}, {missing + 1, largest_acked + 1}});
  }
  // Adds `arrived` to an existing ack frame, asserting it was not already
  // present.
  void AckPacket(QuicPacketNumber arrived, QuicAckFrame* frame) {
    EXPECT_FALSE(frame->packets.Contains(arrived));
    frame->packets.Add(arrived);
  }
  // Forces a self-initiated close by delivering an ACK for 10000 packets the
  // connection never sent (QUIC_INVALID_ACK_DATA), then verifies the close
  // packet and saved close frame.
  void TriggerConnectionClose() {
    EXPECT_CALL(visitor_,
                OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
        .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
    EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
    QuicAckFrame frame = InitAckFrame(10000);
    ProcessAckPacket(1, &frame);
    EXPECT_FALSE(QuicConnectionPeer::GetConnectionClosePacket(&connection_) ==
                 nullptr);
    EXPECT_EQ(1, connection_close_frame_count_);
    EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
                IsError(QUIC_INVALID_ACK_DATA));
  }
  // Makes the next write return WRITE_STATUS_BLOCKED and expects the
  // connection to report the blockage at least once.
  void BlockOnNextWrite() {
    writer_->BlockOnNextWrite();
    EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AtLeast(1));
  }
  // Writer fault-injection knobs: oversized packet (next / always),
  // unprocessable packet, and artificial per-write delay.
  void SimulateNextPacketTooLarge() { writer_->SimulateNextPacketTooLarge(); }
  void ExpectNextPacketUnprocessable() {
    writer_->ExpectNextPacketUnprocessable();
  }
  void AlwaysGetPacketTooLarge() { writer_->AlwaysGetPacketTooLarge(); }
  void SetWritePauseTimeDelta(QuicTime::Delta delta) {
    writer_->SetWritePauseTimeDelta(delta);
  }
  // Makes the mock congestion controller refuse all sends.
  void CongestionBlockWrites() {
    EXPECT_CALL(*send_algorithm_, CanSend(_))
        .WillRepeatedly(testing::Return(false));
  }
  // Makes the mock congestion controller permit all sends.
  void CongestionUnblockWrites() {
    EXPECT_CALL(*send_algorithm_, CanSend(_))
        .WillRepeatedly(testing::Return(true));
  }
  // Flips the connection under test to `perspective` and re-wires the peer
  // framer to the inverse role, refreshing its initial obfuscators and any
  // already-installed tagging encrypters for the new connection ID.
  void set_perspective(Perspective perspective) {
    connection_.set_perspective(perspective);
    if (perspective == Perspective::IS_SERVER) {
      connection_.set_can_truncate_connection_ids(true);
      // Servers in these tests start past version negotiation.
      QuicConnectionPeer::SetNegotiatedVersion(&connection_);
      connection_.OnSuccessfulVersionNegotiation();
    }
    QuicFramerPeer::SetPerspective(&peer_framer_,
                                   QuicUtils::InvertPerspective(perspective));
    peer_framer_.SetInitialObfuscators(TestConnectionId());
    for (EncryptionLevel level : {ENCRYPTION_ZERO_RTT, ENCRYPTION_HANDSHAKE,
                                  ENCRYPTION_FORWARD_SECURE}) {
      if (peer_framer_.HasEncrypterOfEncryptionLevel(level)) {
        peer_creator_.SetEncrypter(level,
                                   std::make_unique<TaggingEncrypter>(level));
      }
    }
  }
  // Reconfigures the MTU discoverer's probing cadence.
  void set_packets_between_probes_base(
      const QuicPacketCount packets_between_probes_base) {
    QuicConnectionPeer::ReInitializeMtuDiscoverer(
        &connection_, packets_between_probes_base,
        QuicPacketNumber(packets_between_probes_base));
  }
  // True only for the first parameter combination (immediate acks, first
  // supported version) — used to run some checks exactly once per suite.
  bool IsDefaultTestConfiguration() {
    TestParams p = GetParam();
    return p.ack_response == AckResponse::kImmediate &&
           p.version == AllSupportedVersions()[0];
  }
  // Verifies exactly one CONNECTION_CLOSE was written with `expected_code`,
  // checking the wire error code and close type according to whether the
  // version maps the code to an IETF transport or application close.
  void TestConnectionCloseQuicErrorCode(QuicErrorCode expected_code) {
    EXPECT_FALSE(QuicConnectionPeer::GetConnectionClosePacket(&connection_) ==
                 nullptr);
    const std::vector<QuicConnectionCloseFrame>& connection_close_frames =
        writer_->connection_close_frames();
    ASSERT_EQ(1u, connection_close_frames.size());
    EXPECT_THAT(connection_close_frames[0].quic_error_code,
                IsError(expected_code));
    if (!VersionHasIetfQuicFrames(version().transport_version)) {
      // Google QUIC carries the QuicErrorCode directly on the wire.
      EXPECT_THAT(connection_close_frames[0].wire_error_code,
                  IsError(expected_code));
      EXPECT_EQ(GOOGLE_QUIC_CONNECTION_CLOSE,
                connection_close_frames[0].close_type);
      return;
    }
    QuicErrorCodeToIetfMapping mapping =
        QuicErrorCodeToTransportErrorCode(expected_code);
    if (mapping.is_transport_close) {
      EXPECT_EQ(IETF_QUIC_TRANSPORT_CONNECTION_CLOSE,
                connection_close_frames[0].close_type);
    } else {
      EXPECT_EQ(IETF_QUIC_APPLICATION_CONNECTION_CLOSE,
                connection_close_frames[0].close_type);
    }
    EXPECT_EQ(mapping.error_code, connection_close_frames[0].wire_error_code);
  }
  // Puts the fixture in a state suitable for MTU-discovery tests: server
  // perspective, forward-secure keys on both sides, handshake confirmed, and
  // (where applicable) anti-amplification limits lifted.
  void MtuDiscoveryTestInit() {
    set_perspective(Perspective::IS_SERVER);
    QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
    if (version().SupportsAntiAmplificationLimit()) {
      QuicConnectionPeer::SetAddressValidated(&connection_);
    }
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
    EXPECT_TRUE(connection_.connected());
  }
  // Common setup for path-probing tests: confirmed handshake at the given
  // perspective, peer address learned from one crypto packet, and (for IETF
  // clients, optionally) a spare server connection ID installed so a new
  // path can be probed.
  void PathProbeTestInit(Perspective perspective,
                         bool receive_new_server_connection_id = true) {
    set_perspective(perspective);
    connection_.CreateConnectionIdManager();
    EXPECT_EQ(connection_.perspective(), perspective);
    if (perspective == Perspective::IS_SERVER) {
      QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
    }
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
    // Handshake is done: drop initial keys and anything queued under them.
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.NeuterUnencryptedPackets();
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
    if (version().SupportsAntiAmplificationLimit() &&
        perspective == Perspective::IS_SERVER) {
      QuicConnectionPeer::SetAddressValidated(&connection_);
    }
    // Clear the peer address so the first processed packet establishes it.
    QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
    QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                                QuicSocketAddress());
    EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
    } else {
      EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
    }
    QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 2);
    ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                    kPeerAddress, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(kPeerAddress, connection_.peer_address());
    EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
    if (perspective == Perspective::IS_CLIENT &&
        receive_new_server_connection_id && version().HasIetfQuicFrames()) {
      // Provide an unused server connection ID for alternate-path probing.
      QuicNewConnectionIdFrame frame;
      frame.connection_id = TestConnectionId(1234);
      ASSERT_NE(frame.connection_id, connection_.connection_id());
      frame.stateless_reset_token =
          QuicUtils::GenerateStatelessResetToken(frame.connection_id);
      frame.retire_prior_to = 0u;
      frame.sequence_number = 1u;
      connection_.OnNewConnectionIdFrame(frame);
    }
  }
  // Sets up an IETF server that advertises a preferred address: confirmed
  // handshake, forward-secure keys, peer address learned from one crypto
  // packet, and the expected preferred address registered on the connection.
  void ServerHandlePreferredAddressInit() {
    ASSERT_TRUE(GetParam().version.HasIetfQuicFrames());
    set_perspective(Perspective::IS_SERVER);
    connection_.CreateConnectionIdManager();
    QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
    SetQuicReloadableFlag(quic_use_received_client_addresses_cache, true);
    EXPECT_CALL(visitor_, AllowSelfAddressChange())
        .WillRepeatedly(Return(true));
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.NeuterUnencryptedPackets();
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
    if (version().SupportsAntiAmplificationLimit()) {
      QuicConnectionPeer::SetAddressValidated(&connection_);
    }
    // Clear the peer address so the first processed packet establishes it.
    QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
    QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                                QuicSocketAddress());
    EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
    } else {
      EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
    }
    QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 2);
    ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                    kPeerAddress, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(kPeerAddress, connection_.peer_address());
    EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
    QuicConfig config;
    EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
    connection_.SetFromConfig(config);
    connection_.set_expected_server_preferred_address(kServerPreferredAddress);
  }
  // Sets up an IETF client that has received a server preferred address via
  // transport parameters (through `config`, which the caller may pre-fill).
  // Sends/acks initial crypto data first so the handshake is past INITIAL.
  void ServerPreferredAddressInit(QuicConfig& config) {
    ASSERT_EQ(Perspective::IS_CLIENT, connection_.perspective());
    ASSERT_TRUE(version().HasIetfQuicFrames());
    ASSERT_TRUE(connection_.self_address().host().IsIPv6());
    const QuicConnectionId connection_id = TestConnectionId(17);
    const StatelessResetToken reset_token =
        QuicUtils::GenerateStatelessResetToken(connection_id);
    connection_.CreateConnectionIdManager();
    connection_.SendCryptoStreamData();
    EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
    QuicAckFrame frame = InitAckFrame(1);
    ProcessFramePacketAtLevel(1, QuicFrame(&frame), ENCRYPTION_INITIAL);
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    // kSPAD opts the client in to server preferred address support.
    config.SetConnectionOptionsToSend(QuicTagVector{kSPAD});
    QuicConfigPeer::SetReceivedStatelessResetToken(&config,
                                                   kTestStatelessResetToken);
    QuicConfigPeer::SetReceivedAlternateServerAddress(&config,
                                                      kServerPreferredAddress);
    QuicConfigPeer::SetPreferredAddressConnectionIdAndToken(
        &config, connection_id, reset_token);
    EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
    connection_.SetFromConfig(config);
    ASSERT_TRUE(
        QuicConnectionPeer::GetReceivedServerPreferredAddress(&connection_)
            .IsInitialized());
    EXPECT_EQ(
        kServerPreferredAddress,
        QuicConnectionPeer::GetReceivedServerPreferredAddress(&connection_));
  }
  // In deferred-ack mode, reports WillingAndAbleToWrite() true exactly once
  // so the next deferred send is triggered; a no-op for immediate acks.
  void ForceWillingAndAbleToWriteOnceForDeferSending() {
    if (GetParam().ack_response == AckResponse::kDefer) {
      EXPECT_CALL(visitor_, WillingAndAbleToWrite())
          .WillOnce(Return(true))
          .RetiresOnSaturation();
    }
  }
  // Shared retry/connection-ID test bodies (defined out of line).
  void TestClientRetryHandling(bool invalid_retry_tag,
                               bool missing_original_id_in_config,
                               bool wrong_original_id_in_config,
                               bool missing_retry_id_in_config,
                               bool wrong_retry_id_in_config);
  void TestReplaceConnectionIdFromInitial();
  // --- Fixture state ---
  QuicConnectionId connection_id_;
  // framer_/peer_framer_ serialize packets for the local endpoint and the
  // simulated remote peer respectively.
  QuicFramer framer_;
  // Owned by connection_ once installed; kept as a raw pointer for mocking.
  MockSendAlgorithm* send_algorithm_;
  std::unique_ptr<MockLossAlgorithm> loss_algorithm_;
  MockClock clock_;
  MockRandom random_generator_;
  quiche::SimpleBufferAllocator buffer_allocator_;
  std::unique_ptr<TestConnectionHelper> helper_;
  std::unique_ptr<TestAlarmFactory> alarm_factory_;
  QuicFramer peer_framer_;
  QuicPacketCreator peer_creator_;
  std::unique_ptr<TestPacketWriter> writer_;
  TestConnection connection_;
  // creator_/manager_ alias internals of connection_ for white-box poking.
  QuicPacketCreator* creator_;
  QuicSentPacketManager* manager_;
  StrictMock<MockQuicConnectionVisitor> visitor_;
  // Canned frames reused across tests.
  QuicStreamFrame frame1_;
  QuicStreamFrame frame2_;
  QuicCryptoFrame crypto_frame_;
  QuicAckFrame ack_;
  QuicStopWaitingFrame stop_waiting_;
  QuicPacketNumberLength packet_number_length_;
  QuicConnectionIdIncluded connection_id_included_;
  SimpleSessionNotifier notifier_;
  // Populated by SaveConnectionCloseFrame when the connection closes.
  QuicConnectionCloseFrame saved_connection_close_frame_;
  int connection_close_frame_count_;
  MockConnectionIdGenerator connection_id_generator_;
};
};
// Run every test across all version / ack-response combinations produced by
// GetTestParams(), with human-readable parameterized names.
INSTANTIATE_TEST_SUITE_P(QuicConnectionTests, QuicConnectionTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// An error code in the transport-mapped range must be surfaced as an IETF
// transport-level CONNECTION_CLOSE (or the Google-QUIC equivalent).
TEST_P(QuicConnectionTest, CloseErrorCodeTestTransport) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  connection_.CloseConnection(
      IETF_QUIC_PROTOCOL_VIOLATION, "Should be transport close",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(IETF_QUIC_PROTOCOL_VIOLATION);
}
// An error code in the application-mapped range must be surfaced as an IETF
// application-level CONNECTION_CLOSE (or the Google-QUIC equivalent).
TEST_P(QuicConnectionTest, CloseErrorCodeTestApplication) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  connection_.CloseConnection(
      QUIC_HEADERS_STREAM_DATA_DECOMPRESS_FAILURE,
      "Should be application close",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_HEADERS_STREAM_DATA_DECOMPRESS_FAILURE);
}
// A client receiving a packet at a different self address keeps the original
// self address (clients do not adopt new self addresses from packets) and
// the connection stays up.
TEST_P(QuicConnectionTest, SelfAddressChangeAtClient) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  EXPECT_TRUE(connection_.connected());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_));
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_));
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  QuicIpAddress host;
  host.FromString("1.1.1.1");
  QuicSocketAddress self_address(host, 123);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_));
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_));
  }
  // Same packet contents but delivered to a different local address.
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), self_address, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.connected());
  EXPECT_NE(connection_.self_address(), self_address);
}
// A server that disallows self-address changes drops packets arriving at a
// different self address (packets_dropped increments) without closing.
TEST_P(QuicConnectionTest, SelfAddressChangeAtServer) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  EXPECT_TRUE(connection_.connected());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_));
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_));
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  QuicIpAddress host;
  host.FromString("1.1.1.1");
  QuicSocketAddress self_address(host, 123);
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  // Visitor vetoes the change, so the packet must be dropped.
  EXPECT_CALL(visitor_, AllowSelfAddressChange()).WillOnce(Return(false));
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), self_address, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.connected());
  EXPECT_EQ(1u, connection_.GetStats().packets_dropped);
}
// Switching between an IPv4 self address and its IPv4-mapped-IPv6 form (and
// back) is treated as the same address: all three packets are accepted with
// no AllowSelfAddressChange consultation and the connection stays up.
TEST_P(QuicConnectionTest, AllowSelfAddressChangeToMappedIpv4AddressAtServer) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  EXPECT_TRUE(connection_.connected());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(3);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(3);
  }
  QuicIpAddress host;
  host.FromString("1.1.1.1");
  QuicSocketAddress self_address1(host, 443);
  connection_.SetSelfAddress(self_address1);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), self_address1,
                                  kPeerAddress, ENCRYPTION_INITIAL);
  // Build the ::ffff: IPv4-mapped-IPv6 form of the same address/port.
  QuicIpAddress host2;
  host2.FromString(
      absl::StrCat("::ffff:", connection_.self_address().host().ToString()));
  QuicSocketAddress self_address2(host2, connection_.self_address().port());
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), self_address2,
                                  kPeerAddress, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.connected());
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), self_address1,
                                  kPeerAddress, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.connected());
}
// A reordered (lower-numbered) packet arriving from the old peer address
// after a higher-numbered packet already moved the peer to a new address
// must NOT migrate the connection back and must not trigger
// OnConnectionMigration.
TEST_P(QuicConnectionTest, ClientAddressChangeAndPacketReordered) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  }
  // Packet 5 arrives first from the new address and establishes it.
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 5);
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(),
                        23456);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kNewPeerAddress, ENCRYPTION_INITIAL);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  // Older packet 4 then arrives from the original address; no migration.
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 4);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
}
// A peer port-only change at the server is classified as PORT_CHANGE and,
// per the assertions below, leaves congestion state intact: initial RTT,
// consecutive PTO count, and the send algorithm instance all survive.
TEST_P(QuicConnectionTest, PeerPortChangeAtServer) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  if (version().SupportsAntiAmplificationLimit()) {
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  // Clear peer addresses so the first packet establishes them.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  // Seed non-default congestion state so preservation can be verified below.
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  QuicTime::Delta default_init_rtt = rtt_stats->initial_rtt();
  rtt_stats->set_initial_rtt(default_init_rtt * 2);
  EXPECT_EQ(2 * default_init_rtt, rtt_stats->initial_rtt());
  QuicSentPacketManagerPeer::SetConsecutivePtoCount(manager_, 1);
  EXPECT_EQ(1u, manager_->GetConsecutivePtoCount());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  // First stream frame is seen from the old address, the second from the new
  // (post-migration) address.
  EXPECT_CALL(visitor_, OnStreamFrame(_))
      .WillOnce(Invoke(
          [=, this]() { EXPECT_EQ(kPeerAddress, connection_.peer_address()); }))
      .WillOnce(Invoke([=, this]() {
        EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
      }));
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // Same IP, different port: classified as PORT_CHANGE.
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  // Congestion state is unchanged by the port-only migration.
  EXPECT_EQ(2 * default_init_rtt, rtt_stats->initial_rtt());
  EXPECT_EQ(1u, manager_->GetConsecutivePtoCount());
  EXPECT_EQ(manager_->GetSendAlgorithm(), send_algorithm_);
  if (version().HasIetfQuicFrames()) {
    // IETF versions validate the migration immediately and count it.
    EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
    EXPECT_EQ(1u, connection_.GetStats().num_validated_peer_migration);
    EXPECT_EQ(1u, connection_.num_linkable_client_migration());
  }
}
// Full IPv6->IPv4 peer address change at the server: migration starts reverse
// path validation with a fresh send algorithm, and a matching PATH_RESPONSE
// later completes validation and records the migration stats.
TEST_P(QuicConnectionTest, PeerIpAddressChangeAtServer) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().SupportsAntiAmplificationLimit() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  QuicConnectionPeer::SetAddressValidated(&connection_);
  connection_.OnHandshakeComplete();
  // Negotiate the 5RTO connection option via a received config.
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k5RTO);
  config.SetInitialReceivedConnectionOptions(connection_options);
  QuicConfigPeer::SetNegotiated(&config, true);
  QuicConfigPeer::SetReceivedOriginalConnectionId(&config,
                                                  connection_.connection_id());
  QuicConfigPeer::SetReceivedInitialSourceConnectionId(&config,
                                                       QuicConnectionId());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  // Clear peer addresses so the first packet establishes them.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  // First stream frame arrives from the old address, second from the new one.
  EXPECT_CALL(visitor_, OnStreamFrame(_))
      .WillOnce(Invoke(
          [=, this]() { EXPECT_EQ(kPeerAddress, connection_.peer_address()); }))
      .WillOnce(Invoke([=, this]() {
        EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
      }));
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // Put retransmittable data in flight so blackhole detection and the
  // retransmission alarm are active before the migration.
  connection_.SendStreamData3();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_TRUE(connection_.BlackholeDetectionInProgress());
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // A packet from the IPv4 address triggers IPV6_TO_IPV4_CHANGE migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .Times(0);
  EXPECT_CALL(visitor_, OnCanWrite()).Times(AnyNumber());
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  // Migration clears blackhole detection and the retransmission alarm, and
  // sends a PATH_CHALLENGE on the new path; capture its payload.
  EXPECT_FALSE(connection_.BlackholeDetectionInProgress());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  EXPECT_FALSE(writer_->path_challenge_frames().empty());
  QuicPathFrameBuffer payload =
      writer_->path_challenge_frames().front().data_buffer;
  // The migration installed a different send algorithm; replace it with a
  // fresh mock so the remaining expectations apply to the one in use.
  EXPECT_NE(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  send_algorithm_ = new StrictMock<MockSendAlgorithm>();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
      .WillRepeatedly(Return(kDefaultTCPMSS));
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
      .Times(AnyNumber())
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_)).Times(AnyNumber());
  connection_.SetSendAlgorithm(send_algorithm_);
  EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  // NOTE: "validtion" is the spelling of the stats field itself.
  EXPECT_EQ(1u,
            connection_.GetStats().num_reverse_path_validtion_upon_migration);
  // Sending crypto data now must leave the retransmission alarm unset.
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicAckFrame ack_frame = InitAckFrame(2);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessFramePacketWithAddresses(QuicFrame(&ack_frame), kSelfAddress,
                                  kNewPeerAddress, ENCRYPTION_FORWARD_SECURE);
  // Validation is still pending until the PATH_RESPONSE arrives.
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  // A PATH_RESPONSE echoing the captured payload completes validation.
  QuicFrames frames3;
  frames3.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  EXPECT_CALL(visitor_, MaybeSendAddressToken());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(testing::AtLeast(1u));
  ProcessFramesPacketWithAddresses(frames3, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
  connection_.SendCryptoDataWithString(std::string(1200, 'a'), 0);
  EXPECT_EQ(1u, connection_.GetStats().num_validated_peer_migration);
  EXPECT_EQ(1u, connection_.num_linkable_client_migration());
}
// IPv6->IPv4 peer migration while the server holds no spare client connection
// ID: sending on the new path stalls until a NEW_CONNECTION_ID frame supplies
// one, after which writes resume.
TEST_P(QuicConnectionTest, PeerIpAddressChangeAtServerWithMissingConnectionId) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  QuicConnectionId client_cid0 = TestConnectionId(1);
  QuicConnectionId client_cid1 = TestConnectionId(3);
  QuicConnectionId server_cid1;
  SetClientConnectionId(client_cid0);
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  QuicConnectionPeer::SetAddressValidated(&connection_);
  // Capture the server CID issued when the handshake completes so the peer's
  // migrated packets can carry it.
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        server_cid1 = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.OnHandshakeComplete();
  // Clear peer addresses so the first packet establishes them.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(2);
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  connection_.SendStreamData3();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // Peer switches to the new server CID and the new (IPv4) source address.
  peer_creator_.SetServerConnectionId(server_cid1);
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  EXPECT_CALL(visitor_, OnCanWrite()).Times(AnyNumber());
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  if (GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    frames2.push_back(QuicFrame(QuicPaddingFrame(-1)));
  }
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  // No client CID is available for the new path, so no new packet goes out.
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // Once the peer issues a fresh client CID, sending resumes.
  QuicNewConnectionIdFrame new_cid_frame;
  new_cid_frame.connection_id = client_cid1;
  new_cid_frame.sequence_number = 1u;
  new_cid_frame.retire_prior_to = 0u;
  connection_.OnNewConnectionIdFrame(new_cid_frame);
  connection_.SendStreamData3();
  EXPECT_EQ(2u, writer_->packets_write_attempts());
}
// Exercises effective-peer-address changes (injected via
// ReturnEffectivePeerAddressForNextPacket) that are decoupled from the direct
// peer address: port-only changes, a direct-address-only change that must not
// migrate, and a final IPv6->IPv4 change that starts path validation.
TEST_P(QuicConnectionTest, EffectivePeerAddressChangeAtServer) {
  if (GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  if (version().SupportsAntiAmplificationLimit()) {
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Clear peer addresses so the first packet establishes them.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  // The effective peer address differs from the direct one from the start.
  const QuicSocketAddress kEffectivePeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 43210);
  connection_.ReturnEffectivePeerAddressForNextPacket(kEffectivePeerAddress);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kEffectivePeerAddress, connection_.effective_peer_address());
  // Effective peer port changes while the direct address stays the same:
  // reported as a PORT_CHANGE migration.
  const QuicSocketAddress kNewEffectivePeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 54321);
  connection_.ReturnEffectivePeerAddressForNextPacket(kNewEffectivePeerAddress);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewEffectivePeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
  if (GetParam().version.HasIetfQuicFrames()) {
    EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
    EXPECT_EQ(1u, connection_.GetStats().num_validated_peer_migration);
    EXPECT_EQ(1u, connection_.num_linkable_client_migration());
  }
  // Direct peer address changes but the effective address does not: no
  // migration should be signaled (exercised on non-IETF versions only).
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  connection_.ReturnEffectivePeerAddressForNextPacket(kNewEffectivePeerAddress);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  if (!GetParam().version.HasIetfQuicFrames()) {
    QuicAckFrame ack_frame = InitAckFrame(1);
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
    ProcessFramePacketWithAddresses(QuicFrame(&ack_frame), kSelfAddress,
                                    kNewPeerAddress, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
    EXPECT_EQ(kNewEffectivePeerAddress, connection_.effective_peer_address());
    EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
  }
  // Both addresses change, but the effective change is still port-only.
  const QuicSocketAddress kNewerEffectivePeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 65432);
  const QuicSocketAddress kFinalPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 34567);
  connection_.ReturnEffectivePeerAddressForNextPacket(
      kNewerEffectivePeerAddress);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kFinalPeerAddress, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kFinalPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewerEffectivePeerAddress, connection_.effective_peer_address());
  if (GetParam().version.HasIetfQuicFrames()) {
    EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
    EXPECT_EQ(send_algorithm_,
              connection_.sent_packet_manager().GetSendAlgorithm());
    EXPECT_EQ(2u, connection_.GetStats().num_validated_peer_migration);
  }
  // Finally the effective peer IP changes family (IPv6 -> IPv4); for IETF
  // versions this swaps the send algorithm and starts path validation.
  const QuicSocketAddress kNewestEffectivePeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 65430);
  connection_.ReturnEffectivePeerAddressForNextPacket(
      kNewestEffectivePeerAddress);
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  if (!GetParam().version.HasIetfQuicFrames()) {
    EXPECT_CALL(*send_algorithm_, OnConnectionMigration()).Times(1);
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kFinalPeerAddress, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kFinalPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewestEffectivePeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  if (GetParam().version.HasIetfQuicFrames()) {
    EXPECT_NE(send_algorithm_,
              connection_.sent_packet_manager().GetSendAlgorithm());
    EXPECT_EQ(kFinalPeerAddress, writer_->last_write_peer_address());
    EXPECT_FALSE(writer_->path_challenge_frames().empty());
    EXPECT_EQ(0u, connection_.GetStats()
                      .num_peer_migration_while_validating_default_path);
    EXPECT_TRUE(connection_.HasPendingPathValidation());
  }
}
// Padding bytes left pending in the packet creator must not corrupt the
// default-path self address recorded when a peer port migration happens: it
// should still be the original IPv6 address.
TEST_P(QuicConnectionTest, ConnectionMigrationWithPendingPaddingBytes) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  QuicConnectionPeer::SetPeerAddress(&connection_, kPeerAddress);
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_, kPeerAddress);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  // Capture the server CID issued on handshake completion.
  QuicConnectionId new_cid;
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        new_cid = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Leave padding queued in the creator before the migration-triggering
  // packet arrives.
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  packet_creator->FlushCurrentPacket();
  packet_creator->AddPendingPadding(50u);
  const QuicSocketAddress kPeerAddress3 =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 56789);
  auto ack_frame = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
  ProcessFramesPacketWithAddresses({QuicFrame(&ack_frame)}, kSelfAddress,
                                   kPeerAddress3, ENCRYPTION_FORWARD_SECURE);
  // The default path's recorded self address must still be IPv6.
  ASSERT_EQ(connection_.self_address_on_default_path_while_sending_packet()
                .host()
                .address_family(),
            IpAddressFamily::IP_V6);
}
// A PATH_RESPONSE for an in-progress reverse path validation arriving from a
// third, unexpected peer address must be handled gracefully while that packet
// itself triggers yet another migration.
TEST_P(QuicConnectionTest,
       ReversePathValidationResponseReceivedFromUnexpectedPeerAddress) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  QuicConnectionPeer::SetPeerAddress(&connection_, kPeerAddress);
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_, kPeerAddress);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // Capture the server CID issued on handshake completion.
  QuicConnectionId new_cid;
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        new_cid = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // First migration (IPv6 -> IPv4) starts reverse path validation; capture
  // the PATH_CHALLENGE payload it sends.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  const QuicSocketAddress kPeerAddress2 =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  peer_creator_.SetServerConnectionId(new_cid);
  ProcessFramesPacketWithAddresses({QuicFrame(QuicPingFrame())}, kSelfAddress,
                                   kPeerAddress2, ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(writer_->path_challenge_frames().empty());
  QuicPathFrameBuffer reverse_path_challenge_payload =
      writer_->path_challenge_frames().front().data_buffer;
  // The PATH_RESPONSE arrives from yet another address (back to IPv6), which
  // simultaneously triggers a second migration.
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    const QuicSocketAddress kPeerAddress3 =
        QuicSocketAddress(QuicIpAddress::Loopback6(), 56789);
    auto ack_frame = InitAckFrame(1);
    EXPECT_CALL(visitor_, OnConnectionMigration(IPV4_TO_IPV6_CHANGE)).Times(1);
    EXPECT_CALL(visitor_, MaybeSendAddressToken()).WillOnce(Invoke([this]() {
      connection_.SendControlFrame(
          QuicFrame(new QuicNewTokenFrame(1, "new_token")));
      return true;
    }));
    ProcessFramesPacketWithAddresses(
        {QuicFrame(QuicPathResponseFrame(0, reverse_path_challenge_payload)),
         QuicFrame(&ack_frame)},
        kSelfAddress, kPeerAddress3, ENCRYPTION_FORWARD_SECURE);
  }
}
// When reverse path validation fails (every retry times out), the server must
// revert to the previous path, connection IDs, and send algorithm, and retire
// the client connection ID that was reserved for the abandoned path.
TEST_P(QuicConnectionTest, ReversePathValidationFailureAtServer) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  SetClientConnectionId(TestConnectionId(1));
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  QuicConnectionPeer::SetAddressValidated(&connection_);
  // *_cid0 = IDs in use before migration; *_cid1 = IDs for the new path.
  QuicConnectionId client_cid0 = connection_.client_connection_id();
  QuicConnectionId client_cid1 = TestConnectionId(2);
  QuicConnectionId server_cid0 = connection_.connection_id();
  QuicConnectionId server_cid1;
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        server_cid1 = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.OnHandshakeComplete();
  // Peer supplies a spare client CID for future migrations.
  QuicNewConnectionIdFrame new_cid_frame;
  new_cid_frame.connection_id = client_cid1;
  new_cid_frame.sequence_number = 1u;
  new_cid_frame.retire_prior_to = 0u;
  connection_.OnNewConnectionIdFrame(new_cid_frame);
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(), client_cid0);
  ASSERT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  // Clear peer addresses so the first packet establishes them.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  // First stream frame arrives from the old address, second from the new one.
  EXPECT_CALL(visitor_, OnStreamFrame(_))
      .WillOnce(Invoke(
          [=, this]() { EXPECT_EQ(kPeerAddress, connection_.peer_address()); }))
      .WillOnce(Invoke([=, this]() {
        EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
      }));
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // Migration to the IPv4 address starts reverse path validation and flips
  // the default path to the *_cid1 pair.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  EXPECT_CALL(*send_algorithm_, OnConnectionMigration()).Times(0);
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  QuicPaddingFrame padding;
  frames2.push_back(QuicFrame(padding));
  peer_creator_.SetServerConnectionId(server_cid1);
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  EXPECT_LT(0u, writer_->packets_write_attempts());
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_NE(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  // After migration: new CIDs on the default path, old CIDs parked on the
  // alternative path pending validation.
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  const auto* alternative_path =
      QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_EQ(default_path->client_connection_id, client_cid1);
  EXPECT_EQ(default_path->server_connection_id, server_cid1);
  EXPECT_EQ(alternative_path->client_connection_id, client_cid0);
  EXPECT_EQ(alternative_path->server_connection_id, server_cid0);
  EXPECT_EQ(packet_creator->GetDestinationConnectionId(), client_cid1);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid1);
  // Burn through all scheduled validation retries without a response.
  for (size_t i = 0; i < QuicPathValidator::kMaxRetryTimes; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
    static_cast<TestAlarmFactory::TestAlarm*>(
        QuicPathValidatorPeer::retry_timer(
            QuicConnectionPeer::path_validator(&connection_)))
        ->Fire();
  }
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  ProcessFramesPacketWithAddresses(
      {QuicFrame(QuicPingFrame()), QuicFrame(QuicPaddingFrame())}, kSelfAddress,
      kNewPeerAddress, ENCRYPTION_FORWARD_SECURE);
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // The final timer expiry fails validation: everything reverts to the old
  // path, CIDs, and send algorithm.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
  static_cast<TestAlarmFactory::TestAlarm*>(
      QuicPathValidatorPeer::retry_timer(
          QuicConnectionPeer::path_validator(&connection_)))
      ->Fire();
  EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(default_path->client_connection_id, client_cid0);
  EXPECT_EQ(default_path->server_connection_id, server_cid0);
  EXPECT_TRUE(alternative_path->server_connection_id.IsEmpty());
  EXPECT_FALSE(alternative_path->stateless_reset_token.has_value());
  // The client CID reserved for the failed path gets retired.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(1u));
  retire_peer_issued_cid_alarm->Fire();
  EXPECT_EQ(packet_creator->GetDestinationConnectionId(), client_cid0);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
}
// A connectivity/path probe arriving from the current peer address is counted
// (for IETF versions) but must not change the peer address or be reported as
// a migration.
TEST_P(QuicConnectionTest, ReceivePathProbeWithNoAddressChangeAtServer) {
  if (!version().HasIetfQuicFrames() &&
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  EXPECT_CALL(visitor_, OnPacketReceived(_, _, false)).Times(0);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  ProcessReceivedPacket(kSelfAddress, kPeerAddress, *received);
  // Only IETF versions count a probe from the unchanged address.
  EXPECT_EQ(
      num_probing_received + (GetParam().version.HasIetfQuicFrames() ? 1u : 0u),
      connection_.GetStats().num_connectivity_probing_received);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
}
// A buffered MTU discovery probe that later hits a "packet too large" write
// error must be dropped quietly when the queue drains, not close the
// connection.
TEST_P(QuicConnectionTest, BufferedMtuPacketTooBig) {
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(1);
  writer_->SetWriteBlocked();
  // The probe cannot be written while blocked, so it is queued.
  connection_.SendMtuDiscoveryPacket(kMaxOutgoingPacketSize);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  EXPECT_TRUE(writer_->IsWriteBlocked());
  // Unblock, but make every subsequent write fail with MSG_TOO_BIG.
  writer_->AlwaysGetPacketTooLarge();
  writer_->SetWritable();
  connection_.OnCanWrite();
}
// Sending a connectivity probe while a stream packet is still queued must not
// close the connection when the queue later drains (packets end up written
// out of order).
TEST_P(QuicConnectionTest, WriteOutOfOrderQueuedPackets) {
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  set_perspective(Perspective::IS_CLIENT);
  BlockOnNextWrite();
  QuicStreamId stream_id = 2;
  // This stream packet gets queued behind the blocked writer.
  connection_.SendStreamDataWithString(stream_id, "foo", 0, NO_FIN);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  writer_->SetWritable();
  // The probe is written ahead of the still-queued stream packet.
  connection_.SendConnectivityProbingPacket(writer_.get(),
                                            connection_.peer_address());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  connection_.OnCanWrite();
}
// After a fatal write error closes the connection, no packets remain queued
// and a later OnCanWrite must not bump the packets_discarded stat.
TEST_P(QuicConnectionTest, DiscardQueuedPacketsAfterConnectionClose) {
  {
    // The send attempt must happen before the close notification.
    InSequence seq;
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .Times(AtLeast(1));
    EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(AtLeast(1));
  }
  set_perspective(Perspective::IS_CLIENT);
  // The next write fails with a fatal "packet too large" error.
  writer_->SimulateNextPacketTooLarge();
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  EXPECT_FALSE(connection_.connected());
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_EQ(0u, connection_.GetStats().packets_discarded);
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.GetStats().packets_discarded);
}
class TestQuicPathValidationContext : public QuicPathValidationContext {
public:
TestQuicPathValidationContext(const QuicSocketAddress& self_address,
const QuicSocketAddress& peer_address,
QuicPacketWriter* writer)
: QuicPathValidationContext(self_address, peer_address),
writer_(writer) {}
QuicPacketWriter* WriterToUse() override { return writer_; }
private:
QuicPacketWriter* writer_;
};
// Delegate that reports a path validation outcome through a caller-owned
// bool, while checking that the validated context carries exactly the
// addresses the test expected.
class TestValidationResultDelegate : public QuicPathValidator::ResultDelegate {
 public:
  TestValidationResultDelegate(QuicConnection* connection,
                               const QuicSocketAddress& expected_self_address,
                               const QuicSocketAddress& expected_peer_address,
                               bool* success)
      : connection_(connection),
        expected_self_address_(expected_self_address),
        expected_peer_address_(expected_peer_address),
        success_(success) {}

  // Validation succeeded: record the outcome and sanity-check the context.
  void OnPathValidationSuccess(
      std::unique_ptr<QuicPathValidationContext> context,
      QuicTime /*start_time*/) override {
    *success_ = true;
    EXPECT_EQ(expected_self_address_, context->self_address());
    EXPECT_EQ(expected_peer_address_, context->peer_address());
  }

  // Validation failed: record the outcome; only a client-side connection is
  // additionally notified of the failure.
  void OnPathValidationFailure(
      std::unique_ptr<QuicPathValidationContext> context) override {
    *success_ = false;
    EXPECT_EQ(expected_self_address_, context->self_address());
    EXPECT_EQ(expected_peer_address_, context->peer_address());
    if (connection_->perspective() == Perspective::IS_CLIENT) {
      connection_->OnPathValidationFailureAtClient(false, *context);
    }
  }

 private:
  QuicConnection* connection_;            // Not owned.
  QuicSocketAddress expected_self_address_;
  QuicSocketAddress expected_peer_address_;
  bool* success_;                         // Caller-owned result flag.
};
// Client-side delegate for server-preferred-address tests: on success it
// marks the preferred address validated on the connection; on failure it
// reports the failed validation back to the connection.
class ServerPreferredAddressTestResultDelegate
    : public QuicPathValidator::ResultDelegate {
 public:
  explicit ServerPreferredAddressTestResultDelegate(QuicConnection* connection)
      : connection_(connection) {}
  void OnPathValidationSuccess(
      std::unique_ptr<QuicPathValidationContext> context,
      QuicTime /*start_time*/) override {
    connection_->OnServerPreferredAddressValidated(*context, false);
  }
  void OnPathValidationFailure(
      std::unique_ptr<QuicPathValidationContext> context) override {
    connection_->OnPathValidationFailureAtClient(false,
                                                 *context);
  }
 protected:
  // Accessor for subclasses that need the connection under test.
  QuicConnection* connection() { return connection_; }
 private:
  QuicConnection* connection_;  // Not owned.
};
// Verifies server handling of connectivity probes arriving from a new peer
// address: the probe is counted in stats, the peer address does NOT change,
// and (for IETF versions) the server replies on the alternative path with a
// PATH_CHALLENGE + PATH_RESPONSE, validating the path once the peer echoes
// the challenge payload back.
TEST_P(QuicConnectionTest, ReceivePathProbingFromNewPeerAddressAtServer) {
  if (!version().HasIetfQuicFrames() &&
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  // A probe must not be treated as a connection migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  QuicPathFrameBuffer payload;
  if (!GetParam().version.HasIetfQuicFrames()) {
    // gQUIC surfaces the probing packet to the visitor.
    EXPECT_CALL(visitor_,
                OnPacketReceived(_, _, true))
        .Times(1);
  } else {
    // IETF QUIC responds on the wire instead: the first write carries one
    // PATH_CHALLENGE and one PATH_RESPONSE; capture the challenge payload so
    // the peer's echo can be simulated below.
    EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(0);
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .Times(AtLeast(1u))
        .WillOnce(Invoke([&]() {
          EXPECT_EQ(1u, writer_->path_challenge_frames().size());
          EXPECT_EQ(1u, writer_->path_response_frames().size());
          payload = writer_->path_challenge_frames().front().data_buffer;
        }))
        .WillRepeatedly(DoDefault());
  }
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          23456);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  ProcessReceivedPacket(kSelfAddress, kNewPeerAddress, *received);
  // The probe is counted but the effective peer address is unchanged.
  EXPECT_EQ(num_probing_received + 1,
            connection_.GetStats().num_connectivity_probing_received);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  if (GetParam().version.HasIetfQuicFrames()) {
    // Alternative-path byte accounting: bytes were sent in response, and the
    // received probe's bytes were attributed to the alternative path.
    QuicByteCount bytes_sent =
        QuicConnectionPeer::BytesSentOnAlternativePath(&connection_);
    EXPECT_LT(0u, bytes_sent);
    EXPECT_EQ(received->length(),
              QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_));
    // A second probe from the same new address doubles both counters.
    probing_packet = ConstructProbingPacket();
    received.reset(ConstructReceivedPacket(
        QuicEncryptedPacket(probing_packet->encrypted_buffer,
                            probing_packet->encrypted_length),
        clock_.Now()));
    ProcessReceivedPacket(kSelfAddress, kNewPeerAddress, *received);
    EXPECT_EQ(num_probing_received + 2,
              connection_.GetStats().num_connectivity_probing_received);
    EXPECT_EQ(2 * bytes_sent,
              QuicConnectionPeer::BytesSentOnAlternativePath(&connection_));
    EXPECT_EQ(2 * received->length(),
              QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_));
    EXPECT_EQ(2 * bytes_sent,
              QuicConnectionPeer::BytesSentOnAlternativePath(&connection_));
    // Echo the captured PATH_CHALLENGE payload back as a PATH_RESPONSE; this
    // validates the alternative path.
    QuicFrames frames;
    frames.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
    ProcessFramesPacketWithAddresses(frames, connection_.self_address(),
                                     kNewPeerAddress,
                                     ENCRYPTION_FORWARD_SECURE);
    EXPECT_LT(2 * received->length(),
              QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_));
    EXPECT_TRUE(QuicConnectionPeer::IsAlternativePathValidated(&connection_));
    // A probe from yet another address does not start a new validation nor
    // disturb the already-validated alternative path.
    QuicSocketAddress kNewerPeerAddress(QuicIpAddress::Loopback4(),
                                        34567);
    probing_packet = ConstructProbingPacket();
    received.reset(ConstructReceivedPacket(
        QuicEncryptedPacket(probing_packet->encrypted_buffer,
                            probing_packet->encrypted_length),
        clock_.Now()));
    ProcessReceivedPacket(kSelfAddress, kNewerPeerAddress, *received);
    EXPECT_FALSE(connection_.HasPendingPathValidation());
    EXPECT_TRUE(QuicConnectionPeer::IsAlternativePathValidated(&connection_));
  }
  // A regular packet from the original peer address still causes no
  // migration callback and leaves the peer address as-is.
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
}
// Verifies server handling of probes addressed to the server's preferred
// address: a probe from the existing peer is answered from the original
// self address without creating an alternative path, while a probe from a
// new peer address is answered from the preferred address and does create
// an alternative path with byte accounting.
TEST_P(QuicConnectionTest, ReceivePathProbingToPreferredAddressAtServer) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  ServerHandlePreferredAddressInit();
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(0);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  // Probe from the known peer to the preferred address: the PATH_RESPONSE is
  // sourced from the original self address.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(kSelfAddress.host(), writer_->last_write_source_address());
        EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
      }));
  ProcessReceivedPacket(kServerPreferredAddress, kPeerAddress, *received);
  EXPECT_EQ(num_probing_received + 1,
            connection_.GetStats().num_connectivity_probing_received);
  // No alternative path is recorded for the known peer, and the connection's
  // self address has not switched to the preferred address.
  EXPECT_FALSE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kServerPreferredAddress, kPeerAddress));
  EXPECT_NE(kServerPreferredAddress, connection_.self_address());
  // Probe from a NEW peer address to the preferred address: the reply adds a
  // PATH_CHALLENGE and is sourced from the preferred address.
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          34567);
  probing_packet = ConstructProbingPacket();
  received.reset(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(1u, writer_->path_challenge_frames().size());
        EXPECT_EQ(kServerPreferredAddress.host(),
                  writer_->last_write_source_address());
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
      }));
  ProcessReceivedPacket(kServerPreferredAddress, kNewPeerAddress, *received);
  EXPECT_EQ(num_probing_received + 2,
            connection_.GetStats().num_connectivity_probing_received);
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(&connection_, kSelfAddress,
                                                    kNewPeerAddress));
  EXPECT_LT(0u, QuicConnectionPeer::BytesSentOnAlternativePath(&connection_));
  EXPECT_EQ(received->length(),
            QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_));
}
// Verifies server handling of a padded PING arriving from a new peer port.
// Under IETF QUIC (or with gQUIC probing ignored) this is treated as a real
// PORT_CHANGE migration; legacy gQUIC instead treats it as a connectivity
// probe and keeps the original peer address.
TEST_P(QuicConnectionTest, ReceivePaddedPingWithPortChangeAtServer) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  if (version().SupportsAntiAmplificationLimit()) {
    // Lift the anti-amplification limit so the server can respond freely.
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  // Start with an uninitialized peer address; the first packet sets it.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  if (GetParam().version.UsesCryptoFrames()) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  if (GetParam().version.HasIetfQuicFrames() ||
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    // Padded PING from a new port counts as a migration, not a probe.
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
    EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
    EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(0);
  } else {
    // Legacy gQUIC reports it as a probing packet.
    EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
    EXPECT_CALL(visitor_,
                OnPacketReceived(_, _, true))
        .Times(1);
  }
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  QuicFrames frames;
  QuicPingFrame ping_frame;
  frames.push_back(QuicFrame(ping_frame));
  QuicPaddingFrame padding_frame;
  frames.push_back(QuicFrame(padding_frame));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_INITIAL);
  if (GetParam().version.HasIetfQuicFrames() ||
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    // Migration path: probe count unchanged, peer address updated.
    EXPECT_EQ(num_probing_received,
              connection_.GetStats().num_connectivity_probing_received);
    EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
    EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  } else {
    // Probe path: probe count incremented, peer address unchanged.
    EXPECT_EQ(num_probing_received + 1,
              connection_.GetStats().num_connectivity_probing_received);
    EXPECT_EQ(kPeerAddress, connection_.peer_address());
    EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  }
  // A packet from the original address migrates back (second PORT_CHANGE in
  // the migration case).
  if (GetParam().version.HasIetfQuicFrames() ||
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
}
// Verifies server handling of a path probe that arrives with a LOWER packet
// number than already-received packets (reordered). The probe is still
// counted and the peer address preserved, except in the legacy
// flag-on-but-non-IETF combination where the probe is ignored and the new
// address takes effect.
TEST_P(QuicConnectionTest, ReceiveReorderedPathProbingAtServer) {
  if (!GetParam().version.HasIetfQuicFrames() &&
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  // Advance the peer's packet number so the probe (lower number) is
  // considered reordered.
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 4);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  if (!GetParam().version.HasIetfQuicFrames()) {
    EXPECT_CALL(visitor_,
                OnPacketReceived(_, _, true))
        .Times(1);
  } else {
    EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(0);
  }
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  ProcessReceivedPacket(kSelfAddress, kNewPeerAddress, *received);
  // Expected outcome depends on whether the gQUIC-probing-ignore flag
  // applies to this version.
  EXPECT_EQ(num_probing_received +
                (!version().HasIetfQuicFrames() &&
                        GetQuicReloadableFlag(quic_ignore_gquic_probing)
                     ? 0u
                     : 1u),
            connection_.GetStats().num_connectivity_probing_received);
  EXPECT_EQ((!version().HasIetfQuicFrames() &&
                     GetQuicReloadableFlag(quic_ignore_gquic_probing)
                 ? kNewPeerAddress
                 : kPeerAddress),
            connection_.peer_address());
  EXPECT_EQ((!version().HasIetfQuicFrames() &&
                     GetQuicReloadableFlag(quic_ignore_gquic_probing)
                 ? kNewPeerAddress
                 : kPeerAddress),
            connection_.effective_peer_address());
}
// Verifies that a server which first receives a connectivity probe from a
// new address (no migration) then migrates once a NON-probing packet arrives
// from that same new address, reporting a single PORT_CHANGE.
TEST_P(QuicConnectionTest, MigrateAfterProbingAtServer) {
  if (!GetParam().version.HasIetfQuicFrames() &&
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  if (!GetParam().version.HasIetfQuicFrames()) {
    EXPECT_CALL(visitor_,
                OnPacketReceived(_, _, true))
        .Times(1);
  } else {
    EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(0);
  }
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  // The probe alone does not change the peer address.
  ProcessReceivedPacket(kSelfAddress, kNewPeerAddress, *received);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // A non-probing packet from the new address triggers the migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(1);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kNewPeerAddress, ENCRYPTION_INITIAL);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
}
// Verifies client handling of an incoming connectivity probe on the existing
// path: counted as a probe only for IETF versions, and never treated as a
// migration; the peer address is unchanged.
TEST_P(QuicConnectionTest, ReceiveConnectivityProbingPacketAtClient) {
  if (!version().HasIetfQuicFrames() &&
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  PathProbeTestInit(Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  ProcessReceivedPacket(kSelfAddress, kPeerAddress, *received);
  // Only IETF versions count this packet as a connectivity probe.
  EXPECT_EQ(
      num_probing_received + (GetParam().version.HasIetfQuicFrames() ? 1u : 0u),
      connection_.GetStats().num_connectivity_probing_received);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
}
// Verifies (legacy gQUIC only — IETF versions and the ignore-probing flag
// skip this test) that a client receiving a probe response on a NEW self
// address counts it as a connectivity probe without migrating.
TEST_P(QuicConnectionTest, ReceiveConnectivityProbingResponseAtClient) {
  if (GetParam().version.HasIetfQuicFrames() ||
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  PathProbeTestInit(Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  if (!GetParam().version.HasIetfQuicFrames()) {
    EXPECT_CALL(visitor_,
                OnPacketReceived(_, _, true))
        .Times(1);
  } else {
    EXPECT_CALL(visitor_, OnPacketReceived(_, _, _)).Times(0);
  }
  // The probe response arrives on a different local (self) address.
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  ProcessReceivedPacket(kNewSelfAddress, kPeerAddress, *received);
  EXPECT_EQ(num_probing_received + 1,
            connection_.GetStats().num_connectivity_probing_received);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
}
// Verifies client reaction to the peer's address changing mid-connection:
// IETF versions keep the original peer address (the packet from the new
// address does not move the path), while pre-IETF versions adopt the new
// address. No migration callback fires in either case.
TEST_P(QuicConnectionTest, PeerAddressChangeAtClient) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  set_perspective(Perspective::IS_CLIENT);
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  // Start with an uninitialized peer address.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  // IETF versions deliver only the first crypto frame (the second packet is
  // not accepted from the new address); older versions deliver both.
  if (connection_.version().HasIetfQuicFrames()) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  } else if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(2);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(2);
  }
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kNewPeerAddress, ENCRYPTION_INITIAL);
  if (connection_.version().HasIetfQuicFrames()) {
    EXPECT_EQ(kPeerAddress, connection_.peer_address());
    EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  } else {
    EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
    EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  }
}
// Verifies that a packet whose source address differs from the recorded peer
// address only by IPv4/IPv6 dual-stack normalization is not treated as an
// address change: with the normalization flag on, the packet is accepted
// (no drop); with it off, the packet is dropped.
TEST_P(QuicConnectionTest, NoNormalizedPeerAddressChangeAtClient) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  QuicIpAddress peer_ip;
  peer_ip.FromString("1.1.1.1");
  QuicSocketAddress peer_addr = QuicSocketAddress(peer_ip, 443);
  // Same host/port expressed as a dual-stacked (IPv4-mapped IPv6) address.
  QuicSocketAddress dualstack_peer_addr =
      QuicSocketAddress(peer_addr.host().DualStacked(), peer_addr.port());
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_)).Times(AnyNumber());
  set_perspective(Perspective::IS_CLIENT);
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  // Record the dual-stacked form, then receive from the plain IPv4 form.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, dualstack_peer_addr);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, peer_addr,
                                  ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.connected());
  if (GetQuicReloadableFlag(quic_test_peer_addr_change_after_normalize)) {
    EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  } else {
    EXPECT_EQ(1u, connection_.GetStats().packets_dropped);
  }
}
// Verifies that when the server's address switches to one the client has
// explicitly registered via AddKnownServerAddress, the client accepts
// packets from it without migrating — the recorded peer address stays the
// original one throughout.
TEST_P(QuicConnectionTest, ServerAddressChangesToKnownAddress) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  set_perspective(Perspective::IS_CLIENT);
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  // All three crypto packets below are delivered.
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(3);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  // Pre-register the new server address as known.
  connection_.AddKnownServerAddress(kNewPeerAddress);
  EXPECT_CALL(visitor_, OnConnectionMigration(_)).Times(0);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kNewPeerAddress, ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // Switching back to the original address is likewise uneventful.
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
}
// Verifies that if the server starts sending from its advertised preferred
// address BEFORE the client has initiated migration to it, the client drops
// those packets (the final crypto frame is expected zero times).
TEST_P(QuicConnectionTest,
       PeerAddressChangesToPreferredAddressBeforeClientInitiates) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  ASSERT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  ASSERT_TRUE(connection_.self_address().host().IsIPv6());
  const QuicConnectionId connection_id = TestConnectionId(17);
  const StatelessResetToken reset_token =
      QuicUtils::GenerateStatelessResetToken(connection_id);
  connection_.CreateConnectionIdManager();
  // Send and get acknowledgment for initial crypto data.
  connection_.SendCryptoStreamData();
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame = InitAckFrame(1);
  ProcessFramePacketAtLevel(1, QuicFrame(&frame), ENCRYPTION_INITIAL);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Configure the transport parameters carrying the server's preferred
  // address, its connection ID, and the associated reset token.
  QuicConfig config;
  config.SetConnectionOptionsToSend(QuicTagVector{kSPAD});
  QuicConfigPeer::SetReceivedStatelessResetToken(&config,
                                                 kTestStatelessResetToken);
  QuicConfigPeer::SetReceivedAlternateServerAddress(&config,
                                                    kServerPreferredAddress);
  QuicConfigPeer::SetPreferredAddressConnectionIdAndToken(
      &config, connection_id, reset_token);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  ASSERT_TRUE(
      QuicConnectionPeer::GetReceivedServerPreferredAddress(&connection_)
          .IsInitialized());
  EXPECT_EQ(
      kServerPreferredAddress,
      QuicConnectionPeer::GetReceivedServerPreferredAddress(&connection_));
  // A packet arriving FROM the preferred address before the client migrates
  // must be dropped: no crypto frame reaches the visitor.
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(0);
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress,
                                  kServerPreferredAddress, ENCRYPTION_INITIAL);
}
// Verifies the client's default maximum packet length is 1250 bytes.
TEST_P(QuicConnectionTest, MaxPacketSize) {
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  EXPECT_EQ(1250u, connection_.max_packet_length());
}
// Verifies that a peer-advertised max packet size SMALLER than ours is
// honored once the config is applied.
TEST_P(QuicConnectionTest, PeerLowersMaxPacketSize) {
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  constexpr uint32_t kTestMaxPacketSize = 1233u;
  QuicConfig config;
  QuicConfigPeer::SetReceivedMaxPacketSize(&config, kTestMaxPacketSize);
  connection_.SetFromConfig(config);
  EXPECT_EQ(kTestMaxPacketSize, connection_.max_packet_length());
}
// Verifies that a peer-advertised max packet size LARGER than the default is
// ignored — the connection stays clamped at kDefaultMaxPacketSize.
TEST_P(QuicConnectionTest, PeerCannotRaiseMaxPacketSize) {
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  constexpr uint32_t kTestMaxPacketSize = 1450u;
  QuicConfig config;
  QuicConfigPeer::SetReceivedMaxPacketSize(&config, kTestMaxPacketSize);
  connection_.SetFromConfig(config);
  EXPECT_EQ(kDefaultMaxPacketSize, connection_.max_packet_length());
}
// Verifies a freshly constructed SERVER connection starts with a smaller
// (1000-byte) max packet length than a client.
TEST_P(QuicConnectionTest, SmallerServerMaxPacketSize) {
  TestConnection connection(TestConnectionId(), kSelfAddress, kPeerAddress,
                            helper_.get(), alarm_factory_.get(), writer_.get(),
                            Perspective::IS_SERVER, version(),
                            connection_id_generator_);
  EXPECT_EQ(Perspective::IS_SERVER, connection.perspective());
  EXPECT_EQ(1000u, connection.max_packet_length());
}
// Verifies that with the quic_use_lower_server_response_mtu_for_test flag
// set, processing a client packet raises the server's max packet length
// from its configured 1000 bytes to the test MTU of 1250 bytes.
TEST_P(QuicConnectionTest, LowerServerResponseMtuTest) {
  set_perspective(Perspective::IS_SERVER);
  connection_.SetMaxPacketLength(1000);
  EXPECT_EQ(1000u, connection_.max_packet_length());
  SetQuicFlag(quic_use_lower_server_response_mtu_for_test, true);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(::testing::AtMost(1));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(::testing::AtMost(1));
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  EXPECT_EQ(1250u, connection_.max_packet_length());
}
// Verifies that a server raises its max packet length to match the size of
// an incoming full-size (padded to kMaxOutgoingPacketSize) client packet.
// The +4 adjustments account for versions that do not know which decrypter
// to use (extra framing overhead).
TEST_P(QuicConnectionTest, IncreaseServerMaxPacketSize) {
  set_perspective(Perspective::IS_SERVER);
  connection_.SetMaxPacketLength(1000);
  // Build a padded crypto/stream packet from the peer.
  QuicPacketHeader header;
  header.destination_connection_id = connection_id_;
  header.version_flag = true;
  header.packet_number = QuicPacketNumber(12);
  if (QuicVersionHasLongHeaderLengths(
          peer_framer_.version().transport_version)) {
    header.long_packet_type = INITIAL;
    header.retry_token_length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
    header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
  }
  QuicFrames frames;
  QuicPaddingFrame padding;
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    frames.push_back(QuicFrame(&crypto_frame_));
  } else {
    frames.push_back(QuicFrame(frame1_));
  }
  frames.push_back(QuicFrame(padding));
  std::unique_ptr<QuicPacket> packet(ConstructPacket(header, frames));
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length =
      peer_framer_.EncryptPayload(ENCRYPTION_INITIAL, QuicPacketNumber(12),
                                  *packet, buffer, kMaxOutgoingPacketSize);
  // The padded packet fills the maximum outgoing size (modulo decrypter
  // overhead on older versions).
  EXPECT_EQ(kMaxOutgoingPacketSize,
            encrypted_length +
                (connection_.version().KnowsWhichDecrypterToUse() ? 0 : 4));
  framer_.set_version(version());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  }
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, clock_.ApproximateNow(),
                         false));
  // The server's max packet length grows to the received packet's size.
  EXPECT_EQ(kMaxOutgoingPacketSize,
            connection_.max_packet_length() +
                (connection_.version().KnowsWhichDecrypterToUse() ? 0 : 4));
}
// Same scenario as IncreaseServerMaxPacketSize, but the writer enforces a
// lower limit (1240 bytes): the server's max packet length grows only up to
// the writer's cap, not to the received packet's full size.
TEST_P(QuicConnectionTest, IncreaseServerMaxPacketSizeWhileWriterLimited) {
  const QuicByteCount lower_max_packet_size = 1240;
  writer_->set_max_packet_size(lower_max_packet_size);
  set_perspective(Perspective::IS_SERVER);
  connection_.SetMaxPacketLength(1000);
  EXPECT_EQ(1000u, connection_.max_packet_length());
  // Build a padded full-size packet from the peer (same construction as in
  // IncreaseServerMaxPacketSize).
  QuicPacketHeader header;
  header.destination_connection_id = connection_id_;
  header.version_flag = true;
  header.packet_number = QuicPacketNumber(12);
  if (QuicVersionHasLongHeaderLengths(
          peer_framer_.version().transport_version)) {
    header.long_packet_type = INITIAL;
    header.retry_token_length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
    header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
  }
  QuicFrames frames;
  QuicPaddingFrame padding;
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    frames.push_back(QuicFrame(&crypto_frame_));
  } else {
    frames.push_back(QuicFrame(frame1_));
  }
  frames.push_back(QuicFrame(padding));
  std::unique_ptr<QuicPacket> packet(ConstructPacket(header, frames));
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length =
      peer_framer_.EncryptPayload(ENCRYPTION_INITIAL, QuicPacketNumber(12),
                                  *packet, buffer, kMaxOutgoingPacketSize);
  EXPECT_EQ(kMaxOutgoingPacketSize,
            encrypted_length +
                (connection_.version().KnowsWhichDecrypterToUse() ? 0 : 4));
  framer_.set_version(version());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  }
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, clock_.ApproximateNow(),
                         false));
  // Growth is capped by the writer's limit.
  EXPECT_EQ(lower_max_packet_size, connection_.max_packet_length());
}
// Verifies SetMaxPacketLength is clamped to the writer's advertised limit on
// an existing connection.
TEST_P(QuicConnectionTest, LimitMaxPacketSizeByWriter) {
  const QuicByteCount lower_max_packet_size = 1240;
  writer_->set_max_packet_size(lower_max_packet_size);
  static_assert(lower_max_packet_size < kDefaultMaxPacketSize,
                "Default maximum packet size is too low");
  connection_.SetMaxPacketLength(kDefaultMaxPacketSize);
  EXPECT_EQ(lower_max_packet_size, connection_.max_packet_length());
}
// Verifies a brand-new connection also respects the writer's packet-size
// limit from construction.
TEST_P(QuicConnectionTest, LimitMaxPacketSizeByWriterForNewConnection) {
  const QuicConnectionId connection_id = TestConnectionId(17);
  const QuicByteCount lower_max_packet_size = 1240;
  writer_->set_max_packet_size(lower_max_packet_size);
  TestConnection connection(connection_id, kSelfAddress, kPeerAddress,
                            helper_.get(), alarm_factory_.get(), writer_.get(),
                            Perspective::IS_CLIENT, version(),
                            connection_id_generator_);
  EXPECT_EQ(Perspective::IS_CLIENT, connection.perspective());
  EXPECT_EQ(lower_max_packet_size, connection.max_packet_length());
}
// Verifies that in-order packet receipt keeps the ack frame as a single
// contiguous interval with the largest-acked tracking each new packet.
TEST_P(QuicConnectionTest, PacketsInOrder) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(1);
  EXPECT_EQ(QuicPacketNumber(1u), LargestAcked(connection_.ack_frame()));
  EXPECT_EQ(1u, connection_.ack_frame().packets.NumIntervals());
  ProcessPacket(2);
  EXPECT_EQ(QuicPacketNumber(2u), LargestAcked(connection_.ack_frame()));
  EXPECT_EQ(1u, connection_.ack_frame().packets.NumIntervals());
  ProcessPacket(3);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_EQ(1u, connection_.ack_frame().packets.NumIntervals());
}
// Verifies out-of-order receipt (3, 2, 1): packets below the largest are
// reported missing until they arrive, and largest-acked stays at 3.
TEST_P(QuicConnectionTest, PacketsOutOfOrder) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(3);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_TRUE(IsMissing(2));
  EXPECT_TRUE(IsMissing(1));
  ProcessPacket(2);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_FALSE(IsMissing(2));
  EXPECT_TRUE(IsMissing(1));
  ProcessPacket(1);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_FALSE(IsMissing(2));
  EXPECT_FALSE(IsMissing(1));
}
// Verifies that reprocessing an already-received packet number (3) changes
// nothing: largest-acked and the missing set are unaffected.
TEST_P(QuicConnectionTest, DuplicatePacket) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(3);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_TRUE(IsMissing(2));
  EXPECT_TRUE(IsMissing(1));
  // Send the same packet number again; state must be unchanged.
  ProcessDataPacket(3);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_TRUE(IsMissing(2));
  EXPECT_TRUE(IsMissing(1));
}
// Verifies missing-packet tracking across interleaved receipt and an
// incoming ack: after acking our packet 1 and sending our own ack, packet 4
// is still reported missing while 1 has been dropped from tracking.
// (Skipped when multiple packet number spaces are in use.)
TEST_P(QuicConnectionTest, PacketsOutOfOrderWithAdditionsAndLeastAwaiting) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(3);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_TRUE(IsMissing(2));
  EXPECT_TRUE(IsMissing(1));
  ProcessPacket(2);
  EXPECT_EQ(QuicPacketNumber(3u), LargestAcked(connection_.ack_frame()));
  EXPECT_TRUE(IsMissing(1));
  ProcessPacket(5);
  EXPECT_EQ(QuicPacketNumber(5u), LargestAcked(connection_.ack_frame()));
  EXPECT_TRUE(IsMissing(1));
  EXPECT_TRUE(IsMissing(4));
  // Peer acks our packet 1 in its packet 6; then we send an ack ourselves.
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessAckPacket(6, &frame);
  SendAckPacketToPeer();
  EXPECT_TRUE(IsMissing(4));
}
// Verifies that stream data arriving at ENCRYPTION_INITIAL (i.e.
// unencrypted) closes the connection with QUIC_UNENCRYPTED_STREAM_DATA and
// trips a peer-bug check. Only runs for the default, non-IETF configuration.
TEST_P(QuicConnectionTest, RejectUnencryptedStreamData) {
  if (!IsDefaultTestConfiguration() ||
      VersionHasIetfQuicFrames(version().transport_version)) {
    return;
  }
  // Process data packet with invalid stream id.
  frame1_.stream_id = 3;
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_QUIC_PEER_BUG(ProcessDataPacketAtLevel(1, false, ENCRYPTION_INITIAL),
                       "");
  TestConnectionCloseQuicErrorCode(QUIC_UNENCRYPTED_STREAM_DATA);
}
// Verifies that each out-of-order (gap-filling) packet triggers an immediate
// ack write, while a packet that extends the run in order (4 after 3,2,1)
// does not add another write.
TEST_P(QuicConnectionTest, OutOfOrderReceiptCausesAckSend) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(3);
  // First packet: no ack written yet.
  EXPECT_EQ(0u, writer_->packets_write_attempts());
  ProcessPacket(2);
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  ProcessPacket(1);
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  ProcessPacket(4);
  // In-order packet 4 does not force an immediate ack.
  EXPECT_EQ(2u, writer_->packets_write_attempts());
}
// Verifies that receiving acks out of order (ack-of-2 before ack-of-1) does
// not cause us to send anything: the write count stays at the two data
// packets we originally sent.
TEST_P(QuicConnectionTest, OutOfOrderAckReceiptCausesNoAck) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);
  SendStreamDataToPeer(1, "bar", 3, NO_FIN, nullptr);
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  QuicAckFrame ack1 = InitAckFrame(1);
  QuicAckFrame ack2 = InitAckFrame(2);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    EXPECT_CALL(visitor_, OnOneRttPacketAcknowledged()).Times(1);
  }
  // The newer ack (covering packet 2) arrives first.
  ProcessAckPacket(2, &ack2);
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    // The stale ack must not re-trigger the 1-RTT-acknowledged callback.
    EXPECT_CALL(visitor_, OnOneRttPacketAcknowledged()).Times(0);
  }
  ProcessAckPacket(1, &ack1);
  EXPECT_EQ(2u, writer_->packets_write_attempts());
}
// Exercises ack processing around a loss and retransmission: an ack that
// skips the original packet marks it lost and triggers a retransmission of
// the same size; subsequent duplicate acks do not bundle an ack of our own
// into outgoing stream packets.
TEST_P(QuicConnectionTest, AckReceiptCausesAckSend) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicPacketNumber original, second;
  QuicByteCount packet_size =
      SendStreamDataToPeer(3, "foo", 0, NO_FIN, &original);
  SendStreamDataToPeer(3, "bar", 3, NO_FIN, &second);
  // Ack only the second packet; declare the first lost.
  QuicAckFrame frame = InitAckFrame({{second, second + 1}});
  LostPacketVector lost_packets;
  lost_packets.push_back(LostPacket(original, kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicPacketNumber retransmission;
  // The retransmission carries the same payload size as the original.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, packet_size, _))
      .WillOnce(SaveArg<2>(&retransmission));
  ProcessAckPacket(&frame);
  // Ack the retransmission (original still outstanding in the frame).
  QuicAckFrame frame2 = ConstructAckFrame(retransmission, original);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  ProcessAckPacket(&frame2);
  // New stream data goes out as a lone stream frame (plus padding).
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, HAS_RETRANSMITTABLE_DATA));
  connection_.SendStreamDataWithString(3, "foo", 6, NO_FIN);
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .Times(AnyNumber());
  ProcessAckPacket(&frame2);
  // No ack of our own gets bundled with the next stream packet.
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, HAS_RETRANSMITTABLE_DATA));
  connection_.SendStreamDataWithString(3, "foofoofoo", 9, NO_FIN);
  EXPECT_EQ(1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_TRUE(writer_->ack_frames().empty());
  // Duplicate acks (now also covering the original) are processed quietly.
  AckPacket(original, &frame2);
  ProcessAckPacket(&frame2);
  ProcessAckPacket(&frame2);
}
// Verifies that an ACK_FREQUENCY frame with packet_tolerance=3 makes the
// connection ack every third packet: 38 data packets after the frame yield
// 13 total sends.
TEST_P(QuicConnectionTest, AckFrequencyUpdatedFromAckFrequencyFrame) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  connection_.set_can_receive_ack_frequency_frame();
  // 38 incoming packets at tolerance 3 produce 13 outgoing packets.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(13);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicAckFrequencyFrame ack_frequency_frame;
  ack_frequency_frame.packet_tolerance = 3;
  ProcessFramePacketAtLevel(1, QuicFrame(&ack_frequency_frame),
                            ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(38);
  for (size_t i = 2; i <= 39; ++i) {
    ProcessDataPacket(i);
  }
}
// Verifies ack decimation: after establishing a 40ms min RTT and lowering
// the decimation threshold to 10 received packets, 30 incoming data packets
// produce only 7 ack sends (6 during the first 29, plus 1 for packet 30).
TEST_P(QuicConnectionTest, AckDecimationReducesAcks) {
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame()).Times(AnyNumber());
  // Start decimating after 10 packets instead of the default.
  connection_.set_min_received_before_ack_decimation(10);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(30);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(6);
  for (size_t i = 1; i <= 29; ++i) {
    ProcessDataPacket(i);
  }
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessDataPacket(30);
}
// Verifies that the connection periodically asks the visitor to bundle a
// retransmittable frame with an ack (OnAckNeedsRetransmittableFrame), and
// that the bundled frame (WINDOW_UPDATE, then PING) actually goes out with
// the ack. Also checks the callback is suppressed while retransmittable
// data (the "bar" stream data) is already in flight.
TEST_P(QuicConnectionTest, AckNeedsRetransmittableFrames) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(99);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(19);
  for (size_t i = 1; i <= 39; ++i) {
    ProcessDataPacket(i);
  }
  // On the 40th packet the connection requests a retransmittable frame;
  // the visitor responds by queuing a WINDOW_UPDATE.
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame())
      .WillOnce(Invoke([this]() {
        connection_.SendControlFrame(QuicFrame(QuicWindowUpdateFrame(1, 0, 0)));
      }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_EQ(0u, writer_->window_update_frames().size());
  ProcessDataPacket(40);
  EXPECT_EQ(1u, writer_->window_update_frames().size());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(9);
  for (size_t i = 41; i <= 59; ++i) {
    ProcessDataPacket(i);
  }
  // Put retransmittable data in flight; while it is outstanding, acks do
  // not need an extra retransmittable frame.
  SendStreamDataToPeer(
      QuicUtils::GetFirstBidirectionalStreamId(
          connection_.version().transport_version, Perspective::IS_CLIENT),
      "bar", 0, NO_FIN, nullptr);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(19);
  for (size_t i = 60; i <= 98; ++i) {
    ProcessDataPacket(i);
    EXPECT_EQ(0u, writer_->window_update_frames().size());
  }
  // Next request: the visitor bundles a PING instead.
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame())
      .WillOnce(Invoke([this]() {
        connection_.SendControlFrame(QuicFrame(QuicPingFrame(1)));
      }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_EQ(0u, writer_->ping_frames().size());
  ProcessDataPacket(99);
  EXPECT_EQ(0u, writer_->window_update_frames().size());
  EXPECT_EQ(1u, writer_->ping_frames().size());
}
// Verifies that with the kEACK connection option, a PTO firing causes the
// next ack to request a bundled retransmittable frame
// (OnAckNeedsRetransmittableFrame), and the queued WINDOW_UPDATE is sent.
TEST_P(QuicConnectionTest, AckNeedsRetransmittableFramesAfterPto) {
  // Enable the EACK behavior via connection options.
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kEACK);
  config.SetConnectionOptionsToSend(connection_options);
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(10);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(4);
  // Receive 9 packets (skipping one ack interval's worth of acks).
  for (size_t i = 1; i <= 9; ++i) {
    ProcessDataPacket(i);
  }
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  SendPing();
  // Fire the retransmission (PTO) alarm.
  QuicTime retransmission_time =
      connection_.GetRetransmissionAlarm()->deadline();
  clock_.AdvanceTime(retransmission_time - clock_.Now());
  connection_.GetRetransmissionAlarm()->Fire();
  ASSERT_LT(0u, manager_->GetConsecutivePtoCount());
  // After the PTO, the next received packet triggers the request for a
  // bundled retransmittable frame.
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame())
      .WillOnce(Invoke([this]() {
        connection_.SendControlFrame(QuicFrame(QuicWindowUpdateFrame(1, 0, 0)));
      }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessDataPacket(11);
  EXPECT_EQ(1u, writer_->window_update_frames().size());
}
// Verifies that exceeding the maximum number of tracked (unacked) sent
// packets closes the connection with QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS.
TEST_P(QuicConnectionTest, TooManySentPackets) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicPacketCount max_tracked_packets = 50;
  QuicConnectionPeer::SetMaxTrackedPackets(&connection_, max_tracked_packets);
  // Send more packets than the tracker allows.
  const int num_packets = max_tracked_packets + 5;
  for (int i = 0; i < num_packets; ++i) {
    SendStreamDataToPeer(1, "foo", 3 * i, NO_FIN, nullptr);
  }
  // Receiving any packet while over the limit triggers the close.
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  ProcessFramePacket(QuicFrame(QuicPingFrame()));
  TestConnectionCloseQuicErrorCode(QUIC_TOO_MANY_OUTSTANDING_SENT_PACKETS);
}
// Verifies that an ack whose largest-observed is lower than a previously
// processed ack (frame1 after frame2) is tolerated rather than closing
// the connection.
TEST_P(QuicConnectionTest, LargestObservedLower) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);
  SendStreamDataToPeer(1, "bar", 3, NO_FIN, nullptr);
  SendStreamDataToPeer(1, "eep", 6, NO_FIN, nullptr);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Process an ack for packet 2 first, then an out-of-order ack for
  // packet 1 only.
  QuicAckFrame frame1 = InitAckFrame(1);
  QuicAckFrame frame2 = InitAckFrame(2);
  ProcessAckPacket(&frame2);
  EXPECT_CALL(visitor_, OnCanWrite()).Times(AnyNumber());
  ProcessAckPacket(&frame1);
}
// Verifies that acking a packet that was never sent closes the connection
// with QUIC_INVALID_ACK_DATA and does not call OnCanWrite.
TEST_P(QuicConnectionTest, AckUnsentData) {
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  // Ack packet 1 even though nothing has been sent.
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(visitor_, OnCanWrite()).Times(0);
  ProcessAckPacket(&frame);
  TestConnectionCloseQuicErrorCode(QUIC_INVALID_ACK_DATA);
}
// End-to-end smoke test of basic send/ack bookkeeping: packet numbers
// advance as data and ack packets are sent, incoming acks generate
// congestion events, and stats.first_decrypted_packet records the first
// decrypted incoming packet and never changes afterwards.
TEST_P(QuicConnectionTest, BasicSending) {
  // Multiple packet number spaces use different numbering; skip.
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  const QuicConnectionStats& stats = connection_.GetStats();
  EXPECT_FALSE(stats.first_decrypted_packet.IsInitialized());
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacket(1);
  EXPECT_EQ(QuicPacketNumber(1), stats.first_decrypted_packet);
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 2);
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(1u), last_packet);
  SendAckPacketToPeer();
  SendAckPacketToPeer();
  SendStreamDataToPeer(1, "bar", 3, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(4u), last_packet);
  SendAckPacketToPeer();
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame = InitAckFrame(3);
  ProcessAckPacket(&frame);
  SendAckPacketToPeer();
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame2 = InitAckFrame(6);
  ProcessAckPacket(&frame2);
  // The next outgoing packet after the ack of 6 is number 6.
  EXPECT_EQ(QuicPacketNumber(6u), writer_->header().packet_number);
  SendAckPacketToPeer();
  SendStreamDataToPeer(1, "eep", 6, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(8u), last_packet);
  SendAckPacketToPeer();
  // first_decrypted_packet is sticky — still the very first packet.
  EXPECT_EQ(QuicPacketNumber(1), stats.first_decrypted_packet);
}
// Verifies the send time passed to OnPacketSent is captured BEFORE the
// write happens: even when the writer pauses for 5 seconds, the recorded
// time equals clock_.Now() from before the write, not after.
TEST_P(QuicConnectionTest, RecordSentTimeBeforePacketSent) {
  QuicTime actual_recorded_send_time = QuicTime::Zero();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<0>(&actual_recorded_send_time));
  QuicTime expected_recorded_send_time = clock_.Now();
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_EQ(expected_recorded_send_time, actual_recorded_send_time)
      << "Expected time = " << expected_recorded_send_time.ToDebuggingValue()
      << ". Actual time = " << actual_recorded_send_time.ToDebuggingValue();
  // Second send with an artificial 5s pause inside the write path; the
  // recorded send time must still be the pre-write time.
  actual_recorded_send_time = QuicTime::Zero();
  const QuicTime::Delta write_pause_time_delta =
      QuicTime::Delta::FromMilliseconds(5000);
  SetWritePauseTimeDelta(write_pause_time_delta);
  expected_recorded_send_time = clock_.Now();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<0>(&actual_recorded_send_time));
  connection_.SendStreamDataWithString(2, "baz", 0, NO_FIN);
  EXPECT_EQ(expected_recorded_send_time, actual_recorded_send_time)
      << "Expected time = " << expected_recorded_send_time.ToDebuggingValue()
      << ". Actual time = " << actual_recorded_send_time.ToDebuggingValue();
}
// Verifies retransmission stats when both stream frames in a coalesced
// packet are retransmissions (PTO + LOSS): the packet counts once as
// retransmitted and bytes_retransmitted covers both 10-byte payloads.
TEST_P(QuicConnectionTest, ConnectionStatsRetransmission_WithRetransmissions) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  {
    // Flusher coalesces both writes into a single packet.
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SaveAndSendStreamData(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()),
        "helloworld", 0, NO_FIN, PTO_RETRANSMISSION);
    connection_.SaveAndSendStreamData(
        GetNthClientInitiatedStreamId(2, connection_.transport_version()),
        "helloworld", 0, NO_FIN, LOSS_RETRANSMISSION);
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  }
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  EXPECT_EQ(2u, writer_->frame_count());
  for (auto& frame : writer_->stream_frames()) {
    EXPECT_EQ(frame->data_length, 10u);
  }
  // One retransmitted packet, at least 20 retransmitted bytes (2 x 10).
  ASSERT_EQ(connection_.GetStats().packets_retransmitted, 1u);
  ASSERT_GE(connection_.GetStats().bytes_retransmitted, 20u);
}
// Verifies retransmission stats when a coalesced packet mixes a
// retransmitted stream frame (PTO) with fresh data: the packet counts as
// retransmitted, but only the retransmitted frame's 10 bytes count toward
// bytes_retransmitted.
TEST_P(QuicConnectionTest, ConnectionStatsRetransmission_WithMixedFrames) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  {
    // Flusher coalesces both writes into a single packet.
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SaveAndSendStreamData(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()),
        "helloworld", 0, NO_FIN, PTO_RETRANSMISSION);
    connection_.SaveAndSendStreamData(
        GetNthClientInitiatedStreamId(2, connection_.transport_version()),
        "helloworld", 0, NO_FIN, NOT_RETRANSMISSION);
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  }
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  EXPECT_EQ(2u, writer_->frame_count());
  for (auto& frame : writer_->stream_frames()) {
    EXPECT_EQ(frame->data_length, 10u);
  }
  // Only the PTO frame's bytes are retransmitted bytes.
  ASSERT_EQ(connection_.GetStats().packets_retransmitted, 1u);
  ASSERT_GE(connection_.GetStats().bytes_retransmitted, 10u);
}
// Verifies that a coalesced packet containing only fresh (never
// retransmitted) stream data leaves both retransmission counters at zero.
TEST_P(QuicConnectionTest, ConnectionStatsRetransmission_NoRetransmission) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  {
    // Flusher coalesces both writes into a single packet.
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SaveAndSendStreamData(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()),
        "helloworld", 0, NO_FIN, NOT_RETRANSMISSION);
    connection_.SaveAndSendStreamData(
        GetNthClientInitiatedStreamId(2, connection_.transport_version()),
        "helloworld", 0, NO_FIN, NOT_RETRANSMISSION);
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  }
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  EXPECT_EQ(2u, writer_->frame_count());
  ASSERT_EQ(connection_.GetStats().packets_retransmitted, 0u);
  ASSERT_EQ(connection_.GetStats().bytes_retransmitted, 0u);
}
// Verifies that two stream writes made under one ScopedPacketFlusher are
// packed into a single packet containing both stream frames (and no
// ack/stop-waiting frames).
TEST_P(QuicConnectionTest, FramePacking) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SendStreamData3();
    connection_.SendStreamData5();
    // Exactly one packet leaves when the flusher goes out of scope.
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  }
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  EXPECT_EQ(2u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_TRUE(writer_->ack_frames().empty());
  // Both stream frames, in write order.
  ASSERT_EQ(2u, writer_->stream_frames().size());
  EXPECT_EQ(GetNthClientInitiatedStreamId(1, connection_.transport_version()),
            writer_->stream_frames()[0]->stream_id);
  EXPECT_EQ(GetNthClientInitiatedStreamId(2, connection_.transport_version()),
            writer_->stream_frames()[1]->stream_id);
}
// Verifies that when non-crypto data is followed by crypto data at a
// different encryption level inside one flusher scope, the data goes out
// in two packets (levels cannot be mixed), with the crypto packet padded.
TEST_P(QuicConnectionTest, FramePackingNonCryptoThenCrypto) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SendStreamData3();
    // Switch to INITIAL for the crypto data.
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    if (!connection_.version().KnowsWhichDecrypterToUse()) {
      writer_->framer()->framer()->SetAlternativeDecrypter(
          ENCRYPTION_INITIAL,
          std::make_unique<NullDecrypter>(Perspective::IS_SERVER), false);
    }
    connection_.SendCryptoStreamData();
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  }
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  EXPECT_LE(2u, writer_->frame_count());
  // Initial/crypto packets are padded to minimum size.
  ASSERT_LE(1u, writer_->padding_frames().size());
  // Pre-IETF versions carry crypto data on the crypto stream; IETF
  // versions use CRYPTO frames.
  if (!QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    ASSERT_EQ(1u, writer_->stream_frames().size());
    EXPECT_EQ(QuicUtils::GetCryptoStreamId(connection_.transport_version()),
              writer_->stream_frames()[0]->stream_id);
  } else {
    EXPECT_LE(1u, writer_->crypto_frames().size());
  }
}
// Verifies the reverse ordering: crypto data then non-crypto data in one
// flusher scope produces two sends, with the last packet seen by the
// writer holding the single non-crypto stream frame.
TEST_P(QuicConnectionTest, FramePackingCryptoThenNonCrypto) {
  {
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SendCryptoStreamData();
    connection_.SendStreamData3();
  }
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  // The writer records the last packet: one stream frame plus any padding.
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(GetNthClientInitiatedStreamId(1, connection_.transport_version()),
            writer_->stream_frames()[0]->stream_id);
}
// Verifies that when receiving a packet triggers OnCanWrite, the ack and
// the stream data queued during the callback are all packed into a single
// outgoing packet (ack frame + two stream frames).
TEST_P(QuicConnectionTest, FramePackingAckResponse) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Process a handshake packet first to establish ack state.
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  }
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  QuicPacketNumber last_packet;
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    connection_.SendCryptoDataWithString("foo", 0);
  } else {
    SendStreamDataToPeer(
        QuicUtils::GetCryptoStreamId(connection_.transport_version()), "foo", 0,
        NO_FIN, &last_packet);
  }
  EXPECT_FALSE(writer_->ack_frames().empty());
  // During OnCanWrite, queue two stream writes; they should be bundled
  // with the ack into one packet.
  EXPECT_CALL(visitor_, OnCanWrite())
      .WillOnce(DoAll(IgnoreResult(InvokeWithoutArgs(
                          &connection_, &TestConnection::SendStreamData3)),
                      IgnoreResult(InvokeWithoutArgs(
                          &connection_, &TestConnection::SendStreamData5))));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessDataPacket(2);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  // One packet: ack + two stream frames, in write order.
  EXPECT_EQ(3u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(writer_->ack_frames().empty());
  ASSERT_EQ(2u, writer_->stream_frames().size());
  EXPECT_EQ(GetNthClientInitiatedStreamId(1, connection_.transport_version()),
            writer_->stream_frames()[0]->stream_id);
  EXPECT_EQ(GetNthClientInitiatedStreamId(2, connection_.transport_version()),
            writer_->stream_frames()[1]->stream_id);
}
// Verifies that SaveAndSendStreamData writes a single unpadded packet
// containing one stream frame whose payload matches the input exactly.
TEST_P(QuicConnectionTest, FramePackingSendv) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      connection_.transport_version(), Perspective::IS_CLIENT);
  connection_.SaveAndSendStreamData(stream_id, "ABCDEF", 0, NO_FIN);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  EXPECT_EQ(1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(0u, writer_->padding_frames().size());
  // Payload survives intact.
  QuicStreamFrame* frame = writer_->stream_frames()[0].get();
  EXPECT_EQ(stream_id, frame->stream_id);
  EXPECT_EQ("ABCDEF",
            absl::string_view(frame->data_buffer, frame->data_length));
}
// Same as FramePackingSendv, but the writer is blocked at send time: the
// packet is queued, then flushed intact on OnCanWrite once the writer is
// writable again.
TEST_P(QuicConnectionTest, FramePackingSendvQueued) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  BlockOnNextWrite();
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      connection_.transport_version(), Perspective::IS_CLIENT);
  connection_.SaveAndSendStreamData(stream_id, "ABCDEF", 0, NO_FIN);
  // Blocked write: the packet sits in the queue.
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  EXPECT_TRUE(connection_.HasQueuedData());
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_EQ(1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(0u, writer_->padding_frames().size());
  // Payload survives queuing intact.
  QuicStreamFrame* frame = writer_->stream_frames()[0].get();
  EXPECT_EQ(stream_id, frame->stream_id);
  EXPECT_EQ("ABCDEF",
            absl::string_view(frame->data_buffer, frame->data_length));
}
// Verifies that a zero-length write with FIN still produces a packet with
// a FIN-bearing stream frame (plus one padding frame when the version
// uses header protection).
TEST_P(QuicConnectionTest, SendingZeroBytes) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  QuicStreamId stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      connection_.transport_version(), Perspective::IS_CLIENT);
  // Empty payload, FIN set.
  connection_.SaveAndSendStreamData(stream_id, {}, 0, FIN);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  // Header protection requires a minimum packet size, hence padding.
  size_t extra_padding_frames = 0;
  if (GetParam().version.HasHeaderProtection()) {
    extra_padding_frames = 1;
  }
  EXPECT_EQ(1u + extra_padding_frames, writer_->frame_count());
  EXPECT_EQ(extra_padding_frames, writer_->padding_frames().size());
  ASSERT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(stream_id, writer_->stream_frames()[0]->stream_id);
  EXPECT_TRUE(writer_->stream_frames()[0]->fin);
}
// Verifies that a large (multi-packet) stream write flushes a pending ack
// along the way: after 10000 bytes go out in 9 packets, no ack remains
// pending and the final packet carries only the FIN stream frame.
TEST_P(QuicConnectionTest, LargeSendWithPendingAck) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Receiving a PING leaves an ack pending.
  ProcessFramePacket(QuicFrame(QuicPingFrame()));
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(9);
  const std::string data(10000, '?');
  QuicConsumedData consumed = connection_.SaveAndSendStreamData(
      GetNthClientInitiatedStreamId(0, connection_.transport_version()), data,
      0, FIN);
  EXPECT_EQ(data.length(), consumed.bytes_consumed);
  EXPECT_TRUE(consumed.fin_consumed);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.HasQueuedData());
  // Last packet: single stream frame with FIN.
  EXPECT_EQ(1u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(GetNthClientInitiatedStreamId(0, connection_.transport_version()),
            writer_->stream_frames()[0]->stream_id);
  EXPECT_TRUE(writer_->stream_frames()[0]->fin);
  // The pending ack was bundled/flushed during the send.
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Verifies that OnCanWrite drains the visitor's pending writes: two stream
// writes issued inside the callback are packed into one packet with both
// stream frames in order.
TEST_P(QuicConnectionTest, OnCanWrite) {
  EXPECT_CALL(visitor_, OnCanWrite())
      .WillOnce(DoAll(IgnoreResult(InvokeWithoutArgs(
                          &connection_, &TestConnection::SendStreamData3)),
                      IgnoreResult(InvokeWithoutArgs(
                          &connection_, &TestConnection::SendStreamData5))));
  {
    // Visitor reports more data once, then nothing further.
    InSequence seq;
    EXPECT_CALL(visitor_, WillingAndAbleToWrite()).WillOnce(Return(true));
    EXPECT_CALL(visitor_, WillingAndAbleToWrite())
        .WillRepeatedly(Return(false));
  }
  EXPECT_CALL(*send_algorithm_, CanSend(_))
      .WillRepeatedly(testing::Return(true));
  connection_.OnCanWrite();
  EXPECT_EQ(2u, writer_->frame_count());
  EXPECT_EQ(2u, writer_->stream_frames().size());
  EXPECT_EQ(GetNthClientInitiatedStreamId(1, connection_.transport_version()),
            writer_->stream_frames()[0]->stream_id);
  EXPECT_EQ(GetNthClientInitiatedStreamId(2, connection_.transport_version()),
            writer_->stream_frames()[1]->stream_id);
}
// Verifies that a nack (ack of 1 and 3 skipping 2) which the loss
// algorithm reports as a loss of packet 2 causes exactly one
// retransmission.
TEST_P(QuicConnectionTest, RetransmitOnNack) {
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(3, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(3, "foos", 3, NO_FIN, &last_packet);
  SendStreamDataToPeer(3, "fooos", 7, NO_FIN, &last_packet);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Ack packet 1 cleanly first.
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame ack_one = InitAckFrame(1);
  ProcessAckPacket(&ack_one);
  // Ack up to 3 with 2 missing; loss detection declares 2 lost.
  QuicAckFrame nack_two = ConstructAckFrame(3, 2);
  LostPacketVector lost_packets;
  lost_packets.push_back(
      LostPacket(QuicPacketNumber(2), kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Exactly one retransmission goes out.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_FALSE(QuicPacketCreatorPeer::SendVersionInPacket(creator_));
  ProcessAckPacket(&nack_two);
}
// Verifies that resetting a stream (with an error) while its data packet
// is queued behind a blocked writer drops the queued stream data: only
// the RST_STREAM packet is sent once the writer unblocks.
TEST_P(QuicConnectionTest, DoNotSendQueuedPacketForResetStream) {
  BlockOnNextWrite();
  QuicStreamId stream_id = 2;
  connection_.SendStreamDataWithString(stream_id, "foo", 0, NO_FIN);
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 3);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  writer_->SetWritable();
  connection_.OnCanWrite();
  // Only the RST_STREAM frame (plus padding) went out.
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->rst_stream_frames().size());
}
// Verifies the counterpart case: a reset with QUIC_STREAM_NO_ERROR does
// NOT discard the queued stream data — both the data and the RST_STREAM
// are sent once the writer unblocks.
TEST_P(QuicConnectionTest, SendQueuedPacketForQuicRstStreamNoError) {
  BlockOnNextWrite();
  QuicStreamId stream_id = 2;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(stream_id, "foo", 0, NO_FIN);
  SendRstStream(stream_id, QUIC_STREAM_NO_ERROR, 3);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  writer_->SetWritable();
  connection_.OnCanWrite();
  // The last packet seen by the writer carries the RST_STREAM.
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->rst_stream_frames().size());
}
// Verifies that after a stream is reset with an error, nacked data for
// that stream is NOT retransmitted (OnPacketSent expected zero times on
// the nack).
TEST_P(QuicConnectionTest, DoNotRetransmitForResetStreamOnNack) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "foos", 3, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "fooos", 7, NO_FIN, &last_packet);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 12);
  // Ack the RST packet while nacking the last data packet.
  QuicAckFrame nack_two = ConstructAckFrame(last_packet, last_packet - 1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Reset stream data must not be retransmitted.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessAckPacket(&nack_two);
}
// Verifies that after a graceful reset (QUIC_STREAM_NO_ERROR), nacked and
// lost stream data IS still retransmitted.
TEST_P(QuicConnectionTest, RetransmitForQuicRstStreamNoErrorOnNack) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "foos", 3, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "fooos", 7, NO_FIN, &last_packet);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  SendRstStream(stream_id, QUIC_STREAM_NO_ERROR, 12);
  // Ack the RST packet while nacking the last data packet; loss
  // detection declares the nacked packet lost.
  QuicAckFrame nack_two = ConstructAckFrame(last_packet, last_packet - 1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  LostPacketVector lost_packets;
  lost_packets.push_back(LostPacket(last_packet - 1, kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Unlike the error-reset case, the data is retransmitted.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  ProcessAckPacket(&nack_two);
}
// Verifies that when the retransmission alarm fires after an error reset,
// only the RST_STREAM is retransmitted, not the reset stream's data.
TEST_P(QuicConnectionTest, DoNotRetransmitForResetStreamOnRTO) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_packet);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 3);
  // Fire the retransmission alarm.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  clock_.AdvanceTime(DefaultRetransmissionTime());
  connection_.GetRetransmissionAlarm()->Fire();
  // Retransmitted packet carries only the RST_STREAM (plus padding).
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->rst_stream_frames().size());
  EXPECT_EQ(stream_id, writer_->rst_stream_frames().front().stream_id);
}
// Verifies that after an error reset, an ack that nacks the stream's data
// packet does not trigger a retransmission, yet the RST packet remains in
// flight so the retransmission alarm stays set.
TEST_P(QuicConnectionTest, CancelRetransmissionAlarmAfterResetStream) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_data_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_data_packet);
  // The RST_STREAM goes out as the next packet number.
  const QuicPacketNumber rst_packet = last_data_packet + 1;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, rst_packet, _, _)).Times(1);
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 3);
  // Ack the RST packet while nacking the data packet.
  QuicAckFrame nack_stream_data =
      ConstructAckFrame(rst_packet, last_data_packet);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // No retransmission of the reset stream's data.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessAckPacket(&nack_stream_data);
  EXPECT_GT(manager_->GetBytesInFlight(), 0u);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
}
// Verifies that after a graceful reset (QUIC_STREAM_NO_ERROR), the PTO
// alarm firing still retransmits (at least one packet goes out).
TEST_P(QuicConnectionTest, RetransmitForQuicRstStreamNoErrorOnPTO) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_packet);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  SendRstStream(stream_id, QUIC_STREAM_NO_ERROR, 3);
  // Fire the retransmission (PTO) alarm.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  clock_.AdvanceTime(DefaultRetransmissionTime());
  connection_.GetRetransmissionAlarm()->Fire();
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
}
// Verifies that a pending (write-blocked) retransmission of stream data is
// dropped when the stream is reset with an error: after unblocking, only
// the RST_STREAM is sent.
TEST_P(QuicConnectionTest, DoNotSendPendingRetransmissionForResetStream) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "foos", 3, NO_FIN, &last_packet);
  // Third write is blocked and stays pending.
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(stream_id, "fooos", 7, NO_FIN);
  // Nack the second data packet; nothing can be sent while blocked.
  QuicAckFrame ack = ConstructAckFrame(last_packet, last_packet - 1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessAckPacket(&ack);
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 12);
  // After unblocking, only the RST_STREAM packet is written.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  writer_->SetWritable();
  connection_.OnCanWrite();
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->rst_stream_frames().size());
  EXPECT_EQ(stream_id, writer_->rst_stream_frames().front().stream_id);
}
// Verifies that after a graceful reset (QUIC_STREAM_NO_ERROR), pending
// retransmissions of the stream's lost data ARE sent alongside the reset
// once the writer unblocks.
TEST_P(QuicConnectionTest, SendPendingRetransmissionForQuicRstStreamNoError) {
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "foos", 3, NO_FIN, &last_packet);
  // Third write is blocked and stays pending.
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(stream_id, "fooos", 7, NO_FIN);
  // Nack the second data packet; loss detection declares it lost.
  QuicAckFrame ack = ConstructAckFrame(last_packet, last_packet - 1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  LostPacketVector lost_packets;
  lost_packets.push_back(LostPacket(last_packet - 1, kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessAckPacket(&ack);
  SendRstStream(stream_id, QUIC_STREAM_NO_ERROR, 12);
  // Unblock: at least two packets (retransmission + reset) go out.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(2));
  writer_->SetWritable();
  connection_.OnCanWrite();
  connection_.SendControlFrame(QuicFrame(
      new QuicRstStreamFrame(1, stream_id, QUIC_STREAM_NO_ERROR, 14)));
  // Last packet: the explicitly sent RST_STREAM (plus padding).
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->rst_stream_frames().size());
}
// Verifies that a queued retransmission (packet 4, created while the
// writer was blocked) is discarded if the original data is acked before
// the retransmission ever hits the wire.
TEST_P(QuicConnectionTest, RetransmitAckedPacket) {
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(1, "foos", 3, NO_FIN, &last_packet);
  SendStreamDataToPeer(1, "fooos", 7, NO_FIN, &last_packet);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Nack packet 2; the retransmission (packet 4) is created but queued
  // because the writer is blocked.
  QuicAckFrame nack_two = ConstructAckFrame(3, 2);
  BlockOnNextWrite();
  LostPacketVector lost_packets;
  lost_packets.push_back(
      LostPacket(QuicPacketNumber(2), kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(4), _, _))
      .Times(1);
  ProcessAckPacket(&nack_two);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  // Now everything through packet 3 is acked — the queued retransmission
  // no longer carries useful data.
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(false, _, _, _, _, _, _));
  QuicAckFrame ack_all = InitAckFrame(3);
  ProcessAckPacket(&ack_all);
  // Packet 4 must not be sent again after unblocking.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(4), _, _))
      .Times(0);
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_FALSE(QuicConnectionPeer::HasRetransmittableFrames(&connection_, 4));
}
// Verifies that when the first of two packets is nacked and declared lost,
// the retransmission has the same payload size as the original packet.
TEST_P(QuicConnectionTest, RetransmitNackedLargestObserved) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicPacketNumber original, second;
  QuicByteCount packet_size =
      SendStreamDataToPeer(3, "foo", 0, NO_FIN, &original);
  SendStreamDataToPeer(3, "bar", 3, NO_FIN, &second);
  // Ack only the second packet; loss detection declares the first lost.
  QuicAckFrame frame = InitAckFrame({{second, second + 1}});
  LostPacketVector lost_packets;
  lost_packets.push_back(LostPacket(original, kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // The retransmission matches the original packet's size.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, packet_size, _));
  ProcessAckPacket(&frame);
}
// Verifies that when the blocked writer buffers the data itself
// (is_write_blocked_data_buffered), the packet counts as sent and the
// retransmission alarm is set both before and after the writer unblocks.
TEST_P(QuicConnectionTest, WriteBlockedBufferedThenSent) {
  BlockOnNextWrite();
  // Writer keeps the blocked data, so the send is considered complete.
  writer_->set_is_write_blocked_data_buffered(true);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
}
// Verifies queue accounting across a write block: the first send is
// queued (and still reported sent), a second send while unblocked-but-
// queued also queues, and OnCanWrite drains the queue without extra
// OnPacketSent notifications.
TEST_P(QuicConnectionTest, WriteBlockedThenSent) {
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  BlockOnNextWrite();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  // Writer becomes writable, but the existing queue forces the next send
  // to queue as well.
  writer_->SetWritable();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_EQ(2u, connection_.NumQueuedPackets());
  // Draining the queue does not re-notify the send algorithm.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.OnCanWrite();
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
}
// Verifies that a write-blocked, writer-buffered retransmission whose
// original packet gets acked is effectively neutered: after unblocking,
// the retransmission (packet 3) carries no retransmittable frames.
TEST_P(QuicConnectionTest, RetransmitWriteBlockedAckedOriginalThenSent) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  BlockOnNextWrite();
  writer_->set_is_write_blocked_data_buffered(true);
  // Fire the retransmission alarm while blocked; the retransmission is
  // buffered by the writer.
  clock_.AdvanceTime(DefaultRetransmissionTime());
  connection_.GetRetransmissionAlarm()->Fire();
  // The original packet is then acked.
  QuicAckFrame ack = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&ack);
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_FALSE(QuicConnectionPeer::HasRetransmittableFrames(&connection_, 3));
}
// Verifies that the send alarm firing while the writer is blocked does not
// attempt another write (packets_write_attempts stays at 1).
TEST_P(QuicConnectionTest, AlarmsWhenWriteBlocked) {
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_TRUE(writer_->IsWriteBlocked());
  // Fire the send alarm; no new write attempt should happen.
  connection_.GetSendAlarm()->Set(clock_.ApproximateNow());
  connection_.GetSendAlarm()->Fire();
  EXPECT_TRUE(writer_->IsWriteBlocked());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
}
// Verifies that processing an incoming packet while the writer is blocked
// does not arm the send alarm (the connection waits for OnCanWrite
// instead of polling).
TEST_P(QuicConnectionTest, NoSendAlarmAfterProcessPacketWhenWriteBlocked) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_TRUE(writer_->IsWriteBlocked());
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  // Hand-construct and encrypt an incoming data packet.
  const uint64_t received_packet_num = 1;
  const bool has_stop_waiting = false;
  const EncryptionLevel level = ENCRYPTION_FORWARD_SECURE;
  std::unique_ptr<QuicPacket> packet(
      ConstructDataPacket(received_packet_num, has_stop_waiting, level));
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length =
      peer_framer_.EncryptPayload(level, QuicPacketNumber(received_packet_num),
                                  *packet, buffer, kMaxOutgoingPacketSize);
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false));
  // Still blocked; send alarm must remain unset.
  EXPECT_TRUE(writer_->IsWriteBlocked());
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
}
// Verifies that when sends are deferred in response to incoming packets and a
// 10ms deferred-send delay is configured, processing a packet arms the send
// alarm with a deadline beyond the immediate future (checked against a 5ms
// lower bound).
TEST_P(QuicConnectionTest, SendAlarmNonZeroDelay) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Defer sends triggered by packet processing, with a 10ms alarm delay.
  connection_.set_defer_send_in_response_to_packets(true);
  connection_.sent_packet_manager().SetDeferredSendAlarmDelay(
      QuicTime::Delta::FromMilliseconds(10));
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  // Build and deliver an encrypted data packet from the peer.
  const uint64_t received_packet_num = 1;
  const bool has_stop_waiting = false;
  const EncryptionLevel level = ENCRYPTION_FORWARD_SECURE;
  std::unique_ptr<QuicPacket> packet(
      ConstructDataPacket(received_packet_num, has_stop_waiting, level));
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length =
      peer_framer_.EncryptPayload(level, QuicPacketNumber(received_packet_num),
                                  *packet, buffer, kMaxOutgoingPacketSize);
  // The visitor claims it has data to write, so the deferred send alarm
  // should be armed after processing.
  EXPECT_CALL(visitor_, WillingAndAbleToWrite()).WillRepeatedly(Return(true));
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false));
  EXPECT_TRUE(connection_.GetSendAlarm()->IsSet());
  // The deadline reflects the configured 10ms delay (strictly beyond +5ms).
  EXPECT_TRUE(connection_.GetSendAlarm()->deadline() >
              clock_.ApproximateNow() + QuicTime::Delta::FromMilliseconds(5));
}
// Verifies that if the writer becomes blocked and the connection then
// processes an ack (which may trigger a write), the visitor is notified via
// OnWriteBlocked exactly once.
TEST_P(QuicConnectionTest, AddToWriteBlockedListIfWriterBlockedWhenProcessing) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);
  // Block the writer before the ack arrives.
  writer_->SetWriteBlocked();
  QuicAckFrame ack1 = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  // Processing the ack while blocked must report the blockage upward.
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(1);
  ProcessAckPacket(1, &ack1);
}
// Verifies that once the connection is closed, a writer becoming blocked
// (here inside the flusher's scope, after a silent close) does not notify the
// visitor via OnWriteBlocked.
TEST_P(QuicConnectionTest, DoNotAddToWriteBlockedListAfterDisconnect) {
  writer_->SetBatchMode(true);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // No write-blocked notification is allowed after disconnect.
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(0);
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.CloseConnection(QUIC_PEER_GOING_AWAY, "no reason",
                                ConnectionCloseBehavior::SILENT_CLOSE);
    EXPECT_FALSE(connection_.connected());
    // Blocking the writer now must be a no-op for the visitor.
    writer_->SetWriteBlocked();
  }
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PEER_GOING_AWAY));
}
// Verifies that a batch writer blocking on its flush (triggered when the
// ScopedPacketFlusher goes out of scope) notifies the visitor via
// OnWriteBlocked exactly once.
TEST_P(QuicConnectionTest, AddToWriteBlockedListIfBlockedOnFlushPackets) {
  writer_->SetBatchMode(true);
  writer_->BlockOnNextFlush();
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(1);
  {
    // Leaving this scope flushes the (empty) batch, which blocks.
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
  }
}
// Verifies that a single ack that nacks 14 packets (only packet 15 is acked)
// marks all of them lost in one pass, with a single congestion event and a
// single retransmission send.
TEST_P(QuicConnectionTest, NoLimitPacketsPerNack) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  int offset = 0;
  // Send 15 packets of 3 bytes each.
  for (int i = 0; i < 15; ++i) {
    SendStreamDataToPeer(1, "foo", offset, NO_FIN, nullptr);
    offset += 3;
  }
  // Ack only packet 15, implicitly nacking packets 1-14.
  QuicAckFrame nack =
      InitAckFrame({{QuicPacketNumber(15), QuicPacketNumber(16)}});
  // The loss algorithm reports packets 1-14 as lost.
  LostPacketVector lost_packets;
  for (int i = 1; i < 15; ++i) {
    lost_packets.push_back(
        LostPacket(QuicPacketNumber(i), kMaxOutgoingPacketSize));
  }
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // One retransmission is sent as a direct consequence of the losses.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessAckPacket(&nack);
}
// Verifies ack processing across several sent packets: a partial ack (largest
// acked 5, with 3 missing) followed by a full ack of 6 each produce exactly
// one congestion event. Skipped when multiple packet number spaces are in
// use, since this test drives a single space explicitly.
TEST_P(QuicConnectionTest, MultipleAcks) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacket(1);
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 2);
  QuicPacketNumber last_packet;
  // Send six packets interleaved across streams 1, 3 and 5; packet 3 is the
  // ack sent by SendAckPacketToPeer().
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(1u), last_packet);
  SendStreamDataToPeer(3, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(2u), last_packet);
  SendAckPacketToPeer();
  SendStreamDataToPeer(5, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(4u), last_packet);
  SendStreamDataToPeer(1, "foo", 3, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(5u), last_packet);
  SendStreamDataToPeer(3, "foo", 3, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(6u), last_packet);
  // First ack: largest acked 5 with packet 3 missing.
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame1 = ConstructAckFrame(5, 3);
  ProcessAckPacket(&frame1);
  // Second ack: everything up to 6 acked.
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame2 = InitAckFrame(6);
  ProcessAckPacket(&frame2);
}
// Verifies that successive acks (including ones covering the connection's own
// ack packets) each trigger a congestion event and that the final ack with a
// gap (packets 5-6 missing) is processed correctly. Skipped for multiple
// packet number spaces, which this single-space scenario does not model.
TEST_P(QuicConnectionTest, DontLatchUnackedPacket) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacket(1);
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 2);
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);  // Packet 1.
  SendAckPacketToPeer();                               // Packet 2.
  // Ack packet 1, then 2, then 3 — each produces a congestion event.
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame = InitAckFrame(1);
  ProcessAckPacket(&frame);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  frame = InitAckFrame(2);
  ProcessAckPacket(&frame);
  SendAckPacketToPeer();  // Packet 3.
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  frame = InitAckFrame(3);
  ProcessAckPacket(&frame);
  SendStreamDataToPeer(1, "bar", 3, NO_FIN, nullptr);  // Packet 4.
  SendAckPacketToPeer();                               // Packet 5.
  SendStreamDataToPeer(1, "bar", 6, NO_FIN, nullptr);  // Packet 6.
  SendStreamDataToPeer(1, "bar", 9, NO_FIN, nullptr);  // Packet 7.
  // Final ack covers 1-4 and 7, leaving 5-6 unacked.
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  frame = InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(5)},
                        {QuicPacketNumber(7), QuicPacketNumber(8)}});
  ProcessAckPacket(&frame);
}
// Verifies that crypto data queued while the writer was blocked is still sent
// with its original (initial) encryption even after the connection upgrades
// its default encryption level to ZERO_RTT.
TEST_P(QuicConnectionTest, SendHandshakeMessages) {
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  // Block the writer so the crypto packet gets queued, not written.
  BlockOnNextWrite();
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  // Upgrade encryption before the queued packet drains.
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  writer_->SetWritable();
  EXPECT_CALL(visitor_, OnCanWrite());
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  // The drained packet must NOT carry the ZERO_RTT encrypter's tag bytes,
  // i.e. it kept its original encryption level.
  EXPECT_NE(0x02020202u, writer_->final_bytes_of_last_packet());
}
// Verifies that a queued retransmission of initially-encrypted crypto data is
// dropped (not sent) once the handshake completes and unencrypted data is
// neutered: the retransmission alarm is cleared and no packet goes out when
// the writer unblocks.
TEST_P(QuicConnectionTest, DropRetransmitsForInitialPacketAfterForwardSecure) {
  connection_.SendCryptoStreamData();
  // Block the writer so the fired retransmission is queued.
  BlockOnNextWrite();
  clock_.AdvanceTime(DefaultRetransmissionTime());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  // Move to forward-secure encryption and discard unencrypted state.
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  notifier_.NeuterUnencryptedData();
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  // With no retransmittable data left, the alarm is no longer armed.
  EXPECT_EQ(QuicTime::Zero(), connection_.GetRetransmissionAlarm()->deadline());
  // Unblocking must not send the obsolete queued retransmission.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  writer_->SetWritable();
  connection_.OnCanWrite();
}
// Verifies that marking 0-RTT packets for retransmission causes the stream
// data sent at ZERO_RTT to be reported as lost to the notifier, while crypto
// data sent at INITIAL is unaffected.
TEST_P(QuicConnectionTest, RetransmitPacketsWithInitialEncryption) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoDataWithString("foo", 0);
  // Switch to 0-RTT and send stream data at that level.
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  if (!connection_.version().KnowsWhichDecrypterToUse()) {
    // Older versions need the test writer's framer taught how to decrypt.
    writer_->framer()->framer()->SetAlternativeDecrypter(
        ENCRYPTION_ZERO_RTT,
        std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT), false);
  }
  SendStreamDataToPeer(2, "bar", 0, NO_FIN, nullptr);
  EXPECT_FALSE(notifier_.HasLostStreamData());
  // Invalidating 0-RTT packets marks that stream data lost.
  connection_.MarkZeroRttPacketsForRetransmission(0);
  EXPECT_TRUE(notifier_.HasLostStreamData());
}
// Verifies that a packet arriving before its decryption key is installed gets
// buffered, then delivered (together with a newly arriving packet) once the
// key becomes available — OnStreamFrame fires twice after the key change.
// Skipped for multiple packet number spaces.
TEST_P(QuicConnectionTest, BufferNonDecryptablePackets) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  if (!connection_.version().KnowsWhichDecrypterToUse()) {
    writer_->framer()->framer()->SetDecrypter(
        ENCRYPTION_ZERO_RTT, std::make_unique<TaggingDecrypter>());
  }
  // Packet 1 arrives before the 0-RTT decrypter exists: it is buffered
  // (note: no OnStreamFrame expectation here).
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  // Install the key; the buffered packet becomes decryptable.
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  // Processing packet 2 also flushes buffered packet 1: two stream frames.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(2);
  ProcessDataPacketAtLevel(2, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  // Subsequent packets are delivered normally, one frame each.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
}
// Verifies that with max_undecryptable_packets set to 100, all 100 buffered
// packets are processed via the undecryptable-packets alarm once the key is
// installed (100 OnStreamFrame calls), and later packets flow normally.
// Skipped for multiple packet number spaces.
TEST_P(QuicConnectionTest, Buffer100NonDecryptablePacketsThenKeyChange) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // Allow buffering up to 100 undecryptable packets.
  config.set_max_undecryptable_packets(100);
  connection_.SetFromConfig(config);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  // Deliver 100 packets before the key exists; all are buffered.
  for (uint64_t i = 1; i <= 100; ++i) {
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  }
  EXPECT_FALSE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  // Installing the decrypter arms the alarm that drains the buffer.
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  EXPECT_TRUE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  // Draining the buffer delivers all 100 stream frames.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(100);
  if (!connection_.version().KnowsWhichDecrypterToUse()) {
    writer_->framer()->framer()->SetDecrypter(
        ENCRYPTION_ZERO_RTT, std::make_unique<TaggingDecrypter>());
  }
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  // A fresh packet after the key change is delivered immediately.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(102, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
}
// Verifies that the retransmission alarm is armed when a packet is written
// (even through a blocked writer that buffers it) and stays armed after the
// writer unblocks without any additional send.
TEST_P(QuicConnectionTest, SetRTOAfterWritingToSocket) {
  BlockOnNextWrite();
  // The send is attempted once and buffered by the blocked writer.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  writer_->SetWritable();
  // Unblocking should not re-send the packet.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.OnCanWrite();
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
}
// Verifies the basic queue/drain cycle: a send through a blocked writer
// leaves one packet queued, and OnCanWrite drains it after unblocking.
TEST_P(QuicConnectionTest, TestQueued) {
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  // Unblock and flush the queue.
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
}
// Verifies the initial idle timeout: after SetFromConfig the timeout alarm is
// armed at (kInitialIdleTimeoutSecs - 1) seconds; firing it at the deadline
// closes the connection with QUIC_NETWORK_IDLE_TIMEOUT and cancels all other
// alarms.
TEST_P(QuicConnectionTest, InitialTimeout) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  // Not armed until the config is applied.
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  // SetFromConfig arms the idle deadline one second below the nominal value.
  QuicTime default_timeout =
      clock_.ApproximateNow() +
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  EXPECT_EQ(default_timeout, connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  // Advance to the deadline and fire: the connection closes.
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1));
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  // Closing must cancel every outstanding alarm.
  EXPECT_FALSE(connection_.HasPendingAcks());
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Verifies that sending a packet pushes the idle deadline forward: when the
// original deadline elapses the connection stays alive with the new deadline,
// and only after the new deadline fires does it close with
// QUIC_NETWORK_IDLE_TIMEOUT.
TEST_P(QuicConnectionTest, IdleTimeoutAfterFirstSentPacket) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  // Initial idle deadline after config is applied.
  QuicTime initial_ddl =
      clock_.ApproximateNow() +
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  EXPECT_EQ(initial_ddl, connection_.GetTimeoutAlarm()->deadline());
  EXPECT_TRUE(connection_.connected());
  // Send a packet 20ms in; this resets the idle clock.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(20));
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(1u), last_packet);
  QuicTime new_ddl = clock_.ApproximateNow() +
                     QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  // Reaching the ORIGINAL deadline must not close the connection.
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  QuicTime::Delta delay = initial_ddl - clock_.ApproximateNow();
  clock_.AdvanceTime(delay);
  EXPECT_TRUE(connection_.connected());
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  // The alarm has been pushed out to the post-send deadline.
  EXPECT_EQ(new_ddl, connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  // At the new deadline the connection times out for real.
  clock_.AdvanceTime(new_ddl - clock_.ApproximateNow());
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  // Closing cancels all remaining alarms.
  EXPECT_FALSE(connection_.HasPendingAcks());
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Verifies that with two packets sent (the second 20ms after the first), the
// connection still times out at the ORIGINAL idle deadline measured here —
// i.e. firing the alarm at initial_ddl closes the connection with
// QUIC_NETWORK_IDLE_TIMEOUT. NOTE(review): this relies on how sending
// interacts with the idle deadline in this configuration; the observable
// contract is only what the assertions below pin down.
TEST_P(QuicConnectionTest, IdleTimeoutAfterSendTwoPackets) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  QuicTime initial_ddl =
      clock_.ApproximateNow() +
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  EXPECT_EQ(initial_ddl, connection_.GetTimeoutAlarm()->deadline());
  EXPECT_TRUE(connection_.connected());
  // Send two packets, 20ms apart.
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(1u), last_packet);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(20));
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(2u), last_packet);
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  // Firing at the initial deadline closes the idle connection.
  clock_.AdvanceTime(initial_ddl - clock_.ApproximateNow());
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  // Closing cancels all remaining alarms.
  EXPECT_FALSE(connection_.HasPendingAcks());
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Verifies the handshake timeout path: with a 5s handshake/idle timeout, the
// connection survives traffic partway through, but firing the timeout alarm
// once the handshake deadline passes closes it with QUIC_HANDSHAKE_TIMEOUT.
TEST_P(QuicConnectionTest, HandshakeTimeout) {
  // 5-second handshake and idle timeouts.
  const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(5);
  connection_.SetNetworkTimeouts(timeout, timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  // The alarm deadline is one second short of the nominal timeout.
  QuicTime handshake_timeout =
      clock_.ApproximateNow() + timeout - QuicTime::Delta::FromSeconds(1);
  EXPECT_EQ(handshake_timeout, connection_.GetTimeoutAlarm()->deadline());
  EXPECT_TRUE(connection_.connected());
  // Exchange some data 3 seconds in; the connection stays alive.
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(0, connection_.transport_version()),
      "GET /", 0, FIN, nullptr);
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(3));
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_TRUE(connection_.connected());
  // Pass the handshake deadline; the handshake never completed, so firing
  // the alarm closes the connection.
  clock_.AdvanceTime(timeout - QuicTime::Delta::FromSeconds(2));
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  EXPECT_FALSE(connection_.HasPendingAcks());
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  TestConnectionCloseQuicErrorCode(QUIC_HANDSHAKE_TIMEOUT);
}
// Verifies keep-alive ping behavior with the default 15s timeout: the ping
// alarm arms after retransmittable data is sent, its deadline is not advanced
// by receiving an ack, firing it emits a ping frame, and the alarm is
// cancelled once the visitor no longer wants to keep the connection alive.
// Skipped for multiple packet number spaces.
TEST_P(QuicConnectionTest, PingAfterSend) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  // Sending data arms the ping alarm at the default 15s keep-alive timeout.
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(0, connection_.transport_version()),
      "GET /", 0, FIN, nullptr);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(15),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // Receiving an ack 5ms later does NOT push the ping deadline out.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(
      QuicTime::Delta::FromSeconds(15) - QuicTime::Delta::FromMilliseconds(5),
      connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // Firing the alarm at the deadline writes exactly one ping frame (plus
  // possible padding).
  writer_->Reset();
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
  connection_.GetPingAlarm()->Fire();
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->ping_frames().size());
  // Once keep-alive is no longer desired, sending an ack leaves the ping
  // alarm unarmed.
  writer_->Reset();
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(false));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  SendAckPacketToPeer();
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
}
// Same scenario as PingAfterSend, but with the keep-alive ping timeout
// explicitly reduced to 10 seconds via set_keep_alive_ping_timeout; all
// deadline arithmetic uses 10s instead of the default 15s. Skipped for
// multiple packet number spaces.
TEST_P(QuicConnectionTest, ReducedPingTimeout) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  // Reduce the keep-alive ping timeout to 10 seconds.
  connection_.set_keep_alive_ping_timeout(QuicTime::Delta::FromSeconds(10));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  // Sending data arms the ping alarm at the reduced timeout.
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(0, connection_.transport_version()),
      "GET /", 0, FIN, nullptr);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(10),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // An ack 5ms later does not advance the ping deadline.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(
      QuicTime::Delta::FromSeconds(10) - QuicTime::Delta::FromMilliseconds(5),
      connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // Firing at the deadline emits exactly one ping frame (plus padding).
  writer_->Reset();
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(10));
  connection_.GetPingAlarm()->Fire();
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->ping_frames().size());
  // No more keep-alive: the ping alarm stays unarmed after an ack.
  writer_->Reset();
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(false));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  SendAckPacketToPeer();
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
}
// Verifies explicit MTU probing: SendMtuDiscoveryPacket emits a probe of the
// requested size; regular packets stay at the old MTU until the probe is
// acked, after which max_packet_length rises to the probed value and the same
// payload fits in a single packet.
TEST_P(QuicConnectionTest, SendMtuDiscoveryPacket) {
  MtuDiscoveryTestInit();
  // Probe 100 bytes above the default MTU.
  const size_t new_mtu = kDefaultMaxPacketSize + 100;
  QuicByteCount mtu_probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&mtu_probe_size));
  connection_.SendMtuDiscoveryPacket(new_mtu);
  EXPECT_EQ(new_mtu, mtu_probe_size);
  EXPECT_EQ(QuicPacketNumber(1u), creator_->packet_number());
  // Data one byte over the default MTU splits into two packets while the
  // probe is still unacked.
  const std::string data(kDefaultMaxPacketSize + 1, '.');
  QuicByteCount size_before_mtu_change;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(2)
      .WillOnce(SaveArg<3>(&size_before_mtu_change))
      .WillOnce(Return());
  connection_.SendStreamDataWithString(3, data, 0, FIN);
  EXPECT_EQ(QuicPacketNumber(3u), creator_->packet_number());
  EXPECT_EQ(kDefaultMaxPacketSize, size_before_mtu_change);
  // Acking the probe raises the connection's MTU to the probed value.
  QuicAckFrame probe_ack = InitAckFrame(3);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&probe_ack);
  EXPECT_EQ(new_mtu, connection_.max_packet_length());
  // The same payload now fits in one packet.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(3, data, 0, FIN);
  EXPECT_EQ(QuicPacketNumber(4u), creator_->packet_number());
}
// Verifies that sending an MTU discovery probe through a batch writer flushes
// the writer immediately (flush_attempts increments by exactly one).
TEST_P(QuicConnectionTest, BatchWriterFlushedAfterMtuDiscoveryPacket) {
  writer_->SetBatchMode(true);
  MtuDiscoveryTestInit();
  const size_t target_mtu = kDefaultMaxPacketSize + 100;
  QuicByteCount mtu_probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&mtu_probe_size));
  // Record the flush count before the probe so we can assert the delta.
  const uint32_t prior_flush_attempts = writer_->flush_attempts();
  connection_.SendMtuDiscoveryPacket(target_mtu);
  EXPECT_EQ(target_mtu, mtu_probe_size);
  // The probe must trigger exactly one additional flush.
  EXPECT_EQ(writer_->flush_attempts(), prior_flush_attempts + 1);
}
// Verifies that without EnablePathMtuDiscovery, sending well past the probing
// threshold never arms the MTU discovery alarm or sends a probe.
TEST_P(QuicConnectionTest, MtuDiscoveryDisabled) {
  MtuDiscoveryTestInit();
  const QuicPacketCount packets_between_probes_base = 10;
  set_packets_between_probes_base(packets_between_probes_base);
  // Send twice as many packets as the probe threshold; discovery was never
  // enabled, so no probe activity should occur.
  const QuicPacketCount number_of_packets = packets_between_probes_base * 2;
  for (QuicPacketCount i = 0; i < number_of_packets; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    EXPECT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
    EXPECT_EQ(0u, connection_.mtu_probe_count());
  }
}
// End-to-end happy path for path MTU discovery: after every
// packets_between_probes_base packets (doubling between rounds) the alarm
// arms, a probe larger than the current MTU is sent, and acking it raises
// max_packet_length — up to kMtuDiscoveryTargetPacketSizeHigh after
// kMtuDiscoveryAttempts rounds. Finally, a write error at the raised MTU
// drops the connection back to the last successful probe size on the first
// failure, and closes it with QUIC_PACKET_WRITE_ERROR on the second.
TEST_P(QuicConnectionTest, MtuDiscoveryEnabled) {
  MtuDiscoveryTestInit();
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  // The alarm only arms on the packets_between_probes_base-th packet.
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  // Firing the alarm sends a probe larger than the current MTU.
  QuicByteCount probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&probe_size));
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_THAT(probe_size, InRange(connection_.max_packet_length(),
                                  kMtuDiscoveryTargetPacketSizeHigh));
  const QuicPacketNumber probe_packet_number =
      FirstSendingPacketNumber() + packets_between_probes_base;
  ASSERT_EQ(probe_packet_number, creator_->packet_number());
  {
    // Acking the probe raises the MTU to the probed size.
    QuicAckFrame probe_ack = InitAckFrame(probe_packet_number);
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
        .Times(AnyNumber());
    ProcessAckPacket(&probe_ack);
    EXPECT_EQ(probe_size, connection_.max_packet_length());
    EXPECT_EQ(0u, connection_.GetBytesInFlight());
    EXPECT_EQ(1u, connection_.mtu_probe_count());
  }
  QuicStreamOffset stream_offset = packets_between_probes_base;
  QuicByteCount last_probe_size = 0;
  // Remaining rounds: the packet interval doubles each time and each acked
  // probe is strictly larger than the previous one.
  for (size_t num_probes = 1; num_probes < kMtuDiscoveryAttempts;
       ++num_probes) {
    for (QuicPacketCount i = 0;
         i < (packets_between_probes_base << num_probes) - 1; ++i) {
      SendStreamDataToPeer(3, ".", stream_offset++, NO_FIN, nullptr);
      ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
    }
    SendStreamDataToPeer(3, "!", stream_offset++, NO_FIN, nullptr);
    ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
    QuicByteCount new_probe_size;
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .WillOnce(SaveArg<3>(&new_probe_size));
    connection_.GetMtuDiscoveryAlarm()->Fire();
    EXPECT_THAT(new_probe_size,
                InRange(probe_size, kMtuDiscoveryTargetPacketSizeHigh));
    EXPECT_EQ(num_probes + 1, connection_.mtu_probe_count());
    QuicAckFrame probe_ack = InitAckFrame(creator_->packet_number());
    ProcessAckPacket(&probe_ack);
    EXPECT_EQ(new_probe_size, connection_.max_packet_length());
    EXPECT_EQ(0u, connection_.GetBytesInFlight());
    last_probe_size = probe_size;
    probe_size = new_probe_size;
  }
  // After all attempts the MTU reaches the high target.
  EXPECT_EQ(probe_size, kMtuDiscoveryTargetPacketSizeHigh);
  // First write failure: MTU reverts to the previous probe size but the
  // connection survives.
  writer_->SetShouldWriteFail();
  SendStreamDataToPeer(3, "(", stream_offset++, NO_FIN, nullptr);
  EXPECT_EQ(last_probe_size, connection_.max_packet_length());
  EXPECT_TRUE(connection_.connected());
  // Second write failure: the connection closes with a write error.
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  SendStreamDataToPeer(3, ")", stream_offset++, NO_FIN, nullptr);
  EXPECT_EQ(last_probe_size, connection_.max_packet_length());
  EXPECT_FALSE(connection_.connected());
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PACKET_WRITE_ERROR));
}
// Verifies error tolerance after a successful MTU probe when using a batch
// writer: the first flush write error reverts max_packet_length to the
// original value and keeps the connection alive; the second flush error
// closes the connection with QUIC_PACKET_WRITE_ERROR.
TEST_P(QuicConnectionTest,
       MtuDiscoveryIgnoreOneWriteErrorInFlushAfterSuccessfulProbes) {
  MtuDiscoveryTestInit();
  writer_->SetBatchMode(true);
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  // Remember the pre-probe MTU; the first write error restores it.
  const QuicByteCount original_max_packet_length =
      connection_.max_packet_length();
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  // Send one probe and ack it, raising the MTU.
  QuicByteCount probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&probe_size));
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_THAT(probe_size, InRange(connection_.max_packet_length(),
                                  kMtuDiscoveryTargetPacketSizeHigh));
  const QuicPacketNumber probe_packet_number =
      FirstSendingPacketNumber() + packets_between_probes_base;
  ASSERT_EQ(probe_packet_number, creator_->packet_number());
  {
    QuicAckFrame probe_ack = InitAckFrame(probe_packet_number);
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
        .Times(AnyNumber());
    ProcessAckPacket(&probe_ack);
    EXPECT_EQ(probe_size, connection_.max_packet_length());
    EXPECT_EQ(0u, connection_.GetBytesInFlight());
  }
  EXPECT_EQ(1u, connection_.mtu_probe_count());
  // First flush failure (scoped flusher flush): MTU reverts, connection
  // stays up.
  writer_->SetShouldWriteFail();
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
  }
  EXPECT_EQ(original_max_packet_length, connection_.max_packet_length());
  EXPECT_TRUE(connection_.connected());
  // Second flush failure: the connection closes with a write error.
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
  }
  EXPECT_EQ(original_max_packet_length, connection_.max_packet_length());
  EXPECT_FALSE(connection_.connected());
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PACKET_WRITE_ERROR));
}
// Verifies that an MTU probe fired into a blocked writer is queued, and that
// if the queued probe later fails as too large when the writer unblocks, the
// failure is absorbed: the MTU stays at its original value and the
// connection remains open.
TEST_P(QuicConnectionTest, MtuDiscoveryWriteBlocked) {
  MtuDiscoveryTestInit();
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  QuicByteCount original_max_packet_length = connection_.max_packet_length();
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  // The probe fires into a blocked writer and gets queued.
  BlockOnNextWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_EQ(1u, connection_.mtu_probe_count());
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  ASSERT_TRUE(connection_.connected());
  // Unblock, but make the next write fail as "packet too large": the probe
  // failure must not raise the MTU or kill the connection.
  writer_->SetWritable();
  SimulateNextPacketTooLarge();
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_EQ(original_max_packet_length, connection_.max_packet_length());
  EXPECT_TRUE(connection_.connected());
}
// Verifies the give-up path of MTU discovery: every probe is deliberately
// left out of the acks (all other packets are acked), so after
// kMtuDiscoveryAttempts unacknowledged probes at the expected, doubling
// intervals, discovery stops and max_packet_length stays at the default.
TEST_P(QuicConnectionTest, MtuDiscoveryFailed) {
  MtuDiscoveryTestInit();
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  const QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(100);
  EXPECT_EQ(packets_between_probes_base,
            QuicConnectionPeer::GetPacketsBetweenMtuProbes(&connection_));
  // Enough packets to exhaust all doubling probe intervals.
  const QuicPacketCount number_of_packets =
      packets_between_probes_base * (1 << (kMtuDiscoveryAttempts + 1));
  std::vector<QuicPacketNumber> mtu_discovery_packets;
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
      .Times(AnyNumber());
  for (QuicPacketCount i = 0; i < number_of_packets; i++) {
    SendStreamDataToPeer(3, "!", i, NO_FIN, nullptr);
    clock_.AdvanceTime(rtt);
    // Build an ack covering every packet EXCEPT the probes sent so far.
    QuicAckFrame ack;
    if (!mtu_discovery_packets.empty()) {
      QuicPacketNumber min_packet = *min_element(mtu_discovery_packets.begin(),
                                                 mtu_discovery_packets.end());
      QuicPacketNumber max_packet = *max_element(mtu_discovery_packets.begin(),
                                                 mtu_discovery_packets.end());
      ack.packets.AddRange(QuicPacketNumber(1), min_packet);
      ack.packets.AddRange(QuicPacketNumber(max_packet + 1),
                           creator_->packet_number() + 1);
      ack.largest_acked = creator_->packet_number();
    } else {
      ack.packets.AddRange(QuicPacketNumber(1), creator_->packet_number() + 1);
      ack.largest_acked = creator_->packet_number();
    }
    ProcessAckPacket(&ack);
    // If the probe alarm armed on this packet, fire it and record the
    // probe's packet number so future acks skip it.
    if (!connection_.GetMtuDiscoveryAlarm()->IsSet()) {
      continue;
    }
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
    connection_.GetMtuDiscoveryAlarm()->Fire();
    mtu_discovery_packets.push_back(creator_->packet_number());
  }
  // Exactly kMtuDiscoveryAttempts probes were sent, at packet numbers that
  // follow the doubling interval pattern.
  ASSERT_EQ(kMtuDiscoveryAttempts, mtu_discovery_packets.size());
  for (uint64_t i = 0; i < kMtuDiscoveryAttempts; i++) {
    const QuicPacketCount packets_between_probes =
        packets_between_probes_base * ((1 << (i + 1)) - 1);
    EXPECT_EQ(QuicPacketNumber(packets_between_probes + (i + 1)),
              mtu_discovery_packets[i]);
  }
  // Discovery has given up: no alarm, MTU unchanged from the default.
  EXPECT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  EXPECT_EQ(kDefaultMaxPacketSize, connection_.max_packet_length());
  EXPECT_EQ(kMtuDiscoveryAttempts, connection_.mtu_probe_count());
}
// Simulates a discovery sequence where the first MTU probe is acked, the
// second (larger) probe is lost, and a third probe between the two sizes
// succeeds; finally verifies that blackhole (path-MTU-reduction) detection
// rolls the connection back to the first validated probe size.
TEST_P(QuicConnectionTest, MtuDiscoverySecondProbeFailed) {
  MtuDiscoveryTestInit();
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  QuicStreamOffset stream_offset = 0;
  // Send enough data packets to arm the MTU discovery alarm.
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", stream_offset++, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  // Fire the alarm; capture the first probe's size from the send call.
  QuicByteCount probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&probe_size));
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_THAT(probe_size, InRange(connection_.max_packet_length(),
                                  kMtuDiscoveryTargetPacketSizeHigh));
  const QuicPacketNumber probe_packet_number =
      FirstSendingPacketNumber() + packets_between_probes_base;
  ASSERT_EQ(probe_packet_number, creator_->packet_number());
  // Acknowledge the first probe; the connection adopts its size as the
  // new max packet length.
  QuicAckFrame first_ack = InitAckFrame(probe_packet_number);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
      .Times(AnyNumber());
  ProcessAckPacket(&first_ack);
  EXPECT_EQ(probe_size, connection_.max_packet_length());
  EXPECT_EQ(0u, connection_.GetBytesInFlight());
  EXPECT_EQ(1u, connection_.mtu_probe_count());
  // Send the (doubled) inter-probe packet count to trigger a second probe.
  for (QuicPacketCount i = 0; i < (packets_between_probes_base << 1) - 1; ++i) {
    SendStreamDataToPeer(3, ".", stream_offset++, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", stream_offset++, NO_FIN, nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  QuicByteCount second_probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&second_probe_size));
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_THAT(second_probe_size,
              InRange(probe_size, kMtuDiscoveryTargetPacketSizeHigh));
  EXPECT_EQ(2u, connection_.mtu_probe_count());
  // Acknowledge everything sent so far EXCEPT the second probe, using the
  // ack frame built for exactly that purpose.  (This previously
  // re-processed |first_ack|, leaving |second_ack| unused.)
  QuicPacketNumber second_probe_packet_number = creator_->packet_number();
  QuicAckFrame second_ack = InitAckFrame(second_probe_packet_number - 1);
  ProcessAckPacket(&second_ack);
  // The unacked second probe must not raise the max packet length.
  EXPECT_EQ(probe_size, connection_.max_packet_length());
  for (QuicPacketCount i = 0; i < (packets_between_probes_base << 2) - 1; ++i) {
    SendStreamDataToPeer(3, "@", stream_offset++, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "#", stream_offset++, NO_FIN, nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  // Third probe: expected to fall between the validated first probe size
  // and the failed second probe size.
  QuicByteCount third_probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&third_probe_size));
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_THAT(third_probe_size, InRange(probe_size, second_probe_size));
  EXPECT_EQ(3u, connection_.mtu_probe_count());
  // Ack everything except the second probe; the third probe's size is
  // adopted.
  QuicAckFrame third_ack =
      ConstructAckFrame(creator_->packet_number(), second_probe_packet_number);
  ProcessAckPacket(&third_ack);
  EXPECT_EQ(third_probe_size, connection_.max_packet_length());
  SendStreamDataToPeer(3, "$", stream_offset++, NO_FIN, nullptr);
  EXPECT_TRUE(connection_.PathMtuReductionDetectionInProgress());
  // If path-degrading would fire before MTU-reduction detection, let it
  // fire first; it must not change the packet length.
  if (connection_.PathDegradingDetectionInProgress() &&
      QuicConnectionPeer::GetPathDegradingDeadline(&connection_) <
          QuicConnectionPeer::GetPathMtuReductionDetectionDeadline(
              &connection_)) {
    connection_.PathDegradingTimeout();
  }
  EXPECT_EQ(third_probe_size, connection_.max_packet_length());
  EXPECT_TRUE(connection_.PathMtuReductionDetectionInProgress());
  // Blackhole detection fires: the connection falls back to the last
  // size known-good before the reduction, i.e. the first probe's size.
  connection_.GetBlackholeDetectorAlarm()->Fire();
  EXPECT_EQ(probe_size, connection_.max_packet_length());
}
// Verifies that when the packet writer advertises a max packet size below
// the MTU discovery target, every probe (and the final discovered MTU) is
// capped at the writer's limit rather than the target.
TEST_P(QuicConnectionTest, MtuDiscoveryWriterLimited) {
  MtuDiscoveryTestInit();
  // Writer limit is one byte below the discovery target, so the target
  // itself is unreachable.
  const QuicByteCount mtu_limit = kMtuDiscoveryTargetPacketSizeHigh - 1;
  writer_->set_max_packet_size(mtu_limit);
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  // Send enough data packets to arm the MTU discovery alarm.
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  // First probe: its size must respect the writer limit.
  QuicByteCount probe_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(SaveArg<3>(&probe_size));
  connection_.GetMtuDiscoveryAlarm()->Fire();
  EXPECT_THAT(probe_size, InRange(connection_.max_packet_length(), mtu_limit));
  const QuicPacketNumber probe_sequence_number =
      FirstSendingPacketNumber() + packets_between_probes_base;
  ASSERT_EQ(probe_sequence_number, creator_->packet_number());
  // Acknowledge the probe; its size becomes the new max packet length.
  {
    QuicAckFrame probe_ack = InitAckFrame(probe_sequence_number);
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
        .Times(AnyNumber());
    ProcessAckPacket(&probe_ack);
    EXPECT_EQ(probe_size, connection_.max_packet_length());
    EXPECT_EQ(0u, connection_.GetBytesInFlight());
  }
  EXPECT_EQ(1u, connection_.mtu_probe_count());
  QuicStreamOffset stream_offset = packets_between_probes_base;
  // Each subsequent round doubles the inter-probe packet count; every
  // acked probe must stay within (previous probe size, writer limit].
  for (size_t num_probes = 1; num_probes < kMtuDiscoveryAttempts;
       ++num_probes) {
    for (QuicPacketCount i = 0;
         i < (packets_between_probes_base << num_probes) - 1; ++i) {
      SendStreamDataToPeer(3, ".", stream_offset++, NO_FIN, nullptr);
      ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
    }
    SendStreamDataToPeer(3, "!", stream_offset++, NO_FIN, nullptr);
    ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
    QuicByteCount new_probe_size;
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
        .WillOnce(SaveArg<3>(&new_probe_size));
    connection_.GetMtuDiscoveryAlarm()->Fire();
    EXPECT_THAT(new_probe_size, InRange(probe_size, mtu_limit));
    EXPECT_EQ(num_probes + 1, connection_.mtu_probe_count());
    QuicAckFrame probe_ack = InitAckFrame(creator_->packet_number());
    ProcessAckPacket(&probe_ack);
    EXPECT_EQ(new_probe_size, connection_.max_packet_length());
    EXPECT_EQ(0u, connection_.GetBytesInFlight());
    probe_size = new_probe_size;
  }
  // After all attempts, discovery converges exactly on the writer limit.
  EXPECT_EQ(probe_size, mtu_limit);
}
// Verifies that a writer-level failure to send an MTU probe (packet too
// large) does not close the connection, does not change the MTU, and does
// not schedule further probes.
TEST_P(QuicConnectionTest, MtuDiscoveryWriterFailed) {
  MtuDiscoveryTestInit();
  const QuicByteCount mtu_limit = kMtuDiscoveryTargetPacketSizeHigh - 1;
  const QuicByteCount initial_mtu = connection_.max_packet_length();
  EXPECT_LT(initial_mtu, mtu_limit);
  writer_->set_max_packet_size(mtu_limit);
  const QuicPacketCount packets_between_probes_base = 5;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  // Send enough data packets to arm the MTU discovery alarm.
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  ASSERT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  // The writer rejects the next (probe) packet as too large; this must be
  // treated as a probe failure, not a connection error.
  writer_->SimulateNextPacketTooLarge();
  connection_.GetMtuDiscoveryAlarm()->Fire();
  ASSERT_TRUE(connection_.connected());
  QuicPacketNumber probe_number = creator_->packet_number();
  QuicPacketCount extra_packets = packets_between_probes_base * 3;
  for (QuicPacketCount i = 0; i < extra_packets; i++) {
    connection_.EnsureWritableAndSendStreamData5();
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  // Ack everything except the failed probe.
  QuicAckFrame probe_ack =
      ConstructAckFrame(creator_->packet_number(), probe_number);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&probe_ack);
  EXPECT_EQ(initial_mtu, connection_.max_packet_length());
  // Even after many more packets, no further probing is attempted.
  for (QuicPacketCount i = 0; i < 4 * packets_between_probes_base; i++) {
    connection_.EnsureWritableAndSendStreamData5();
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  EXPECT_EQ(initial_mtu, connection_.max_packet_length());
  EXPECT_EQ(1u, connection_.mtu_probe_count());
}
// Verifies that closing the connection cancels a pending MTU discovery
// alarm, so no probe fires after close.
TEST_P(QuicConnectionTest, NoMtuDiscoveryAfterConnectionClosed) {
  MtuDiscoveryTestInit();
  const QuicPacketCount packets_between_probes_base = 10;
  set_packets_between_probes_base(packets_between_probes_base);
  connection_.EnablePathMtuDiscovery(send_algorithm_);
  // Send enough data packets to arm the MTU discovery alarm.
  for (QuicPacketCount i = 0; i < packets_between_probes_base - 1; i++) {
    SendStreamDataToPeer(3, ".", i, NO_FIN, nullptr);
    ASSERT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  }
  SendStreamDataToPeer(3, "!", packets_between_probes_base - 1, NO_FIN,
                       nullptr);
  EXPECT_TRUE(connection_.GetMtuDiscoveryAlarm()->IsSet());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  // Silent close: no close packet is sent, but the alarm must be unset.
  connection_.CloseConnection(QUIC_PEER_GOING_AWAY, "no reason",
                              ConnectionCloseBehavior::SILENT_CLOSE);
  EXPECT_FALSE(connection_.GetMtuDiscoveryAlarm()->IsSet());
}
// Verifies that sending data during the handshake pushes the idle-timeout
// deadline forward, and that the connection closes with
// QUIC_NETWORK_IDLE_TIMEOUT once the (extended) deadline passes.
TEST_P(QuicConnectionTest, TimeoutAfterSendDuringHandshake) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  // NOTE(review): the -1 presumably accounts for how the connection derives
  // its deadline from the configured handshake idle timeout — confirm
  // against QuicConnection::SetNetworkTimeouts.
  const QuicTime::Delta initial_idle_timeout =
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  const QuicTime::Delta five_ms = QuicTime::Delta::FromMilliseconds(5);
  QuicTime default_timeout = clock_.ApproximateNow() + initial_idle_timeout;
  // Send a packet 5ms in; the deadline moves out by those 5ms.
  clock_.AdvanceTime(five_ms);
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  // A second send does not extend the deadline further: only the first
  // packet sent after receiving refreshes it.
  clock_.AdvanceTime(five_ms);
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      3, FIN, nullptr);
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  // At the original deadline the connection is still alive (deadline was
  // pushed out by 5ms).
  clock_.AdvanceTime(initial_idle_timeout - five_ms - five_ms);
  EXPECT_EQ(default_timeout, clock_.ApproximateNow());
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_TRUE(connection_.connected());
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  // Advance to the extended deadline and fire: idle timeout closes the
  // connection.
  clock_.AdvanceTime(five_ms);
  EXPECT_EQ(default_timeout + five_ms, clock_.ApproximateNow());
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Same as TimeoutAfterSendDuringHandshake, but with a negotiated
// (post-handshake) config so the longer kMaximumIdleTimeoutSecs applies.
TEST_P(QuicConnectionTest, TimeoutAfterSendAfterHandshake) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // Build a peer hello that negotiates the maximum idle timeout, then feed
  // the resulting config into the connection.
  CryptoHandshakeMessage msg;
  std::string error_details;
  QuicConfig client_config;
  client_config.SetInitialStreamFlowControlWindowToSend(
      kInitialStreamFlowControlWindowForTest);
  client_config.SetInitialSessionFlowControlWindowToSend(
      kInitialSessionFlowControlWindowForTest);
  client_config.SetIdleNetworkTimeout(
      QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs));
  client_config.ToHandshakeMessage(&msg, connection_.transport_version());
  const QuicErrorCode error =
      config.ProcessPeerHello(msg, CLIENT, &error_details);
  EXPECT_THAT(error, IsQuicNoError());
  // TLS versions additionally require the received connection IDs to be
  // present in the config.
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  connection_.SetFromConfig(config);
  const QuicTime::Delta default_idle_timeout =
      QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs - 1);
  const QuicTime::Delta five_ms = QuicTime::Delta::FromMilliseconds(5);
  QuicTime default_timeout = clock_.ApproximateNow() + default_idle_timeout;
  // First send after receiving moves the deadline out by 5ms.
  clock_.AdvanceTime(five_ms);
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  // A second send does not extend the deadline further.
  clock_.AdvanceTime(five_ms);
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      3, FIN, nullptr);
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  // Still alive at the original deadline.
  clock_.AdvanceTime(default_idle_timeout - five_ms - five_ms);
  EXPECT_EQ(default_timeout, clock_.ApproximateNow());
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_TRUE(connection_.connected());
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // Fire at the extended deadline and verify the close frame carries the
  // idle-timeout error.
  clock_.AdvanceTime(five_ms);
  EXPECT_EQ(default_timeout + five_ms, clock_.ApproximateNow());
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_NETWORK_IDLE_TIMEOUT));
}
// Verifies that the idle timeout still fires and closes the connection
// even when the visitor reports open streams that keep the connection
// alive (i.e. open streams do not suppress idle-timeout close).
TEST_P(QuicConnectionTest, TimeoutAfterSendSilentCloseWithOpenStreams) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // Negotiate the maximum idle timeout via a synthesized peer hello.
  CryptoHandshakeMessage msg;
  std::string error_details;
  QuicConfig client_config;
  client_config.SetInitialStreamFlowControlWindowToSend(
      kInitialStreamFlowControlWindowForTest);
  client_config.SetInitialSessionFlowControlWindowToSend(
      kInitialSessionFlowControlWindowForTest);
  client_config.SetIdleNetworkTimeout(
      QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs));
  client_config.ToHandshakeMessage(&msg, connection_.transport_version());
  const QuicErrorCode error =
      config.ProcessPeerHello(msg, CLIENT, &error_details);
  EXPECT_THAT(error, IsQuicNoError());
  // TLS versions additionally require the received connection IDs.
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  connection_.SetFromConfig(config);
  const QuicTime::Delta default_idle_timeout =
      QuicTime::Delta::FromSeconds(kMaximumIdleTimeoutSecs - 1);
  const QuicTime::Delta five_ms = QuicTime::Delta::FromMilliseconds(5);
  QuicTime default_timeout = clock_.ApproximateNow() + default_idle_timeout;
  clock_.AdvanceTime(five_ms);
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  // The visitor reports live streams; the connection must still idle out.
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  if (GetQuicReloadableFlag(quic_add_stream_info_to_idle_close_detail)) {
    EXPECT_CALL(visitor_, GetStreamsInfoForLogging()).WillOnce(Return(""));
  }
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  // Jump straight past the alarm's deadline and fire it.
  clock_.AdvanceTime(connection_.GetTimeoutAlarm()->deadline() -
                     clock_.ApproximateNow() + five_ms);
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Verifies that receiving a packet (an ack) pushes the idle-timeout
// deadline forward, and that the connection closes with
// QUIC_NETWORK_IDLE_TIMEOUT once the extended deadline passes.
TEST_P(QuicConnectionTest, TimeoutAfterReceive) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  const QuicTime::Delta initial_idle_timeout =
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  const QuicTime::Delta five_ms = QuicTime::Delta::FromMilliseconds(5);
  QuicTime default_timeout = clock_.ApproximateNow() + initial_idle_timeout;
  // Two sends at t=0; sends alone do not move the deadline here.
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, NO_FIN);
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      3, NO_FIN);
  EXPECT_EQ(default_timeout, connection_.GetTimeoutAlarm()->deadline());
  // Receive an ack 5ms in; this refreshes the idle deadline by 5ms.
  clock_.AdvanceTime(five_ms);
  QuicAckFrame ack = InitAckFrame(2);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&ack);
  // At the original deadline the connection is still alive.
  clock_.AdvanceTime(initial_idle_timeout - five_ms);
  EXPECT_EQ(default_timeout, clock_.ApproximateNow());
  EXPECT_TRUE(connection_.connected());
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  // Fire at the extended deadline: the connection idles out.
  clock_.AdvanceTime(five_ms);
  EXPECT_EQ(default_timeout + five_ms, clock_.ApproximateNow());
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Verifies that once the idle deadline (based on last-received time) is
// reached, continuing to SEND data does not keep the connection alive:
// it still closes with QUIC_NETWORK_IDLE_TIMEOUT.
TEST_P(QuicConnectionTest, TimeoutAfterReceiveNotSendWhenUnacked) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  const QuicTime::Delta initial_idle_timeout =
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  // Handshake timeout is infinite; only the idle timeout matters. The
  // idle value is padded by 1s so the deadline math below stays simple.
  connection_.SetNetworkTimeouts(
      QuicTime::Delta::Infinite(),
      initial_idle_timeout + QuicTime::Delta::FromSeconds(1));
  const QuicTime::Delta five_ms = QuicTime::Delta::FromMilliseconds(5);
  QuicTime default_timeout = clock_.ApproximateNow() + initial_idle_timeout;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, NO_FIN);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      3, NO_FIN);
  EXPECT_EQ(default_timeout, connection_.GetTimeoutAlarm()->deadline());
  // Receive an ack 5ms in; the deadline moves out by those 5ms.
  clock_.AdvanceTime(five_ms);
  QuicAckFrame ack = InitAckFrame(2);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&ack);
  clock_.AdvanceTime(initial_idle_timeout - five_ms);
  EXPECT_EQ(default_timeout, clock_.ApproximateNow());
  EXPECT_TRUE(connection_.connected());
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_EQ(default_timeout + five_ms,
            connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  // Keep sending while time advances; without newly RECEIVED packets the
  // connection must eventually time out (the 100-iteration cap is a
  // safety bound against the test hanging).
  for (int i = 0; i < 100 && connection_.connected(); ++i) {
    QUIC_LOG(INFO) << "sending data packet";
    connection_.SendStreamDataWithString(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()),
        "foo", 0, NO_FIN);
    connection_.GetTimeoutAlarm()->Fire();
    clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
  }
  EXPECT_FALSE(connection_.connected());
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  TestConnectionCloseQuicErrorCode(QUIC_NETWORK_IDLE_TIMEOUT);
}
// Verifies that a writable connection sends a packet immediately rather
// than queueing it.
TEST_P(QuicConnectionTest, SendScheduler) {
  // Frame the packet from the client perspective so the server-side
  // connection under test can parse it.
  QuicFramerPeer::SetPerspective(&peer_framer_, Perspective::IS_CLIENT);
  std::unique_ptr<QuicPacket> packet =
      ConstructDataPacket(1, !kHasStopWaiting, ENCRYPTION_INITIAL);
  QuicPacketCreatorPeer::SetPacketNumber(creator_, 1);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  connection_.SendPacket(ENCRYPTION_INITIAL, 1, std::move(packet),
                         HAS_RETRANSMITTABLE_DATA, false, false);
  // Nothing should be left in the queue after a successful write.
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
}
// Verifies that a write failure on the very first packet closes the
// connection (the visitor must observe exactly one close).
TEST_P(QuicConnectionTest, FailToSendFirstPacket) {
  QuicFramerPeer::SetPerspective(&peer_framer_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(1);
  std::unique_ptr<QuicPacket> packet =
      ConstructDataPacket(1, !kHasStopWaiting, ENCRYPTION_INITIAL);
  QuicPacketCreatorPeer::SetPacketNumber(creator_, 1);
  // Force the writer to report a hard error on the next write.
  writer_->SetShouldWriteFail();
  connection_.SendPacket(ENCRYPTION_INITIAL, 1, std::move(packet),
                         HAS_RETRANSMITTABLE_DATA, false, false);
}
// Verifies that a blocked writer (EAGAIN-style) causes the packet to be
// queued instead of sent, and no send is reported to congestion control.
TEST_P(QuicConnectionTest, SendSchedulerEAGAIN) {
  QuicFramerPeer::SetPerspective(&peer_framer_, Perspective::IS_CLIENT);
  std::unique_ptr<QuicPacket> packet =
      ConstructDataPacket(1, !kHasStopWaiting, ENCRYPTION_INITIAL);
  QuicPacketCreatorPeer::SetPacketNumber(creator_, 1);
  // Simulate the socket being write-blocked for the next write.
  BlockOnNextWrite();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(2u), _, _))
      .Times(0);
  connection_.SendPacket(ENCRYPTION_INITIAL, 1, std::move(packet),
                         HAS_RETRANSMITTABLE_DATA, false, false);
  // The packet is held in the connection's queue until the writer unblocks.
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
}
// Verifies that when congestion control refuses to send (CanSend returns
// false), SendStreamDataWithString consumes no bytes and queues nothing.
TEST_P(QuicConnectionTest, TestQueueLimitsOnSendStreamData) {
  // Payload sized to the max packet length so a send would need a full
  // packet.
  size_t payload_length = connection_.max_packet_length();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillOnce(testing::Return(false));
  const std::string payload(payload_length, 'a');
  QuicStreamId first_bidi_stream_id(QuicUtils::GetFirstBidirectionalStreamId(
      connection_.version().transport_version, Perspective::IS_CLIENT));
  // Congestion-blocked: zero bytes consumed, nothing queued.
  EXPECT_EQ(0u, connection_
                    .SendStreamDataWithString(first_bidi_stream_id, payload, 0,
                                              NO_FIN)
                    .bytes_consumed);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
}
// Verifies that a payload of twice the max packet length is split into
// three packets (framing/header overhead forces the third) and fully
// consumed.
TEST_P(QuicConnectionTest, SendingThreePackets) {
  size_t total_payload_length = 2 * connection_.max_packet_length();
  const std::string payload(total_payload_length, 'a');
  QuicStreamId first_bidi_stream_id(QuicUtils::GetFirstBidirectionalStreamId(
      connection_.version().transport_version, Perspective::IS_CLIENT));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(3);
  EXPECT_EQ(payload.size(), connection_
                                .SendStreamDataWithString(first_bidi_stream_id,
                                                          payload, 0, NO_FIN)
                                .bytes_consumed);
}
// Verifies packet-size behavior before and after the peer signals support
// for truncated (zero-byte) connection IDs: with truncation enabled the
// last packet shrinks by the 2 bytes no longer spent on the connection ID.
// NOTE(review): the 2-byte delta is asserted, not derived here — it
// presumably reflects the connection ID length change; confirm against the
// framer's header-size logic.
TEST_P(QuicConnectionTest, LoopThroughSendingPacketsWithTruncation) {
  set_perspective(Perspective::IS_SERVER);
  const std::string payload(connection_.max_packet_length(), 'a');
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(AnyNumber());
  // Baseline: send the payload with full-length connection IDs.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  EXPECT_EQ(payload.size(),
            connection_.SendStreamDataWithString(3, payload, 0, NO_FIN)
                .bytes_consumed);
  size_t non_truncated_packet_size = writer_->last_packet_size();
  // Peer advertises 0 bytes for the connection ID; re-send and compare.
  QuicConfig config;
  QuicConfigPeer::SetReceivedBytesForConnectionId(&config, 0);
  connection_.SetFromConfig(config);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  EXPECT_EQ(payload.size(),
            connection_.SendStreamDataWithString(3, payload, 1350, NO_FIN)
                .bytes_consumed);
  EXPECT_EQ(non_truncated_packet_size, writer_->last_packet_size() - 2);
}
// Verifies that receiving a data packet arms the delayed-ack alarm for
// the default delay, and that firing it emits exactly one ack frame
// (plus any padding) and clears the pending ack.
TEST_P(QuicConnectionTest, SendDelayedAck) {
  QuicTime ack_time = clock_.ApproximateNow() + DefaultDelayedAckTime();
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.HasPendingAcks());
  // Install matching 0-RTT crypters on both ends so the data packet
  // decrypts.
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  frame1_.stream_id = 3;
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  // The ack alarm is set exactly the default delayed-ack time out.
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
  clock_.AdvanceTime(DefaultDelayedAckTime());
  connection_.GetAckAlarm()->Fire();
  // Exactly one ack frame goes out (padding frames aside), no stop
  // waiting, and the pending-ack state is cleared.
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Verifies ack decimation: after the initial non-decimated phase, the ack
// alarm uses min_rtt/4 as the delay, and the ack is flushed by packet
// count (every 10th packet here) before the alarm would fire.
TEST_P(QuicConnectionTest, SendDelayedAckDecimation) {
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame()).Times(AnyNumber());
  // Seed an RTT so the decimation delay (min_rtt/4) is well-defined.
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  QuicTime ack_time = clock_.ApproximateNow() +
                      QuicTime::Delta::FromMilliseconds(kMinRttMs / 4);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.HasPendingAcks());
  // Install matching 0-RTT crypters so data packets decrypt.
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  frame1_.stream_id = 3;
  // Burn through the initial phase in which decimation is not yet active;
  // each packet's ack is flushed immediately (no pending ack afterwards).
  uint64_t kFirstDecimatedPacket = 101;
  for (unsigned int i = 0; i < kFirstDecimatedPacket - 1; ++i) {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacketAtLevel(1 + i, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  }
  EXPECT_FALSE(connection_.HasPendingAcks());
  // First decimated packet: the ack alarm is armed for min_rtt/4.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(kFirstDecimatedPacket, !kHasStopWaiting,
                           ENCRYPTION_ZERO_RTT);
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
  // Nine more packets (ten unacked total) force the ack out by count.
  for (int i = 0; i < 9; ++i) {
    EXPECT_TRUE(connection_.HasPendingAcks());
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacketAtLevel(kFirstDecimatedPacket + 1 + i, !kHasStopWaiting,
                             ENCRYPTION_ZERO_RTT);
  }
  // Exactly one ack frame was written and the pending ack is cleared.
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Verifies that with the kAKDU connection option (unlimited ack
// aggregation), no packet-count threshold flushes the ack: even after 18
// further packets the ack remains pending with its original deadline.
TEST_P(QuicConnectionTest, SendDelayedAckDecimationUnlimitedAggregation) {
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  // Enable unlimited ack-decimation aggregation via connection options.
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kAKDU);
  config.SetConnectionOptionsToSend(connection_options);
  connection_.SetFromConfig(config);
  // Seed an RTT so the decimation delay (min_rtt/4) is well-defined.
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  QuicTime ack_time = clock_.ApproximateNow() +
                      QuicTime::Delta::FromMilliseconds(kMinRttMs / 4);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.HasPendingAcks());
  // Install matching 0-RTT crypters so data packets decrypt.
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  frame1_.stream_id = 3;
  // Burn through the initial non-decimated phase.
  uint64_t kFirstDecimatedPacket = 101;
  for (unsigned int i = 0; i < kFirstDecimatedPacket - 1; ++i) {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacketAtLevel(1 + i, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  }
  EXPECT_FALSE(connection_.HasPendingAcks());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(kFirstDecimatedPacket, !kHasStopWaiting,
                           ENCRYPTION_ZERO_RTT);
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
  // 18 more packets — well past the usual flush threshold — must NOT
  // force the ack out when aggregation is unlimited.
  for (int i = 0; i < 18; ++i) {
    EXPECT_TRUE(connection_.HasPendingAcks());
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacketAtLevel(kFirstDecimatedPacket + 1 + i, !kHasStopWaiting,
                             ENCRYPTION_ZERO_RTT);
  }
  // Ack still pending with the original min_rtt/4 deadline.
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
}
// Same as SendDelayedAckDecimation, but with the ack-decimation delay
// overridden to 1/8 of min_rtt instead of the default 1/4.
TEST_P(QuicConnectionTest, SendDelayedAckDecimationEighthRtt) {
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame()).Times(AnyNumber());
  // Override the decimation delay fraction to 1/8.
  QuicConnectionPeer::SetAckDecimationDelay(&connection_, 0.125);
  // Seed an RTT so the decimation delay (min_rtt/8) is well-defined.
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  QuicTime ack_time = clock_.ApproximateNow() +
                      QuicTime::Delta::FromMilliseconds(kMinRttMs / 8);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.HasPendingAcks());
  // Install matching 0-RTT crypters so data packets decrypt.
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  frame1_.stream_id = 3;
  // Burn through the initial non-decimated phase.
  uint64_t kFirstDecimatedPacket = 101;
  for (unsigned int i = 0; i < kFirstDecimatedPacket - 1; ++i) {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacketAtLevel(1 + i, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  }
  EXPECT_FALSE(connection_.HasPendingAcks());
  // First decimated packet arms the alarm for min_rtt/8.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(kFirstDecimatedPacket, !kHasStopWaiting,
                           ENCRYPTION_ZERO_RTT);
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
  // Nine more packets (ten unacked total) force the ack out by count.
  for (int i = 0; i < 9; ++i) {
    EXPECT_TRUE(connection_.HasPendingAcks());
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    ProcessDataPacketAtLevel(kFirstDecimatedPacket + 1 + i, !kHasStopWaiting,
                             ENCRYPTION_ZERO_RTT);
  }
  // Exactly one ack frame was written and the pending ack is cleared.
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Handshake completion on the server leaves the pending delayed ack's
// deadline unchanged, while on the client it pulls the deadline in
// (to "now", or now + default delay when multiple packet number spaces
// are in use).
TEST_P(QuicConnectionTest, SendDelayedAckOnHandshakeConfirmed) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(1);
  // A received packet arms the default delayed-ack alarm.
  EXPECT_TRUE(connection_.HasPendingAcks());
  QuicTime ack_time = clock_.ApproximateNow() + DefaultDelayedAckTime();
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
  // As a server, completing the handshake does not change the deadline.
  QuicConnectionPeer::SetPerspective(&connection_, Perspective::IS_SERVER);
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(ack_time, connection_.GetAckAlarm()->deadline());
  // As a client, completing the handshake accelerates the pending ack.
  QuicConnectionPeer::SetPerspective(&connection_, Perspective::IS_CLIENT);
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.HasPendingAcks());
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    EXPECT_EQ(clock_.ApproximateNow() + DefaultDelayedAckTime(),
              connection_.GetAckAlarm()->deadline());
  } else {
    EXPECT_EQ(clock_.ApproximateNow(), connection_.GetAckAlarm()->deadline());
  }
}
// Receiving a second packet flushes the delayed ack immediately: the wire
// packet holds exactly one non-padding frame (the ack) and no ack remains
// pending.
TEST_P(QuicConnectionTest, SendDelayedAckOnSecondPacket) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(1);
  ProcessPacket(2);
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// With out-of-order receipt (packet 1 never arrives), acks are sent for
// every second received packet; the final lone packet produces no ack on
// the wire but leaves an ack pending.
TEST_P(QuicConnectionTest, NoAckOnOldNacks) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Packet 2 alone (packet 1 missing) does not trigger a send.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessPacket(2);
  size_t frames_per_ack = 1;
  // Packet 3 is the second received packet: an ack goes out.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessPacket(3);
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + frames_per_ack, writer_->frame_count());
  EXPECT_FALSE(writer_->ack_frames().empty());
  writer_->Reset();
  // Packet 4: no immediate ack.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessPacket(4);
  EXPECT_EQ(0u, writer_->frame_count());
  // Packet 5: second since the last ack, so another ack goes out.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessPacket(5);
  padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + frames_per_ack, writer_->frame_count());
  EXPECT_FALSE(writer_->ack_frames().empty());
  writer_->Reset();
  // Packet 6: nothing written, but the ack alarm is armed.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessPacket(6);
  padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count, writer_->frame_count());
  EXPECT_TRUE(connection_.HasPendingAcks());
}
// A pending delayed ack is bundled onto the next outgoing stream-data
// packet (two frames on the wire: stream data + ack), clearing the
// pending-ack state.
TEST_P(QuicConnectionTest, SendDelayedAckOnOutgoingPacket) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_));
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  // Receive a packet to make an ack pending, then send data.
  ProcessDataPacket(1);
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, NO_FIN);
  EXPECT_EQ(2u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// A pending ack for a received crypto packet is bundled onto the next
// outgoing crypto-data packet (three frames written, no pending ack left).
TEST_P(QuicConnectionTest, SendDelayedAckOnOutgoingCryptoPacket) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Crypto data arrives as CRYPTO frames or stream frames depending on
  // the transport version under test.
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  }
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_EQ(3u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// When the writer blocks but buffers the data, the first crypto send goes
// through; the second is queued only for versions that put crypto data in
// stream frames (CRYPTO-frame versions do not queue).
TEST_P(QuicConnectionTest, BlockAndBufferOnFirstCHLOPacketOfTwo) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(1);
  BlockOnNextWrite();
  // The blocked write still buffers the packet's bytes.
  writer_->set_is_write_blocked_data_buffered(true);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  } else {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  }
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_TRUE(writer_->IsWriteBlocked());
  EXPECT_FALSE(connection_.HasQueuedData());
  connection_.SendCryptoDataWithString("bar", 3);
  EXPECT_TRUE(writer_->IsWriteBlocked());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    // CRYPTO-frame data is retransmittable and is not queued here.
    EXPECT_FALSE(connection_.HasQueuedData());
  } else {
    EXPECT_TRUE(connection_.HasQueuedData());
  }
}
// Crypto data sent from within OnCanWrite while processing an incoming
// crypto packet carries a bundled ack acking that packet (largest acked
// is packet 2).
TEST_P(QuicConnectionTest, BundleAckForSecondCHLO) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.HasPendingAcks());
  // The visitor sends crypto stream data when given a write opportunity.
  EXPECT_CALL(visitor_, OnCanWrite())
      .WillOnce(IgnoreResult(InvokeWithoutArgs(
          &connection_, &TestConnection::SendCryptoStreamData)));
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  }
  ForceWillingAndAbleToWriteOnceForDeferSending();
  // Process "the second CHLO" (packet 2); the response should bundle an
  // ack with the outgoing crypto data.
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  EXPECT_EQ(3u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  if (!QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_EQ(1u, writer_->stream_frames().size());
  } else {
    EXPECT_EQ(1u, writer_->crypto_frames().size());
  }
  EXPECT_EQ(1u, writer_->padding_frames().size());
  ASSERT_FALSE(writer_->ack_frames().empty());
  EXPECT_EQ(QuicPacketNumber(2u), LargestAcked(writer_->ack_frames().front()));
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Variant of BundleAckForSecondCHLO where two crypto packets arrive and
// the crypto-frame handler for the second triggers the send: the outgoing
// crypto data still bundles an ack with largest acked = packet 2.
TEST_P(QuicConnectionTest, BundleAckForSecondCHLOTwoPacketReject) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.HasPendingAcks());
  {
    // First crypto packet is merely received.
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
    } else {
      EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    }
    ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
    // Second crypto packet's frame handler sends crypto data in response.
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      EXPECT_CALL(visitor_, OnCryptoFrame(_))
          .WillOnce(IgnoreResult(InvokeWithoutArgs(
              &connection_, &TestConnection::SendCryptoStreamData)));
    } else {
      EXPECT_CALL(visitor_, OnStreamFrame(_))
          .WillOnce(IgnoreResult(InvokeWithoutArgs(
              &connection_, &TestConnection::SendCryptoStreamData)));
    }
    ForceWillingAndAbleToWriteOnceForDeferSending();
    ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  }
  EXPECT_EQ(3u, writer_->frame_count());
  EXPECT_TRUE(writer_->stop_waiting_frames().empty());
  if (!QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_EQ(1u, writer_->stream_frames().size());
  } else {
    EXPECT_EQ(1u, writer_->crypto_frames().size());
  }
  EXPECT_EQ(1u, writer_->padding_frames().size());
  ASSERT_FALSE(writer_->ack_frames().empty());
  EXPECT_EQ(QuicPacketNumber(2u), LargestAcked(writer_->ack_frames().front()));
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Processing incoming acks: a loss-triggering ack causes a retransmission
// (stream frame on the wire); a pure repeat ack writes nothing; and data
// sent from OnCanWrite during ack processing does NOT bundle an ack
// (acks of acks are not acked).
TEST_P(QuicConnectionTest, BundleAckWithDataOnIncomingAck) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Send two data packets so the peer has something to ack/nack.
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, NO_FIN);
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      3, NO_FIN);
  // Ack packet 2 while declaring packet 1 lost -> retransmission.
  QuicAckFrame ack = ConstructAckFrame(2, 1);
  LostPacketVector lost_packets;
  lost_packets.push_back(
      LostPacket(QuicPacketNumber(1), kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&ack);
  // The retransmission carries exactly one stream frame (plus padding).
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  writer_->Reset();
  // An ack covering the retransmission produces no outgoing frames.
  ack = ConstructAckFrame(3, 1);
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(&ack);
  EXPECT_EQ(0u, writer_->frame_count());
  EXPECT_FALSE(connection_.HasPendingAcks());
  writer_->Reset();
  // Sending data while processing a repeat ack: no ack is bundled.
  ack = ConstructAckFrame(3, 1);
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(visitor_, OnCanWrite())
      .WillOnce(IgnoreResult(InvokeWithoutArgs(
          &connection_, &TestConnection::EnsureWritableAndSendStreamData5)));
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessAckPacket(&ack);
  EXPECT_EQ(1u, writer_->frame_count());
  EXPECT_TRUE(writer_->ack_frames().empty());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// Receiving a connection-close packet closes the connection from the peer
// without sending any packet (no ack) in response.
TEST_P(QuicConnectionTest, NoAckSentForClose) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(1);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_PEER))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // Nothing may be sent in reaction to the peer's close.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessClosePacket(2);
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PEER_GOING_AWAY));
}
// After a silent close, the connection reports not-writable and any
// serialized packet's fate is DISCARD.
TEST_P(QuicConnectionTest, SendWhenDisconnected) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame))
;
  connection_.CloseConnection(QUIC_PEER_GOING_AWAY, "no reason",
                              ConnectionCloseBehavior::SILENT_CLOSE);
  EXPECT_FALSE(connection_.connected());
  EXPECT_FALSE(connection_.CanWrite(HAS_RETRANSMITTABLE_DATA));
  EXPECT_EQ(DISCARD, connection_.GetSerializedPacketFate(
                         false, ENCRYPTION_INITIAL));
}
// Sending a connectivity probe on a disconnected connection is a bug:
// it must trigger a QUIC_BUG and send nothing.
TEST_P(QuicConnectionTest, SendConnectivityProbingWhenDisconnected) {
  // Only meaningful under the default test configuration.
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  connection_.CloseConnection(QUIC_PEER_GOING_AWAY, "no reason",
                              ConnectionCloseBehavior::SILENT_CLOSE);
  EXPECT_FALSE(connection_.connected());
  EXPECT_FALSE(connection_.CanWrite(HAS_RETRANSMITTABLE_DATA));
  // No probe packet may be sent after disconnect.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(1), _, _))
      .Times(0);
  EXPECT_QUIC_BUG(connection_.SendConnectivityProbingPacket(
                      writer_.get(), connection_.peer_address()),
                  "Not sending connectivity probing packet as connection is "
                  "disconnected.");
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PEER_GOING_AWAY));
}
// A client probe sent on an alternate (probing) writer that blocks does
// NOT surface OnWriteBlocked to the visitor; the probe is still recorded
// as sent.
TEST_P(QuicConnectionTest, WriteBlockedAfterClientSendsConnectivityProbe) {
  PathProbeTestInit(Perspective::IS_CLIENT);
  // Separate writer for the probed path; it blocks on the next write.
  TestPacketWriter probing_writer(version(), &clock_, Perspective::IS_CLIENT);
  probing_writer.BlockOnNextWrite();
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(1), _, _))
      .Times(1);
  connection_.SendConnectivityProbingPacket(&probing_writer,
                                            connection_.peer_address());
}
// A server probe sent on the connection's default writer that blocks DOES
// surface OnWriteBlocked; IETF versions send a PATH_CHALLENGE instead of
// a padded-ping probe.
TEST_P(QuicConnectionTest, WriterBlockedAfterServerSendsConnectivityProbe) {
  PathProbeTestInit(Perspective::IS_SERVER);
  if (version().SupportsAntiAmplificationLimit()) {
    // Lift the anti-amplification limit so the probe can be sent.
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  writer_->BlockOnNextWrite();
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(1);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(1), _, _))
      .Times(1);
  if (VersionHasIetfQuicFrames(GetParam().version.transport_version)) {
    QuicPathFrameBuffer payload{
        {0xde, 0xad, 0xbe, 0xef, 0xba, 0xdc, 0x0f, 0xfe}};
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SendPathChallenge(
        payload, connection_.self_address(), connection_.peer_address(),
        connection_.effective_peer_address(), writer_.get());
  } else {
    connection_.SendConnectivityProbingPacket(writer_.get(),
                                              connection_.peer_address());
  }
}
// A write error on the client's probing writer neither closes the
// connection nor counts a packet as sent.
TEST_P(QuicConnectionTest, WriterErrorWhenClientSendsConnectivityProbe) {
  PathProbeTestInit(Perspective::IS_CLIENT);
  TestPacketWriter probing_writer(version(), &clock_, Perspective::IS_CLIENT);
  probing_writer.SetShouldWriteFail();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(1), _, _))
      .Times(0);
  connection_.SendConnectivityProbingPacket(&probing_writer,
                                            connection_.peer_address());
}
// A write error on the server's default writer during a probe likewise
// neither closes the connection nor counts a packet as sent.
TEST_P(QuicConnectionTest, WriterErrorWhenServerSendsConnectivityProbe) {
  PathProbeTestInit(Perspective::IS_SERVER);
  writer_->SetShouldWriteFail();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(1), _, _))
      .Times(0);
  connection_.SendConnectivityProbingPacket(writer_.get(),
                                            connection_.peer_address());
}
// An IETF stateless-reset packet carrying the token negotiated via config
// closes the connection from the peer with QUIC_PUBLIC_RESET.
TEST_P(QuicConnectionTest, IetfStatelessReset) {
  QuicConfig config;
  // Install the stateless-reset token the peer will use.
  QuicConfigPeer::SetReceivedStatelessResetToken(&config,
                                                 kTestStatelessResetToken);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  std::unique_ptr<QuicEncryptedPacket> packet(
      QuicFramer::BuildIetfStatelessResetPacket(connection_id_,
                                                100,
                                                kTestStatelessResetToken));
  std::unique_ptr<QuicReceivedPacket> received(
      ConstructReceivedPacket(*packet, QuicTime::Zero()));
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_PEER))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *received);
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PUBLIC_RESET));
}
// A received GOAWAY frame is delivered to the visitor. Skipped for IETF
// versions, which have no GOAWAY frame.
TEST_P(QuicConnectionTest, GoAway) {
  if (VersionHasIetfQuicFrames(GetParam().version.transport_version)) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Ownership of the frame passes to the packet-processing machinery.
  QuicGoAwayFrame* goaway = new QuicGoAwayFrame();
  goaway->last_good_stream_id = 1;
  goaway->error_code = QUIC_PEER_GOING_AWAY;
  goaway->reason_phrase = "Going away.";
  EXPECT_CALL(visitor_, OnGoAway(_));
  ProcessGoAwayPacket(goaway);
}
// A received WINDOW_UPDATE frame is delivered to the visitor.
TEST_P(QuicConnectionTest, WindowUpdate) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicWindowUpdateFrame window_update;
  window_update.stream_id = 3;
  window_update.max_data = 1234;
  EXPECT_CALL(visitor_, OnWindowUpdateFrame(_));
  ProcessFramePacket(QuicFrame(window_update));
}
// A received BLOCKED frame is delivered to the visitor and counted in the
// received (not sent) blocked-frame stats.
TEST_P(QuicConnectionTest, Blocked) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicBlockedFrame blocked;
  blocked.stream_id = 3;
  EXPECT_CALL(visitor_, OnBlockedFrame(_));
  ProcessFramePacket(QuicFrame(blocked));
  EXPECT_EQ(1u, connection_.GetStats().blocked_frames_received);
  EXPECT_EQ(0u, connection_.GetStats().blocked_frames_sent);
}
// A zero-length UDP datagram is ignored without closing the connection.
TEST_P(QuicConnectionTest, ZeroBytePacket) {
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  QuicReceivedPacket encrypted(nullptr, 0, QuicTime::Zero());
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, encrypted);
}
// A version-negotiation packet that excludes the client's version closes
// the connection with QUIC_INVALID_VERSION and sends nothing back.
TEST_P(QuicConnectionTest, ClientHandlesVersionNegotiation) {
  // Offer every supported version except the one this connection uses.
  ParsedQuicVersionVector versions;
  for (auto version : AllSupportedVersions()) {
    if (version != connection_.version()) {
      versions.push_back(version);
    }
  }
  std::unique_ptr<QuicEncryptedPacket> encrypted(
      QuicFramer::BuildVersionNegotiationPacket(
          connection_id_, EmptyQuicConnectionId(), true,
          connection_.version().HasLengthPrefixedConnectionIds(), versions));
  std::unique_ptr<QuicReceivedPacket> received(
      ConstructReceivedPacket(*encrypted, QuicTime::Zero()));
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // No packet is sent in response to version negotiation failure.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *received);
  EXPECT_FALSE(connection_.connected());
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_INVALID_VERSION));
}
// Same as ClientHandlesVersionNegotiation, but with the kINVC client
// connection option the connection close IS sent on the wire (at least
// one packet sent) before closing with QUIC_INVALID_VERSION.
TEST_P(QuicConnectionTest, ClientHandlesVersionNegotiationWithConnectionClose) {
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  QuicTagVector connection_options;
  // kINVC: send a connection close on invalid version negotiation.
  connection_options.push_back(kINVC);
  config.SetClientConnectionOptions(connection_options);
  connection_.SetFromConfig(config);
  ParsedQuicVersionVector versions;
  for (auto version : AllSupportedVersions()) {
    if (version != connection_.version()) {
      versions.push_back(version);
    }
  }
  std::unique_ptr<QuicEncryptedPacket> encrypted(
      QuicFramer::BuildVersionNegotiationPacket(
          connection_id_, EmptyQuicConnectionId(), true,
          connection_.version().HasLengthPrefixedConnectionIds(), versions));
  std::unique_ptr<QuicReceivedPacket> received(
      ConstructReceivedPacket(*encrypted, QuicTime::Zero()));
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // Unlike the default behavior, a close packet goes out.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1u));
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *received);
  EXPECT_FALSE(connection_.connected());
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_INVALID_VERSION));
}
// A version-negotiation packet that includes the connection's own version
// is invalid and closes the connection with
// QUIC_INVALID_VERSION_NEGOTIATION_PACKET.
TEST_P(QuicConnectionTest, BadVersionNegotiation) {
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // AllSupportedVersions() necessarily contains our current version, which
  // makes this VN packet malformed from the client's point of view.
  std::unique_ptr<QuicEncryptedPacket> encrypted(
      QuicFramer::BuildVersionNegotiationPacket(
          connection_id_, EmptyQuicConnectionId(), true,
          connection_.version().HasLengthPrefixedConnectionIds(),
          AllSupportedVersions()));
  std::unique_ptr<QuicReceivedPacket> received(
      ConstructReceivedPacket(*encrypted, QuicTime::Zero()));
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *received);
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_INVALID_VERSION_NEGOTIATION_PACKET));
}
// A packet containing a stream frame followed by a connection-close frame
// delivers the stream frame to the visitor before closing the connection
// from the peer.
TEST_P(QuicConnectionTest, ProcessFramesIfPacketClosedConnection) {
  // Build the packet header with the connection ID on whichever side the
  // peer framer requires.
  QuicPacketHeader header;
  if (peer_framer_.perspective() == Perspective::IS_SERVER) {
    header.source_connection_id = connection_id_;
    header.destination_connection_id_included = CONNECTION_ID_ABSENT;
  } else {
    header.destination_connection_id = connection_id_;
    header.destination_connection_id_included = CONNECTION_ID_ABSENT;
  }
  header.packet_number = QuicPacketNumber(1);
  header.version_flag = false;
  QuicErrorCode kQuicErrorCode = QUIC_PEER_GOING_AWAY;
  QuicConnectionCloseFrame qccf(peer_framer_.transport_version(),
                                kQuicErrorCode, NO_IETF_QUIC_ERROR, "",
                                0);
  // Stream frame first, close frame second — both in one packet.
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  frames.push_back(QuicFrame(&qccf));
  std::unique_ptr<QuicPacket> packet(ConstructPacket(header, frames));
  EXPECT_TRUE(nullptr != packet);
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length = peer_framer_.EncryptPayload(
      ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(1), *packet, buffer,
      kMaxOutgoingPacketSize);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_PEER))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // The stream frame preceding the close must still be delivered.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, QuicTime::Zero(), false));
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(QUIC_PEER_GOING_AWAY));
}
// SelectMutualVersion picks the highest mutually supported version,
// accepts a single lower version, and fails on an unsupported one.
TEST_P(QuicConnectionTest, SelectMutualVersion) {
  connection_.SetSupportedVersions(AllSupportedVersions());
  // Start from the minimum version so a change is observable.
  connection_.set_version(QuicVersionMin());
  EXPECT_EQ(QuicVersionMin(), connection_.version());
  ParsedQuicVersionVector supported_versions = AllSupportedVersions();
  EXPECT_TRUE(connection_.SelectMutualVersion(supported_versions));
  EXPECT_EQ(QuicVersionMax(), connection_.version());
  ParsedQuicVersionVector lowest_version_vector;
  lowest_version_vector.push_back(QuicVersionMin());
  EXPECT_TRUE(connection_.SelectMutualVersion(lowest_version_vector));
  EXPECT_EQ(QuicVersionMin(), connection_.version());
  ParsedQuicVersionVector unsupported_version;
  unsupported_version.push_back(UnsupportedQuicVersion());
  EXPECT_FALSE(connection_.SelectMutualVersion(unsupported_version));
}
// When the writer is writable, closing the connection produces at least
// one additional write attempt (the close packet).
TEST_P(QuicConnectionTest, ConnectionCloseWhenWritable) {
  EXPECT_FALSE(writer_->IsWriteBlocked());
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  TriggerConnectionClose();
  EXPECT_LE(2u, writer_->packets_write_attempts());
}
// If the close packet's write blocks, exactly one write attempt is made
// and the writer ends up blocked.
TEST_P(QuicConnectionTest, ConnectionCloseGettingWriteBlocked) {
  BlockOnNextWrite();
  TriggerConnectionClose();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_TRUE(writer_->IsWriteBlocked());
}
// If the writer is already blocked with a queued packet, closing the
// connection makes no further write attempt.
TEST_P(QuicConnectionTest, ConnectionCloseWhenWriteBlocked) {
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_TRUE(writer_->IsWriteBlocked());
  TriggerConnectionClose();
  // Still only the original attempt; the close cannot be written now.
  EXPECT_EQ(1u, writer_->packets_write_attempts());
}
// The debug visitor's OnPacketSent fires once per sent packet, for both
// stream data and connectivity probes.
TEST_P(QuicConnectionTest, OnPacketSentDebugVisitor) {
  PathProbeTestInit(Perspective::IS_CLIENT);
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(1, "foo", 0, NO_FIN);
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _)).Times(1);
  connection_.SendConnectivityProbingPacket(writer_.get(),
                                            connection_.peer_address());
}
// OnPacketHeader notifies both the debug visitor (with the same header
// object) and the session visitor of successful version negotiation.
TEST_P(QuicConnectionTest, OnPacketHeaderDebugVisitor) {
  QuicPacketHeader header;
  header.packet_number = QuicPacketNumber(1);
  header.form = IETF_QUIC_LONG_HEADER_PACKET;
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  // Ref(header): the visitor must see this exact header instance.
  EXPECT_CALL(debug_visitor, OnPacketHeader(Ref(header), _, _)).Times(1);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_)).Times(1);
  EXPECT_CALL(debug_visitor, OnSuccessfulVersionNegotiation(_)).Times(1);
  connection_.OnPacketHeader(header);
}
// Freshly constructed client and server test connections do not use
// pacing in their sent-packet managers.
TEST_P(QuicConnectionTest, Pacing) {
  TestConnection server(connection_id_, kPeerAddress, kSelfAddress,
                        helper_.get(), alarm_factory_.get(), writer_.get(),
                        Perspective::IS_SERVER, version(),
                        connection_id_generator_);
  TestConnection client(connection_id_, kSelfAddress, kPeerAddress,
                        helper_.get(), alarm_factory_.get(), writer_.get(),
                        Perspective::IS_CLIENT, version(),
                        connection_id_generator_);
  EXPECT_FALSE(QuicSentPacketManagerPeer::UsingPacing(
      static_cast<const QuicSentPacketManager*>(
          &client.sent_packet_manager())));
  EXPECT_FALSE(QuicSentPacketManagerPeer::UsingPacing(
      static_cast<const QuicSentPacketManager*>(
          &server.sent_packet_manager())));
}
// A received WINDOW_UPDATE frame makes an ack pending.
TEST_P(QuicConnectionTest, WindowUpdateInstigateAcks) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicWindowUpdateFrame window_update;
  window_update.stream_id = 3;
  window_update.max_data = 1234;
  EXPECT_CALL(visitor_, OnWindowUpdateFrame(_));
  ProcessFramePacket(QuicFrame(window_update));
  EXPECT_TRUE(connection_.HasPendingAcks());
}
// A received BLOCKED frame makes an ack pending.
TEST_P(QuicConnectionTest, BlockedFrameInstigateAcks) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  QuicBlockedFrame blocked;
  blocked.stream_id = 3;
  EXPECT_CALL(visitor_, OnBlockedFrame(_));
  ProcessFramePacket(QuicFrame(blocked));
  EXPECT_TRUE(connection_.HasPendingAcks());
}
// An incoming ack re-evaluates the pacer: a packet blocked by CanSend is
// released on ack processing, and the send alarm is re-armed for the
// previously scheduled pacing time.
TEST_P(QuicConnectionTest, ReevaluateTimeUntilSendOnAck) {
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, NO_FIN);
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "bar",
      3, NO_FIN);
  connection_.OnCanWrite();
  // Force strict pacing and schedule the next paced send 5ms out.
  QuicSentPacketManagerPeer::DisablePacerBursts(manager_);
  QuicTime scheduled_pacing_time =
      clock_.Now() + QuicTime::Delta::FromMilliseconds(5);
  QuicSentPacketManagerPeer::SetNextPacedPacketTime(manager_,
                                                    scheduled_pacing_time);
  // While congestion-blocked, the "baz" write neither sends nor arms the
  // send alarm.
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(false));
  connection_.SendStreamDataWithString(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "baz",
      6, NO_FIN);
  EXPECT_FALSE(connection_.GetSendAlarm()->IsSet());
  // Processing an ack (with sending unblocked) flushes the pending stream
  // frame and re-arms the send alarm at the scheduled pacing time.
  QuicAckFrame ack = InitAckFrame(1);
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  ProcessAckPacket(&ack);
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_TRUE(connection_.GetSendAlarm()->IsSet());
  EXPECT_EQ(scheduled_pacing_time, connection_.GetSendAlarm()->deadline());
  writer_->Reset();
}
// Acks bypass congestion blocking: an ack packet can be sent even while
// writes are congestion-blocked. Skipped for multiple packet number
// spaces.
TEST_P(QuicConnectionTest, SendAcksImmediately) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacket(1);
  CongestionBlockWrites();
  SendAckPacketToPeer();
}
// A PING control frame is sent immediately even while congestion-blocked,
// notifying both OnPacketSent and OnPingSent on the debug visitor.
TEST_P(QuicConnectionTest, SendPingImmediately) {
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  CongestionBlockWrites();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _)).Times(1);
  EXPECT_CALL(debug_visitor, OnPingSent()).Times(1);
  connection_.SendControlFrame(QuicFrame(QuicPingFrame(1)));
  EXPECT_FALSE(connection_.HasQueuedData());
}
// A BLOCKED control frame is sent immediately and counted in
// blocked_frames_sent, leaving no queued data.
TEST_P(QuicConnectionTest, SendBlockedImmediately) {
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _)).Times(1);
  EXPECT_EQ(0u, connection_.GetStats().blocked_frames_sent);
  connection_.SendControlFrame(QuicFrame(QuicBlockedFrame(1, 3, 0)));
  EXPECT_EQ(1u, connection_.GetStats().blocked_frames_sent);
  EXPECT_FALSE(connection_.HasQueuedData());
}
// With multiple packet number spaces (and no encryption level set up),
// sending a BLOCKED frame fails silently: nothing is sent, counted, or
// queued.
TEST_P(QuicConnectionTest, FailedToSendBlockedFrames) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  QuicBlockedFrame blocked(1, 3, 0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _)).Times(0);
  EXPECT_EQ(0u, connection_.GetStats().blocked_frames_sent);
  connection_.SendControlFrame(QuicFrame(blocked));
  EXPECT_EQ(0u, connection_.GetStats().blocked_frames_sent);
  EXPECT_FALSE(connection_.HasQueuedData());
}
// Attempting to send stream data at ENCRYPTION_INITIAL is a QUIC_BUG and
// closes the connection with QUIC_ATTEMPT_TO_SEND_UNENCRYPTED_STREAM_DATA.
TEST_P(QuicConnectionTest, SendingUnencryptedStreamDataFails) {
  // Only meaningful under the default test configuration.
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  EXPECT_QUIC_BUG(
      {
        EXPECT_CALL(visitor_,
                    OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
            .WillOnce(
                Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
        connection_.SaveAndSendStreamData(3, {}, 0, FIN);
        EXPECT_FALSE(connection_.connected());
        EXPECT_EQ(1, connection_close_frame_count_);
        EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
                    IsError(QUIC_ATTEMPT_TO_SEND_UNENCRYPTED_STREAM_DATA));
      },
      "Cannot send stream data with level: ENCRYPTION_INITIAL");
}
// Sending crypto data arms the retransmission alarm at the sent-packet
// manager's retransmission time; firing the alarm retransmits (one more
// OnPacketSent).
TEST_P(QuicConnectionTest, SetRetransmissionAlarmForCryptoPacket) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendCryptoStreamData();
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicTime retransmission_time =
      QuicConnectionPeer::GetSentPacketManager(&connection_)
          ->GetRetransmissionTime();
  // The deadline is in the future and matches the manager's computation.
  EXPECT_NE(retransmission_time, clock_.ApproximateNow());
  EXPECT_EQ(retransmission_time,
            connection_.GetRetransmissionAlarm()->deadline());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.GetRetransmissionAlarm()->Fire();
}
// Path-degrading detection lifecycle for retransmittable (non-crypto)
// data: detection starts when in-flight data exists, its deadline is not
// extended by additional sends, it restarts (with a fresh deadline) when
// some but not all data is acked, stops when everything is acked, and
// firing the detection marks the path degrading.
TEST_P(QuicConnectionTest, PathDegradingDetectionForNonCryptoPackets) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  // Two rounds: round 0 ends with all data acked (detection stops);
  // round 1 ends with the detection firing (path degrades).
  for (int i = 0; i < 2; ++i) {
    // First send starts the detection; deadline = now + degrading delay.
    connection_.SendStreamDataWithString(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()), data,
        offset, NO_FIN);
    offset += data_size;
    EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
    QuicTime::Delta delay =
        QuicConnectionPeer::GetSentPacketManager(&connection_)
            ->GetPathDegradingDelay();
    EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                         clock_.ApproximateNow());
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    // A second send must NOT extend the existing deadline.
    QuicTime prev_deadline =
        connection_.GetBlackholeDetectorAlarm()->deadline();
    connection_.SendStreamDataWithString(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()), data,
        offset, NO_FIN);
    offset += data_size;
    EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
    EXPECT_EQ(prev_deadline,
              connection_.GetBlackholeDetectorAlarm()->deadline());
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    if (i == 0) {
      EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
    }
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
    // Ack the first packet of this round: detection restarts with a
    // deadline recomputed from now.
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(1u + 2u * i), QuicPacketNumber(2u + 2u * i)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
    delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
                ->GetPathDegradingDelay();
    EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                         clock_.ApproximateNow());
    if (i == 0) {
      // Ack the remaining packet: nothing in flight, detection stops.
      clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
      EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
      frame = InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
      ProcessAckPacket(&frame);
      EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
    } else {
      // Let the detection fire: the path is reported degrading.
      clock_.AdvanceTime(delay);
      EXPECT_CALL(visitor_, OnPathDegrading());
      connection_.PathDegradingTimeout();
      EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
    }
  }
  EXPECT_TRUE(connection_.IsPathDegrading());
}
// Verifies that once the last retransmittable packet in flight is acked, the
// ping alarm is re-armed with the (much shorter) retransmittable-on-wire
// timeout instead of the normal keep-alive timeout, and that firing that ping
// restarts path-degrading (blackhole) detection.
TEST_P(QuicConnectionTest, RetransmittableOnWireSetsPingAlarm) {
  const QuicTime::Delta retransmittable_on_wire_timeout =
      QuicTime::Delta::FromMilliseconds(50);
  connection_.set_initial_retransmittable_on_wire_timeout(
      retransmittable_on_wire_timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  // Sending retransmittable data starts path-degrading detection and arms the
  // ping alarm with the default keep-alive timeout.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  QuicTime::Delta delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
                              ->GetPathDegradingDelay();
  EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                       clock_.ApproximateNow());
  ASSERT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  QuicTime::Delta ping_delay = QuicTime::Delta::FromSeconds(kPingTimeoutSecs);
  EXPECT_EQ(ping_delay,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Ack the only in-flight packet: detection stops and the ping alarm is
  // rescheduled to the retransmittable-on-wire timeout.
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(&frame);
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(retransmittable_on_wire_timeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // Firing the ping puts a retransmittable frame back on the wire, which
  // restarts path-degrading detection.
  clock_.AdvanceTime(retransmittable_on_wire_timeout);
  connection_.GetPingAlarm()->Fire();
  ASSERT_TRUE(connection_.PathDegradingDetectionInProgress());
  delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
              ->GetPathDegradingDelay();
  EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                       clock_.ApproximateNow());
}
// Verifies that a server with the kSRWP connection option (and the
// quic_enable_server_on_wire_ping flag) arms a 200ms retransmittable-on-wire
// ping after receiving a packet, cancels it while data is in flight, and
// re-arms it once that data is acked.
TEST_P(QuicConnectionTest, ServerRetransmittableOnWire) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  SetQuicReloadableFlag(quic_enable_server_on_wire_ping, true);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  QuicTagVector connection_options;
  // kSRWP enables the server-side retransmittable-on-wire ping.
  connection_options.push_back(kSRWP);
  config.SetInitialReceivedConnectionOptions(connection_options);
  connection_.SetFromConfig(config);
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  // Receiving a packet arms the on-wire ping with the 200ms default.
  ProcessPacket(1);
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  QuicTime::Delta ping_delay = QuicTime::Delta::FromMilliseconds(200);
  EXPECT_EQ(ping_delay,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  // While retransmittable data is in flight, no on-wire ping is needed.
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(2, &frame);
  // Once the data is acked, the on-wire ping is re-armed.
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(ping_delay,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
}
// Verifies the kROWF ("retransmittable on wire: first packet") client option:
// when the on-wire ping fires, the connection retransmits the first packet
// rather than sending a plain ping, then falls back to the normal keep-alive
// timeout.
TEST_P(QuicConnectionTest, RetransmittableOnWireSendFirstPacket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  const QuicTime::Delta kRetransmittableOnWireTimeout =
      QuicTime::Delta::FromMilliseconds(200);
  const QuicTime::Delta kTestRtt = QuicTime::Delta::FromMilliseconds(100);
  connection_.set_initial_retransmittable_on_wire_timeout(
      kRetransmittableOnWireTimeout);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kROWF);
  config.SetClientConnectionOptions(connection_options);
  connection_.SetFromConfig(config);
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  clock_.AdvanceTime(kTestRtt);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(&frame);
  // With everything acked, the ping alarm uses the on-wire timeout.
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(kRetransmittableOnWireTimeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  clock_.AdvanceTime(kRetransmittableOnWireTimeout);
  // Firing the alarm writes a second packet (the retransmitted first packet).
  connection_.GetPingAlarm()->Fire();
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  // The alarm is then re-armed with the regular keep-alive timeout.
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
}
// Verifies the kROWR ("retransmittable on wire: random bytes") client option:
// when the on-wire ping fires, the connection emits a packet of random bytes
// (intentionally unprocessable by the peer's framer) instead of a normal ping.
TEST_P(QuicConnectionTest, RetransmittableOnWireSendRandomBytes) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  const QuicTime::Delta kRetransmittableOnWireTimeout =
      QuicTime::Delta::FromMilliseconds(200);
  const QuicTime::Delta kTestRtt = QuicTime::Delta::FromMilliseconds(100);
  connection_.set_initial_retransmittable_on_wire_timeout(
      kRetransmittableOnWireTimeout);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kROWR);
  config.SetClientConnectionOptions(connection_options);
  connection_.SetFromConfig(config);
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  clock_.AdvanceTime(kTestRtt);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(&frame);
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(kRetransmittableOnWireTimeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  clock_.AdvanceTime(kRetransmittableOnWireTimeout);
  // The random-bytes packet is not a valid QUIC packet, so the test writer's
  // framer is told to expect an unprocessable packet.
  ExpectNextPacketUnprocessable();
  connection_.GetPingAlarm()->Fire();
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  // Afterwards the alarm falls back to the regular keep-alive timeout.
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
}
// Verifies that with kROWR, if the writer is blocked when the on-wire ping
// fires, the random-bytes packet is queued (not written) rather than dropped.
TEST_P(QuicConnectionTest,
       RetransmittableOnWireSendRandomBytesWithWriterBlocked) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  const QuicTime::Delta kRetransmittableOnWireTimeout =
      QuicTime::Delta::FromMilliseconds(200);
  const QuicTime::Delta kTestRtt = QuicTime::Delta::FromMilliseconds(100);
  connection_.set_initial_retransmittable_on_wire_timeout(
      kRetransmittableOnWireTimeout);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kROWR);
  config.SetClientConnectionOptions(connection_options);
  connection_.SetFromConfig(config);
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  clock_.AdvanceTime(kTestRtt);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(&frame);
  ASSERT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(kRetransmittableOnWireTimeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // Block the writer; processing the incoming packet queues an outgoing one.
  BlockOnNextWrite();
  ProcessDataPacket(3);
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  clock_.AdvanceTime(kRetransmittableOnWireTimeout);
  // With the writer still blocked, the fired ping only adds to the queue.
  connection_.GetPingAlarm()->Fire();
  EXPECT_EQ(2u, connection_.NumQueuedPackets());
}
// Verifies that once the path has been marked degrading, sending more
// retransmittable data does NOT restart path-degrading detection.
TEST_P(QuicConnectionTest, NoPathDegradingDetectionIfPathIsDegrading) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  // First retransmittable packet starts detection.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  QuicTime::Delta delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
                              ->GetPathDegradingDelay();
  EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                       clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  // A second send while detection is running must not move the deadline.
  QuicTime prev_deadline = connection_.GetBlackholeDetectorAlarm()->deadline();
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  EXPECT_EQ(prev_deadline, connection_.GetBlackholeDetectorAlarm()->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Acking packet 1 restarts detection for the still-unacked packet 2.
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1u), QuicPacketNumber(2u)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
              ->GetPathDegradingDelay();
  EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                       clock_.ApproximateNow());
  // Let detection time out: the path is now marked degrading.
  clock_.AdvanceTime(delay);
  EXPECT_CALL(visitor_, OnPathDegrading()).Times(1);
  connection_.PathDegradingTimeout();
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_TRUE(connection_.IsPathDegrading());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  // Key assertion: sending while already degrading does not restart detection.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_TRUE(connection_.IsPathDegrading());
}
// Verifies that (behind the quic_no_path_degrading_before_handshake_confirmed
// flag, for versions with multiple packet number spaces) path-degrading
// detection is not started while the handshake is merely COMPLETE, i.e. not
// yet CONFIRMED.
TEST_P(QuicConnectionTest, NoPathDegradingDetectionBeforeHandshakeConfirmed) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  connection_.SendStreamDataWithString(1, "data", 0, NO_FIN);
  if (GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed) &&
      connection_.SupportsMultiplePacketNumberSpaces()) {
    // New behavior: no detection until the handshake is confirmed.
    EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  } else {
    // Legacy behavior: detection starts as soon as data is sent.
    EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  }
}
// Verifies that after the path has been marked degrading, an ack that makes
// forward progress unmarks it, notifies the visitor, bumps the stats counter,
// and restarts path-degrading detection.
TEST_P(QuicConnectionTest, UnmarkPathDegradingOnForwardProgress) {
  EXPECT_TRUE(connection_.connected());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  // First retransmittable packet starts detection.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  QuicTime::Delta delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
                              ->GetPathDegradingDelay();
  EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                       clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  // A second send while detection is running must not move the deadline.
  QuicTime prev_deadline = connection_.GetBlackholeDetectorAlarm()->deadline();
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  EXPECT_EQ(prev_deadline, connection_.GetBlackholeDetectorAlarm()->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Acking packet 1 restarts detection for the still-unacked packet 2.
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1u), QuicPacketNumber(2u)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  delay = QuicConnectionPeer::GetSentPacketManager(&connection_)
              ->GetPathDegradingDelay();
  EXPECT_EQ(delay, connection_.GetBlackholeDetectorAlarm()->deadline() -
                       clock_.ApproximateNow());
  // Let detection time out: the path is marked degrading.
  clock_.AdvanceTime(delay);
  EXPECT_CALL(visitor_, OnPathDegrading()).Times(1);
  connection_.PathDegradingTimeout();
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_TRUE(connection_.IsPathDegrading());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  // Sending while degrading does not restart detection.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  EXPECT_TRUE(connection_.IsPathDegrading());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Forward progress (ack of packet 2) unmarks the degrading state and
  // notifies the visitor exactly once.
  EXPECT_CALL(visitor_, OnForwardProgressMadeAfterPathDegrading()).Times(1);
  frame = InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  ProcessAckPacket(&frame);
  EXPECT_EQ(1,
            connection_.GetStats().num_forward_progress_after_path_degrading);
  EXPECT_FALSE(connection_.IsPathDegrading());
  // Packet 3 is still outstanding, so detection is running again.
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
}
// Verifies that a server connection never starts path-degrading detection,
// neither when sending data nor when receiving acks.
TEST_P(QuicConnectionTest, NoPathDegradingOnServer) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  // Sending data as a server does not start detection.
  const char data[] = "data";
  connection_.SendStreamDataWithString(1, data, 0, NO_FIN);
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Receiving an ack does not start detection either.
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1u), QuicPacketNumber(2u)}});
  ProcessAckPacket(&frame);
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
}
// Verifies that sending an ack-only packet (which is unacked but not
// retransmittable, hence not in flight) does not start path-degrading
// detection.
TEST_P(QuicConnectionTest, NoPathDegradingAfterSendingAck) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacket(1);
  SendAckPacketToPeer();
  // The ack packet is tracked as unacked but carries no retransmittable data,
  // so nothing is considered "in flight".
  EXPECT_FALSE(connection_.sent_packet_manager().unacked_packets().empty());
  EXPECT_FALSE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
}
// Closing an already-closed connection must be a no-op: the visitor is
// notified exactly once no matter how many times CloseConnection is called.
TEST_P(QuicConnectionTest, MultipleCallsToCloseConnection) {
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(1);
  for (int close_attempt = 0; close_attempt < 2; ++close_attempt) {
    connection_.CloseConnection(QUIC_NO_ERROR, "no reason",
                                ConnectionCloseBehavior::SILENT_CLOSE);
  }
}
// Verifies that a server closes the connection when a CHLO handshake message
// arrives on a regular (non-crypto) stream, which indicates a broken or
// malicious peer.
TEST_P(QuicConnectionTest, ServerReceivesChloOnNonCryptoStream) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  // Build a serialized CHLO and deliver it on stream 10 (not a crypto stream).
  CryptoHandshakeMessage message;
  CryptoFramer framer;
  message.set_tag(kCHLO);
  std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
  frame1_.stream_id = 10;
  frame1_.data_buffer = data->data();
  frame1_.data_length = data->length();
  if (version().handshake_protocol == PROTOCOL_TLS1_3) {
    EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  }
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  ForceProcessFramePacket(QuicFrame(frame1_));
  // The reported error code differs between IETF and Google QUIC framing.
  if (VersionHasIetfQuicFrames(version().transport_version)) {
    TestConnectionCloseQuicErrorCode(IETF_QUIC_PROTOCOL_VIOLATION);
  } else {
    TestConnectionCloseQuicErrorCode(QUIC_MAYBE_CORRUPTED_MEMORY);
  }
}
// Verifies that a client closes the connection when a REJ handshake message
// arrives on a regular (non-crypto) stream.
TEST_P(QuicConnectionTest, ClientReceivesRejOnNonCryptoStream) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Build a serialized REJ and deliver it on stream 10 (not a crypto stream).
  CryptoHandshakeMessage message;
  CryptoFramer framer;
  message.set_tag(kREJ);
  std::unique_ptr<QuicData> data = framer.ConstructHandshakeMessage(message);
  frame1_.stream_id = 10;
  frame1_.data_buffer = data->data();
  frame1_.data_length = data->length();
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  ForceProcessFramePacket(QuicFrame(frame1_));
  // The reported error code differs between IETF and Google QUIC framing.
  if (VersionHasIetfQuicFrames(version().transport_version)) {
    TestConnectionCloseQuicErrorCode(IETF_QUIC_PROTOCOL_VIOLATION);
  } else {
    TestConnectionCloseQuicErrorCode(QUIC_MAYBE_CORRUPTED_MEMORY);
  }
}
// Verifies that a single MSG_TOO_BIG write error closes the connection with
// QUIC_PACKET_WRITE_ERROR.
TEST_P(QuicConnectionTest, CloseConnectionOnPacketTooLarge) {
  SimulateNextPacketTooLarge();
  // A connection close packet is sent, so the visitor is notified once.
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(1);
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  TestConnectionCloseQuicErrorCode(QUIC_PACKET_WRITE_ERROR);
}
// Verifies that the connection still closes cleanly (no crash, no infinite
// retry) when every write attempt — including the connection close packet
// itself — fails with a too-large error.
TEST_P(QuicConnectionTest, AlwaysGetPacketTooLarge) {
  AlwaysGetPacketTooLarge();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(1);
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  TestConnectionCloseQuicErrorCode(QUIC_PACKET_WRITE_ERROR);
}
// Verifies that a write error hit while flushing previously queued packets
// (after the writer unblocks) closes the connection and drains the queue.
TEST_P(QuicConnectionTest, CloseConnectionOnQueuedWriteError) {
  // Queue a packet while the writer is blocked.
  BlockOnNextWrite();
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  // Make all subsequent writes fail, then unblock and flush.
  AlwaysGetPacketTooLarge();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(1);
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  TestConnectionCloseQuicErrorCode(QUIC_PACKET_WRITE_ERROR);
}
// Verifies that the send algorithm is told the connection is application
// limited once the visitor reports it has nothing more to write.
TEST_P(QuicConnectionTest, SendDataAndBecomeApplicationLimited) {
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(1);
  {
    // Ordered: willing-to-write, then the packet send, then no-more-data.
    InSequence seq;
    EXPECT_CALL(visitor_, WillingAndAbleToWrite()).WillRepeatedly(Return(true));
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
    EXPECT_CALL(visitor_, WillingAndAbleToWrite())
        .WillRepeatedly(Return(false));
  }
  connection_.SendStreamData3();
}
// Verifies that OnApplicationLimited is NOT reported while the visitor still
// has more data it is willing and able to write.
TEST_P(QuicConnectionTest, NotBecomeApplicationLimitedIfMoreDataAvailable) {
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(0);
  {
    InSequence seq;
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
    // Visitor keeps reporting pending data, so no application-limited signal.
    EXPECT_CALL(visitor_, WillingAndAbleToWrite()).WillRepeatedly(Return(true));
  }
  connection_.SendStreamData3();
}
// Verifies that a blocked writer alone does not trigger OnApplicationLimited;
// the signal fires only later, when the application truly runs out of data
// (here: after the writer unblocks but congestion control blocks sends).
TEST_P(QuicConnectionTest, NotBecomeApplicationLimitedDueToWriteBlock) {
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(0);
  EXPECT_CALL(visitor_, WillingAndAbleToWrite()).WillRepeatedly(Return(true));
  BlockOnNextWrite();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamData3();
  // Unblock the writer but let congestion control prevent further sends.
  writer_->SetWritable();
  CongestionBlockWrites();
  EXPECT_CALL(visitor_, WillingAndAbleToWrite()).WillRepeatedly(Return(false));
  // Now OnCanWrite finds no packets to send and no app data: app-limited.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(1);
  connection_.OnCanWrite();
}
// Verifies that a pending ack is not bundled into the connection close packet
// generated by a packet-too-large write error.
TEST_P(QuicConnectionTest, DoNotForceSendingAckOnPacketTooLarge) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Receive a packet so an ack becomes pending, then fire the ack alarm.
  ProcessPacket(1);
  EXPECT_TRUE(connection_.HasPendingAcks());
  connection_.GetAckAlarm()->Fire();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  SimulateNextPacketTooLarge();
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  // The close packet carries only the CONNECTION_CLOSE (plus optional padding)
  // and no ack frame.
  EXPECT_EQ(1u, writer_->connection_close_frames().size());
  EXPECT_TRUE(writer_->ack_frames().empty());
  if (writer_->padding_frames().empty()) {
    EXPECT_EQ(1u, writer_->frame_count());
  } else {
    EXPECT_EQ(2u, writer_->frame_count());
  }
  TestConnectionCloseQuicErrorCode(QUIC_PACKET_WRITE_ERROR);
}
// Verifies that when multiple packet number spaces are in use, the connection
// close is sent at every open encryption level — as separate packets, or as a
// single coalesced packet when the version supports coalescing.
TEST_P(QuicConnectionTest, CloseConnectionAllLevels) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  const QuicErrorCode kQuicErrorCode = QUIC_INTERNAL_ERROR;
  connection_.CloseConnection(
      kQuicErrorCode, "Some random error message",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_EQ(2u, QuicConnectionPeer::GetNumEncryptionLevels(&connection_));
  TestConnectionCloseQuicErrorCode(kQuicErrorCode);
  EXPECT_EQ(1u, writer_->connection_close_frames().size());
  if (!connection_.version().CanSendCoalescedPackets()) {
    // Without coalescing: one close packet per encryption level.
    EXPECT_EQ(QuicConnectionPeer::GetNumEncryptionLevels(&connection_),
              writer_->connection_close_packets());
    EXPECT_EQ(QuicConnectionPeer::GetNumEncryptionLevels(&connection_),
              writer_->packets_write_attempts());
    return;
  }
  // With coalescing: a single write whose coalesced payload contains the
  // second close packet; process it to verify.
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_EQ(1u, writer_->connection_close_packets());
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  auto packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(1u, writer_->connection_close_packets());
  ASSERT_TRUE(writer_->coalesced_packet() == nullptr);
}
// Verifies that without multiple packet number spaces, the connection close is
// sent in exactly one packet, with no coalescing.
TEST_P(QuicConnectionTest, CloseConnectionOneLevel) {
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  const QuicErrorCode kQuicErrorCode = QUIC_INTERNAL_ERROR;
  connection_.CloseConnection(
      kQuicErrorCode, "Some random error message",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_EQ(2u, QuicConnectionPeer::GetNumEncryptionLevels(&connection_));
  TestConnectionCloseQuicErrorCode(kQuicErrorCode);
  // Exactly one close frame, one close packet, one write — nothing coalesced.
  EXPECT_EQ(1u, writer_->connection_close_frames().size());
  EXPECT_EQ(1u, writer_->connection_close_packets());
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  ASSERT_TRUE(writer_->coalesced_packet() == nullptr);
}
// Verifies that a server-sent INITIAL connection close is not padded to full
// size (padding is a client-side anti-amplification requirement).
TEST_P(QuicConnectionTest, DoNotPadServerInitialConnectionClose) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  // Receive an INITIAL crypto packet so the server has that level open.
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  if (version().handshake_protocol == PROTOCOL_TLS1_3) {
    EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  }
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  const QuicErrorCode kQuicErrorCode = QUIC_INTERNAL_ERROR;
  connection_.CloseConnection(
      kQuicErrorCode, "Some random error message",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_EQ(2u, QuicConnectionPeer::GetNumEncryptionLevels(&connection_));
  TestConnectionCloseQuicErrorCode(kQuicErrorCode);
  EXPECT_EQ(1u, writer_->connection_close_frames().size());
  // Key assertions: no padding, and the close went out at ENCRYPTION_INITIAL.
  EXPECT_TRUE(writer_->padding_frames().empty());
  EXPECT_EQ(ENCRYPTION_INITIAL, writer_->framer()->last_decrypted_level());
}
// Verifies that a write error while sending crypto/handshake data closes the
// connection with QUIC_PACKET_WRITE_ERROR, just like a data-packet failure.
TEST_P(QuicConnectionTest, FailedToWriteHandshakePacket) {
  SimulateNextPacketTooLarge();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(1);
  connection_.SendCryptoStreamData();
  TestConnectionCloseQuicErrorCode(QUIC_PACKET_WRITE_ERROR);
}
// The max pacing rate starts out unset (reported as 0 bytes/second) and
// reflects whatever value was last passed to SetMaxPacingRate.
TEST_P(QuicConnectionTest, MaxPacingRate) {
  EXPECT_EQ(0, connection_.MaxPacingRate().ToBytesPerSecond());
  constexpr int64_t kPacingRateBytesPerSecond = 100;
  connection_.SetMaxPacingRate(
      QuicBandwidth::FromBytesPerSecond(kPacingRateBytesPerSecond));
  EXPECT_EQ(kPacingRateBytesPerSecond,
            connection_.MaxPacingRate().ToBytesPerSecond());
}
// Verifies that a client always includes the destination connection ID in its
// packet headers, even after the peer negotiates "bytes for connection id = 0".
TEST_P(QuicConnectionTest, ClientAlwaysSendConnectionId) {
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_EQ(CONNECTION_ID_PRESENT,
            writer_->last_packet_header().destination_connection_id_included);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  // Peer asks for zero-length connection IDs...
  QuicConfig config;
  QuicConfigPeer::SetReceivedBytesForConnectionId(&config, 0);
  connection_.SetFromConfig(config);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(3, "bar", 3, NO_FIN);
  // ...but the client still sends the connection ID.
  EXPECT_EQ(CONNECTION_ID_PRESENT,
            writer_->last_packet_header().destination_connection_id_included);
}
// Verifies the ping alarm life cycle around the retransmittable-on-wire
// timeout: the alarm keeps the keep-alive deadline while data is in flight,
// switches to the shorter on-wire timeout once the last retransmittable
// packet is acked, does not get pushed out by duplicate acks or incoming
// data, and sends a PING frame when it fires.
TEST_P(QuicConnectionTest, PingAfterLastRetransmittablePacketAcked) {
  const QuicTime::Delta retransmittable_on_wire_timeout =
      QuicTime::Delta::FromMilliseconds(50);
  connection_.set_initial_retransmittable_on_wire_timeout(
      retransmittable_on_wire_timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  // First retransmittable packet arms the ping alarm with the keep-alive
  // timeout.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  QuicTime::Delta ping_delay = QuicTime::Delta::FromSeconds(kPingTimeoutSecs);
  EXPECT_EQ(ping_delay,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Acking packet 1 leaves packet 2 in flight: the alarm keeps the original
  // keep-alive deadline (now 10ms closer).
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(ping_delay - QuicTime::Delta::FromMilliseconds(10),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Acking the last retransmittable packet switches the alarm to the
  // retransmittable-on-wire timeout.
  frame = InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(retransmittable_on_wire_timeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // A duplicate ack must not reschedule the alarm.
  QuicTime prev_deadline = connection_.GetPingAlarm()->deadline();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  frame = InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(prev_deadline, connection_.GetPingAlarm()->deadline());
  // Incoming data must not reschedule the alarm either.
  prev_deadline = connection_.GetPingAlarm()->deadline();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  ProcessPacket(4);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(prev_deadline, connection_.GetPingAlarm()->deadline());
  // Firing the alarm sends a packet containing a PING frame.
  connection_.GetPingAlarm()->Fire();
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 2u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->ping_frames().size());
}
// Verifies that when new retransmittable data goes out after the on-wire
// timer was armed, the pending ping is superseded by the data; the on-wire
// timer is re-armed only after that data is acked, and its eventual fire
// sends a PING.
TEST_P(QuicConnectionTest, NoPingIfRetransmittablePacketSent) {
  const QuicTime::Delta retransmittable_on_wire_timeout =
      QuicTime::Delta::FromMilliseconds(50);
  connection_.set_initial_retransmittable_on_wire_timeout(
      retransmittable_on_wire_timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  QuicTime::Delta ping_delay = QuicTime::Delta::FromSeconds(kPingTimeoutSecs);
  EXPECT_EQ(ping_delay,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Ack of the only in-flight packet arms the on-wire timeout.
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(retransmittable_on_wire_timeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // New retransmittable data replaces the need for the pending on-wire ping.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  // Ack of the new data re-arms the on-wire timeout.
  frame = InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(retransmittable_on_wire_timeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // The fired alarm sends a packet whose only non-padding frame is a PING.
  writer_->Reset();
  connection_.GetPingAlarm()->Fire();
  size_t padding_frame_count = writer_->padding_frames().size();
  EXPECT_EQ(padding_frame_count + 1u, writer_->frame_count());
  ASSERT_EQ(1u, writer_->ping_frames().size());
}
// Verifies exponential backoff of the retransmittable-on-wire ping: the first
// N pings (N = quic_max_aggressive_retransmittable_on_wire_ping_count) use
// the initial timeout, subsequent pings double the timeout each time, and the
// timeout is capped at the regular keep-alive (kPingTimeoutSecs) interval.
TEST_P(QuicConnectionTest, BackOffRetransmittableOnWireTimeout) {
  int max_aggressive_retransmittable_on_wire_ping_count = 5;
  SetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count,
              max_aggressive_retransmittable_on_wire_ping_count);
  const QuicTime::Delta initial_retransmittable_on_wire_timeout =
      QuicTime::Delta::FromMilliseconds(200);
  connection_.set_initial_retransmittable_on_wire_timeout(
      initial_retransmittable_on_wire_timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  const char data[] = "data";
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  connection_.SendStreamDataWithString(1, data, 0, NO_FIN);
  EXPECT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
      .Times(AnyNumber());
  // Phase 1: the first (count + 1) acks each re-arm the alarm with the
  // un-backed-off initial timeout.
  for (int i = 0; i <= max_aggressive_retransmittable_on_wire_ping_count; i++) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    // Ack the most recently sent packet so nothing stays in flight.
    QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
    writer_->Reset();
    clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
    connection_.GetPingAlarm()->Fire();
  }
  // Phase 2: each further ping doubles the timeout until doubling would
  // exceed the keep-alive interval.
  QuicTime::Delta retransmittable_on_wire_timeout =
      initial_retransmittable_on_wire_timeout;
  while (retransmittable_on_wire_timeout * 2 <
         QuicTime::Delta::FromSeconds(kPingTimeoutSecs)) {
    retransmittable_on_wire_timeout = retransmittable_on_wire_timeout * 2;
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
    writer_->Reset();
    clock_.AdvanceTime(retransmittable_on_wire_timeout);
    connection_.GetPingAlarm()->Fire();
  }
  // Phase 3: once the backoff reaches the keep-alive interval, the alarm
  // simply uses the keep-alive deadline.
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  QuicPacketNumber ack_num = creator_->packet_number();
  QuicAckFrame frame = InitAckFrame(
      {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs) -
                QuicTime::Delta::FromMilliseconds(5),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
}
// Verifies that forward progress (receiving packets from the peer) resets
// the exponential backoff of the retransmittable-on-wire ping timeout back
// to its aggressive initial value.
// NOTE(review): the test name spells "Retransmitable" with one 't'; kept
// as-is because renaming would change the registered test identity.
TEST_P(QuicConnectionTest, ResetBackOffRetransmitableOnWireTimeout) {
  const int max_aggressive_retransmittable_on_wire_ping_count = 3;
  // Use the same local bound for the flag so the aggressive-ping loop below
  // stays in sync with the configured limit (was a hard-coded 3).
  SetQuicFlag(quic_max_aggressive_retransmittable_on_wire_ping_count,
              max_aggressive_retransmittable_on_wire_ping_count);
  const QuicTime::Delta initial_retransmittable_on_wire_timeout =
      QuicTime::Delta::FromMilliseconds(200);
  connection_.set_initial_retransmittable_on_wire_timeout(
      initial_retransmittable_on_wire_timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
      .Times(AnyNumber());
  const char data[] = "data";
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  // Sending data arms the ping alarm with the default keep-alive timeout.
  connection_.SendStreamDataWithString(1, data, 0, NO_FIN);
  EXPECT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  {
    // Once everything in flight is acked, the alarm switches to the
    // aggressive retransmittable-on-wire timeout.
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    QuicAckFrame frame =
        InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(2)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  }
  writer_->Reset();
  clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
  connection_.GetPingAlarm()->Fire();
  {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  }
  // Receiving new data from the peer keeps the aggressive initial timeout.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacket(peer_creator_.packet_number() + 1);
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_,
                                         peer_creator_.packet_number() + 1);
  EXPECT_EQ(initial_retransmittable_on_wire_timeout,
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
  connection_.GetPingAlarm()->Fire();
  // Up to the configured count, each ack re-arms the aggressive timeout.
  for (int i = 0; i < max_aggressive_retransmittable_on_wire_ping_count; i++) {
    const QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
    writer_->Reset();
    clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
    connection_.GetPingAlarm()->Fire();
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  }
  {
    // Past the aggressive limit, the timeout starts doubling.
    const QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout * 2,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  }
  writer_->Reset();
  clock_.AdvanceTime(2 * initial_retransmittable_on_wire_timeout);
  connection_.GetPingAlarm()->Fire();
  {
    // New data from the peer resets the backoff to the initial timeout.
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    ProcessDataPacket(peer_creator_.packet_number() + 1);
    QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_,
                                           peer_creator_.packet_number() + 1);
    const QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  }
}
// Verifies that the retransmittable-on-wire ping fires at most
// quic_max_retransmittable_on_wire_ping_count times; afterwards the ping
// alarm falls back to the default keep-alive timeout.
TEST_P(QuicConnectionTest, RetransmittableOnWirePingLimit) {
  static constexpr int kMaxRetransmittableOnWirePingCount = 3;
  SetQuicFlag(quic_max_retransmittable_on_wire_ping_count,
              kMaxRetransmittableOnWirePingCount);
  static constexpr QuicTime::Delta initial_retransmittable_on_wire_timeout =
      QuicTime::Delta::FromMilliseconds(200);
  static constexpr QuicTime::Delta short_delay =
      QuicTime::Delta::FromMilliseconds(5);
  // Sanity check: the small clock advances below must not by themselves
  // consume the on-wire timeout.
  ASSERT_LT(short_delay * 10, initial_retransmittable_on_wire_timeout);
  connection_.set_initial_retransmittable_on_wire_timeout(
      initial_retransmittable_on_wire_timeout);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  const char data[] = "data";
  clock_.AdvanceTime(short_delay);
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  // Sending data arms the ping alarm with the default keep-alive timeout.
  connection_.SendStreamDataWithString(1, data, 0, NO_FIN);
  EXPECT_TRUE(connection_.sent_packet_manager().HasInFlightPackets());
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _))
      .Times(AnyNumber());
  // Each ack re-arms the on-wire timeout; let the allowed number of on-wire
  // pings fire (the loop runs kMax + 1 times: the initial arm plus kMax).
  for (int i = 0; i <= kMaxRetransmittableOnWirePingCount; i++) {
    clock_.AdvanceTime(short_delay);
    QuicPacketNumber ack_num = creator_->packet_number();
    QuicAckFrame frame = InitAckFrame(
        {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
    ProcessAckPacket(&frame);
    EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
    EXPECT_EQ(initial_retransmittable_on_wire_timeout,
              connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
    writer_->Reset();
    clock_.AdvanceTime(initial_retransmittable_on_wire_timeout);
    connection_.GetPingAlarm()->Fire();
  }
  // Past the limit, the alarm uses the default keep-alive timeout again.
  QuicPacketNumber ack_num = creator_->packet_number();
  QuicAckFrame frame = InitAckFrame(
      {{QuicPacketNumber(ack_num), QuicPacketNumber(ack_num + 1)}});
  ProcessAckPacket(&frame);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(kPingTimeoutSecs),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
}
// Exercises stateless reset token validation: no token is valid before one
// is received, a mismatched token is rejected, and the token actually
// received from the peer is accepted.
TEST_P(QuicConnectionTest, ValidStatelessResetToken) {
  const StatelessResetToken kToken{0, 1, 0, 1, 0, 1, 0, 1,
                                   0, 1, 0, 1, 0, 1, 0, 1};
  const StatelessResetToken kMismatchedToken{0, 1, 0, 1, 0, 1, 0, 1,
                                             0, 1, 0, 1, 0, 1, 0, 2};
  QuicConfig negotiated_config;
  // Before any token is received, nothing validates.
  EXPECT_FALSE(connection_.IsValidStatelessResetToken(kToken));
  // SetFromConfig is invoked twice below.
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(2);
  QuicConfigPeer::SetReceivedStatelessResetToken(&negotiated_config, kToken);
  connection_.SetFromConfig(negotiated_config);
  // A token differing in any byte must be rejected.
  EXPECT_FALSE(connection_.IsValidStatelessResetToken(kMismatchedToken));
  QuicConfigPeer::SetReceivedStatelessResetToken(&negotiated_config, kToken);
  connection_.SetFromConfig(negotiated_config);
  EXPECT_TRUE(connection_.IsValidStatelessResetToken(kToken));
}
// An ack processed while the writer is blocked must not close the
// connection even though nothing can be written in response.
TEST_P(QuicConnectionTest, WriteBlockedWithInvalidAck) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // The connection must stay open throughout.
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  BlockOnNextWrite();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.SendStreamDataWithString(5, "foo", 0, FIN);
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessAckPacket(1, &frame);
  // No CONNECTION_CLOSE frame was generated.
  EXPECT_EQ(0, connection_close_frame_count_);
}
// Exercises SendMessage(): a payload exactly at the current limit succeeds,
// sending while congestion-blocked reports MESSAGE_STATUS_BLOCKED, and a
// payload one byte over the limit reports MESSAGE_STATUS_TOO_LARGE.
TEST_P(QuicConnectionTest, SendMessage) {
  if (connection_.version().UsesTls()) {
    QuicConfig config;
    // TLS versions require the peer's max_datagram_frame_size before any
    // message can be sent.
    QuicConfigPeer::SetReceivedMaxDatagramFrameSize(
        &config, kMaxAcceptedDatagramFrameSize);
    EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
    connection_.SetFromConfig(config);
  }
  std::string message(connection_.GetCurrentLargestMessagePayload() * 2, 'a');
  quiche::QuicheMemSlice slice;
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SendStreamData3();
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
    // A message exactly at the current largest payload fits.
    slice = MemSliceFromString(absl::string_view(
        message.data(), connection_.GetCurrentLargestMessagePayload()));
    EXPECT_EQ(MESSAGE_STATUS_SUCCESS,
              connection_.SendMessage(1, absl::MakeSpan(&slice, 1), false));
  }
  // Congestion control refuses to send: the message is reported blocked.
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillOnce(Return(false));
  slice = MemSliceFromString("message");
  EXPECT_EQ(MESSAGE_STATUS_BLOCKED,
            connection_.SendMessage(2, absl::MakeSpan(&slice, 1), false));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  // One byte over the limit is rejected without sending anything.
  slice = MemSliceFromString(absl::string_view(
      message.data(), connection_.GetCurrentLargestMessagePayload() + 1));
  EXPECT_EQ(MESSAGE_STATUS_TOO_LARGE,
            connection_.SendMessage(3, absl::MakeSpan(&slice, 1), false));
}
// Checks that GetCurrentLargestMessagePayload() reflects the per-version
// header overhead and, under TLS, stays zero until the peer advertises
// max_datagram_frame_size.
TEST_P(QuicConnectionTest, GetCurrentLargestMessagePayload) {
  // Start from the base payload size and adjust for version-specific
  // header overhead.
  QuicPacketLength anticipated_payload = 1215;
  if (connection_.version().SendsVariableLengthPacketNumberInLongHeader()) {
    anticipated_payload += 3;
  }
  if (connection_.version().HasLongHeaderLengths()) {
    anticipated_payload -= 2;
  }
  if (connection_.version().HasLengthPrefixedConnectionIds()) {
    anticipated_payload -= 1;
  }
  if (connection_.version().UsesTls()) {
    // Before max_datagram_frame_size is received, no payload is allowed.
    EXPECT_EQ(connection_.GetCurrentLargestMessagePayload(), 0);
    QuicConfig received_config;
    QuicConfigPeer::SetReceivedMaxDatagramFrameSize(
        &received_config, kMaxAcceptedDatagramFrameSize);
    EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
    connection_.SetFromConfig(received_config);
  }
  // With (or without) the negotiated datagram limit applied, the payload
  // matches the version-derived expectation.
  EXPECT_EQ(connection_.GetCurrentLargestMessagePayload(),
            anticipated_payload);
}
// Checks GetGuaranteedLargestMessagePayload(): the guaranteed bound does
// not include the variable-length packet-number adjustment and, under TLS,
// stays zero until max_datagram_frame_size is received.
TEST_P(QuicConnectionTest, GetGuaranteedLargestMessagePayload) {
  // Base payload size adjusted for version-specific header overhead.
  QuicPacketLength anticipated_payload = 1215;
  if (connection_.version().HasLongHeaderLengths()) {
    anticipated_payload -= 2;
  }
  if (connection_.version().HasLengthPrefixedConnectionIds()) {
    anticipated_payload -= 1;
  }
  if (connection_.version().UsesTls()) {
    // Before max_datagram_frame_size is received, no payload is allowed.
    EXPECT_EQ(connection_.GetGuaranteedLargestMessagePayload(), 0);
    QuicConfig received_config;
    QuicConfigPeer::SetReceivedMaxDatagramFrameSize(
        &received_config, kMaxAcceptedDatagramFrameSize);
    EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
    connection_.SetFromConfig(received_config);
  }
  EXPECT_EQ(connection_.GetGuaranteedLargestMessagePayload(),
            anticipated_payload);
}
// Verifies that a peer-advertised max_datagram_frame_size smaller than the
// packet-derived limit caps both the current and guaranteed message
// payload.
TEST_P(QuicConnectionTest, LimitedLargestMessagePayload) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  constexpr QuicPacketLength kFrameSizeLimit = 1000;
  // Payload limit is the frame limit minus the frame-type byte.
  constexpr QuicPacketLength kPayloadSizeLimit =
      kFrameSizeLimit - kQuicFrameTypeSize;
  // Nothing is allowed before the limit is received.
  EXPECT_EQ(connection_.GetCurrentLargestMessagePayload(), 0);
  EXPECT_EQ(connection_.GetGuaranteedLargestMessagePayload(), 0);
  QuicConfig config;
  QuicConfigPeer::SetReceivedMaxDatagramFrameSize(&config, kFrameSizeLimit);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_EQ(connection_.GetCurrentLargestMessagePayload(), kPayloadSizeLimit);
  EXPECT_EQ(connection_.GetGuaranteedLargestMessagePayload(),
            kPayloadSizeLimit);
}
// A server receiving a PATH_CHALLENGE must answer with a PATH_RESPONSE
// echoing the challenge's data buffer (RFC 9000 path validation).
TEST_P(QuicConnectionTest, ServerResponseToPathChallenge) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  // Send a probe so the writer captures a PATH_CHALLENGE to feed back in.
  connection_.SendConnectivityProbingPacket(writer_.get(),
                                            connection_.peer_address());
  ASSERT_GE(writer_->path_challenge_frames().size(), 1u);
  QuicPathFrameBuffer challenge_data =
      writer_->path_challenge_frames().front().data_buffer;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_TRUE(connection_.OnPathChallengeFrame(
      writer_->path_challenge_frames().front()));
  EXPECT_TRUE(connection_.OnPaddingFrame(writer_->padding_frames().front()));
  creator_->FlushCurrentPacket();
  EXPECT_EQ(1u, writer_->path_response_frames().size());
  // The response must echo the challenge payload byte-for-byte.
  EXPECT_EQ(0, memcmp(&challenge_data,
                      &(writer_->path_response_frames().front().data_buffer),
                      sizeof(challenge_data)));
}
// A client receiving a PATH_CHALLENGE on its default socket must answer
// with a PATH_RESPONSE echoing the challenge data.
// NOTE(review): "Defaul" in the test name is a typo for "Default"; kept
// as-is because renaming would change the registered test identity.
TEST_P(QuicConnectionTest, ClientResponseToPathChallengeOnDefaulSocket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  // Send a probe so the writer captures a PATH_CHALLENGE to feed back in.
  connection_.SendConnectivityProbingPacket(writer_.get(),
                                            connection_.peer_address());
  ASSERT_GE(writer_->path_challenge_frames().size(), 1u);
  QuicPathFrameBuffer challenge_data =
      writer_->path_challenge_frames().front().data_buffer;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_TRUE(connection_.OnPathChallengeFrame(
      writer_->path_challenge_frames().front()));
  EXPECT_TRUE(connection_.OnPaddingFrame(writer_->padding_frames().front()));
  creator_->FlushCurrentPacket();
  EXPECT_EQ(1u, writer_->path_response_frames().size());
  // The response must echo the challenge payload byte-for-byte.
  EXPECT_EQ(0, memcmp(&challenge_data,
                      &(writer_->path_response_frames().front().data_buffer),
                      sizeof(challenge_data)));
}
// A client must respond to a PATH_CHALLENGE received on an alternative
// (probed) socket from that same socket, and must trigger no write at all
// for a probe arriving on a third, unknown self address.
TEST_P(QuicConnectionTest, ClientResponseToPathChallengeOnAlternativeSocket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  QuicSocketAddress kNewSelfAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // First write: the path-validation probe goes out on the new writer.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }));
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  // Second write: the PATH_RESPONSE to the incoming challenge is sent from
  // the alternative socket.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(2u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_response_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }))
      .WillRepeatedly(DoDefault());  // Removed a stray empty statement here.
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  ProcessReceivedPacket(kNewSelfAddress, kPeerAddress, *received);
  QuicSocketAddress kNewerSelfAddress(QuicIpAddress::Loopback6(),
                                      34567);
  // A probe on an unknown self address must not be responded to.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0u);
  ProcessReceivedPacket(kNewerSelfAddress, kPeerAddress, *received);
}
// After path degrading is detected, a successful migration must clear the
// degrading state and restart path-degrading detection.
TEST_P(QuicConnectionTest,
       RestartPathDegradingDetectionAfterMigrationWithProbe) {
  if (!version().HasIetfQuicFrames() &&
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  PathProbeTestInit(Perspective::IS_CLIENT);
  const char data[] = "data";
  size_t data_size = strlen(data);
  QuicStreamOffset offset = 0;
  // Sending data starts path-degrading detection.
  connection_.SendStreamDataWithString(1, data, offset, NO_FIN);
  offset += data_size;
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
  EXPECT_FALSE(connection_.IsPathDegrading());
  QuicTime ddl = connection_.GetBlackholeDetectorAlarm()->deadline();
  // Let the detection deadline pass so the path is marked degrading.
  clock_.AdvanceTime(ddl - clock_.ApproximateNow());
  EXPECT_CALL(visitor_, OnPathDegrading()).Times(1);
  connection_.PathDegradingTimeout();
  EXPECT_TRUE(connection_.IsPathDegrading());
  EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
  if (!GetParam().version.HasIetfQuicFrames()) {
    // gQUIC-style probing: sending and receiving probes does not change the
    // degrading state by itself.
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
    TestPacketWriter probing_writer(version(), &clock_, Perspective::IS_CLIENT);
    connection_.SendConnectivityProbingPacket(&probing_writer,
                                              connection_.peer_address());
    EXPECT_FALSE(connection_.PathDegradingDetectionInProgress());
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(20));
    EXPECT_CALL(visitor_,
                OnPacketReceived(_, _, true))
        .Times(1);
    const QuicSocketAddress kNewSelfAddress =
        QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
    std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
    std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
        QuicEncryptedPacket(probing_packet->encrypted_buffer,
                            probing_packet->encrypted_length),
        clock_.Now()));
    uint64_t num_probing_received =
        connection_.GetStats().num_connectivity_probing_received;
    ProcessReceivedPacket(kNewSelfAddress, kPeerAddress, *received);
    EXPECT_EQ(num_probing_received +
                  (GetQuicReloadableFlag(quic_ignore_gquic_probing) ? 0u : 1u),
              connection_.GetStats().num_connectivity_probing_received);
    EXPECT_EQ(kPeerAddress, connection_.peer_address());
    EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
    EXPECT_TRUE(connection_.IsPathDegrading());
  }
  // A successful migration clears degrading and restarts detection.
  EXPECT_CALL(visitor_, OnForwardProgressMadeAfterPathDegrading()).Times(1);
  connection_.OnSuccessfulMigration( true);
  EXPECT_FALSE(connection_.IsPathDegrading());
  EXPECT_TRUE(connection_.PathDegradingDetectionInProgress());
}
// After a client path migration, RTT state, consecutive-PTO count and the
// send algorithm instance must all be reset for the new path.
TEST_P(QuicConnectionTest, ClientsResetCwndAfterConnectionMigration) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  PathProbeTestInit(Perspective::IS_CLIENT);
  EXPECT_EQ(kSelfAddress, connection_.self_address());
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  QuicTime::Delta default_init_rtt = rtt_stats->initial_rtt();
  // Perturb RTT and PTO state so the post-migration reset is observable.
  rtt_stats->set_initial_rtt(default_init_rtt * 2);
  EXPECT_EQ(2 * default_init_rtt, rtt_stats->initial_rtt());
  QuicSentPacketManagerPeer::SetConsecutivePtoCount(manager_, 1);
  EXPECT_EQ(1u, manager_->GetConsecutivePtoCount());
  const SendAlgorithmInterface* send_algorithm = manager_->GetSendAlgorithm();
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  connection_.MigratePath(kNewSelfAddress, connection_.peer_address(),
                          &new_writer, false);
  // Migration restores defaults and installs a fresh send algorithm.
  EXPECT_EQ(default_init_rtt, manager_->GetRttStats()->initial_rtt());
  EXPECT_EQ(0u, manager_->GetConsecutivePtoCount());
  EXPECT_NE(send_algorithm, manager_->GetSendAlgorithm());
}
// Firing the ack alarm while write-blocked must consume the pending-ack
// state without leaving a spurious alarm once writing resumes.
TEST_P(QuicConnectionTest, DoNotScheduleSpuriousAckAlarm) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AtLeast(1));
  writer_->SetWriteBlocked();
  ProcessPacket(1);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Blocked writer: firing the alarm must not send anything.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.GetAckAlarm()->Fire();
  writer_->SetWritable();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessPacket(2);
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// The kNPCO connection option must disable release-time (pacing offload)
// support even when the writer advertises it.
TEST_P(QuicConnectionTest, DisablePacingOffloadConnectionOptions) {
  EXPECT_FALSE(QuicConnectionPeer::SupportsReleaseTime(&connection_));
  writer_->set_supports_release_time(true);
  QuicConfig config;
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  // Without kNPCO, the writer's support is honored.
  EXPECT_TRUE(QuicConnectionPeer::SupportsReleaseTime(&connection_));
  QuicTagVector connection_options;
  connection_options.push_back(kNPCO);
  config.SetConnectionOptionsToSend(connection_options);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  // kNPCO turns release-time support off again.
  EXPECT_FALSE(QuicConnectionPeer::SupportsReleaseTime(&connection_));
}
// A PATH_RESPONSE that matches no outstanding PATH_CHALLENGE must be
// tolerated without being classified as the start of a probing packet.
TEST_P(QuicConnectionTest, OrphanPathResponse) {
  QuicPathFrameBuffer payload = {{0, 1, 2, 3, 4, 5, 6, 7}};
  QuicPathResponseFrame unsolicited_response(99, payload);
  EXPECT_TRUE(connection_.OnPathResponseFrame(unsolicited_response));
  // The current packet content must not be FIRST_FRAME_IS_PING.
  EXPECT_NE(QuicConnection::FIRST_FRAME_IS_PING,
            QuicConnectionPeer::GetCurrentPacketContent(&connection_));
}
// IETF QUIC allows packet number 0; the ack frame must track it and keep a
// single contiguous interval as subsequent packets arrive.
TEST_P(QuicConnectionTest, AcceptPacketNumberZero) {
  if (!VersionHasIetfQuicFrames(version().transport_version)) {
    return;
  }
  // Make the peer start sending from packet number 0.
  QuicFramerPeer::SetFirstSendingPacketNumber(writer_->framer()->framer(), 0);
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  ProcessPacket(0);
  EXPECT_EQ(QuicPacketNumber(0), LargestAcked(connection_.ack_frame()));
  EXPECT_EQ(1u, connection_.ack_frame().packets.NumIntervals());
  ProcessPacket(1);
  EXPECT_EQ(QuicPacketNumber(1), LargestAcked(connection_.ack_frame()));
  EXPECT_EQ(1u, connection_.ack_frame().packets.NumIntervals());
  ProcessPacket(2);
  EXPECT_EQ(QuicPacketNumber(2), LargestAcked(connection_.ack_frame()));
  EXPECT_EQ(1u, connection_.ack_frame().packets.NumIntervals());
}
// Basic sending with multiple packet number spaces: acks received at one
// encryption level apply to that space's packets.
TEST_P(QuicConnectionTest, MultiplePacketNumberSpacesBasicSending) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  connection_.SendCryptoStreamData();
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  QuicAckFrame frame1 = InitAckFrame(1);
  // Ack the crypto packet in the INITIAL space.
  ProcessFramePacketAtLevel(30, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(4);
  connection_.SendApplicationDataAtLevel(ENCRYPTION_ZERO_RTT, 5, "data", 0,
                                         NO_FIN);
  connection_.SendApplicationDataAtLevel(ENCRYPTION_ZERO_RTT, 5, "data", 4,
                                         NO_FIN);
  connection_.SendApplicationDataAtLevel(ENCRYPTION_FORWARD_SECURE, 5, "data",
                                         8, NO_FIN);
  connection_.SendApplicationDataAtLevel(ENCRYPTION_FORWARD_SECURE, 5, "data",
                                         12, FIN);
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  // Ack the application-data packets in the 1-RTT space.
  QuicAckFrame frame2 =
      InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)},
                    {QuicPacketNumber(4), QuicPacketNumber(6)}});
  ProcessFramePacketAtLevel(30, QuicFrame(&frame2), ENCRYPTION_FORWARD_SECURE);
}
// An ack arriving in the wrong packet number space (an INITIAL ack that
// claims application-space packets) must close the connection with
// QUIC_INVALID_ACK_DATA.
TEST_P(QuicConnectionTest, PeerAcksPacketsInWrongPacketNumberSpace) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(0x01));
  connection_.SendCryptoStreamData();
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  QuicAckFrame frame1 = InitAckFrame(1);
  ProcessFramePacketAtLevel(30, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  connection_.SendApplicationDataAtLevel(ENCRYPTION_ZERO_RTT, 5, "data", 0,
                                         NO_FIN);
  connection_.SendApplicationDataAtLevel(ENCRYPTION_ZERO_RTT, 5, "data", 4,
                                         NO_FIN);
  // This INITIAL-space ack claims packets that only exist in the
  // application-data space.
  QuicAckFrame invalid_ack =
      InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(4)}});
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  ProcessFramePacketAtLevel(300, QuicFrame(&invalid_ack), ENCRYPTION_INITIAL);
  TestConnectionCloseQuicErrorCode(QUIC_INVALID_ACK_DATA);
}
// Basic receiving with multiple packet number spaces: pending acks are
// tracked per space and flushed when the ack alarm fires or acks are
// bundled with outgoing data.
TEST_P(QuicConnectionTest, MultiplePacketNumberSpacesBasicReceiving) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  ProcessDataPacketAtLevel(1000, false, ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Sending data bundles an ack in the 1-RTT space (2 frames in packet).
  connection_.SendApplicationDataAtLevel(ENCRYPTION_FORWARD_SECURE, 5, "data",
                                         0, NO_FIN);
  EXPECT_EQ(2u, writer_->frame_count());
  // The INITIAL-space ack is still pending.
  EXPECT_TRUE(connection_.HasPendingAcks());
  ProcessDataPacketAtLevel(1001, false, ENCRYPTION_FORWARD_SECURE);
  clock_.AdvanceTime(DefaultRetransmissionTime());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  // Firing the alarm flushes acks in both spaces.
  connection_.GetAckAlarm()->Fire();
  EXPECT_FALSE(connection_.HasPendingAcks());
  ProcessDataPacketAtLevel(1002, false, ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessDataPacket(1003);
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// When the writer is blocked, firing the ack alarm must cancel the pending
// acks; they are sent later via OnCanWrite once the writer is unblocked.
TEST_P(QuicConnectionTest, CancelAckAlarmOnWriteBlocked) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  ProcessDataPacketAtLevel(1000, false, ENCRYPTION_ZERO_RTT);
  EXPECT_TRUE(connection_.HasPendingAcks());
  writer_->SetWriteBlocked();
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AnyNumber());
  clock_.AdvanceTime(DefaultDelayedAckTime());
  // Blocked writer: firing the alarm must not send anything.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.GetAckAlarm()->Fire();
  EXPECT_FALSE(connection_.HasPendingAcks());
  writer_->SetWritable();
  // Unblocking flushes the two pending ack packets.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  connection_.OnCanWrite();
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// A packet whose destination connection ID matches the configured client
// connection ID must be accepted (not counted as dropped).
TEST_P(QuicConnectionTest, ValidClientConnectionId) {
  if (!framer_.version().SupportsClientConnectionIds()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  SetClientConnectionId(TestConnectionId(0x33));
  QuicPacketHeader header = ConstructPacketHeader(1, ENCRYPTION_FORWARD_SECURE);
  // Matches the client connection ID installed above.
  header.destination_connection_id = TestConnectionId(0x33);
  header.destination_connection_id_included = CONNECTION_ID_PRESENT;
  header.source_connection_id_included = CONNECTION_ID_ABSENT;
  QuicFrames frames;
  QuicPingFrame ping_frame;
  QuicPaddingFrame padding_frame;
  frames.push_back(QuicFrame(ping_frame));
  frames.push_back(QuicFrame(padding_frame));
  std::unique_ptr<QuicPacket> packet =
      BuildUnsizedDataPacket(&peer_framer_, header, frames);
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length = peer_framer_.EncryptPayload(
      ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(1), *packet, buffer,
      kMaxOutgoingPacketSize);
  QuicReceivedPacket received_packet(buffer, encrypted_length, clock_.Now(),
                                     false);
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  ProcessReceivedPacket(kSelfAddress, kPeerAddress, received_packet);
  // The packet was accepted, not dropped.
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
}
// A packet whose destination connection ID does not match the configured
// client connection ID must be dropped.
TEST_P(QuicConnectionTest, InvalidClientConnectionId) {
  if (!framer_.version().SupportsClientConnectionIds()) {
    return;
  }
  SetClientConnectionId(TestConnectionId(0x33));
  QuicPacketHeader header = ConstructPacketHeader(1, ENCRYPTION_FORWARD_SECURE);
  // Deliberately different from the client connection ID installed above.
  header.destination_connection_id = TestConnectionId(0xbad);
  header.destination_connection_id_included = CONNECTION_ID_PRESENT;
  header.source_connection_id_included = CONNECTION_ID_ABSENT;
  QuicFrames frames;
  QuicPingFrame ping_frame;
  QuicPaddingFrame padding_frame;
  frames.push_back(QuicFrame(ping_frame));
  frames.push_back(QuicFrame(padding_frame));
  std::unique_ptr<QuicPacket> packet =
      BuildUnsizedDataPacket(&peer_framer_, header, frames);
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length = peer_framer_.EncryptPayload(
      ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(1), *packet, buffer,
      kMaxOutgoingPacketSize);
  QuicReceivedPacket received_packet(buffer, encrypted_length, clock_.Now(),
                                     false);
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  ProcessReceivedPacket(kSelfAddress, kPeerAddress, received_packet);
  // The mismatched packet was dropped.
  EXPECT_EQ(1u, connection_.GetStats().packets_dropped);
}
// A server must adopt the client connection ID carried as the source
// connection ID of the first received INITIAL packet.
TEST_P(QuicConnectionTest, UpdateClientConnectionIdFromFirstPacket) {
  if (!framer_.version().SupportsClientConnectionIds()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicPacketHeader header = ConstructPacketHeader(1, ENCRYPTION_INITIAL);
  // The client announces its connection ID as the packet's source CID.
  header.source_connection_id = TestConnectionId(0x33);
  header.source_connection_id_included = CONNECTION_ID_PRESENT;
  QuicFrames frames;
  QuicPingFrame ping_frame;
  QuicPaddingFrame padding_frame;
  frames.push_back(QuicFrame(ping_frame));
  frames.push_back(QuicFrame(padding_frame));
  std::unique_ptr<QuicPacket> packet =
      BuildUnsizedDataPacket(&peer_framer_, header, frames);
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length =
      peer_framer_.EncryptPayload(ENCRYPTION_INITIAL, QuicPacketNumber(1),
                                  *packet, buffer, kMaxOutgoingPacketSize);
  QuicReceivedPacket received_packet(buffer, encrypted_length, clock_.Now(),
                                     false);
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  ProcessReceivedPacket(kSelfAddress, kPeerAddress, received_packet);
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  // The server adopted the client's connection ID.
  EXPECT_EQ(TestConnectionId(0x33), connection_.client_connection_id());
}
// Shared helper: a client must adopt the server connection ID from the
// first INITIAL packet it receives, and then drop a later INITIAL that
// carries a different server connection ID.
void QuicConnectionTest::TestReplaceConnectionIdFromInitial() {
  if (!framer_.version().AllowsVariableLengthConnectionIds()) {
    return;
  }
  EXPECT_TRUE(connection_.connected());
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  EXPECT_NE(TestConnectionId(0x33), connection_.connection_id());
  {
    // First INITIAL: carries server CID 0x33, which the client adopts.
    QuicPacketHeader header = ConstructPacketHeader(1, ENCRYPTION_INITIAL);
    header.source_connection_id = TestConnectionId(0x33);
    header.source_connection_id_included = CONNECTION_ID_PRESENT;
    QuicFrames frames;
    QuicPingFrame ping_frame;
    QuicPaddingFrame padding_frame;
    frames.push_back(QuicFrame(ping_frame));
    frames.push_back(QuicFrame(padding_frame));
    std::unique_ptr<QuicPacket> packet =
        BuildUnsizedDataPacket(&peer_framer_, header, frames);
    char buffer[kMaxOutgoingPacketSize];
    size_t encrypted_length =
        peer_framer_.EncryptPayload(ENCRYPTION_INITIAL, QuicPacketNumber(1),
                                    *packet, buffer, kMaxOutgoingPacketSize);
    QuicReceivedPacket received_packet(buffer, encrypted_length, clock_.Now(),
                                       false);
    ProcessReceivedPacket(kSelfAddress, kPeerAddress, received_packet);
  }
  EXPECT_TRUE(connection_.connected());
  EXPECT_EQ(0u, connection_.GetStats().packets_dropped);
  EXPECT_EQ(TestConnectionId(0x33), connection_.connection_id());
  {
    // Second INITIAL: a different server CID (0x66) must be rejected.
    QuicPacketHeader header = ConstructPacketHeader(2, ENCRYPTION_INITIAL);
    header.source_connection_id = TestConnectionId(0x66);
    header.source_connection_id_included = CONNECTION_ID_PRESENT;
    QuicFrames frames;
    QuicPingFrame ping_frame;
    QuicPaddingFrame padding_frame;
    frames.push_back(QuicFrame(ping_frame));
    frames.push_back(QuicFrame(padding_frame));
    std::unique_ptr<QuicPacket> packet =
        BuildUnsizedDataPacket(&peer_framer_, header, frames);
    char buffer[kMaxOutgoingPacketSize];
    size_t encrypted_length =
        peer_framer_.EncryptPayload(ENCRYPTION_INITIAL, QuicPacketNumber(2),
                                    *packet, buffer, kMaxOutgoingPacketSize);
    QuicReceivedPacket received_packet(buffer, encrypted_length, clock_.Now(),
                                       false);
    ProcessReceivedPacket(kSelfAddress, kPeerAddress, received_packet);
  }
  EXPECT_TRUE(connection_.connected());
  // The second packet was dropped and the first CID is kept.
  EXPECT_EQ(1u, connection_.GetStats().packets_dropped);
  EXPECT_EQ(TestConnectionId(0x33), connection_.connection_id());
}
// Plain variant: the server replaces its connection ID via the first Initial
// packet, with no RETRY involved. Logic lives in the fixture helper.
TEST_P(QuicConnectionTest, ReplaceServerConnectionIdFromInitial) {
  TestReplaceConnectionIdFromInitial();
}
// Same replacement flow as above, but only after the client has first handled
// a valid RETRY packet (all failure-injection knobs off).
TEST_P(QuicConnectionTest, ReplaceServerConnectionIdFromRetryAndInitial) {
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
  // Re-derive the peer framer's Initial crypters for the post-retry
  // connection ID before running the replacement sequence.
  peer_framer_.SetInitialObfuscators(connection_.connection_id());
  TestReplaceConnectionIdFromInitial();
}
// A pending ack must be cancelled, not flushed, when a peer CONNECTION_CLOSE
// tears the connection down.
TEST_P(QuicConnectionTest, CheckConnectedBeforeFlush) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  const QuicErrorCode kErrorCode = QUIC_INTERNAL_ERROR;
  std::unique_ptr<QuicConnectionCloseFrame> connection_close_frame(
      new QuicConnectionCloseFrame(connection_.transport_version(), kErrorCode,
                                   NO_IETF_QUIC_ERROR, "",
                                   0));
  // Crypto data arrives as CRYPTO frames or stream frames, depending on the
  // version's handshake transport.
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  }
  // Receiving crypto data schedules an ack.
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // The peer's CONNECTION_CLOSE closes the connection; the pending ack is
  // expected to be gone afterwards.
  ProcessFramePacketWithAddresses(QuicFrame(connection_close_frame.release()),
                                  kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// One UDP datagram carrying three coalesced QUIC packets (two Initial, one
// forward-secure) is split and all three are processed; the connection stays
// alive.
TEST_P(QuicConnectionTest, CoalescedPacket) {
  if (!QuicVersionHasLongHeaderLengths(connection_.transport_version())) {
    // Coalescing relies on the long-header length field.
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_TRUE(connection_.connected());
  // Exactly one crypto/stream frame is expected per coalesced packet.
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(3);
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(3);
  }
  uint64_t packet_numbers[3] = {1, 2, 3};
  EncryptionLevel encryption_levels[3] = {
      ENCRYPTION_INITIAL, ENCRYPTION_INITIAL, ENCRYPTION_FORWARD_SECURE};
  char buffer[kMaxOutgoingPacketSize] = {};
  size_t total_encrypted_length = 0;
  // Serialize the three packets back-to-back into a single buffer.
  for (int i = 0; i < 3; i++) {
    QuicPacketHeader header =
        ConstructPacketHeader(packet_numbers[i], encryption_levels[i]);
    QuicFrames frames;
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      frames.push_back(QuicFrame(&crypto_frame_));
    } else {
      frames.push_back(QuicFrame(frame1_));
    }
    std::unique_ptr<QuicPacket> packet = ConstructPacket(header, frames);
    peer_creator_.set_encryption_level(encryption_levels[i]);
    size_t encrypted_length = peer_framer_.EncryptPayload(
        encryption_levels[i], QuicPacketNumber(packet_numbers[i]), *packet,
        buffer + total_encrypted_length,
        sizeof(buffer) - total_encrypted_length);
    EXPECT_GT(encrypted_length, 0u);
    total_encrypted_length += encrypted_length;
  }
  // Deliver all three packets in one datagram.
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, total_encrypted_length, clock_.Now(), false));
  if (connection_.GetSendAlarm()->IsSet()) {
    connection_.GetSendAlarm()->Fire();
  }
  EXPECT_TRUE(connection_.connected());
}
// Same coalesced-datagram setup as CoalescedPacket, but every received crypto
// (or stream) frame triggers sending a control frame from inside the visitor
// callback; the connection must survive and still be able to send an ack.
TEST_P(QuicConnectionTest, CoalescedPacketThatSavesFrames) {
  if (!QuicVersionHasLongHeaderLengths(connection_.transport_version())) {
    // Coalescing relies on the long-header length field.
    return;
  }
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_TRUE(connection_.connected());
  // Each of the three delivered frames sends a BLOCKED frame re-entrantly
  // from within the OnCryptoFrame/OnStreamFrame callback.
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_))
        .Times(3)
        .WillRepeatedly([this](const QuicCryptoFrame& ) {
          connection_.SendControlFrame(QuicFrame(QuicBlockedFrame(1, 3, 0)));
        });
  } else {
    EXPECT_CALL(visitor_, OnStreamFrame(_))
        .Times(3)
        .WillRepeatedly([this](const QuicStreamFrame& ) {
          connection_.SendControlFrame(QuicFrame(QuicBlockedFrame(1, 3, 0)));
        });
  }
  uint64_t packet_numbers[3] = {1, 2, 3};
  EncryptionLevel encryption_levels[3] = {
      ENCRYPTION_INITIAL, ENCRYPTION_INITIAL, ENCRYPTION_FORWARD_SECURE};
  char buffer[kMaxOutgoingPacketSize] = {};
  size_t total_encrypted_length = 0;
  // Serialize the three packets back-to-back into a single buffer.
  for (int i = 0; i < 3; i++) {
    QuicPacketHeader header =
        ConstructPacketHeader(packet_numbers[i], encryption_levels[i]);
    QuicFrames frames;
    if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
      frames.push_back(QuicFrame(&crypto_frame_));
    } else {
      frames.push_back(QuicFrame(frame1_));
    }
    std::unique_ptr<QuicPacket> packet = ConstructPacket(header, frames);
    peer_creator_.set_encryption_level(encryption_levels[i]);
    size_t encrypted_length = peer_framer_.EncryptPayload(
        encryption_levels[i], QuicPacketNumber(packet_numbers[i]), *packet,
        buffer + total_encrypted_length,
        sizeof(buffer) - total_encrypted_length);
    EXPECT_GT(encrypted_length, 0u);
    total_encrypted_length += encrypted_length;
  }
  // Deliver all three packets in one datagram.
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, total_encrypted_length, clock_.Now(), false));
  if (connection_.GetSendAlarm()->IsSet()) {
    connection_.GetSendAlarm()->Fire();
  }
  EXPECT_TRUE(connection_.connected());
  SendAckPacketToPeer();
}
// Firing the retransmission alarm while the writer is blocked must not queue
// any packets (nothing is sent; NumQueuedPackets stays 0).
TEST_P(QuicConnectionTest, RtoAndWriteBlocked) {
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_data_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_data_packet);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // Block the writer, then reset the stream so the outstanding data is dead.
  writer_->SetWriteBlocked();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AtLeast(1));
  EXPECT_CALL(visitor_, WillingAndAbleToWrite())
      .WillRepeatedly(
          Invoke(&notifier_, &SimpleSessionNotifier::WillingToWrite));
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 3);
  // Retransmission alarm fires while write-blocked: no packets queued.
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
}
// Firing the PTO while the writer is blocked ends with exactly one packet
// queued (one OnPacketSent is still expected when the alarm fires).
TEST_P(QuicConnectionTest, PtoAndWriteBlocked) {
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_data_packet;
  SendStreamDataToPeer(stream_id, "foo", 0, NO_FIN, &last_data_packet);
  SendStreamDataToPeer(4, "foo", 0, NO_FIN, &last_data_packet);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // Block the writer, then reset stream 2 so its data no longer matters.
  writer_->SetWriteBlocked();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AtLeast(1));
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 3);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
}
// With the k2PTO option, a probe-timeout probe retransmits the RST_STREAM for
// a reset stream rather than the superseded stream data.
TEST_P(QuicConnectionTest, ProbeTimeout) {
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k2PTO);
  config.SetConnectionOptionsToSend(connection_options);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foooooo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "foooooo", 7, NO_FIN, &last_packet);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // Reset the stream, which makes the outstanding stream data irrelevant.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  SendRstStream(stream_id, QUIC_ERROR_PROCESSING_STREAM, 3);
  // The probe carries the RST_STREAM, not the old stream frames.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(0u, writer_->stream_frames().size());
  EXPECT_EQ(1u, writer_->rst_stream_frames().size());
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
}
// With k1PTO/k6PTO, the client survives 5 PTO firings; on the next deadline
// the blackhole detector closes the connection with QUIC_TOO_MANY_RTOS.
TEST_P(QuicConnectionTest, CloseConnectionAfter6ClientPTOs) {
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k1PTO);
  connection_options.push_back(k6PTO);
  config.SetConnectionOptionsToSend(connection_options);
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2) ||
      GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed)) {
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  }
  connection_.OnHandshakeComplete();
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  // Fire 5 consecutive PTOs; the connection must stay alive throughout.
  for (int i = 0; i < 5; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.GetRetransmissionAlarm()->Fire();
    EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
    EXPECT_TRUE(connection_.connected());
  }
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.PathDegradingTimeout();
  EXPECT_EQ(5u, connection_.sent_packet_manager().GetConsecutivePtoCount());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  // The blackhole detector fires and closes the connection.
  ASSERT_TRUE(connection_.BlackholeDetectionInProgress());
  connection_.GetBlackholeDetectorAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_TOO_MANY_RTOS);
}
// Same as CloseConnectionAfter6ClientPTOs but with k2PTO/k7PTO: the client
// survives 6 PTO firings before the blackhole detector closes it.
TEST_P(QuicConnectionTest, CloseConnectionAfter7ClientPTOs) {
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k2PTO);
  connection_options.push_back(k7PTO);
  config.SetConnectionOptionsToSend(connection_options);
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2) ||
      GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed)) {
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  }
  connection_.OnHandshakeComplete();
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  // Fire 6 consecutive PTOs; the connection must stay alive throughout.
  for (int i = 0; i < 6; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
    connection_.GetRetransmissionAlarm()->Fire();
    EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
    EXPECT_TRUE(connection_.connected());
  }
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.PathDegradingTimeout();
  EXPECT_EQ(6u, connection_.sent_packet_manager().GetConsecutivePtoCount());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  // The blackhole detector fires and closes the connection.
  ASSERT_TRUE(connection_.BlackholeDetectionInProgress());
  connection_.GetBlackholeDetectorAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_TOO_MANY_RTOS);
}
// Same as the 6/7-PTO variants but with k2PTO/k8PTO: the client survives 7
// PTO firings before the blackhole detector closes the connection with
// QUIC_TOO_MANY_RTOS.
TEST_P(QuicConnectionTest, CloseConnectionAfter8ClientPTOs) {
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k2PTO);
  connection_options.push_back(k8PTO);
  // Attach the options before faking the negotiated state, matching the
  // ordering used by the 6- and 7-PTO sibling tests (previously this call
  // sat after the TLS block; the order of config setters is behavior-neutral
  // since nothing is consumed until SetFromConfig below).
  config.SetConnectionOptionsToSend(connection_options);
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2) ||
      GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed)) {
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  }
  connection_.OnHandshakeComplete();
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  // Fire 7 consecutive PTOs; the connection must stay alive throughout.
  for (int i = 0; i < 7; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
    connection_.GetRetransmissionAlarm()->Fire();
    EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
    EXPECT_TRUE(connection_.connected());
  }
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.PathDegradingTimeout();
  EXPECT_EQ(7u, connection_.sent_packet_manager().GetConsecutivePtoCount());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AtLeast(1));
  // The blackhole detector fires and closes the connection.
  ASSERT_TRUE(connection_.BlackholeDetectionInProgress());
  connection_.GetBlackholeDetectorAlarm()->Fire();
  EXPECT_FALSE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_TOO_MANY_RTOS);
}
// After the first crypto packet is acked, the retransmission alarm stays
// armed; firing it sends a PTO probe (a PING) and bumps both pto_count and
// crypto_retransmit_count.
TEST_P(QuicConnectionTest, DeprecateHandshakeMode) {
  if (!connection_.version().SupportsAntiAmplificationLimit()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // Send a crypto packet; this arms the retransmission alarm.
  connection_.SendCryptoStreamData();
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  // Ack packet 1; the alarm must remain set even with nothing in flight.
  QuicAckFrame frame1 = InitAckFrame(1);
  ProcessFramePacketAtLevel(1, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(0u, connection_.GetStats().pto_count);
  EXPECT_EQ(0u, connection_.GetStats().crypto_retransmit_count);
  // Firing the alarm sends packet 3 as the probe.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(3), _, _));
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(1u, connection_.GetStats().pto_count);
  EXPECT_EQ(1u, connection_.GetStats().crypto_retransmit_count);
  EXPECT_EQ(1u, writer_->ping_frames().size());
}
// A server may send at most quic_anti_amplification_factor times the bytes it
// has received before the address is validated; each received packet unlocks
// another budget window, and ProcessPacket(3) lifts the limit entirely.
TEST_P(QuicConnectionTest, AntiAmplificationLimit) {
  if (!connection_.version().SupportsAntiAmplificationLimit() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  set_perspective(Perspective::IS_SERVER);
  // Nothing received yet: sending is fully blocked.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_FALSE(connection_.CanWrite(HAS_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.CanWrite(NO_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // First received packet unlocks a sending budget (and flushes the queued
  // crypto data above).
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  const size_t anti_amplification_factor =
      GetQuicFlag(quic_anti_amplification_factor);
  for (size_t i = 1; i < anti_amplification_factor; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.SendCryptoDataWithString("foo", i * 3);
    // The retransmission alarm is armed except exactly at the budget edge.
    EXPECT_EQ(i != anti_amplification_factor - 1,
              connection_.GetRetransmissionAlarm()->IsSet());
  }
  // Budget exhausted: this send is throttled.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", anti_amplification_factor * 3);
  // A second received packet unlocks another budget window.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  for (size_t i = anti_amplification_factor + 1;
       i < anti_amplification_factor * 2; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.SendCryptoDataWithString("foo", i * 3);
  }
  // Second budget exhausted again.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo",
                                       2 * anti_amplification_factor * 3);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  // Address validated: sending becomes unlimited.
  ProcessPacket(3);
  for (size_t i = 0; i < 100; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    // NOTE(review): the offset expression is `i * 0`, i.e. always 0 —
    // presumably resending the same bytes is intentional here; confirm.
    connection_.SendStreamDataWithString(3, "first", i * 0, NO_FIN);
  }
}
// Same scenario as AntiAmplificationLimit, but the k3AFF connection option
// pins the amplification factor to 3.
TEST_P(QuicConnectionTest, 3AntiAmplificationLimit) {
  if (!connection_.version().SupportsAntiAmplificationLimit() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  set_perspective(Perspective::IS_SERVER);
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k3AFF);
  config.SetInitialReceivedConnectionOptions(connection_options);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(&config,
                                                         QuicConnectionId());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  // Nothing received yet: sending is fully blocked.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_FALSE(connection_.CanWrite(HAS_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.CanWrite(NO_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // First received packet unlocks a 3x sending budget.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  const size_t anti_amplification_factor = 3;
  for (size_t i = 1; i < anti_amplification_factor; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.SendCryptoDataWithString("foo", i * 3);
    // The retransmission alarm is armed except exactly at the budget edge.
    EXPECT_EQ(i != anti_amplification_factor - 1,
              connection_.GetRetransmissionAlarm()->IsSet());
  }
  // Budget exhausted: this send is throttled.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", anti_amplification_factor * 3);
  // A second received packet unlocks another budget window.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  for (size_t i = anti_amplification_factor + 1;
       i < anti_amplification_factor * 2; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.SendCryptoDataWithString("foo", i * 3);
  }
  // Second budget exhausted again.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo",
                                       2 * anti_amplification_factor * 3);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  // Address validated: sending becomes unlimited.
  ProcessPacket(3);
  for (size_t i = 0; i < 100; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    // NOTE(review): offset is `i * 0` (always 0), same as the sibling tests —
    // presumably intentional; confirm.
    connection_.SendStreamDataWithString(3, "first", i * 0, NO_FIN);
  }
}
// Same scenario as AntiAmplificationLimit, but the k10AF connection option
// pins the amplification factor to 10.
TEST_P(QuicConnectionTest, 10AntiAmplificationLimit) {
  if (!connection_.version().SupportsAntiAmplificationLimit() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  set_perspective(Perspective::IS_SERVER);
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k10AF);
  config.SetInitialReceivedConnectionOptions(connection_options);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(&config,
                                                         QuicConnectionId());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  // Nothing received yet: sending is fully blocked.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_FALSE(connection_.CanWrite(HAS_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.CanWrite(NO_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // First received packet unlocks a 10x sending budget.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  const size_t anti_amplification_factor = 10;
  for (size_t i = 1; i < anti_amplification_factor; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.SendCryptoDataWithString("foo", i * 3);
    // The retransmission alarm is armed except exactly at the budget edge.
    EXPECT_EQ(i != anti_amplification_factor - 1,
              connection_.GetRetransmissionAlarm()->IsSet());
  }
  // Budget exhausted: this send is throttled.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", anti_amplification_factor * 3);
  // A second received packet unlocks another budget window.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  for (size_t i = anti_amplification_factor + 1;
       i < anti_amplification_factor * 2; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    connection_.SendCryptoDataWithString("foo", i * 3);
  }
  // Second budget exhausted again.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo",
                                       2 * anti_amplification_factor * 3);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  // Address validated: sending becomes unlimited.
  ProcessPacket(3);
  for (size_t i = 0; i < 100; ++i) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
    // NOTE(review): offset is `i * 0` (always 0), same as the sibling tests —
    // presumably intentional; confirm.
    connection_.SendStreamDataWithString(3, "first", i * 0, NO_FIN);
  }
}
// When the ack alarm fires while the server is amplification-limited, no
// packet goes out; the ack is bundled with the next packet once another
// received packet unlocks budget.
TEST_P(QuicConnectionTest, AckPendingWithAmplificationLimited) {
  if (!connection_.version().SupportsAntiAmplificationLimit()) {
    return;
  }
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(AnyNumber());
  set_perspective(Perspective::IS_SERVER);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Send handshake data until the amplification limit stops further writes.
  size_t i = 0;
  while (connection_.CanWrite(HAS_RETRANSMITTABLE_DATA)) {
    connection_.SendCryptoDataWithString(std::string(1024, 'a'), i * 1024,
                                         ENCRYPTION_HANDSHAKE);
    ++i;
  }
  EXPECT_TRUE(connection_.HasPendingAcks());
  // The ack alarm fires while limited: nothing may be sent.
  clock_.AdvanceTime(connection_.GetAckAlarm()->deadline() - clock_.Now());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.GetAckAlarm()->Fire();
  EXPECT_FALSE(connection_.HasPendingAcks());
  // A newly received packet unlocks budget; the outgoing packet carries acks.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  EXPECT_FALSE(writer_->ack_frames().empty());
}
// A self-initiated IETF transport CONNECTION_CLOSE must record the frame type
// that was being processed when the error was raised.
TEST_P(QuicConnectionTest, ConnectionCloseFrameType) {
  if (!VersionHasIetfQuicFrames(version().transport_version)) {
    // Only IETF QUIC carries a transport close frame type.
    return;
  }
  const QuicErrorCode kQuicErrorCode = IETF_QUIC_PROTOCOL_VIOLATION;
  // Pretend the framer is in the middle of processing this frame type.
  const uint64_t kTransportCloseFrameType = 9999u;
  QuicFramerPeer::set_current_received_frame_type(
      QuicConnectionPeer::GetFramer(&connection_), kTransportCloseFrameType);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  connection_.CloseConnection(
      kQuicErrorCode, "Some random error message",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  const std::vector<QuicConnectionCloseFrame>& connection_close_frames =
      writer_->connection_close_frames();
  ASSERT_EQ(1u, connection_close_frames.size());
  EXPECT_EQ(IETF_QUIC_TRANSPORT_CONNECTION_CLOSE,
            connection_close_frames[0].close_type);
  EXPECT_EQ(kQuicErrorCode, connection_close_frames[0].quic_error_code);
  EXPECT_EQ(kTransportCloseFrameType,
            connection_close_frames[0].transport_close_frame_type);
}
// With kPTOS, the PTO probe skips a packet number: after packets 1 and 2, the
// probe goes out as packet 4 (3 is skipped).
TEST_P(QuicConnectionTest, PtoSkipsPacketNumber) {
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k1PTO);
  connection_options.push_back(kPTOS);
  config.SetConnectionOptionsToSend(connection_options);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicStreamId stream_id = 2;
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(stream_id, "foooooo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(stream_id, "foooooo", 7, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(2), last_packet);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // Fire the PTO; the probe must use packet number 4.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(QuicPacketNumber(4), writer_->last_packet_header().packet_number);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
}
// Writes at three encryption levels inside one flusher scope are coalesced
// into a single UDP write, with the remainder kept as a coalesced packet.
TEST_P(QuicConnectionTest, SendCoalescedPackets) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  // Three individual packets, one coalesced send.
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _)).Times(3);
  EXPECT_CALL(debug_visitor, OnCoalescedPacketSent(_, _)).Times(1);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    connection_.SendCryptoDataWithString("foo", 0);
    // No write happens while the flusher is open.
    EXPECT_EQ(0u, writer_->packets_write_attempts());
    connection_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                             std::make_unique<TaggingEncrypter>(0x02));
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString("bar", 3);
    EXPECT_EQ(0u, writer_->packets_write_attempts());
    connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                             std::make_unique<TaggingEncrypter>(0x03));
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    SendStreamDataToPeer(2, "baz", 3, NO_FIN, nullptr);
  }
  // Exactly one padded-to-max write; the trailing bytes carry the 1-RTT tag.
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  EXPECT_EQ(connection_.max_packet_length(), writer_->last_packet_size());
  EXPECT_EQ(1u, writer_->crypto_frames().size());
  EXPECT_EQ(0u, writer_->stream_frames().size());
  // The stream data remains buffered in the coalesced packet.
  EXPECT_NE(nullptr, writer_->coalesced_packet());
}
// Forces coalesced-packet serialization to fail (by shrinking the max length
// and corrupting the buffered ciphertext) and verifies the connection closes
// with QUIC_FAILED_TO_SERIALIZE_PACKET, under EXPECT_QUIC_BUG.
TEST_P(QuicConnectionTest, FailToCoalescePacket) {
  if (!IsDefaultTestConfiguration() ||
      !connection_.version().CanSendCoalescedPackets() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  auto test_body = [&] {
    EXPECT_CALL(visitor_,
                OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
        .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
    ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_INITIAL);
    {
      QuicConnection::ScopedPacketFlusher flusher(&connection_);
      connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
      connection_.SendCryptoDataWithString("foo", 0);
      EXPECT_EQ(0u, writer_->packets_write_attempts());
      connection_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                               std::make_unique<TaggingEncrypter>(0x02));
      connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
      connection_.SendCryptoDataWithString("bar", 3);
      EXPECT_EQ(0u, writer_->packets_write_attempts());
      connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                               std::make_unique<TaggingEncrypter>(0x03));
      connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
      SendStreamDataToPeer(2, "baz", 3, NO_FIN, nullptr);
      creator_->Flush();
      // Sabotage the pending coalesced packet: cap its max length at its
      // current length, then append extra bytes so serialization overflows.
      auto& coalesced_packet =
          QuicConnectionPeer::GetCoalescedPacket(&connection_);
      QuicPacketLength coalesced_packet_max_length =
          coalesced_packet.max_packet_length();
      QuicCoalescedPacketPeer::SetMaxPacketLength(coalesced_packet,
                                                  coalesced_packet.length());
      *QuicCoalescedPacketPeer::GetMutableEncryptedBuffer(
          coalesced_packet, ENCRYPTION_FORWARD_SECURE) += "!!! TEST !!!";
      QUIC_LOG(INFO) << "Reduced coalesced_packet_max_length from "
                     << coalesced_packet_max_length << " to "
                     << coalesced_packet.max_packet_length()
                     << ", coalesced_packet.length:"
                     << coalesced_packet.length()
                     << ", coalesced_packet.packet_lengths:"
                     << absl::StrJoin(coalesced_packet.packet_lengths(), ":");
    }
    EXPECT_FALSE(connection_.connected());
    EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
                IsError(QUIC_FAILED_TO_SERIALIZE_PACKET));
    EXPECT_EQ(saved_connection_close_frame_.error_details,
              "Failed to serialize coalesced packet.");
  };
  // The failure path trips a QUIC_BUG; the whole body runs inside it.
  EXPECT_QUIC_BUG(test_body(), "SerializeCoalescedPacket failed.");
}
// A HANDSHAKE_DONE frame received by a TLS client is surfaced to the visitor
// via OnHandshakeDoneReceived.
TEST_P(QuicConnectionTest, ClientReceivedHandshakeDone) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  EXPECT_CALL(visitor_, OnHandshakeDoneReceived());
  // Deliver a 1-RTT packet carrying HANDSHAKE_DONE followed by padding.
  QuicFrames received_frames = {QuicFrame(QuicHandshakeDoneFrame()),
                                QuicFrame(QuicPaddingFrame(-1))};
  ProcessFramesPacketAtLevel(1, received_frames, ENCRYPTION_FORWARD_SECURE);
}
// A server receiving HANDSHAKE_DONE is a protocol violation: the visitor must
// not see it, and the connection closes itself with
// IETF_QUIC_PROTOCOL_VIOLATION.
TEST_P(QuicConnectionTest, ServerReceivedHandshakeDone) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnHandshakeDoneReceived()).Times(0);
  if (version().handshake_protocol == PROTOCOL_TLS1_3) {
    EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  }
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicHandshakeDoneFrame()));
  frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
  ProcessFramesPacketAtLevel(1, frames, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(1, connection_close_frame_count_);
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(IETF_QUIC_PROTOCOL_VIOLATION));
}
// PTO behavior across packet number spaces: with both handshake and 1-RTT
// data outstanding, probes target the right space (verified via the tag bytes
// the TaggingEncrypter leaves at the end of each packet) before and after
// handshake completion.
TEST_P(QuicConnectionTest, MultiplePacketNumberSpacePto) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  // Handshake-level crypto data; tag bytes 0x01 mark the handshake packet.
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  EXPECT_EQ(0x01010101u, writer_->final_bytes_of_last_packet());
  // 1-RTT application data; tag bytes 0x03 mark the forward-secure packet.
  connection_.SendApplicationDataAtLevel(ENCRYPTION_FORWARD_SECURE, 5, "data",
                                         0, NO_FIN);
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  QuicTime retransmission_time =
      connection_.GetRetransmissionAlarm()->deadline();
  EXPECT_NE(QuicTime::Zero(), retransmission_time);
  // First PTO fires: packet 4 is sent at the forward-secure level.
  clock_.AdvanceTime(retransmission_time - clock_.Now());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(4), _, _));
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  connection_.SendApplicationDataAtLevel(ENCRYPTION_FORWARD_SECURE, 5, "data",
                                         4, NO_FIN);
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  retransmission_time = connection_.GetRetransmissionAlarm()->deadline();
  EXPECT_NE(QuicTime::Zero(), retransmission_time);
  // Second PTO fires: packets 9 and 8 are expected.
  clock_.AdvanceTime(retransmission_time - clock_.Now());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(9), _, _));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(8), _, _));
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  // After handshake completion only the application space remains.
  connection_.OnHandshakeComplete();
  retransmission_time = connection_.GetRetransmissionAlarm()->deadline();
  EXPECT_NE(QuicTime::Zero(), retransmission_time);
  clock_.AdvanceTime(retransmission_time - clock_.Now());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(11), _, _));
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
}
// Drives a client connection through receipt of a Retry packet and the
// subsequent transport-parameter validation. Each bool injects one failure
// mode independently:
//  - invalid_retry_tag: corrupt the retry integrity tag so the Retry packet
//    is rejected rather than processed.
//  - missing/wrong_original_id_in_config: omit or corrupt the received
//    original connection ID transport parameter.
//  - missing/wrong_retry_id_in_config: omit or corrupt the received retry
//    source connection ID transport parameter.
// Any config mismatch must close the connection with
// IETF_QUIC_PROTOCOL_VIOLATION; otherwise the connection must stay open.
void QuicConnectionTest::TestClientRetryHandling(
    bool invalid_retry_tag, bool missing_original_id_in_config,
    bool wrong_original_id_in_config, bool missing_retry_id_in_config,
    bool wrong_retry_id_in_config) {
  // The failure modes are mutually exclusive by construction: an invalid tag
  // means the Retry is dropped, so config-level flags are meaningless.
  if (invalid_retry_tag) {
    ASSERT_FALSE(missing_original_id_in_config);
    ASSERT_FALSE(wrong_original_id_in_config);
    ASSERT_FALSE(missing_retry_id_in_config);
    ASSERT_FALSE(wrong_retry_id_in_config);
  } else {
    ASSERT_FALSE(missing_original_id_in_config && wrong_original_id_in_config);
    ASSERT_FALSE(missing_retry_id_in_config && wrong_retry_id_in_config);
  }
  if (!version().UsesTls()) {
    // Retry integrity tags / these transport parameters only exist for
    // QUIC+TLS versions.
    return;
  }
  // Hard-coded Retry packets, one per supported wire version. Each embeds the
  // token "token" (0x74 0x6f 0x6b 0x65 0x6e) and ends with the 16-byte retry
  // integrity tag for that version.
  uint8_t retry_packet_rfcv2[] = {
      0xcf, 0x6b, 0x33, 0x43, 0xcf, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a,
      0x42, 0x62, 0xb5, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0xc8, 0x64, 0x6c, 0xe8,
      0xbf, 0xe3, 0x39, 0x52, 0xd9, 0x55, 0x54, 0x36, 0x65, 0xdc, 0xc7, 0xb6};
  uint8_t retry_packet_rfcv1[] = {
      0xff, 0x00, 0x00, 0x00, 0x01, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a,
      0x42, 0x62, 0xb5, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x04, 0xa2, 0x65, 0xba,
      0x2e, 0xff, 0x4d, 0x82, 0x90, 0x58, 0xfb, 0x3f, 0x0f, 0x24, 0x96, 0xba};
  uint8_t retry_packet29[] = {
      0xff, 0xff, 0x00, 0x00, 0x1d, 0x00, 0x08, 0xf0, 0x67, 0xa5, 0x50, 0x2a,
      0x42, 0x62, 0xb5, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0xd1, 0x69, 0x26, 0xd8,
      0x1f, 0x6f, 0x9c, 0xa2, 0x95, 0x3a, 0x8a, 0xa4, 0x57, 0x5e, 0x1e, 0x49};
  // Select the canned packet matching the version under test; bail for
  // versions with no test vector.
  uint8_t* retry_packet;
  size_t retry_packet_length;
  if (version() == ParsedQuicVersion::RFCv2()) {
    retry_packet = retry_packet_rfcv2;
    retry_packet_length = ABSL_ARRAYSIZE(retry_packet_rfcv2);
  } else if (version() == ParsedQuicVersion::RFCv1()) {
    retry_packet = retry_packet_rfcv1;
    retry_packet_length = ABSL_ARRAYSIZE(retry_packet_rfcv1);
  } else if (version() == ParsedQuicVersion::Draft29()) {
    retry_packet = retry_packet29;
    retry_packet_length = ABSL_ARRAYSIZE(retry_packet29);
  } else {
    return;
  }
  // Connection IDs used by the canned packets: the client's original choice,
  // and the new server-chosen ID carried in the Retry.
  uint8_t original_connection_id_bytes[] = {0x83, 0x94, 0xc8, 0xf0,
                                            0x3e, 0x51, 0x57, 0x08};
  uint8_t new_connection_id_bytes[] = {0xf0, 0x67, 0xa5, 0x50,
                                       0x2a, 0x42, 0x62, 0xb5};
  uint8_t retry_token_bytes[] = {0x74, 0x6f, 0x6b, 0x65, 0x6e};
  QuicConnectionId original_connection_id(
      reinterpret_cast<char*>(original_connection_id_bytes),
      ABSL_ARRAYSIZE(original_connection_id_bytes));
  QuicConnectionId new_connection_id(
      reinterpret_cast<char*>(new_connection_id_bytes),
      ABSL_ARRAYSIZE(new_connection_id_bytes));
  std::string retry_token(reinterpret_cast<char*>(retry_token_bytes),
                          ABSL_ARRAYSIZE(retry_token_bytes));
  if (invalid_retry_tag) {
    // Flip one bit of the final byte, which lies inside the integrity tag,
    // so tag validation fails.
    retry_packet[retry_packet_length - 1] ^= 1;
  }
  // Build the connection IDs that will later arrive via the config,
  // optionally corrupting their first byte.
  QuicConnectionId config_original_connection_id = original_connection_id;
  if (wrong_original_id_in_config) {
    ASSERT_FALSE(config_original_connection_id.IsEmpty());
    config_original_connection_id.mutable_data()[0] ^= 0x80;
  }
  QuicConnectionId config_retry_source_connection_id = new_connection_id;
  if (wrong_retry_id_in_config) {
    ASSERT_FALSE(config_retry_source_connection_id.IsEmpty());
    config_retry_source_connection_id.mutable_data()[0] ^= 0x80;
  }
  // Make the connection match the canned packet's IDs, then feed it the
  // Retry packet.
  QuicConnectionPeer::SetServerConnectionId(&connection_,
                                            original_connection_id);
  writer_->framer()->framer()->SetInitialObfuscators(new_connection_id);
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(reinterpret_cast<char*>(retry_packet),
                         retry_packet_length, clock_.Now()));
  if (invalid_retry_tag) {
    // A corrupted Retry must be ignored: no ID switch, no stored token.
    EXPECT_FALSE(connection_.GetStats().retry_packet_processed);
    EXPECT_EQ(connection_.connection_id(), original_connection_id);
    EXPECT_TRUE(QuicPacketCreatorPeer::GetRetryToken(
                    QuicConnectionPeer::GetPacketCreator(&connection_))
                    .empty());
    return;
  }
  // A valid Retry switches the connection to the server-chosen ID and stores
  // the token for future Initial packets.
  EXPECT_TRUE(connection_.GetStats().retry_packet_processed);
  EXPECT_EQ(connection_.connection_id(), new_connection_id);
  EXPECT_EQ(QuicPacketCreatorPeer::GetRetryToken(
                QuicConnectionPeer::GetPacketCreator(&connection_)),
            retry_token);
  // Simulate the peer's transport parameters, with the requested omissions /
  // corruptions applied.
  QuicConfig received_config;
  QuicConfigPeer::SetNegotiated(&received_config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &received_config, connection_.connection_id());
    if (!missing_retry_id_in_config) {
      QuicConfigPeer::SetReceivedRetrySourceConnectionId(
          &received_config, config_retry_source_connection_id);
    }
  }
  if (!missing_original_id_in_config) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &received_config, config_original_connection_id);
  }
  // Any mismatch must close the connection exactly once; a clean config must
  // not close it at all.
  if (missing_original_id_in_config || wrong_original_id_in_config ||
      missing_retry_id_in_config || wrong_retry_id_in_config) {
    EXPECT_CALL(visitor_,
                OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
        .Times(1);
  } else {
    EXPECT_CALL(visitor_,
                OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
        .Times(0);
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(AnyNumber());
  connection_.SetFromConfig(received_config);
  if (missing_original_id_in_config || wrong_original_id_in_config ||
      missing_retry_id_in_config || wrong_retry_id_in_config) {
    ASSERT_FALSE(connection_.connected());
    TestConnectionCloseQuicErrorCode(IETF_QUIC_PROTOCOL_VIOLATION);
  } else {
    EXPECT_TRUE(connection_.connected());
  }
}
// With the kFTOE connection option and the quic_fix_timeouts reloadable flag
// enabled, a client whose handshake has not completed should keep a finite
// handshake timeout armed in the idle-network detector.
TEST_P(QuicConnectionTest, FixTimeoutsClient) {
  if (!connection_.version().UsesTls()) {
    // Only applicable to QUIC+TLS versions.
    return;
  }
  set_perspective(Perspective::IS_CLIENT);
  if (GetQuicReloadableFlag(quic_fix_timeouts)) {
    // Report the handshake as still in progress so the handshake timeout
    // stays relevant.
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_START));
  }
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kFTOE);
  config.SetConnectionOptionsToSend(connection_options);
  QuicConfigPeer::SetNegotiated(&config, true);
  QuicConfigPeer::SetReceivedOriginalConnectionId(&config,
                                                  connection_.connection_id());
  QuicConfigPeer::SetReceivedInitialSourceConnectionId(
      &config, connection_.connection_id());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(1);
  connection_.SetFromConfig(config);
  QuicIdleNetworkDetector& idle_network_detector =
      QuicConnectionPeer::GetIdleNetworkDetector(&connection_);
  // Flag on: a finite handshake timeout must remain armed.
  // Flag off: legacy behavior leaves it infinite.
  if (GetQuicReloadableFlag(quic_fix_timeouts)) {
    EXPECT_NE(idle_network_detector.handshake_timeout(),
              QuicTime::Delta::Infinite());
  } else {
    EXPECT_EQ(idle_network_detector.handshake_timeout(),
              QuicTime::Delta::Infinite());
  }
}
// Server-side counterpart of FixTimeoutsClient: with kFTOE received as an
// initial connection option and quic_fix_timeouts enabled, the handshake
// timeout must stay finite while the handshake is incomplete.
TEST_P(QuicConnectionTest, FixTimeoutsServer) {
  if (!connection_.version().UsesTls()) {
    // Only applicable to QUIC+TLS versions.
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  if (GetQuicReloadableFlag(quic_fix_timeouts)) {
    // Report the handshake as still in progress so the handshake timeout
    // stays relevant.
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_START));
  }
  QuicConfig config;
  quic::QuicTagVector initial_received_options;
  initial_received_options.push_back(quic::kFTOE);
  ASSERT_TRUE(
      config.SetInitialReceivedConnectionOptions(initial_received_options));
  QuicConfigPeer::SetNegotiated(&config, true);
  QuicConfigPeer::SetReceivedOriginalConnectionId(&config,
                                                  connection_.connection_id());
  QuicConfigPeer::SetReceivedInitialSourceConnectionId(&config,
                                                       QuicConnectionId());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(1);
  connection_.SetFromConfig(config);
  QuicIdleNetworkDetector& idle_network_detector =
      QuicConnectionPeer::GetIdleNetworkDetector(&connection_);
  // Flag on: a finite handshake timeout must remain armed.
  // Flag off: legacy behavior leaves it infinite.
  if (GetQuicReloadableFlag(quic_fix_timeouts)) {
    EXPECT_NE(idle_network_detector.handshake_timeout(),
              QuicTime::Delta::Infinite());
  } else {
    EXPECT_EQ(idle_network_detector.handshake_timeout(),
              QuicTime::Delta::Infinite());
  }
}
// Happy path: a well-formed Retry with matching transport parameters is
// accepted and the connection stays open.
TEST_P(QuicConnectionTest, ClientParsesRetry) {
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
}
// A Retry whose integrity tag fails validation must be ignored entirely.
TEST_P(QuicConnectionTest, ClientParsesRetryInvalidTag) {
  TestClientRetryHandling(/*invalid_retry_tag=*/true,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
}
// After a Retry, a config missing the original connection ID must close the
// connection with IETF_QUIC_PROTOCOL_VIOLATION.
TEST_P(QuicConnectionTest, ClientParsesRetryMissingOriginalId) {
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/true,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
}
// After a Retry, a config carrying a mismatched original connection ID must
// close the connection with IETF_QUIC_PROTOCOL_VIOLATION.
TEST_P(QuicConnectionTest, ClientParsesRetryWrongOriginalId) {
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/true,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
}
// After a Retry, a config missing the retry source connection ID must close
// the connection with IETF_QUIC_PROTOCOL_VIOLATION.
TEST_P(QuicConnectionTest, ClientParsesRetryMissingRetryId) {
  if (!connection_.version().UsesTls()) {
    // The retry source connection ID transport parameter only exists in
    // QUIC+TLS versions.
    return;
  }
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/true,
                          /*wrong_retry_id_in_config=*/false);
}
// After a Retry, a config carrying a mismatched retry source connection ID
// must close the connection with IETF_QUIC_PROTOCOL_VIOLATION.
TEST_P(QuicConnectionTest, ClientParsesRetryWrongRetryId) {
  if (!connection_.version().UsesTls()) {
    // The retry source connection ID transport parameter only exists in
    // QUIC+TLS versions.
    return;
  }
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/true);
}
// After a valid Retry is processed, previously sent Initial crypto data must
// be retransmitted (under the new connection ID / token).
TEST_P(QuicConnectionTest, ClientRetransmitsInitialPacketsOnRetry) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  TestClientRetryHandling(/*invalid_retry_tag=*/false,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
  if (GetParam().ack_response == AckResponse::kImmediate) {
    // A second write attempt carrying the retransmitted CRYPTO frame.
    EXPECT_EQ(2u, writer_->packets_write_attempts());
    EXPECT_EQ(1u, writer_->framer()->crypto_frames().size());
  }
}
// A Retry with an invalid integrity tag must NOT trigger retransmission of
// Initial packets — the write-attempt count stays at one.
TEST_P(QuicConnectionTest, NoInitialPacketsRetransmissionOnInvalidRetry) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  TestClientRetryHandling(/*invalid_retry_tag=*/true,
                          /*missing_original_id_in_config=*/false,
                          /*wrong_original_id_in_config=*/false,
                          /*missing_retry_id_in_config=*/false,
                          /*wrong_retry_id_in_config=*/false);
  EXPECT_EQ(1u, writer_->packets_write_attempts());
}
// Receiving an original connection ID transport parameter when no Retry
// occurred should be treated as a protocol violation.
TEST_P(QuicConnectionTest, ClientReceivesOriginalConnectionIdWithoutRetry) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  // NOTE(review): combined with the guard above, this guard makes the test
  // return unconditionally for every version — the body below is dead code.
  // Presumably the test is deliberately disabled for QUIC+TLS (where the
  // original connection ID parameter is legitimately always present), but
  // confirm that intent; otherwise one of the two guards is inverted.
  if (connection_.version().UsesTls()) {
    return;
  }
  QuicConfig received_config;
  QuicConfigPeer::SetNegotiated(&received_config, true);
  QuicConfigPeer::SetReceivedOriginalConnectionId(&received_config,
                                                  TestConnectionId(0x12345));
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(1);
  connection_.SetFromConfig(received_config);
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(IETF_QUIC_PROTOCOL_VIOLATION);
}
// Receiving a retry source connection ID transport parameter when no Retry
// occurred must close the connection with IETF_QUIC_PROTOCOL_VIOLATION.
TEST_P(QuicConnectionTest, ClientReceivesRetrySourceConnectionIdWithoutRetry) {
  if (!connection_.version().UsesTls()) {
    // This transport parameter only exists in QUIC+TLS versions.
    return;
  }
  QuicConfig received_config;
  QuicConfigPeer::SetNegotiated(&received_config, true);
  QuicConfigPeer::SetReceivedRetrySourceConnectionId(&received_config,
                                                     TestConnectionId(0x12345));
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(1);
  connection_.SetFromConfig(received_config);
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(IETF_QUIC_PROTOCOL_VIOLATION);
}
// Regression-style test: the visitor closing the connection from within
// OnMaxStreamsFrame must not crash while the rest of the packet (padding)
// is still being processed.
TEST_P(QuicConnectionTest, MaxStreamsFrameCausesConnectionClose) {
  if (!VersionHasIetfQuicFrames(connection_.transport_version())) {
    return;
  }
  EXPECT_CALL(visitor_, OnMaxStreamsFrame(_))
      .WillOnce(InvokeWithoutArgs([this]() {
        // Close mid-packet, as a real session might when control frames
        // overflow its buffer.
        EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
        connection_.CloseConnection(
            QUIC_TOO_MANY_BUFFERED_CONTROL_FRAMES, "error",
            ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
        return true;
      }));
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicMaxStreamsFrame()));
  // Padding after the closing frame exercises continued frame processing.
  frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
  ProcessFramesPacketAtLevel(1, frames, ENCRYPTION_FORWARD_SECURE);
}
// Same as MaxStreamsFrameCausesConnectionClose, but closing from within
// OnStreamsBlockedFrame while padding remains in the packet.
TEST_P(QuicConnectionTest, StreamsBlockedFrameCausesConnectionClose) {
  if (!VersionHasIetfQuicFrames(connection_.transport_version())) {
    return;
  }
  EXPECT_CALL(visitor_, OnStreamsBlockedFrame(_))
      .WillOnce(InvokeWithoutArgs([this]() {
        // Close mid-packet from the frame callback.
        EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
        connection_.CloseConnection(
            QUIC_TOO_MANY_BUFFERED_CONTROL_FRAMES, "error",
            ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
        return true;
      }));
  QuicFrames frames;
  frames.push_back(
      QuicFrame(QuicStreamsBlockedFrame(kInvalidControlFrameId, 10, false)));
  // Padding after the closing frame exercises continued frame processing.
  frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
  ProcessFramesPacketAtLevel(1, frames, ENCRYPTION_FORWARD_SECURE);
}
// With multiple packet number spaces, a CONNECTION_CLOSE must be sent in
// every live encryption level and each close packet must bundle an ACK.
TEST_P(QuicConnectionTest,
       BundleAckWithConnectionCloseMultiplePacketNumberSpace) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  // Receive one packet in each of two spaces so both have ACK state.
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  ProcessDataPacketAtLevel(2000, false, ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  const QuicErrorCode kQuicErrorCode = QUIC_INTERNAL_ERROR;
  connection_.CloseConnection(
      kQuicErrorCode, "Some random error message",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_EQ(2u, QuicConnectionPeer::GetNumEncryptionLevels(&connection_));
  TestConnectionCloseQuicErrorCode(kQuicErrorCode);
  EXPECT_EQ(1u, writer_->connection_close_frames().size());
  // ACK must ride alongside the close frame.
  EXPECT_EQ(1u, writer_->ack_frames().size());
  if (!connection_.version().CanSendCoalescedPackets()) {
    // Without coalescing: one standalone close packet per encryption level.
    EXPECT_EQ(QuicConnectionPeer::GetNumEncryptionLevels(&connection_),
              writer_->connection_close_packets());
    EXPECT_EQ(QuicConnectionPeer::GetNumEncryptionLevels(&connection_),
              writer_->packets_write_attempts());
    return;
  }
  // With coalescing: a single UDP datagram carries both close packets;
  // re-parse the coalesced remainder to verify the second one.
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_EQ(1u, writer_->connection_close_packets());
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  auto packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(1u, writer_->connection_close_packets());
  EXPECT_EQ(1u, writer_->connection_close_frames().size());
  EXPECT_EQ(1u, writer_->ack_frames().size());
  ASSERT_TRUE(writer_->coalesced_packet() == nullptr);
}
// With kPTOS/k1PTO connection options, a PTO firing after an unacked message
// should send a PING probe and skip a packet number (probe lands on packet 3
// even though only one packet was sent).
TEST_P(QuicConnectionTest, SendPingWhenSkipPacketNumberForPto) {
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kPTOS);
  connection_options.push_back(k1PTO);
  config.SetConnectionOptionsToSend(connection_options);
  if (connection_.version().UsesTls()) {
    // Allow DATAGRAM/MESSAGE frames to be sent under TLS versions.
    QuicConfigPeer::SetReceivedMaxDatagramFrameSize(
        &config, kMaxAcceptedDatagramFrameSize);
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  connection_.OnHandshakeComplete();
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(MESSAGE_STATUS_SUCCESS, SendMessage("message"));
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // PTO fires: expect the probe on packet number 3 (number 2 skipped).
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, QuicPacketNumber(3), _, _));
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(1u, connection_.GetStats().pto_count);
  EXPECT_EQ(0u, connection_.GetStats().crypto_retransmit_count);
  // The probe is a PING, not a retransmission of the message.
  EXPECT_EQ(1u, writer_->ping_frames().size());
}
// An ACK frame already queued in the packet creator must not be mutated by
// frames processed later in the same incoming packet.
TEST_P(QuicConnectionTest, DonotChangeQueuedAcks) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  // Receive 2, 3, 4 so the pending ACK covers them.
  ProcessPacket(2);
  ProcessPacket(3);
  ProcessPacket(4);
  // A packet carrying a stream frame followed by an ACK of our packet 1.
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicStreamFrame(
      QuicUtils::GetFirstBidirectionalStreamId(
          connection_.version().transport_version, Perspective::IS_CLIENT),
      false, 0u, absl::string_view())));
  QuicAckFrame ack_frame = InitAckFrame(1);
  frames.push_back(QuicFrame(&ack_frame));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillOnce(Invoke([this]() {
    // Trigger a control-frame send mid-processing, which flushes the queued
    // ACK into the creator; the queued ACK must still cover packet 2.
    connection_.SendControlFrame(QuicFrame(QuicWindowUpdateFrame(1, 0, 0)));
    EXPECT_TRUE(QuicPacketCreatorPeer::QueuedFrames(
                    QuicConnectionPeer::GetPacketCreator(&connection_))[0]
                    .ack_frame->packets.Contains(QuicPacketNumber(2)));
  }));
  ProcessFramesPacketAtLevel(9, frames, ENCRYPTION_FORWARD_SECURE);
  // The ACK that actually went to the wire still covers packet 2.
  EXPECT_TRUE(writer_->ack_frames()[0].packets.Contains(QuicPacketNumber(2)));
}
// A packet that cannot be decrypted must not count as network activity: the
// idle-timeout deadline stays unchanged and the connection still times out.
TEST_P(QuicConnectionTest, DoNotExtendIdleTimeOnUndecryptablePackets) {
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  QuicTime initial_deadline =
      clock_.ApproximateNow() +
      QuicTime::Delta::FromSeconds(kInitialIdleTimeoutSecs - 1);
  EXPECT_EQ(initial_deadline, connection_.GetTimeoutAlarm()->deadline());
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
  // Deliver a packet at a level this connection cannot decrypt yet.
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<quic::NullEncrypter>(Perspective::IS_CLIENT));
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  // Deadline is NOT extended by the undecryptable packet.
  EXPECT_EQ(initial_deadline, connection_.GetTimeoutAlarm()->deadline());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(1);
  QuicTime::Delta delay = initial_deadline - clock_.ApproximateNow();
  clock_.AdvanceTime(delay);
  connection_.GetTimeoutAlarm()->Fire();
  EXPECT_FALSE(connection_.connected());
}
// When processing a stream frame triggers an immediate outgoing response
// (a window update), the pending ACK must be bundled into that packet
// rather than left waiting on the ack alarm.
TEST_P(QuicConnectionTest, BundleAckWithImmediateResponse) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillOnce(Invoke([this]() {
    // Respond from inside frame processing. (Helper name as declared by the
    // test notifier.)
    notifier_.WriteOrBufferWindowUpate(0, 0);
  }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  ProcessDataPacket(1);
  // ACK rode along with the response; nothing remains pending.
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(connection_.HasPendingAcks());
}
// With multiple packet number spaces, the ack alarm may fire early (at
// kAlarmGranularity) for one space; after it fires, the alarm is re-armed
// with the normal delayed-ack time for the remaining space.
TEST_P(QuicConnectionTest, AckAlarmFiresEarly) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Receive 0-RTT data in a second packet number space.
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  ProcessDataPacketAtLevel(1000, false, ENCRYPTION_ZERO_RTT);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Ack alarm is pulled in to alarm granularity for the INITIAL ack.
  EXPECT_EQ(clock_.ApproximateNow() + kAlarmGranularity,
            connection_.GetAckAlarm()->deadline());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  connection_.GetAckAlarm()->Fire();
  // The 0-RTT ack remains pending with the standard delayed-ack deadline.
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(clock_.ApproximateNow() + DefaultDelayedAckTime(),
            connection_.GetAckAlarm()->deadline());
}
// With the kCBHD option, blackhole detection remains active on the client:
// sending data arms the blackhole detector alarm.
TEST_P(QuicConnectionTest, ClientOnlyBlackholeDetectionClient) {
  if (!GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2)) {
    return;
  }
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kCBHD);
  config.SetConnectionOptionsToSend(connection_options);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  EXPECT_FALSE(connection_.GetBlackholeDetectorAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  // Client keeps blackhole detection: alarm armed after sending.
  EXPECT_TRUE(connection_.GetBlackholeDetectorAlarm()->IsSet());
}
// Counterpart of ClientOnlyBlackholeDetectionClient: with kCBHD received,
// a server must NOT arm the blackhole detector when sending data.
TEST_P(QuicConnectionTest, ClientOnlyBlackholeDetectionServer) {
  if (!GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2)) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  if (version().SupportsAntiAmplificationLimit()) {
    // Lift the anti-amplification limit so the server may send freely.
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(kCBHD);
  config.SetInitialReceivedConnectionOptions(connection_options);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  EXPECT_FALSE(connection_.GetBlackholeDetectorAlarm()->IsSet());
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  // Server side: blackhole detection disabled, alarm stays unset.
  EXPECT_FALSE(connection_.GetBlackholeDetectorAlarm()->IsSet());
}
// Discarding handshake keys on handshake completion counts as forward
// progress: any in-progress blackhole detection must be cancelled.
TEST_P(QuicConnectionTest, MadeForwardProgressOnDiscardingKeys) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  // Send handshake packet.
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  QuicConfig config;
  QuicTagVector connection_options;
  connection_options.push_back(k5RTO);
  config.SetConnectionOptionsToSend(connection_options);
  QuicConfigPeer::SetNegotiated(&config, true);
  if (GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2) ||
      GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed)) {
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  }
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  // Whether detection starts before handshake confirmation is flag-gated.
  if (GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed)) {
    EXPECT_FALSE(connection_.BlackholeDetectionInProgress());
  } else {
    EXPECT_TRUE(connection_.BlackholeDetectionInProgress());
  }
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  // After key discard, detection is off under either fix flag; only the
  // fully-legacy configuration leaves it running.
  if (GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2) ||
      GetQuicReloadableFlag(
          quic_no_path_degrading_before_handshake_confirmed)) {
    EXPECT_FALSE(connection_.BlackholeDetectionInProgress());
  } else {
    EXPECT_TRUE(connection_.BlackholeDetectionInProgress());
  }
}
// Buffered undecryptable packets must be drained selectively as keys become
// available: installing HANDSHAKE keys processes only the HANDSHAKE packets,
// leaving the FORWARD_SECURE one buffered until its keys arrive.
TEST_P(QuicConnectionTest, ProcessUndecryptablePacketsBasedOnEncryptionLevel) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(AnyNumber());
  QuicConfig config;
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE);
  peer_framer_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  // Buffer six HANDSHAKE packets (1-3, 5-7) and one FORWARD_SECURE (4).
  for (uint64_t i = 1; i <= 3; ++i) {
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_HANDSHAKE);
  }
  ProcessDataPacketAtLevel(4, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  for (uint64_t j = 5; j <= 7; ++j) {
    ProcessDataPacketAtLevel(j, !kHasStopWaiting, ENCRYPTION_HANDSHAKE);
  }
  EXPECT_EQ(7u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  EXPECT_FALSE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  // Installing HANDSHAKE keys arms the processing alarm.
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  EXPECT_TRUE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  if (!VersionHasIetfQuicFrames(version().transport_version)) {
    EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(6);
  }
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  // Only the FORWARD_SECURE packet remains buffered.
  EXPECT_EQ(1u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // Installing FORWARD_SECURE keys drains the last one.
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  EXPECT_TRUE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  EXPECT_EQ(0u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
}
// A server's INITIAL-space ACK must bundle unacked INITIAL crypto data, and
// doing so counts as a (re)transmission that moves the PTO deadline.
TEST_P(QuicConnectionTest, ServerBundlesInitialDataWithInitialAck) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
  QuicTime expected_pto_time =
      connection_.sent_packet_manager().GetRetransmissionTime();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  // Use the encryption level as the tag, consistent with the sibling tests
  // (was a magic 0x02 literal, which equals ENCRYPTION_HANDSHAKE).
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  // Sending HANDSHAKE data does not move the INITIAL PTO deadline.
  EXPECT_EQ(expected_pto_time,
            connection_.sent_packet_manager().GetRetransmissionTime());
  // Two more INITIAL packets force an immediate INITIAL ACK...
  ProcessCryptoPacketAtLevel(1001, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  ProcessCryptoPacketAtLevel(1002, ENCRYPTION_INITIAL);
  // ...which bundles the unacked INITIAL crypto data and resets the PTO.
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(writer_->crypto_frames().empty());
  EXPECT_NE(expected_pto_time,
            connection_.sent_packet_manager().GetRetransmissionTime());
}
// A client's HANDSHAKE-space ACK must bundle unacked HANDSHAKE crypto data.
TEST_P(QuicConnectionTest, ClientBundlesHandshakeDataWithHandshakeAck) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_EQ(Perspective::IS_CLIENT, connection_.perspective());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  // Bring up HANDSHAKE keys on both sides.
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  peer_framer_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_HANDSHAKE);
  EXPECT_TRUE(connection_.HasPendingAcks());
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  // A second and third received packet force an immediate ACK, which must
  // bundle the still-unacked crypto data.
  ProcessCryptoPacketAtLevel(1001, ENCRYPTION_HANDSHAKE);
  EXPECT_TRUE(connection_.HasPendingAcks());
  ProcessCryptoPacketAtLevel(1002, ENCRYPTION_HANDSHAKE);
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(writer_->crypto_frames().empty());
}
// Regression-style test: within one flusher scope, sending a large
// FORWARD_SECURE packet followed by HANDSHAKE data (a lower level) must
// coalesce without error.
TEST_P(QuicConnectionTest, CoalescePacketOfLowerEncryptionLevel) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetEncrypter(
        ENCRYPTION_HANDSHAKE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
    connection_.SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    // 1286 bytes nearly fills the packet, stressing the coalescing path
    // when the HANDSHAKE packet is added afterwards.
    SendStreamDataToPeer(2, std::string(1286, 'a'), 0, NO_FIN, nullptr);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString("a", 0, ENCRYPTION_HANDSHAKE);
  }
}
// When the server's delayed ACK for HANDSHAKE data fires, any unacked
// HANDSHAKE crypto data should be bundled with (retransmitted alongside)
// the ACK rather than waiting for PTO.
TEST_P(QuicConnectionTest, ServerRetransmitsHandshakeDataEarly) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
  QuicTime expected_pto_time =
      connection_.sent_packet_manager().GetRetransmissionTime();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  // Two HANDSHAKE crypto sends (packets 2 and 3).
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  connection_.SendCryptoDataWithString("bar", 3, ENCRYPTION_HANDSHAKE);
  EXPECT_EQ(expected_pto_time,
            connection_.sent_packet_manager().GetRetransmissionTime());
  // Peer acks packet 2 only; packet 3's data remains outstanding.
  QuicFrames frames;
  auto ack_frame = InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  frames.push_back(QuicFrame(&ack_frame));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessFramesPacketAtLevel(30, frames, ENCRYPTION_HANDSHAKE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  // An incoming ack-eliciting PING schedules a near-immediate ack.
  frames.clear();
  frames.push_back(QuicFrame(QuicPingFrame()));
  frames.push_back(QuicFrame(QuicPaddingFrame(3)));
  ProcessFramesPacketAtLevel(31, frames, ENCRYPTION_HANDSHAKE);
  EXPECT_EQ(clock_.Now() + kAlarmGranularity,
            connection_.GetAckAlarm()->deadline());
  clock_.AdvanceTime(kAlarmGranularity);
  connection_.GetAckAlarm()->Fire();
  // The ack packet bundles the unacked crypto data.
  EXPECT_FALSE(writer_->ack_frames().empty());
  EXPECT_FALSE(writer_->crypto_frames().empty());
}
// A late ack of a PTO-retransmitted packet must not inflate the RTT
// estimate: the latest RTT should remain the true kTestRTT.
TEST_P(QuicConnectionTest, InflatedRttSample) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(30);
  set_perspective(Perspective::IS_SERVER);
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  std::string initial_crypto_data(512, 'a');
  connection_.SendCryptoDataWithString(initial_crypto_data, 0,
                                       ENCRYPTION_INITIAL);
  ASSERT_TRUE(connection_.sent_packet_manager()
                  .GetRetransmissionTime()
                  .IsInitialized());
  QuicTime::Delta pto_timeout =
      connection_.sent_packet_manager().GetRetransmissionTime() - clock_.Now();
  // Send handshake packet.
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  std::string handshake_crypto_data(1024, 'a');
  connection_.SendCryptoDataWithString(handshake_crypto_data, 0,
                                       ENCRYPTION_HANDSHAKE);
  // Let the PTO fire (retransmission), then wait one true RTT.
  clock_.AdvanceTime(pto_timeout);
  connection_.GetRetransmissionAlarm()->Fire();
  clock_.AdvanceTime(kTestRTT);
  // Ack of the PTO retransmission yields the correct RTT sample.
  QuicFrames frames;
  auto ack_frame = InitAckFrame({{QuicPacketNumber(4), QuicPacketNumber(5)}});
  frames.push_back(QuicFrame(&ack_frame));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(AnyNumber());
  ProcessFramesPacketAtLevel(1001, frames, ENCRYPTION_INITIAL);
  EXPECT_EQ(kTestRTT, rtt_stats->latest_rtt());
  // A later ack covering the original (pre-PTO) packets must not produce an
  // inflated sample — latest RTT stays kTestRTT.
  frames.clear();
  QuicAckFrame ack_frame2 =
      InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)},
                    {QuicPacketNumber(5), QuicPacketNumber(6)}});
  ack_frame2.ack_delay_time = QuicTime::Delta::Zero();
  frames.push_back(QuicFrame(&ack_frame2));
  ProcessFramesPacketAtLevel(1, frames, ENCRYPTION_HANDSHAKE);
  EXPECT_EQ(rtt_stats->latest_rtt(), kTestRTT);
}
// Regression test (per the test name): with a tightened anti-amplification
// factor of 2, firing the retransmission alarm while both INITIAL and
// HANDSHAKE crypto data are outstanding must not make packet coalescing loop
// forever. The test passes if Fire() returns.
TEST_P(QuicConnectionTest, CoalescingPacketCausesInfiniteLoop) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  // Tighten the amplification limit so retransmission is budget-constrained.
  SetQuicFlag(quic_anti_amplification_factor, 2);
  ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  std::string initial_crypto_data(512, 'a');
  connection_.SendCryptoDataWithString(initial_crypto_data, 0,
                                       ENCRYPTION_INITIAL);
  ASSERT_TRUE(connection_.sent_packet_manager()
                  .GetRetransmissionTime()
                  .IsInitialized());
  QuicTime::Delta pto_timeout =
      connection_.sent_packet_manager().GetRetransmissionTime() - clock_.Now();
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  std::string handshake_crypto_data(1024, 'a');
  connection_.SendCryptoDataWithString(handshake_crypto_data, 0,
                                       ENCRYPTION_HANDSHAKE);
  // Firing the PTO here must terminate.
  clock_.AdvanceTime(pto_timeout);
  connection_.GetRetransmissionAlarm()->Fire();
}
// Verifies that when an undecryptable packet is buffered and only processed
// later (after the matching keys are installed), the ACK the client sends
// reports an ack_delay_time covering the buffering period (100ms here).
TEST_P(QuicConnectionTest, ClientAckDelayForAsyncPacketProcessing) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).WillOnce(Invoke([this]() {
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.NeuterUnencryptedPackets();
  }));
  QuicConfig config;
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  peer_framer_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  EXPECT_EQ(0u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // A HANDSHAKE packet arrives before the handshake keys: it gets buffered.
  ProcessDataPacketAtLevel(2, !kHasStopWaiting, ENCRYPTION_HANDSHAKE);
  ASSERT_EQ(1u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // 100ms pass before the keys become available.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
  ProcessDataPacketAtLevel(4, !kHasStopWaiting, ENCRYPTION_INITIAL);
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  EXPECT_TRUE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  // Processing the buffered packet triggers an immediate ACK whose delay
  // reflects the 100ms the packet sat in the buffer.
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  ASSERT_FALSE(connection_.HasPendingAcks());
  ASSERT_FALSE(writer_->ack_frames().empty());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100),
            writer_->ack_frames()[0].ack_delay_time);
  ASSERT_TRUE(writer_->coalesced_packet() == nullptr);
}
// Verifies liveness testing: with a negotiated 30s idle timeout, a liveness
// probe is not sent while plenty of time remains, but is sent (one packet)
// when the idle deadline is 1ms away — without moving the idle deadline.
TEST_P(QuicConnectionTest, TestingLiveness) {
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  CryptoHandshakeMessage msg;
  std::string error_details;
  QuicConfig client_config;
  client_config.SetInitialStreamFlowControlWindowToSend(
      kInitialStreamFlowControlWindowForTest);
  client_config.SetInitialSessionFlowControlWindowToSend(
      kInitialSessionFlowControlWindowForTest);
  // Negotiate a 30-second idle network timeout.
  client_config.SetIdleNetworkTimeout(QuicTime::Delta::FromSeconds(30));
  client_config.ToHandshakeMessage(&msg, connection_.transport_version());
  const QuicErrorCode error =
      config.ProcessPeerHello(msg, CLIENT, &error_details);
  EXPECT_THAT(error, IsQuicNoError());
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  connection_.SetFromConfig(config);
  connection_.OnHandshakeComplete();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  ASSERT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  // Far from the deadline: no probe.
  EXPECT_FALSE(connection_.MaybeTestLiveness());
  QuicTime deadline = QuicConnectionPeer::GetIdleNetworkDeadline(&connection_);
  QuicTime::Delta timeout = deadline - clock_.ApproximateNow();
  // 1ms before the deadline: a single probe packet is sent, and the idle
  // deadline stays where it was.
  clock_.AdvanceTime(timeout - QuicTime::Delta::FromMilliseconds(1));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_TRUE(connection_.MaybeTestLiveness());
  EXPECT_EQ(deadline, QuicConnectionPeer::GetIdleNetworkDeadline(&connection_));
}
// Same setup as TestingLiveness, but after DisableLivenessTesting() no probe
// is sent even 1ms before the idle deadline.
TEST_P(QuicConnectionTest, DisableLivenessTesting) {
  const size_t kMinRttMs = 40;
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(kMinRttMs),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  CryptoHandshakeMessage msg;
  std::string error_details;
  QuicConfig client_config;
  client_config.SetInitialStreamFlowControlWindowToSend(
      kInitialStreamFlowControlWindowForTest);
  client_config.SetInitialSessionFlowControlWindowToSend(
      kInitialSessionFlowControlWindowForTest);
  // Negotiate a 30-second idle network timeout.
  client_config.SetIdleNetworkTimeout(QuicTime::Delta::FromSeconds(30));
  client_config.ToHandshakeMessage(&msg, connection_.transport_version());
  const QuicErrorCode error =
      config.ProcessPeerHello(msg, CLIENT, &error_details);
  EXPECT_THAT(error, IsQuicNoError());
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  connection_.SetFromConfig(config);
  connection_.OnHandshakeComplete();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Turn liveness testing off; MaybeTestLiveness must now never send.
  connection_.DisableLivenessTesting();
  ASSERT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_FALSE(connection_.MaybeTestLiveness());
  QuicTime deadline = QuicConnectionPeer::GetIdleNetworkDeadline(&connection_);
  QuicTime::Delta timeout = deadline - clock_.ApproximateNow();
  clock_.AdvanceTime(timeout - QuicTime::Delta::FromMilliseconds(1));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  EXPECT_FALSE(connection_.MaybeTestLiveness());
}
// Verifies that a server closes silently on idle timeout: the timeout alarm
// fires, the connection closes FROM_SELF without writing a packet on the
// wire, but a termination (connection close) packet is stashed for later.
TEST_P(QuicConnectionTest, SilentIdleTimeout) {
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  if (version().SupportsAntiAmplificationLimit()) {
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  QuicConfig config;
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(&config,
                                                         QuicConnectionId());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  EXPECT_TRUE(connection_.connected());
  EXPECT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  if (version().handshake_protocol == PROTOCOL_TLS1_3) {
    EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  }
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  // Silent close: nothing is actually sent.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.GetTimeoutAlarm()->Fire();
  // But the termination packet exists for a potential later dispatch.
  EXPECT_NE(nullptr,
            QuicConnectionPeer::GetConnectionClosePacket(&connection_));
}
// Verifies that firing the ping (keep-alive) alarm sends nothing once the
// visitor reports the connection no longer needs to be kept alive.
TEST_P(QuicConnectionTest, DoNotSendPing) {
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  EXPECT_FALSE(connection_.GetPingAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // Sending retransmittable data arms the ping alarm for 15 seconds.
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(0, connection_.transport_version()),
      "GET /", 0, FIN, nullptr);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(15),
            connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  // Ack our packet; the ping alarm stays armed on its original schedule.
  QuicFrames frames;
  QuicAckFrame ack_frame = InitAckFrame(1);
  frames.push_back(QuicFrame(&ack_frame));
  frames.push_back(QuicFrame(QuicStreamFrame(
      GetNthClientInitiatedStreamId(0, connection_.transport_version()), true,
      0u, absl::string_view())));
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessFramesPacketAtLevel(1, frames, ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.GetPingAlarm()->IsSet());
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(
      QuicTime::Delta::FromSeconds(15) - QuicTime::Delta::FromMilliseconds(5),
      connection_.GetPingAlarm()->deadline() - clock_.ApproximateNow());
  // When the alarm fires but keep-alive is no longer wanted, no ping goes out.
  clock_.AdvanceTime(QuicTime::Delta::FromSeconds(15));
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(false));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.GetPingAlarm()->Fire();
}
// Verifies blackhole detection bookkeeping across a sequence of acks that
// declare packets lost: detection stays in progress while retransmissions
// keep data in flight, and stops once the final loss leaves nothing in
// flight. (Acks of packet 7 imply the earlier losses were retransmitted —
// presumably as packets 6 and 7; confirm against SendStreamDataToPeer.)
TEST_P(QuicConnectionTest, DuplicateAckCausesLostPackets) {
  if (!GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2)) {
    return;
  }
  // Finish the handshake so blackhole detection can run.
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  notifier_.NeuterUnencryptedData();
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Send five 1200-byte stream packets (1..5).
  std::string data(1200, 'a');
  for (size_t i = 0; i < 5; ++i) {
    SendStreamDataToPeer(
        GetNthClientInitiatedStreamId(1, connection_.transport_version()), data,
        i * 1200, i == 4 ? FIN : NO_FIN, nullptr);
  }
  ASSERT_TRUE(connection_.BlackholeDetectionInProgress());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(3);
  // Ack packet 5 and have loss detection declare packets 1 and 2 lost.
  QuicAckFrame frame =
      InitAckFrame({{QuicPacketNumber(5), QuicPacketNumber(6)}});
  LostPacketVector lost_packets;
  lost_packets.push_back(
      LostPacket(QuicPacketNumber(1), kMaxOutgoingPacketSize));
  lost_packets.push_back(
      LostPacket(QuicPacketNumber(2), kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .Times(AnyNumber())
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())))
      .WillRepeatedly(DoDefault());
  ProcessAckPacket(1, &frame);
  // Retransmitted data is in flight, so detection continues.
  EXPECT_TRUE(connection_.BlackholeDetectionInProgress());
  QuicAlarm* retransmission_alarm = connection_.GetRetransmissionAlarm();
  EXPECT_TRUE(retransmission_alarm->IsSet());
  // Ack 1-5 and 7; packet 6 is still outstanding.
  QuicAckFrame frame2 =
      InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(6)},
                    {QuicPacketNumber(7), QuicPacketNumber(8)}});
  ProcessAckPacket(2, &frame2);
  EXPECT_TRUE(connection_.BlackholeDetectionInProgress());
  // Declaring packet 6 lost empties the in-flight set; detection must stop.
  QuicAckFrame frame3 =
      InitAckFrame({{QuicPacketNumber(7), QuicPacketNumber(8)}});
  lost_packets.clear();
  lost_packets.push_back(
      LostPacket(QuicPacketNumber(6), kMaxOutgoingPacketSize));
  EXPECT_CALL(*loss_algorithm_, DetectLosses(_, _, _, _, _, _))
      .Times(AnyNumber())
      .WillOnce(DoAll(SetArgPointee<5>(lost_packets),
                      Return(LossDetectionInterface::DetectionStats())));
  ProcessAckPacket(3, &frame3);
  EXPECT_FALSE(connection_.BlackholeDetectionInProgress());
}
// Verifies the kFIDT connection option: sending a packet close to the idle
// deadline extends the timeout only slightly (1s here), while receiving a
// packet restores the full idle timeout window.
TEST_P(QuicConnectionTest, ShorterIdleTimeoutOnSentPackets) {
  EXPECT_TRUE(connection_.connected());
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
                       QuicTime::Delta::Zero(), QuicTime::Zero());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // kFIDT: favor the idle timeout based on first sent packet.
  config.SetClientConnectionOptions(QuicTagVector{kFIDT});
  QuicConfigPeer::SetNegotiated(&config, true);
  if (GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2)) {
    EXPECT_CALL(visitor_, GetHandshakeState())
        .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  }
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  connection_.SetFromConfig(config);
  ASSERT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  QuicTime::Delta timeout =
      connection_.GetTimeoutAlarm()->deadline() - clock_.Now();
  // Send a packet 1s before the deadline: only 1s remains afterwards.
  clock_.AdvanceTime(timeout - QuicTime::Delta::FromSeconds(1));
  SendStreamDataToPeer(
      GetNthClientInitiatedStreamId(1, connection_.transport_version()), "foo",
      0, FIN, nullptr);
  ASSERT_TRUE(connection_.GetTimeoutAlarm()->IsSet());
  EXPECT_EQ(QuicTime::Delta::FromSeconds(1),
            connection_.GetTimeoutAlarm()->deadline() - clock_.Now());
  // Receiving an ack resets the deadline to the full timeout from now.
  clock_.AdvanceTime(timeout - QuicTime::Delta::FromMilliseconds(100));
  QuicAckFrame ack = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  ProcessAckPacket(1, &ack);
  EXPECT_EQ(clock_.Now() + timeout, connection_.GetTimeoutAlarm()->deadline());
}
// Verifies that when the INITIAL key is discarded while an INITIAL ack still
// sits in the coalescer, the coalesced packet is reserialized cleanly, no
// frames are left pending, and the connection keeps working afterwards.
TEST_P(QuicConnectionTest,
       ReserializeInitialPacketInCoalescerAfterDiscardingInitialKey) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  // Receiving INITIAL crypto data leaves an ack pending.
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Use the encryption level as the tag, matching sibling tests
  // (ENCRYPTION_HANDSHAKE == 0x02).
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  // Discard the INITIAL key as soon as the first HANDSHAKE packet goes out.
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).WillOnce(Invoke([this]() {
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.NeuterUnencryptedPackets();
  }));
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
    // Nothing hits the wire until the flusher goes out of scope.
    EXPECT_EQ(0u, writer_->packets_write_attempts());
    // Firing the ack alarm adds the INITIAL ack to the coalescer.
    connection_.GetAckAlarm()->Fire();
  }
  // The coalescer was reserialized without stranding frames in the creator.
  EXPECT_FALSE(connection_.packet_creator().HasPendingFrames());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(1000, false, ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.connected());
}
// Verifies client-side path validation on a new socket: a padded
// PATH_CHALLENGE goes out through the new writer (not the default one), and
// a matching PATH_RESPONSE completes the validation successfully.
TEST_P(QuicConnectionTest, PathValidationOnNewSocketSuccess) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // The probe must be one padded PATH_CHALLENGE sent from the new address.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }))
      .WillRepeatedly(DoDefault());
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  // The default writer must not be used for the probe.
  EXPECT_EQ(0u, writer_->packets_write_attempts());
  // A PATH_RESPONSE echoing the challenge payload completes validation.
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(
      99, new_writer.path_challenge_frames().front().data_buffer)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(success);
}
// Verifies path validation when the new socket is write-blocked: the initial
// probe is not sent, the retry timer later sends it once the writer is
// writable, and an incoming PATH_CHALLENGE while blocked again does not
// force a write on the new socket.
TEST_P(QuicConnectionTest, PathValidationOnNewSocketWriteBlocked) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Blocked writer: starting validation attempts no writes.
  new_writer.SetWriteBlocked();
  bool success = false;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_EQ(0u, new_writer.packets_write_attempts());
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  // Unblock and fire the retry timer: the padded PATH_CHALLENGE goes out
  // from the new address.
  new_writer.SetWritable();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
  static_cast<test::MockRandom*>(helper_->GetRandomGenerator())->ChangeValue();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }));
  static_cast<TestAlarmFactory::TestAlarm*>(
      QuicPathValidatorPeer::retry_timer(
          QuicConnectionPeer::path_validator(&connection_)))
      ->Fire();
  EXPECT_EQ(1u, new_writer.packets_write_attempts());
  // With the new writer blocked again, an incoming PATH_CHALLENGE must not
  // attempt another write on it; the response falls back to the default
  // writer.
  QuicFrames frames;
  QuicPathFrameBuffer path_frame_buffer{0, 1, 2, 3, 4, 5, 6, 7};
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer)));
  new_writer.SetWriteBlocked();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillRepeatedly(Invoke([&] {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_TRUE(new_writer.path_response_frames().empty());
        EXPECT_EQ(1u, writer_->packets_write_attempts());
      }));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress,
                                   connection_.peer_address(),
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(1u, new_writer.packets_write_attempts());
}
// Verifies that starting a second path validation cancels the first: the
// first delegate is notified of failure (success flips to false) and, with
// this setup, no validation remains pending afterwards.
TEST_P(QuicConnectionTest, NewPathValidationCancelsPreviousOne) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // The first probe goes out through the first new writer.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }));
  bool success = true;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_EQ(0u, writer_->packets_write_attempts());
  // Kick off a second validation from a different self address.
  const QuicSocketAddress kNewSelfAddress2(QuicIpAddress::Any4(), 12346);
  EXPECT_NE(kNewSelfAddress2, connection_.self_address());
  TestPacketWriter new_writer2(version(), &clock_, Perspective::IS_CLIENT);
  bool success2 = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress2, connection_.peer_address(), &new_writer2),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress2, connection_.peer_address(),
          &success2),
      PathValidationReason::kReasonUnknown);
  // The first validation was cancelled (its delegate saw failure).
  EXPECT_FALSE(success);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
}
// Verifies that an unanswered PATH_CHALLENGE is retried: after 3*initial-RTT
// the retry timer fires and a second padded challenge is written.
TEST_P(QuicConnectionTest, PathValidationRetry) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  // Both the initial probe and the retry are a padded PATH_CHALLENGE.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(2u)
      .WillRepeatedly(Invoke([&]() {
        EXPECT_EQ(1u, writer_->path_challenge_frames().size());
        EXPECT_EQ(1u, writer_->padding_frames().size());
      }));
  bool success = true;
  connection_.ValidatePath(std::make_unique<TestQuicPathValidationContext>(
                               connection_.self_address(),
                               connection_.peer_address(), writer_.get()),
                           std::make_unique<TestValidationResultDelegate>(
                               &connection_, connection_.self_address(),
                               connection_.peer_address(), &success),
                           PathValidationReason::kReasonUnknown);
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  // Let the retry deadline pass, then fire the validator's retry timer.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
  static_cast<test::MockRandom*>(helper_->GetRandomGenerator())->ChangeValue();
  static_cast<TestAlarmFactory::TestAlarm*>(
      QuicPathValidatorPeer::retry_timer(
          QuicConnectionPeer::path_validator(&connection_)))
      ->Fire();
  EXPECT_EQ(2u, writer_->packets_write_attempts());
}
// Verifies that a stateless reset received on the path under validation
// aborts the validation (delegate sees failure) without closing the
// connection through OnConnectionClosed.
TEST_P(QuicConnectionTest, PathValidationReceivesStatelessReset) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  // Install the peer's stateless reset token so the reset is recognized.
  QuicConfig config;
  QuicConfigPeer::SetReceivedStatelessResetToken(&config,
                                                 kTestStatelessResetToken);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // The probe is one padded PATH_CHALLENGE sent from the new address.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }))
      .WillRepeatedly(DoDefault());
  bool success = true;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_EQ(0u, writer_->packets_write_attempts());
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  // Deliver an IETF stateless reset to the new self address.
  std::unique_ptr<QuicEncryptedPacket> packet(
      QuicFramer::BuildIetfStatelessResetPacket(connection_id_,
                                                100,
                                                kTestStatelessResetToken));
  std::unique_ptr<QuicReceivedPacket> received(
      ConstructReceivedPacket(*packet, QuicTime::Zero()));
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(0);
  connection_.ProcessUdpPacket(kNewSelfAddress, kPeerAddress, *received);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(success);
}
// Verifies that when the new socket blocks on the first write of a
// PATH_CHALLENGE, the connection is not reported write-blocked (the default
// path is unaffected), and OnCanWrite later flushes the probe through the
// new writer.
TEST_P(QuicConnectionTest, SendPathChallengeUsingBlockedNewSocket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Block only the first write on the new socket.
  new_writer.BlockOnNextWrite();
  // Blocking an alternative-path writer must not surface OnWriteBlocked.
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.packets_write_attempts());
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        EXPECT_EQ(1u, new_writer.padding_frames().size());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }))
      .WillRepeatedly(DoDefault());
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_EQ(0u, writer_->packets_write_attempts());
  // Unblocking and draining writes one packet on each writer.
  new_writer.SetWritable();
  connection_.OnCanWrite();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_EQ(1u, new_writer.packets_write_attempts());
}
// Server side: when the default socket is blocked, the PATH_RESPONSE +
// reverse PATH_CHALLENGE for a probing packet are queued; the retry timer
// does not duplicate the challenge, and unblocking flushes the queued data.
TEST_P(QuicConnectionTest, SendPathChallengeUsingBlockedDefaultSocket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Any4(), 12345);
  writer_->BlockOnNextWrite();
  // Blocking the default writer does surface OnWriteBlocked.
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AtLeast(2));
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        // First write: one packet carrying the PATH_RESPONSE (echoing the
        // challenge payload), a reverse PATH_CHALLENGE, and padding, sent to
        // the new peer address.
        EXPECT_EQ(1u, writer_->packets_write_attempts());
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(0,
                  memcmp(&path_challenge_payload,
                         &writer_->path_response_frames().front().data_buffer,
                         sizeof(path_challenge_payload)));
        EXPECT_EQ(1u, writer_->path_challenge_frames().size());
        EXPECT_EQ(1u, writer_->padding_frames().size());
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
      }))
      .WillRepeatedly(Invoke([&]() {
        // Later writes must not carry another PATH_CHALLENGE.
        EXPECT_EQ(0u, writer_->path_challenge_frames().size());
      }));
  // A probing PATH_CHALLENGE arrives from a new peer address.
  QuicFrames frames;
  frames.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // Firing the retry timer while still blocked attempts no new write.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
  static_cast<test::MockRandom*>(helper_->GetRandomGenerator())->ChangeValue();
  static_cast<TestAlarmFactory::TestAlarm*>(
      QuicPathValidatorPeer::retry_timer(
          QuicConnectionPeer::path_validator(&connection_)))
      ->Fire();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // Unblocking flushes the queued packet(s).
  writer_->SetWritable();
  connection_.OnCanWrite();
  EXPECT_LE(2u, writer_->packets_write_attempts());
}
// Verifies that a write failure on an alternative (new) socket while sending
// the PATH_CHALLENGE does NOT close the connection — only the probe path is
// affected.
TEST_P(QuicConnectionTest, SendPathChallengeFailOnNewSocket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Make every write on the new socket fail.
  new_writer.SetShouldWriteFail();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0u);
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  // The challenge was attempted (and failed) on the new writer only.
  EXPECT_EQ(1u, new_writer.packets_write_attempts());
  EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
  EXPECT_EQ(1u, new_writer.padding_frames().size());
  EXPECT_EQ(kNewSelfAddress.host(), new_writer.last_write_source_address());
  EXPECT_EQ(0u, writer_->packets_write_attempts());
  // The connection itself stays up.
  EXPECT_TRUE(connection_.connected());
}
// Verifies that a write failure on the DEFAULT path while sending the
// PATH_CHALLENGE closes the connection with QUIC_PACKET_WRITE_ERROR and
// cancels the pending validation.
TEST_P(QuicConnectionTest, SendPathChallengeFailOnDefaultPath) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  writer_->SetShouldWriteFail();
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(
          Invoke([](QuicConnectionCloseFrame frame, ConnectionCloseSource) {
            EXPECT_EQ(QUIC_PACKET_WRITE_ERROR, frame.quic_error_code);
          }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0u);
  {
    // The flusher scope forces the challenge to be written (and to fail)
    // before the assertions below run.
    bool success = false;
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.ValidatePath(std::make_unique<TestQuicPathValidationContext>(
                                 connection_.self_address(),
                                 connection_.peer_address(), writer_.get()),
                             std::make_unique<TestValidationResultDelegate>(
                                 &connection_, connection_.self_address(),
                                 connection_.peer_address(), &success),
                             PathValidationReason::kReasonUnknown);
  }
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_EQ(1u, writer_->path_challenge_frames().size());
  EXPECT_EQ(1u, writer_->padding_frames().size());
  EXPECT_EQ(connection_.peer_address(), writer_->last_write_peer_address());
  EXPECT_FALSE(connection_.connected());
  EXPECT_FALSE(connection_.HasPendingPathValidation());
}
// Verifies that a write failure when probing an alternative PEER address
// through the default writer also closes the connection with
// QUIC_PACKET_WRITE_ERROR (the default writer is shared with the default
// path).
TEST_P(QuicConnectionTest, SendPathChallengeFailOnAlternativePeerAddress) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  writer_->SetShouldWriteFail();
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(
          Invoke([](QuicConnectionCloseFrame frame, ConnectionCloseSource) {
            EXPECT_EQ(QUIC_PACKET_WRITE_ERROR, frame.quic_error_code);
          }));
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          connection_.self_address(), kNewPeerAddress, writer_.get()),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, connection_.self_address(), kNewPeerAddress, &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_EQ(1u, writer_->path_challenge_frames().size());
  EXPECT_EQ(1u, writer_->padding_frames().size());
  EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
  EXPECT_FALSE(connection_.connected());
}
// Verifies that an MSG_TOO_BIG write error while probing an alternative peer
// address is tolerated: the connection stays up and the validation remains
// pending, unlike a generic write error.
TEST_P(QuicConnectionTest,
       SendPathChallengeFailPacketTooBigOnAlternativePeerAddress) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  connection_.OnCanWrite();
  uint32_t num_packets_write_attempts = writer_->packets_write_attempts();
  // Fail subsequent writes specifically with "message too big".
  writer_->SetShouldWriteFail();
  writer_->SetWriteError(*writer_->MessageTooBigErrorCode());
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .Times(0u);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0u);
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          connection_.self_address(), kNewPeerAddress, writer_.get()),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, connection_.self_address(), kNewPeerAddress, &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(connection_.connected());
  // Exactly one additional (failed) write attempt was made.
  EXPECT_EQ(++num_packets_write_attempts, writer_->packets_write_attempts());
  EXPECT_EQ(1u, writer_->path_challenge_frames().size());
  EXPECT_EQ(1u, writer_->padding_frames().size());
  EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
}
// Server side: a packet carrying two PATH_CHALLENGEs from a new peer address
// produces only one PATH_RESPONSE (for the first challenge) sent to that
// address; the second write goes back to the default peer address.
TEST_P(QuicConnectionTest, ReceiveMultiplePathChallenge) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  QuicPathFrameBuffer path_frame_buffer1{0, 1, 2, 3, 4, 5, 6, 7};
  QuicPathFrameBuffer path_frame_buffer2{8, 9, 10, 11, 12, 13, 14, 15};
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer1)));
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer2)));
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback6(),
                                          23456);
  // A probing packet must not be treated as a connection migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(2)
      .WillOnce(Invoke([=, this]() {
        // First write: padded PATH_RESPONSE echoing the first challenge,
        // sent to the probing (new) peer address.
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(0,
                  memcmp(path_frame_buffer1.data(),
                         &(writer_->path_response_frames().front().data_buffer),
                         sizeof(path_frame_buffer1)));
        EXPECT_EQ(1u, writer_->padding_frames().size());
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
      }))
      .WillOnce(Invoke([=, this]() {
        // Second write goes to the original peer address.
        EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
      }));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
}
// Verifies server behavior when a packet from a new (IPv4) peer address
// carries a STREAM frame *before* a PATH_CHALLENGE: the address change is
// surfaced as a migration (stream data makes it a real migration, not just a
// probe), the stream data is processed, and the response packet bundles the
// stream response, a PATH_RESPONSE echoing the challenge payload, a reverse
// PATH_CHALLENGE, and padding, sent to the new address with reverse path
// validation left pending.
// NOTE(review): the original test asserted
// `path_challenge_frames().size()` twice verbatim; the duplicate assertion
// was removed — no behavior change.
TEST_P(QuicConnectionTest, ReceiveStreamFrameBeforePathChallenge) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  // Incoming packet: STREAM frame first, then PATH_CHALLENGE, then padding.
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  QuicPathFrameBuffer path_frame_buffer{0, 1, 2, 3, 4, 5, 6, 7};
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer)));
  frames.push_back(QuicFrame(QuicPaddingFrame(-1)));
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          23456);
  // Stream data from the new address means a genuine migration
  // (IPv6 -> IPv4), but the send algorithm is not reset here.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE));
  EXPECT_CALL(*send_algorithm_, OnConnectionMigration()).Times(0u);
  EXPECT_CALL(visitor_, OnStreamFrame(_))
      .WillOnce(Invoke([=, this](const QuicStreamFrame& frame) {
        // Respond to the stream frame so the outgoing packet carries data.
        const std::string data{"response body"};
        connection_.producer()->SaveStreamData(frame.stream_id, data);
        return notifier_.WriteOrBufferData(frame.stream_id, data.length(),
                                           NO_FIN);
      }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0u);
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  // One response packet bundling: stream data, PATH_RESPONSE (echoing the
  // challenge payload), a reverse PATH_CHALLENGE, and padding — all sent to
  // the new peer address.
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_EQ(1u, writer_->path_response_frames().size());
  EXPECT_EQ(1u, writer_->path_challenge_frames().size());
  EXPECT_EQ(0, memcmp(path_frame_buffer.data(),
                      &(writer_->path_response_frames().front().data_buffer),
                      sizeof(path_frame_buffer)));
  EXPECT_EQ(1u, writer_->padding_frames().size());
  EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
  // Reverse path validation (the PATH_CHALLENGE we sent) is still pending.
  EXPECT_TRUE(connection_.HasPendingPathValidation());
}
// Verifies server behavior when a packet from a new peer address carries a
// PATH_CHALLENGE *followed by* a STREAM frame: the PATH_RESPONSE is flushed
// first (before the stream frame is processed, so the first sent packet has
// no stream data), and bytes counted on the alternative path are moved to
// the address-validation accounting once the migration is recognized.
TEST_P(QuicConnectionTest, ReceiveStreamFrameFollowingPathChallenge) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  // Incoming packet: PATH_CHALLENGE first, then a STREAM frame.
  QuicFrames frames;
  QuicPathFrameBuffer path_frame_buffer{0, 1, 2, 3, 4, 5, 6, 7};
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer)));
  frames.push_back(QuicFrame(frame1_));
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          23456);
  // Captured inside the first OnPacketSent action, checked after processing.
  QuicByteCount received_packet_size;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([=, this, &received_packet_size]() {
        // First sent packet: PATH_RESPONSE (echoing the challenge payload),
        // a reverse PATH_CHALLENGE and padding — but no stream data yet,
        // since the STREAM frame came after the challenge.
        EXPECT_EQ(0u, writer_->stream_frames().size());
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(0,
                  memcmp(path_frame_buffer.data(),
                         &(writer_->path_response_frames().front().data_buffer),
                         sizeof(path_frame_buffer)));
        EXPECT_EQ(1u, writer_->path_challenge_frames().size());
        EXPECT_EQ(1u, writer_->padding_frames().size());
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
        // Snapshot the bytes attributed to the alternative path at this
        // point; they should later be re-attributed (see final EXPECTs).
        received_packet_size =
            QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_);
      }));
  // The stream frame from the new address turns the probe into a migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE));
  EXPECT_CALL(*send_algorithm_, OnConnectionMigration()).Times(0u);
  EXPECT_CALL(visitor_, OnStreamFrame(_))
      .WillOnce(Invoke([=, this](const QuicStreamFrame& frame) {
        const std::string data{"response body"};
        connection_.producer()->SaveStreamData(frame.stream_id, data);
        return notifier_.WriteOrBufferData(frame.stream_id, data.length(),
                                           NO_FIN);
      }));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  // The alternative-path byte count was zeroed and the previously-counted
  // bytes now count toward pre-address-validation accounting.
  EXPECT_EQ(0u,
            QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_));
  EXPECT_EQ(
      received_packet_size,
      QuicConnectionPeer::BytesReceivedBeforeAddressValidation(&connection_));
}
// Verifies handling of an out-of-order packet (packet number reset to 1
// below) from a new peer address carrying stream data around a
// PATH_CHALLENGE: stream responses go to the default (original) peer
// address, while the padded PATH_RESPONSE goes to the new address; no
// migration is reported for the out-of-order packet.
TEST_P(QuicConnectionTest, PathChallengeWithDataInOutOfOrderPacket) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  // Incoming packet: STREAM, PATH_CHALLENGE, STREAM.
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  QuicPathFrameBuffer path_frame_buffer{0, 1, 2, 3, 4, 5, 6, 7};
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer)));
  frames.push_back(QuicFrame(frame2_));
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback6(),
                                          23456);
  // An out-of-order packet must not be treated as a migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0u);
  EXPECT_CALL(visitor_, OnStreamFrame(_))
      .Times(2)
      .WillRepeatedly(Invoke([=, this](const QuicStreamFrame& frame) {
        // Respond to each stream frame with some data.
        const std::string data{"response body"};
        connection_.producer()->SaveStreamData(frame.stream_id, data);
        return notifier_.WriteOrBufferData(frame.stream_id, data.length(),
                                           NO_FIN);
      }));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(Invoke([=, this]() {
        // 1st packet: stream response to the default peer address.
        EXPECT_EQ(1u, writer_->stream_frames().size());
        EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
      }))
      .WillOnce(Invoke([=, this]() {
        // 2nd packet: padded PATH_RESPONSE echoing the challenge payload,
        // sent to the probing (new) address.
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(0,
                  memcmp(path_frame_buffer.data(),
                         &(writer_->path_response_frames().front().data_buffer),
                         sizeof(path_frame_buffer)));
        EXPECT_EQ(1u, writer_->padding_frames().size());
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
      }))
      .WillOnce(Invoke([=, this]() {
        // 3rd packet: second stream response, again to the default address.
        EXPECT_EQ(1u, writer_->stream_frames().size());
        EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
      }));
  // Force the incoming packet number back to 1 so it arrives out of order.
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 1);
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
}
// Verifies that when the writer is blocked, a server receiving a
// PATH_CHALLENGE from a new peer address reports the blockage via
// OnWriteBlocked instead of sending (or dropping) the PATH_RESPONSE, and
// does not treat the out-of-order probe as a migration.
TEST_P(QuicConnectionTest, FailToWritePathResponseAtServer) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  QuicFrames frames;
  QuicPathFrameBuffer path_frame_buffer{0, 1, 2, 3, 4, 5, 6, 7};
  frames.push_back(QuicFrame(QuicPathChallengeFrame(0, path_frame_buffer)));
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback6(),
                                          23456);
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0u);
  // Make the probe arrive out of order (packet number 1).
  QuicPacketCreatorPeer::SetPacketNumber(&peer_creator_, 1);
  // The blocked writer must surface at least one OnWriteBlocked callback.
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AtLeast(1));
  writer_->SetWriteBlocked();
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
}
// Verifies that once INITIAL keys are dropped and unencrypted packets are
// neutered, firing the ack/send/retransmission alarms only produces
// FORWARD_SECURE (application data) packets — handshake data is not PTOed.
// The 0x03030303 tag corresponds to the ENCRYPTION_FORWARD_SECURE
// TaggingEncrypter installed below.
TEST_P(QuicConnectionTest, HandshakeDataDoesNotGetPtoed) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  // Receive a client INITIAL, which leaves a pending ack.
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
  // Install HANDSHAKE keys and send handshake data.
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  // Install 1-RTT keys and send application data.
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  // Receive a HANDSHAKE packet from the peer, then drop INITIAL keys.
  peer_framer_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_HANDSHAKE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  ASSERT_TRUE(connection_.HasPendingAcks());
  // Fire the ack alarm: the resulting packet must be FORWARD_SECURE
  // (final bytes carry the 0x03 tag), not a handshake packet.
  connection_.GetSendAlarm()->Set(clock_.ApproximateNow());
  connection_.GetAckAlarm()->Fire();
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  connection_.GetSendAlarm()->Fire();
  // Fire the retransmission (PTO) alarm: still FORWARD_SECURE only —
  // handshake data must not be retransmitted after key discard.
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  connection_.GetRetransmissionAlarm()->Fire();
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
}
// Verifies that the coalesced-packet machinery tolerates INITIAL keys being
// discarded (inside OnHandshakePacketSent) while a packet flusher is still
// open: the buffered INITIAL data is discarded rather than written, and the
// connection stays alive.
TEST_P(QuicConnectionTest, CoalescerHandlesInitialKeyDiscard) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  SetQuicReloadableFlag(quic_discard_initial_packet_with_key_dropped, true);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(2);
  // Drop INITIAL keys as a side effect of sending the handshake packet,
  // while the flusher below is still holding packets in the coalescer.
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).WillOnce(Invoke([this]() {
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.NeuterUnencryptedPackets();
  }));
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_EQ(0u, connection_.GetStats().packets_discarded);
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    ProcessCryptoPacketAtLevel(1000, ENCRYPTION_INITIAL);
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
    connection_.SetEncrypter(
        ENCRYPTION_HANDSHAKE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString(std::string(1200, 'a'), 0);
    // Nothing hits the wire until the flusher goes out of scope.
    EXPECT_EQ(0u, writer_->packets_write_attempts());
  }
  EXPECT_TRUE(connection_.connected());
}
// Exercises the interaction of 0-RTT rejection (zero-RTT packets marked for
// retransmission from inside OnCryptoFrame) with INITIAL key removal while a
// coalesced INITIAL+HANDSHAKE packet is being processed. The test passes if
// the connection handles the retransmission without crashing or closing.
TEST_P(QuicConnectionTest, ZeroRttRejectionAndMissingInitialKeys) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  connection_.set_defer_send_in_response_to_packets(false);
  // Drop INITIAL keys as soon as a handshake packet is sent.
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).WillOnce(Invoke([this]() {
    connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
    connection_.NeuterUnencryptedPackets();
  }));
  // On receiving HANDSHAKE crypto data: reject 0-RTT (mark those packets
  // for retransmission) and install HANDSHAKE + 1-RTT keys, mimicking a
  // server that refused early data.
  EXPECT_CALL(visitor_, OnCryptoFrame(_))
      .WillRepeatedly(Invoke([=, this](const QuicCryptoFrame& frame) {
        if (frame.level == ENCRYPTION_HANDSHAKE) {
          connection_.MarkZeroRttPacketsForRetransmission(0);
          connection_.SetEncrypter(
              ENCRYPTION_HANDSHAKE,
              std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
          connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
          connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
          connection_.SetEncrypter(
              ENCRYPTION_FORWARD_SECURE,
              std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
          connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
          clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
        }
      }));
  // Send INITIAL crypto data and a 0-RTT stream packet.
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  // Ack packet 1 at the INITIAL level, then fire the PTO.
  QuicAckFrame frame1 = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessFramePacketAtLevel(1, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  connection_.GetRetransmissionAlarm()->Fire();
  // Deliver a coalesced INITIAL + HANDSHAKE packet; the HANDSHAKE crypto
  // frame triggers the 0-RTT rejection path installed above.
  QuicFrames frames1;
  frames1.push_back(QuicFrame(&crypto_frame_));
  QuicFrames frames2;
  QuicCryptoFrame crypto_frame(ENCRYPTION_HANDSHAKE, 0,
                               absl::string_view(data1));
  frames2.push_back(QuicFrame(&crypto_frame));
  ProcessCoalescedPacket(
      {{2, frames1, ENCRYPTION_INITIAL}, {3, frames2, ENCRYPTION_HANDSHAKE}});
}
// Verifies that the debug visitor's OnZeroRttPacketAcked fires exactly once:
// only when an ack newly covers a 0-RTT packet (packet 2 here), not for acks
// that cover no 0-RTT packets and not again for later acks.
TEST_P(QuicConnectionTest, OnZeroRttPacketAcked) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  // Packet 1: INITIAL crypto data. Packets 2 and 3: 0-RTT stream data.
  connection_.SendCryptoStreamData();
  connection_.SetEncrypter(ENCRYPTION_ZERO_RTT,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  connection_.SendStreamDataWithString(4, "bar", 0, NO_FIN);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(AnyNumber());
  // Ack of packet 1 (the INITIAL crypto packet): no 0-RTT packet acked.
  QuicFrames frames1;
  QuicAckFrame ack_frame1 = InitAckFrame(1);
  frames1.push_back(QuicFrame(&ack_frame1));
  QuicFrames frames2;
  QuicCryptoFrame crypto_frame(ENCRYPTION_HANDSHAKE, 0,
                               absl::string_view(data1));
  frames2.push_back(QuicFrame(&crypto_frame));
  EXPECT_CALL(debug_visitor, OnZeroRttPacketAcked()).Times(0);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  ProcessCoalescedPacket(
      {{1, frames1, ENCRYPTION_INITIAL}, {2, frames2, ENCRYPTION_HANDSHAKE}});
  // Ack covering packet 2 (a 0-RTT packet): fires exactly once.
  QuicFrames frames3;
  QuicAckFrame ack_frame2 =
      InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  frames3.push_back(QuicFrame(&ack_frame2));
  EXPECT_CALL(debug_visitor, OnZeroRttPacketAcked()).Times(1);
  ProcessCoalescedPacket({{3, frames3, ENCRYPTION_FORWARD_SECURE}});
  // Ack covering packet 3 (also 0-RTT): must NOT fire again.
  QuicFrames frames4;
  QuicAckFrame ack_frame3 =
      InitAckFrame({{QuicPacketNumber(3), QuicPacketNumber(4)}});
  frames4.push_back(QuicFrame(&ack_frame3));
  EXPECT_CALL(debug_visitor, OnZeroRttPacketAcked()).Times(0);
  ProcessCoalescedPacket({{4, frames4, ENCRYPTION_FORWARD_SECURE}});
}
// Walks through three consecutive locally-initiated key updates. The
// invariant exercised each round: a key update is only allowed after a
// packet sent in the current key phase has been acked; after the update the
// peer framer performs the matching update so both sides agree on the key
// tag (tracked by `correct_tag`, incremented per phase).
TEST_P(QuicConnectionTest, InitiateKeyUpdate) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  // Negotiate a config so the connection considers the handshake complete.
  TransportParameters params;
  QuicConfig config;
  std::string error_details;
  EXPECT_THAT(config.ProcessTransportParameters(
                  params, false, &error_details),
              IsQuicNoError());
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  // Key update is not allowed before the handshake is confirmed.
  EXPECT_FALSE(connection_.IsKeyUpdateAllowed());
  MockFramerVisitor peer_framer_visitor_;
  peer_framer_.set_visitor(&peer_framer_visitor_);
  // `correct_tag` tracks the tag of the current key phase on both sides.
  uint8_t correct_tag = ENCRYPTION_FORWARD_SECURE;
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(correct_tag));
  SetDecrypter(ENCRYPTION_FORWARD_SECURE,
               std::make_unique<StrictTaggingDecrypter>(correct_tag));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(correct_tag));
  // Still not allowed: no packet in the current phase has been acked yet.
  EXPECT_FALSE(connection_.IsKeyUpdateAllowed());
  EXPECT_FALSE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  // --- Round 1: send, get acked, then update keys locally. ---
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(1u), last_packet);
  EXPECT_FALSE(connection_.IsKeyUpdateAllowed());
  EXPECT_TRUE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  EXPECT_FALSE(connection_.GetDiscardPreviousOneRttKeysAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame1 = InitAckFrame(1);
  ProcessAckPacket(&frame1);
  // The ack arms the alarm to discard previous-phase keys.
  EXPECT_TRUE(connection_.GetDiscardPreviousOneRttKeysAlarm()->IsSet());
  EXPECT_FALSE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  correct_tag++;
  EXPECT_CALL(visitor_, AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<StrictTaggingDecrypter>(correct_tag);
      });
  EXPECT_CALL(visitor_, CreateCurrentOneRttEncrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<TaggingEncrypter>(correct_tag);
      });
  EXPECT_CALL(visitor_, OnKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_TRUE(connection_.InitiateKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_FALSE(connection_.GetDiscardPreviousOneRttKeysAlarm()->IsSet());
  EXPECT_FALSE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  // Bring the peer framer into the new key phase as well.
  EXPECT_CALL(peer_framer_visitor_,
              AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<StrictTaggingDecrypter>(correct_tag);
      });
  EXPECT_CALL(peer_framer_visitor_, CreateCurrentOneRttEncrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<TaggingEncrypter>(correct_tag);
      });
  peer_framer_.SetKeyUpdateSupportForConnection(true);
  peer_framer_.DoKeyUpdate(KeyUpdateReason::kRemote);
  // --- Round 2: same cycle in the new phase. ---
  EXPECT_FALSE(connection_.IsKeyUpdateAllowed());
  SendStreamDataToPeer(2, "bar", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(2u), last_packet);
  EXPECT_TRUE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame2 = InitAckFrame(2);
  ProcessAckPacket(&frame2);
  EXPECT_TRUE(connection_.GetDiscardPreviousOneRttKeysAlarm()->IsSet());
  EXPECT_FALSE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  correct_tag++;
  EXPECT_CALL(visitor_, AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<StrictTaggingDecrypter>(correct_tag);
      });
  EXPECT_CALL(visitor_, CreateCurrentOneRttEncrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<TaggingEncrypter>(correct_tag);
      });
  EXPECT_CALL(visitor_, OnKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_TRUE(connection_.InitiateKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_CALL(peer_framer_visitor_,
              AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<StrictTaggingDecrypter>(correct_tag);
      });
  EXPECT_CALL(peer_framer_visitor_, CreateCurrentOneRttEncrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<TaggingEncrypter>(correct_tag);
      });
  peer_framer_.DoKeyUpdate(KeyUpdateReason::kRemote);
  // --- Round 3: once more, ending with no unacked current-phase packets. ---
  EXPECT_FALSE(connection_.IsKeyUpdateAllowed());
  SendStreamDataToPeer(3, "baz", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(3u), last_packet);
  EXPECT_FALSE(connection_.IsKeyUpdateAllowed());
  EXPECT_TRUE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame3 = InitAckFrame(3);
  ProcessAckPacket(&frame3);
  EXPECT_TRUE(connection_.GetDiscardPreviousOneRttKeysAlarm()->IsSet());
  EXPECT_FALSE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
  correct_tag++;
  EXPECT_CALL(visitor_, AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<StrictTaggingDecrypter>(correct_tag);
      });
  EXPECT_CALL(visitor_, CreateCurrentOneRttEncrypter())
      .WillOnce([&correct_tag]() {
        return std::make_unique<TaggingEncrypter>(correct_tag);
      });
  EXPECT_CALL(visitor_, OnKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_TRUE(connection_.InitiateKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_FALSE(connection_.GetDiscardPreviousOneRttKeysAlarm()->IsSet());
  EXPECT_FALSE(connection_.HaveSentPacketsInCurrentKeyPhaseButNoneAcked());
}
// Verifies that the connection proactively initiates a key update
// (kLocalKeyUpdateLimitOverride) as packet counts approach the
// quic_key_update_confidentiality_limit (set to 3 here): with 8 packets
// sent, updates are expected before packets 3 and 6, and the peer key-phases
// along after packets 4 and 7 so both sides stay in sync.
TEST_P(QuicConnectionTest, InitiateKeyUpdateApproachingConfidentialityLimit) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  SetQuicFlag(quic_key_update_confidentiality_limit, 3U);
  // Negotiate a config so the handshake can be treated as confirmed.
  std::string error_details;
  TransportParameters params;
  QuicConfig config;
  EXPECT_THAT(config.ProcessTransportParameters(
                  params, false, &error_details),
              IsQuicNoError());
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  MockFramerVisitor peer_framer_visitor_;
  peer_framer_.set_visitor(&peer_framer_visitor_);
  // `current_tag` tracks the key-phase tag shared by both endpoints.
  uint8_t current_tag = ENCRYPTION_FORWARD_SECURE;
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(current_tag));
  SetDecrypter(ENCRYPTION_FORWARD_SECURE,
               std::make_unique<StrictTaggingDecrypter>(current_tag));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  peer_framer_.SetKeyUpdateSupportForConnection(true);
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(current_tag));
  const QuicConnectionStats& stats = connection_.GetStats();
  for (int packet_num = 1; packet_num <= 8; ++packet_num) {
    if (packet_num == 3 || packet_num == 6) {
      // Approaching the confidentiality limit: expect a proactive,
      // limit-driven local key update before this packet is sent.
      current_tag++;
      EXPECT_CALL(visitor_, AdvanceKeysAndCreateCurrentOneRttDecrypter())
          .WillOnce([current_tag]() {
            return std::make_unique<StrictTaggingDecrypter>(current_tag);
          });
      EXPECT_CALL(visitor_, CreateCurrentOneRttEncrypter())
          .WillOnce([current_tag]() {
            return std::make_unique<TaggingEncrypter>(current_tag);
          });
      EXPECT_CALL(visitor_,
                  OnKeyUpdate(KeyUpdateReason::kLocalKeyUpdateLimitOverride));
    }
    QuicPacketNumber last_packet;
    SendStreamDataToPeer(packet_num, "foo", 0, NO_FIN, &last_packet);
    EXPECT_EQ(QuicPacketNumber(packet_num), last_packet);
    // Key-update count grows by one at each limit-driven update.
    if (packet_num >= 6) {
      EXPECT_EQ(2U, stats.key_update_count);
    } else if (packet_num >= 3) {
      EXPECT_EQ(1U, stats.key_update_count);
    } else {
      EXPECT_EQ(0U, stats.key_update_count);
    }
    if (packet_num == 4 || packet_num == 7) {
      // Move the peer framer into the new key phase so its acks decrypt.
      EXPECT_CALL(peer_framer_visitor_,
                  AdvanceKeysAndCreateCurrentOneRttDecrypter())
          .WillOnce([current_tag]() {
            return std::make_unique<StrictTaggingDecrypter>(current_tag);
          });
      EXPECT_CALL(peer_framer_visitor_, CreateCurrentOneRttEncrypter())
          .WillOnce([current_tag]() {
            return std::make_unique<TaggingEncrypter>(current_tag);
          });
      peer_framer_.DoKeyUpdate(KeyUpdateReason::kRemote);
    }
    // Ack the packet so the next key update becomes allowed.
    EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
    QuicAckFrame frame1 = InitAckFrame(packet_num);
    ProcessAckPacket(&frame1);
  }
}
// Verifies that when the encrypter's confidentiality (AEAD) limit is reached
// but a key update is not allowed (limit flag set to 1, encrypter limit 3,
// and no acks ever arrive), the connection closes with
// QUIC_AEAD_LIMIT_REACHED instead of continuing to encrypt.
TEST_P(QuicConnectionTest,
       CloseConnectionOnConfidentialityLimitKeyUpdateNotAllowed) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  // Flag limit of 1 would normally trigger an update immediately, but the
  // encrypter's own hard limit of 3 is what forces the close below.
  SetQuicFlag(quic_key_update_confidentiality_limit, 1U);
  constexpr size_t kConfidentialityLimit = 3U;
  // Negotiate a config so the handshake can be treated as confirmed.
  std::string error_details;
  TransportParameters params;
  QuicConfig config;
  EXPECT_THAT(config.ProcessTransportParameters(
                  params, false, &error_details),
              IsQuicNoError());
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypterWithConfidentialityLimit>(
          ENCRYPTION_FORWARD_SECURE, kConfidentialityLimit));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  QuicPacketNumber last_packet;
  // No acks are processed, so a key update is never allowed; the third
  // packet hits the encrypter's confidentiality limit and must close.
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_TRUE(connection_.connected());
  SendStreamDataToPeer(2, "foo", 0, NO_FIN, &last_packet);
  EXPECT_TRUE(connection_.connected());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  SendStreamDataToPeer(3, "foo", 0, NO_FIN, &last_packet);
  EXPECT_FALSE(connection_.connected());
  const QuicConnectionStats& stats = connection_.GetStats();
  EXPECT_EQ(0U, stats.key_update_count);
  TestConnectionCloseQuicErrorCode(QUIC_AEAD_LIMIT_REACHED);
}
// Verifies that reaching the decrypter's integrity (forgery) limit during
// the handshake closes the connection with QUIC_AEAD_LIMIT_REACHED: each
// wrongly-tagged HANDSHAKE packet bumps the failed-authentication counter,
// and the limit-th failure closes the connection.
TEST_P(QuicConnectionTest, CloseConnectionOnIntegrityLimitDuringHandshake) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  constexpr uint8_t correct_tag = ENCRYPTION_HANDSHAKE;
  constexpr uint8_t wrong_tag = 0xFE;
  constexpr QuicPacketCount kIntegrityLimit = 3;
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
                   correct_tag, kIntegrityLimit));
  connection_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                           std::make_unique<TaggingEncrypter>(correct_tag));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  // Peer encrypts with the wrong tag, so every packet fails authentication.
  peer_framer_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                            std::make_unique<TaggingEncrypter>(wrong_tag));
  for (uint64_t i = 1; i <= kIntegrityLimit; ++i) {
    EXPECT_TRUE(connection_.connected());
    if (i == kIntegrityLimit) {
      // The final failed decryption triggers the close.
      EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
      EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(AnyNumber());
    }
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_HANDSHAKE);
    EXPECT_EQ(
        i, connection_.GetStats().num_failed_authentication_packets_received);
  }
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_AEAD_LIMIT_REACHED);
}
// Same as CloseConnectionOnIntegrityLimitDuringHandshake but after the
// handshake is confirmed: failed 1-RTT decryptions count toward the
// integrity limit and the limit-th failure closes the connection with
// QUIC_AEAD_LIMIT_REACHED.
TEST_P(QuicConnectionTest, CloseConnectionOnIntegrityLimitAfterHandshake) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  constexpr uint8_t correct_tag = ENCRYPTION_FORWARD_SECURE;
  constexpr uint8_t wrong_tag = 0xFE;
  constexpr QuicPacketCount kIntegrityLimit = 3;
  SetDecrypter(ENCRYPTION_FORWARD_SECURE,
               std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
                   correct_tag, kIntegrityLimit));
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(correct_tag));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  // Peer encrypts with the wrong tag, so every packet fails authentication.
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(wrong_tag));
  for (uint64_t i = 1; i <= kIntegrityLimit; ++i) {
    EXPECT_TRUE(connection_.connected());
    if (i == kIntegrityLimit) {
      EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
    }
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(
        i, connection_.GetStats().num_failed_authentication_packets_received);
  }
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_AEAD_LIMIT_REACHED);
}
// Verifies that failed authentications accumulate across encryption levels:
// two failures at the HANDSHAKE level plus failures at the FORWARD_SECURE
// level count toward the same limit (4), and hitting it closes the
// connection with QUIC_AEAD_LIMIT_REACHED.
TEST_P(QuicConnectionTest,
       CloseConnectionOnIntegrityLimitAcrossEncryptionLevels) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  uint8_t correct_tag = ENCRYPTION_HANDSHAKE;
  constexpr uint8_t wrong_tag = 0xFE;
  constexpr QuicPacketCount kIntegrityLimit = 4;
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
                   correct_tag, kIntegrityLimit));
  connection_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                           std::make_unique<TaggingEncrypter>(correct_tag));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  peer_framer_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                            std::make_unique<TaggingEncrypter>(wrong_tag));
  // Two authentication failures at the HANDSHAKE level.
  for (uint64_t i = 1; i <= 2; ++i) {
    EXPECT_TRUE(connection_.connected());
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_HANDSHAKE);
    EXPECT_EQ(
        i, connection_.GetStats().num_failed_authentication_packets_received);
  }
  // Switch to FORWARD_SECURE and drop the earlier levels' keys; the failure
  // counter must carry over rather than reset.
  correct_tag = ENCRYPTION_FORWARD_SECURE;
  SetDecrypter(ENCRYPTION_FORWARD_SECURE,
               std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
                   correct_tag, kIntegrityLimit));
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(correct_tag));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.RemoveEncrypter(ENCRYPTION_HANDSHAKE);
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(wrong_tag));
  // Remaining failures at the FORWARD_SECURE level reach the shared limit.
  for (uint64_t i = 3; i <= kIntegrityLimit; ++i) {
    EXPECT_TRUE(connection_.connected());
    if (i == kIntegrityLimit) {
      EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
    }
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(
        i, connection_.GetStats().num_failed_authentication_packets_received);
  }
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_AEAD_LIMIT_REACHED);
}
// Verifies that packets arriving at an encryption level for which no
// decryption key is installed do NOT count as authentication failures:
// twice the integrity limit of undecryptable FORWARD_SECURE packets leaves
// the failure counter at zero and the connection open.
TEST_P(QuicConnectionTest, IntegrityLimitDoesNotApplyWithoutDecryptionKey) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  constexpr uint8_t correct_tag = ENCRYPTION_HANDSHAKE;
  constexpr uint8_t wrong_tag = 0xFE;
  constexpr QuicPacketCount kIntegrityLimit = 3;
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
                   correct_tag, kIntegrityLimit));
  connection_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                           std::make_unique<TaggingEncrypter>(correct_tag));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  // No FORWARD_SECURE decrypter installed: those packets are undecryptable,
  // not "failed authentication".
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE);
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(wrong_tag));
  for (uint64_t i = 1; i <= kIntegrityLimit * 2; ++i) {
    EXPECT_TRUE(connection_.connected());
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(
        0u, connection_.GetStats().num_failed_authentication_packets_received);
  }
  EXPECT_TRUE(connection_.connected());
}
// Verifies that failed authentications accumulate across key phases: two
// failures before a key update plus failures after it count toward the same
// integrity limit (4), closing the connection with QUIC_AEAD_LIMIT_REACHED.
TEST_P(QuicConnectionTest, CloseConnectionOnIntegrityLimitAcrossKeyPhases) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  constexpr QuicPacketCount kIntegrityLimit = 4;
  // Negotiate a config so the handshake can be treated as confirmed.
  TransportParameters params;
  QuicConfig config;
  std::string error_details;
  EXPECT_THAT(config.ProcessTransportParameters(
                  params, false, &error_details),
              IsQuicNoError());
  QuicConfigPeer::SetNegotiated(&config, true);
  if (connection_.version().UsesTls()) {
    QuicConfigPeer::SetReceivedOriginalConnectionId(
        &config, connection_.connection_id());
    QuicConfigPeer::SetReceivedInitialSourceConnectionId(
        &config, connection_.connection_id());
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  MockFramerVisitor peer_framer_visitor_;
  peer_framer_.set_visitor(&peer_framer_visitor_);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                           std::make_unique<TaggingEncrypter>(0x01));
  SetDecrypter(ENCRYPTION_FORWARD_SECURE,
               std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
                   ENCRYPTION_FORWARD_SECURE, kIntegrityLimit));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  // Phase 0: two authentication failures (peer uses wrong tag 0xFF).
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(0xFF));
  for (uint64_t i = 1; i <= 2; ++i) {
    EXPECT_TRUE(connection_.connected());
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(
        i, connection_.GetStats().num_failed_authentication_packets_received);
  }
  // Get a packet acked in the current phase so a key update is allowed.
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(1u), last_packet);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame1 = InitAckFrame(1);
  ProcessAckPacket(&frame1);
  // Initiate a key update to phase 1 (tag 0x02) on both endpoints.
  EXPECT_CALL(visitor_, AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce([kIntegrityLimit]() {
        return std::make_unique<StrictTaggingDecrypterWithIntegrityLimit>(
            0x02, kIntegrityLimit);
      });
  EXPECT_CALL(visitor_, CreateCurrentOneRttEncrypter()).WillOnce([]() {
    return std::make_unique<TaggingEncrypter>(0x02);
  });
  EXPECT_CALL(visitor_, OnKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_TRUE(connection_.InitiateKeyUpdate(KeyUpdateReason::kLocalForTests));
  EXPECT_CALL(peer_framer_visitor_,
              AdvanceKeysAndCreateCurrentOneRttDecrypter())
      .WillOnce(
          []() { return std::make_unique<StrictTaggingDecrypter>(0x02); });
  EXPECT_CALL(peer_framer_visitor_, CreateCurrentOneRttEncrypter())
      .WillOnce([]() { return std::make_unique<TaggingEncrypter>(0x02); });
  peer_framer_.SetKeyUpdateSupportForConnection(true);
  peer_framer_.DoKeyUpdate(KeyUpdateReason::kLocalForTests);
  // Exchange a valid packet in the new phase; the failure counter must
  // still read 2 (carried over, not reset).
  SendStreamDataToPeer(2, "bar", 0, NO_FIN, &last_packet);
  EXPECT_EQ(QuicPacketNumber(2u), last_packet);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(true, _, _, _, _, _, _));
  QuicAckFrame frame2 = InitAckFrame(2);
  ProcessAckPacket(&frame2);
  EXPECT_EQ(2u,
            connection_.GetStats().num_failed_authentication_packets_received);
  // Phase 1: further failures reach the shared limit and close the
  // connection.
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(0xFF));
  for (uint64_t i = 3; i <= kIntegrityLimit; ++i) {
    EXPECT_TRUE(connection_.connected());
    if (i == kIntegrityLimit) {
      EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
    }
    ProcessDataPacketAtLevel(i, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
    EXPECT_EQ(
        i, connection_.GetStats().num_failed_authentication_packets_received);
  }
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(QUIC_AEAD_LIMIT_REACHED);
}
// Verifies that a server sends an ACK_FREQUENCY frame with the default
// packet tolerance (10) after enough data packets have been sent, when the
// peer advertised min_ack_delay and the reloadable flag is on.
TEST_P(QuicConnectionTest, SendAckFrequencyFrame) {
  // ACK_FREQUENCY is an IETF-QUIC extension frame only.
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  SetQuicReloadableFlag(quic_can_send_ack_frequency, true);
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  // Peer support for min_ack_delay is what enables sending ACK_FREQUENCY.
  QuicConfig config;
  QuicConfigPeer::SetReceivedMinAckDelayMs(&config, 1);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  writer_->SetWritable();
  // Start at packet number 99 so the next sends straddle packet 100, the
  // point at which the connection emits the ACK_FREQUENCY frame.
  QuicPacketCreatorPeer::SetPacketNumber(creator_, 99);
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);
  // Capture the frame handed to the visitor for inspection below.
  QuicAckFrequencyFrame captured_frame;
  EXPECT_CALL(visitor_, SendAckFrequency(_))
      .WillOnce(Invoke([&captured_frame](const QuicAckFrequencyFrame& frame) {
        captured_frame = frame;
      }));
  SendStreamDataToPeer(1, "bar", 3, NO_FIN, nullptr);
  // Default packet tolerance is 10; max_ack_delay is the default delayed-ack
  // time.
  EXPECT_EQ(captured_frame.packet_tolerance, 10u);
  EXPECT_EQ(captured_frame.max_ack_delay,
            QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs()));
  // No further SendAckFrequency call is expected for this send.
  SendStreamDataToPeer(1, "baz", 6, NO_FIN, nullptr);
}
// Verifies that, with the kAFF2 connection option, a server sends an
// ACK_FREQUENCY frame with packet tolerance 2 immediately upon handshake
// completion (rather than waiting for data transmission).
TEST_P(QuicConnectionTest, SendAckFrequencyFrameUponHandshakeCompletion) {
  // ACK_FREQUENCY is an IETF-QUIC extension frame only.
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  SetQuicReloadableFlag(quic_can_send_ack_frequency, true);
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  QuicConfig config;
  QuicConfigPeer::SetReceivedMinAckDelayMs(&config, 1);
  // kAFF2: request packet tolerance 2 at handshake completion.
  QuicTagVector quic_tag_vector;
  quic_tag_vector.push_back(kAFF2);
  QuicConfigPeer::SetReceivedConnectionOptions(&config, quic_tag_vector);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  peer_creator_.set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  // Capture the ACK_FREQUENCY frame emitted by OnHandshakeComplete().
  QuicAckFrequencyFrame captured_frame;
  EXPECT_CALL(visitor_, SendAckFrequency(_))
      .WillOnce(Invoke([&captured_frame](const QuicAckFrequencyFrame& frame) {
        captured_frame = frame;
      }));
  connection_.OnHandshakeComplete();
  EXPECT_EQ(captured_frame.packet_tolerance, 2u);
  EXPECT_EQ(captured_frame.max_ack_delay,
            QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs()));
}
// Verifies that when a HANDSHAKE packet arrives while the initial crypto data
// is still outstanding (suggesting the ServerHello was lost), the
// retransmission alarm is armed with only kAlarmGranularity delay so recovery
// is fast.
TEST_P(QuicConnectionTest, FastRecoveryOfLostServerHello) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(20));
  // Receiving a HANDSHAKE packet out of order implies the peer processed our
  // Initial, so the missing ServerHello should be recovered quickly.
  peer_framer_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                            std::make_unique<TaggingEncrypter>(0x02));
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_HANDSHAKE);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // The alarm fires after just kAlarmGranularity, not a full PTO.
  EXPECT_EQ(clock_.ApproximateNow() + kAlarmGranularity,
            connection_.GetRetransmissionAlarm()->deadline());
}
// Verifies that when the ServerHello (INITIAL crypto data) arrives after a
// HANDSHAKE packet, the retransmission alarm falls back to the regular
// sent-packet-manager deadline instead of the fast-recovery granularity.
TEST_P(QuicConnectionTest, ServerHelloGetsReordered) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  connection_.SetFromConfig(config);
  // When the INITIAL crypto frame finally arrives, install handshake keys as
  // a real client would after processing the ServerHello.
  EXPECT_CALL(visitor_, OnCryptoFrame(_))
      .WillRepeatedly(Invoke([=, this](const QuicCryptoFrame& frame) {
        if (frame.level == ENCRYPTION_INITIAL) {
          SetDecrypter(
              ENCRYPTION_HANDSHAKE,
              std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
          connection_.SetEncrypter(
              ENCRYPTION_HANDSHAKE,
              std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
          connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
        }
      }));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(20));
  // Deliver HANDSHAKE packet 2 before INITIAL packet 1 (reordering).
  peer_framer_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                            std::make_unique<TaggingEncrypter>(0x02));
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_HANDSHAKE);
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  // Once the ServerHello is processed, the normal PTO deadline applies.
  EXPECT_EQ(connection_.sent_packet_manager().GetRetransmissionTime(),
            connection_.GetRetransmissionAlarm()->deadline());
}
// Verifies MigratePath(): the connection adopts the new self address and
// writer, clears path-degrading state, and (for IETF frames) drops packets
// queued on the old path.
TEST_P(QuicConnectionTest, MigratePath) {
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Put the connection into the path-degrading state before migrating.
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.OnPathDegradingDetected();
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address());
  // Queue one packet on the old path by blocking the old writer.
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(1);
  writer_->SetWriteBlocked();
  connection_.SendMtuDiscoveryPacket(kMaxOutgoingPacketSize);
  EXPECT_EQ(1u, connection_.NumQueuedPackets());
  if (version().HasIetfQuicFrames()) {
    // IETF migration requires an unused peer-issued connection ID.
    QuicNewConnectionIdFrame frame;
    frame.connection_id = TestConnectionId(1234);
    ASSERT_NE(frame.connection_id, connection_.connection_id())
    frame.stateless_reset_token =
        QuicUtils::GenerateStatelessResetToken(frame.connection_id);
    frame.retire_prior_to = 0u;
    frame.sequence_number = 1u;
    connection_.OnNewConnectionIdFrame(frame);
  }
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, OnForwardProgressMadeAfterPathDegrading());
  EXPECT_TRUE(connection_.MigratePath(kNewSelfAddress,
                                      connection_.peer_address(), &new_writer,
                                      /*owns_writer=*/false));
  EXPECT_EQ(kNewSelfAddress, connection_.self_address());
  EXPECT_EQ(&new_writer, QuicConnectionPeer::GetWriter(&connection_));
  EXPECT_FALSE(connection_.IsPathDegrading());
  if (version().HasIetfQuicFrames()) {
    // Packets queued on the old path are discarded on an IETF migration.
    EXPECT_EQ(0u, connection_.NumQueuedPackets());
  } else {
    EXPECT_EQ(1u, connection_.NumQueuedPackets());
  }
}
// Verifies that migrating to a path whose validation is still in progress
// succeeds: the path validation remains pending, and the target path is no
// longer tracked as the alternative path (it became the default path).
TEST_P(QuicConnectionTest, MigrateToNewPathDuringProbing) {
  if (!VersionHasIetfQuicFrames(connection_.version().transport_version)) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  EXPECT_NE(kNewSelfAddress, connection_.self_address())
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  // Kick off path validation toward the new self address.
  bool success = false;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), &new_writer),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  // Migrate while validation is still pending.
  connection_.MigratePath(kNewSelfAddress, connection_.peer_address(),
                          &new_writer, /*owns_writer=*/false);
  EXPECT_EQ(kNewSelfAddress, connection_.self_address());
  // Validation continues, but the path is now the default, not alternative.
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
}
// End-to-end exercise of the client multi-port (kMPQC) feature: creating and
// validating an alternative path from a NEW_CONNECTION_ID frame, re-probing
// on the multi-port alarm, RTT stats accounting, and tearing the path down
// after repeated probe failures while degrading.
TEST_P(QuicConnectionTest, MultiPortConnection) {
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kMPQC});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.OnPathDegradingDetected();
  auto self_address = connection_.self_address();
  const QuicSocketAddress kNewSelfAddress(self_address.host(),
                                          self_address.port() + 1);
  EXPECT_NE(kNewSelfAddress, self_address);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive()).WillOnce(Return(false));
  // A peer-issued connection ID is required before probing a new path.
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(1234);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 1u;
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
      .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
        // make_unique already yields an rvalue; no std::move needed.
        observer->OnMultiPortPathContextAvailable(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, connection_.peer_address(), &new_writer));
      }));
  connection_.OnNewConnectionIdFrame(frame);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  EXPECT_EQ(PathValidationReason::kMultiPort,
            QuicConnectionPeer::path_validator(&connection_)
                ->GetPathValidationReason());
  // A duplicate NEW_CONNECTION_ID must not restart validation.
  connection_.OnNewConnectionIdFrame(frame);
  // Complete validation after kTestRTT; the PATH_RESPONSE echoes the probe's
  // challenge data.
  const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(30);
  clock_.AdvanceTime(kTestRTT);
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(
      99, new_writer.path_challenge_frames().back().data_buffer)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_TRUE(alt_path->validated);
  // Validate the multi-port statistics, including the degrading-path RTT.
  auto stats = connection_.multi_port_stats();
  EXPECT_EQ(1, connection_.GetStats().num_path_degrading);
  EXPECT_EQ(1, stats->num_successful_probes);
  EXPECT_EQ(1, stats->num_client_probing_attempts);
  EXPECT_EQ(1, connection_.GetStats().num_client_probing_attempts);
  EXPECT_EQ(0, stats->num_multi_port_probe_failures_when_path_degrading);
  EXPECT_EQ(kTestRTT, stats->rtt_stats.latest_rtt());
  EXPECT_EQ(kTestRTT,
            stats->rtt_stats_when_default_path_degrading.latest_rtt());
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath).Times(0);
  connection_.OnNewConnectionIdFrame(frame);
  // With no reason to keep the connection alive, the probing alarm firing
  // must not start a new probe or re-arm itself.
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive()).WillOnce(Return(false));
  connection_.GetMultiPortProbingAlarm()->Fire();
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(connection_.GetMultiPortProbingAlarm()->IsSet());
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  random_generator_.ChangeValue();
  connection_.MaybeProbeMultiPortPath();
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_TRUE(alt_path->validated);
  // Second probe also succeeds after kTestRTT.
  clock_.AdvanceTime(kTestRTT);
  QuicFrames frames2;
  frames2.push_back(QuicFrame(QuicPathResponseFrame(
      99, new_writer.path_challenge_frames().back().data_buffer)));
  ProcessFramesPacketWithAddresses(frames2, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_TRUE(alt_path->validated);
  EXPECT_EQ(1, connection_.GetStats().num_path_degrading);
  EXPECT_EQ(0, stats->num_multi_port_probe_failures_when_path_degrading);
  EXPECT_EQ(kTestRTT, stats->rtt_stats.latest_rtt());
  EXPECT_EQ(kTestRTT,
            stats->rtt_stats_when_default_path_degrading.latest_rtt());
  // Forward progress re-arms the probing alarm; probing is then a no-op
  // until the alarm fires.
  EXPECT_CALL(visitor_, OnForwardProgressMadeAfterPathDegrading());
  QuicConnectionPeer::OnForwardProgressMade(&connection_);
  EXPECT_TRUE(connection_.GetMultiPortProbingAlarm()->IsSet());
  connection_.MaybeProbeMultiPortPath();
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  // Degrade again; the alternative path survives degradation without
  // triggering migration (no kMPQM option).
  EXPECT_CALL(visitor_, OnPathDegrading());
  EXPECT_CALL(visitor_, MigrateToMultiPortPath(_)).Times(0);
  connection_.OnPathDegradingDetected();
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  connection_.GetMultiPortProbingAlarm()->Fire();
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  // Let every probe retry time out; the alternative path is then abandoned.
  for (size_t i = 0; i < QuicPathValidator::kMaxRetryTimes + 1; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
    static_cast<TestAlarmFactory::TestAlarm*>(
        QuicPathValidatorPeer::retry_timer(
            QuicConnectionPeer::path_validator(&connection_)))
        ->Fire();
  }
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_EQ(2, connection_.GetStats().num_path_degrading);
  EXPECT_EQ(1, stats->num_multi_port_probe_failures_when_path_degrading);
  EXPECT_EQ(0, stats->num_multi_port_probe_failures_when_path_not_degrading);
  EXPECT_EQ(0, connection_.GetStats().num_stateless_resets_on_alternate_path);
}
// Verifies that after kMaxNumMultiPortPaths failed multi-port path creations
// the connection stops creating new alternative paths, even when more
// NEW_CONNECTION_ID frames arrive.
TEST_P(QuicConnectionTest, TooManyMultiPortPathCreations) {
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kMPQC});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.OnPathDegradingDetected();
  auto self_address = connection_.self_address();
  const QuicSocketAddress kNewSelfAddress(self_address.host(),
                                          self_address.port() + 1);
  EXPECT_NE(kNewSelfAddress, self_address);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  // First path creation, triggered by NEW_CONNECTION_ID sequence number 1.
  {
    QuicNewConnectionIdFrame frame;
    frame.connection_id = TestConnectionId(1234);
    ASSERT_NE(frame.connection_id, connection_.connection_id());
    frame.stateless_reset_token =
        QuicUtils::GenerateStatelessResetToken(frame.connection_id);
    frame.retire_prior_to = 0u;
    frame.sequence_number = 1u;
    EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
        .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
          // make_unique already yields an rvalue; no std::move needed.
          observer->OnMultiPortPathContextAvailable(
              std::make_unique<TestQuicPathValidationContext>(
                  kNewSelfAddress, connection_.peer_address(), &new_writer));
        }));
    EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  }
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  // Exhaust all validation retries so this first path creation fails.
  for (size_t i = 0; i < QuicPathValidator::kMaxRetryTimes + 1; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
    static_cast<TestAlarmFactory::TestAlarm*>(
        QuicPathValidatorPeer::retry_timer(
            QuicConnectionPeer::path_validator(&connection_)))
        ->Fire();
  }
  auto stats = connection_.multi_port_stats();
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_EQ(1, connection_.GetStats().num_path_degrading);
  EXPECT_EQ(1, stats->num_multi_port_probe_failures_when_path_degrading);
  // Fail the remaining kMaxNumMultiPortPaths - 1 creations the same way.
  uint64_t connection_id = 1235;
  for (size_t i = 0; i < kMaxNumMultiPortPaths - 1; ++i) {
    QuicNewConnectionIdFrame frame;
    frame.connection_id = TestConnectionId(connection_id + i);
    ASSERT_NE(frame.connection_id, connection_.connection_id());
    frame.stateless_reset_token =
        QuicUtils::GenerateStatelessResetToken(frame.connection_id);
    frame.retire_prior_to = 0u;
    frame.sequence_number = i + 2;
    EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
        .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
          observer->OnMultiPortPathContextAvailable(
              std::make_unique<TestQuicPathValidationContext>(
                  kNewSelfAddress, connection_.peer_address(), &new_writer));
        }));
    EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
    EXPECT_TRUE(connection_.HasPendingPathValidation());
    EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
        &connection_, kNewSelfAddress, connection_.peer_address()));
    EXPECT_FALSE(alt_path->validated);
    for (size_t j = 0; j < QuicPathValidator::kMaxRetryTimes + 1; ++j) {
      clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
      static_cast<TestAlarmFactory::TestAlarm*>(
          QuicPathValidatorPeer::retry_timer(
              QuicConnectionPeer::path_validator(&connection_)))
          ->Fire();
    }
    EXPECT_FALSE(connection_.HasPendingPathValidation());
    EXPECT_FALSE(QuicConnectionPeer::IsAlternativePath(
        &connection_, kNewSelfAddress, connection_.peer_address()));
    EXPECT_EQ(1, connection_.GetStats().num_path_degrading);
    EXPECT_EQ(i + 2, stats->num_multi_port_probe_failures_when_path_degrading);
  }
  // With the creation limit reached, one more NEW_CONNECTION_ID must not
  // kick off another path validation.
  QuicNewConnectionIdFrame frame2;
  frame2.connection_id = TestConnectionId(1239);
  ASSERT_NE(frame2.connection_id, connection_.connection_id());
  frame2.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame2.connection_id);
  frame2.retire_prior_to = 0u;
  frame2.sequence_number = 6u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame2));
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_EQ(kMaxNumMultiPortPaths,
            stats->num_multi_port_probe_failures_when_path_degrading);
}
// Verifies that a stateless reset received on the alternative (multi-port)
// path is counted in stats but does NOT close the connection, since the
// default path is unaffected.
TEST_P(QuicConnectionTest, MultiPortPathReceivesStatelessReset) {
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  QuicConfigPeer::SetReceivedStatelessResetToken(&config,
                                                 kTestStatelessResetToken);
  config.SetClientConnectionOptions(QuicTagVector{kMPQC});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.OnPathDegradingDetected();
  auto self_address = connection_.self_address();
  const QuicSocketAddress kNewSelfAddress(self_address.host(),
                                          self_address.port() + 1);
  EXPECT_NE(kNewSelfAddress, self_address);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Start multi-port probing via a NEW_CONNECTION_ID frame.
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(1234);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 1u;
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
      .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
        // make_unique already yields an rvalue; no std::move needed.
        observer->OnMultiPortPathContextAvailable(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, connection_.peer_address(), &new_writer));
      }));
  connection_.OnNewConnectionIdFrame(frame);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  EXPECT_EQ(PathValidationReason::kMultiPort,
            QuicConnectionPeer::path_validator(&connection_)
                ->GetPathValidationReason());
  // Deliver a stateless reset on the alternative path only.
  std::unique_ptr<QuicEncryptedPacket> packet(
      QuicFramer::BuildIetfStatelessResetPacket(connection_id_,
                                                /*received_packet_length=*/100,
                                                kTestStatelessResetToken));
  std::unique_ptr<QuicReceivedPacket> received(
      ConstructReceivedPacket(*packet, QuicTime::Zero()));
  // The connection must stay open: only the alternate path was reset.
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_PEER))
      .Times(0);
  connection_.ProcessUdpPacket(kNewSelfAddress, kPeerAddress, *received);
  EXPECT_EQ(connection_.GetStats().num_client_probing_attempts, 1);
  EXPECT_EQ(connection_.GetStats().num_stateless_resets_on_alternate_path, 1);
}
// Verifies that when the peer disables active connection migration, the
// client does not create a multi-port path even though kMPQC is enabled and
// a spare connection ID is available.
TEST_P(QuicConnectionTest, MultiPortPathRespectsActiveMigrationConfig) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  QuicConfigPeer::SetReceivedStatelessResetToken(&config,
                                                 kTestStatelessResetToken);
  // Peer forbids active migration, which should veto multi-port probing.
  QuicConfigPeer::SetReceivedDisableConnectionMigration(&config);
  config.SetClientConnectionOptions(QuicTagVector{kMPQC});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, OnPathDegrading());
  connection_.OnPathDegradingDetected();
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(1234);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 1u;
  // No multi-port path context may be requested, and no validation started.
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath).Times(0);
  connection_.OnNewConnectionIdFrame(frame);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
}
// Verifies that path degradation while the multi-port alternative path is
// still unvalidated does NOT trigger migration; the in-flight validation
// simply completes afterwards.
TEST_P(QuicConnectionTest, PathDegradingWhenAltPathIsNotReady) {
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kMPQC});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  auto self_address = connection_.self_address();
  const QuicSocketAddress kNewSelfAddress(self_address.host(),
                                          self_address.port() + 1);
  EXPECT_NE(kNewSelfAddress, self_address);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  // Kick off multi-port path creation via NEW_CONNECTION_ID.
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(1234);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 1u;
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
      .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
        // make_unique already yields an rvalue; no std::move needed.
        observer->OnMultiPortPathContextAvailable(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, connection_.peer_address(), &new_writer));
      }));
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  // Degrade while the alternative path is still unvalidated: no migration.
  EXPECT_CALL(visitor_, OnPathDegrading());
  EXPECT_CALL(visitor_, MigrateToMultiPortPath(_)).Times(0);
  connection_.OnPathDegradingDetected();
  // The pending validation still completes normally after kTestRTT.
  const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(30);
  clock_.AdvanceTime(kTestRTT);
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(
      99, new_writer.path_challenge_frames().back().data_buffer)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_TRUE(alt_path->validated);
}
// Verifies that with kMPQM (multi-port migration) enabled, path degradation
// after the alternative path has been validated triggers migration onto that
// path.
TEST_P(QuicConnectionTest, PathDegradingWhenAltPathIsReadyAndNotProbing) {
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  // kMPQM enables migrating onto a validated multi-port path.
  config.SetClientConnectionOptions(QuicTagVector{kMPQC, kMPQM});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  auto self_address = connection_.self_address();
  const QuicSocketAddress kNewSelfAddress(self_address.host(),
                                          self_address.port() + 1);
  EXPECT_NE(kNewSelfAddress, self_address);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  // Create and validate the alternative path.
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(1234);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 1u;
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
      .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
        // make_unique already yields an rvalue; no std::move needed.
        observer->OnMultiPortPathContextAvailable(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, connection_.peer_address(), &new_writer));
      }));
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame))
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(30);
  clock_.AdvanceTime(kTestRTT);
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(
      99, new_writer.path_challenge_frames().back().data_buffer)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_TRUE(alt_path->validated);
  // Degrading with a validated alternative path must trigger migration.
  EXPECT_CALL(visitor_, OnPathDegrading());
  EXPECT_CALL(visitor_, OnForwardProgressMadeAfterPathDegrading()).Times(0);
  EXPECT_CALL(visitor_, MigrateToMultiPortPath(_))
      .WillOnce(Invoke([&](std::unique_ptr<QuicPathValidationContext> context) {
        EXPECT_EQ(context->self_address(), kNewSelfAddress);
        connection_.MigratePath(context->self_address(),
                                context->peer_address(), context->WriterToUse(),
                                /*owns_writer=*/false);
      }));
  connection_.OnPathDegradingDetected();
}
// Verifies that with kMPQM enabled, path degradation triggers migration onto
// the validated alternative path even while a re-probe of that path is in
// flight, and that the pending probe (and its retry timer) is cancelled.
TEST_P(QuicConnectionTest, PathDegradingWhenAltPathIsReadyAndProbing) {
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  set_perspective(Perspective::IS_CLIENT);
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kMPQC, kMPQM});
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  auto self_address = connection_.self_address();
  const QuicSocketAddress kNewSelfAddress(self_address.host(),
                                          self_address.port() + 1);
  EXPECT_NE(kNewSelfAddress, self_address);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  EXPECT_CALL(visitor_, ShouldKeepConnectionAlive())
      .WillRepeatedly(Return(true));
  // Create and validate the alternative path.
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(1234);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 1u;
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
      .WillRepeatedly(testing::WithArgs<0>([&](auto&& observer) {
        // make_unique already yields an rvalue; no std::move needed.
        observer->OnMultiPortPathContextAvailable(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, connection_.peer_address(), &new_writer));
      }));
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(30);
  clock_.AdvanceTime(kTestRTT);
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(
      99, new_writer.path_challenge_frames().back().data_buffer)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, connection_.peer_address()));
  EXPECT_TRUE(alt_path->validated);
  // Start a re-probe of the already-validated path.
  random_generator_.ChangeValue();
  connection_.GetMultiPortProbingAlarm()->Fire();
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(connection_.GetMultiPortProbingAlarm()->IsSet());
  // Degrading mid-probe still migrates onto the validated path.
  EXPECT_CALL(visitor_, OnPathDegrading());
  EXPECT_CALL(visitor_, OnForwardProgressMadeAfterPathDegrading()).Times(0);
  EXPECT_CALL(visitor_, MigrateToMultiPortPath(_))
      .WillOnce(Invoke([&](std::unique_ptr<QuicPathValidationContext> context) {
        EXPECT_EQ(context->self_address(), kNewSelfAddress);
        connection_.MigratePath(context->self_address(),
                                context->peer_address(), context->WriterToUse(),
                                /*owns_writer=*/false);
      }));
  connection_.OnPathDegradingDetected();
  // Migration cancels the in-flight probe and its retry timer.
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  auto* path_validator = QuicConnectionPeer::path_validator(&connection_);
  EXPECT_FALSE(QuicPathValidatorPeer::retry_timer(path_validator)->IsSet());
}
// Regression test: even when the stream-frame callback both sends new data
// and closes the connection, the flushed packet carries exactly one ACK.
TEST_P(QuicConnectionTest, SingleAckInPacket) {
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  // While handling the incoming stream frame, send data and then tear the
  // connection down, all within a single packet-processing pass.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillOnce(Invoke([=, this]() {
    connection_.SendStreamData3();
    connection_.CloseConnection(
        QUIC_INTERNAL_ERROR, "error",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  }));
  QuicFrames incoming_frames;
  incoming_frames.push_back(QuicFrame(frame1_));
  ProcessFramesPacketWithAddresses(incoming_frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  // Exactly one ACK frame must have been written out.
  ASSERT_FALSE(writer_->ack_frames().empty());
  EXPECT_EQ(1u, writer_->ack_frames().size());
}
// A 0-RTT packet that arrives after 1-RTT packets is still decryptable while
// the 0-RTT keys are retained; after the keys are discarded it is dropped and
// only bumps a stats counter.
TEST_P(QuicConnectionTest,
       ServerReceivedZeroRttPacketAfterOneRttPacketWithRetainedKey) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  notifier_.NeuterUnencryptedData();
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  // Receiving a 1-RTT packet arms the discard-0-RTT-keys alarm.
  ProcessDataPacketAtLevel(4, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.GetDiscardZeroRttDecryptionKeysAlarm()->IsSet());
  // While the keys are retained, a late 0-RTT packet is still processed.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(2, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  EXPECT_EQ(
      0u,
      connection_.GetStats()
          .num_tls_server_zero_rtt_packets_received_after_discarding_decrypter);
  // Discard the 0-RTT keys: subsequent 0-RTT packets are dropped and counted.
  connection_.GetDiscardZeroRttDecryptionKeysAlarm()->Fire();
  EXPECT_FALSE(connection_.GetDiscardZeroRttDecryptionKeysAlarm()->IsSet());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(0);
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  EXPECT_EQ(
      1u,
      connection_.GetStats()
          .num_tls_server_zero_rtt_packets_received_after_discarding_decrypter);
  // 1-RTT packets keep working and the alarm is not re-armed.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(5, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.GetDiscardZeroRttDecryptionKeysAlarm()->IsSet());
}
// A NEW_TOKEN frame is ack-eliciting: receiving one must queue an ACK.
TEST_P(QuicConnectionTest, NewTokenFrameInstigateAcks) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(visitor_, OnSuccessfulVersionNegotiation(_));
  // Ownership of the heap-allocated frame is transferred to QuicFrame.
  QuicNewTokenFrame* token_frame = new QuicNewTokenFrame();
  EXPECT_CALL(visitor_, OnNewTokenReceived(_));
  ProcessFramePacket(QuicFrame(token_frame));
  EXPECT_TRUE(connection_.HasPendingAcks());
}
// A server receiving a NEW_TOKEN frame treats it as a connection error: the
// visitor never sees the token and the connection closes.
TEST_P(QuicConnectionTest, ServerClosesConnectionOnNewTokenFrame) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  // Ownership of the heap-allocated frame is transferred to QuicFrame.
  QuicNewTokenFrame* token_frame = new QuicNewTokenFrame();
  EXPECT_CALL(visitor_, OnNewTokenReceived(_)).Times(0);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  ProcessFramePacket(QuicFrame(token_frame));
  EXPECT_FALSE(connection_.connected());
}
// An address token installed before the handshake is replaced once a Retry
// packet is handled.
TEST_P(QuicConnectionTest, OverrideRetryTokenWithRetryPacket) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  const std::string kAddressToken = "TestAddressToken";
  connection_.SetSourceAddressTokenToSend(kAddressToken);
  // The packet creator initially carries the address token...
  EXPECT_EQ(QuicPacketCreatorPeer::GetRetryToken(
                QuicConnectionPeer::GetPacketCreator(&connection_)),
            kAddressToken);
  // ...and retry handling installs the retry token in its place.
  TestClientRetryHandling(false, false, false, false, false);
}
// Inverse of the previous case: once a retry token has been received, a later
// address token must not clobber it.
TEST_P(QuicConnectionTest, DonotOverrideRetryTokenWithAddressToken) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  TestClientRetryHandling(false, false, false, false, false);
  // Snapshot the token installed by retry handling.
  const std::string expected_token = QuicPacketCreatorPeer::GetRetryToken(
      QuicConnectionPeer::GetPacketCreator(&connection_));
  const std::string kAddressToken = "TestAddressToken";
  connection_.SetSourceAddressTokenToSend(kAddressToken);
  // The retry token must survive the attempted overwrite.
  EXPECT_EQ(QuicPacketCreatorPeer::GetRetryToken(
                QuicConnectionPeer::GetPacketCreator(&connection_)),
            expected_token);
}
// A 0-RTT packet whose packet number is higher than an already-received 1-RTT
// packet is out of order per the TLS mapping and must close the connection.
TEST_P(QuicConnectionTest,
       ServerReceivedZeroRttWithHigherPacketNumberThanOneRtt) {
  if (!connection_.version().UsesTls()) {
    return;
  }
  // Negotiate a config so the connection is able to send CONNECTION_CLOSE.
  std::string error_details;
  TransportParameters params;
  QuicConfig config;
  EXPECT_THAT(config.ProcessTransportParameters(
                  params, false, &error_details),
              IsQuicNoError());
  QuicConfigPeer::SetNegotiated(&config, true);
  QuicConfigPeer::SetReceivedOriginalConnectionId(&config,
                                                  connection_.connection_id());
  QuicConfigPeer::SetReceivedInitialSourceConnectionId(
      &config, connection_.connection_id());
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  set_perspective(Perspective::IS_SERVER);
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  notifier_.NeuterUnencryptedData();
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_COMPLETE));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  // 1-RTT packet 2 arrives first...
  ProcessDataPacketAtLevel(2, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.GetDiscardZeroRttDecryptionKeysAlarm()->IsSet());
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  // ...then 0-RTT packet 3 with a higher number closes the connection.
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  EXPECT_FALSE(connection_.connected());
  TestConnectionCloseQuicErrorCode(
      QUIC_INVALID_0RTT_PACKET_NUMBER_OUT_OF_ORDER);
}
// A peer address change before the handshake is confirmed must not start a
// migration; the server closes the connection instead.
TEST_P(QuicConnectionTest, PeerMigrateBeforeHandshakeConfirm) {
  if (!VersionHasIetfQuicFrames(version().transport_version)) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  EXPECT_EQ(Perspective::IS_SERVER, connection_.perspective());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_START));
  // Clear the peer addresses so the first packet establishes them.
  QuicConnectionPeer::SetDirectPeerAddress(&connection_, QuicSocketAddress());
  QuicConnectionPeer::SetEffectivePeerAddress(&connection_,
                                              QuicSocketAddress());
  EXPECT_FALSE(connection_.effective_peer_address().IsInitialized());
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kSelfAddress, kPeerAddress,
                                  ENCRYPTION_INITIAL);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  // A packet from a different peer address mid-handshake must close the
  // connection rather than migrate; no migration or congestion callbacks.
  QuicAckFrame frame = InitAckFrame(1);
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_,
              OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF));
  EXPECT_CALL(visitor_, OnConnectionMigration(PORT_CHANGE)).Times(0u);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(0);
  ProcessFramePacketWithAddresses(QuicFrame(&frame), kSelfAddress,
                                  kNewPeerAddress, ENCRYPTION_INITIAL);
  EXPECT_FALSE(connection_.connected());
}
// With min ack delay negotiated, processing a data packet queues an ACK; a
// subsequent ping flush delegates the ACK_FREQUENCY update to the visitor,
// which forwards it to the session notifier.
// NOTE: the original line read "Invoke(¬ifier_, ..." — an HTML-entity
// corruption ("&not" rendered as U+00AC) of "Invoke(&notifier_, ...". The
// corrupted form does not compile; restored here.
TEST_P(QuicConnectionTest, TryToFlushAckWithAckQueued) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  SetQuicReloadableFlag(quic_can_send_ack_frequency, true);
  set_perspective(Perspective::IS_SERVER);
  QuicConfig config;
  QuicConfigPeer::SetReceivedMinAckDelayMs(&config, 1);
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.OnHandshakeComplete();
  QuicPacketCreatorPeer::SetPacketNumber(creator_, 200);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  // Queue an ACK by delivering a data packet.
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  // SendAckFrequency is routed to the notifier's WriteOrBufferAckFrequency.
  EXPECT_CALL(visitor_, SendAckFrequency(_))
      .WillOnce(Invoke(&notifier_,
                       &SimpleSessionNotifier::WriteOrBufferAckFrequency));
  QuicConnectionPeer::SendPing(&connection_);
}
// Server-side proactive validation: a PATH_CHALLENGE from a new peer address
// validates the alternative path before the peer actually migrates, so the
// subsequent IP change swaps the default/alternative paths and their
// connection ids without another round of validation.
// (Also removes a stray empty statement ";" that followed the first mock
// expectation in the original.)
TEST_P(QuicConnectionTest, PathChallengeBeforePeerIpAddressChangeAtServer) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  SetClientConnectionId(TestConnectionId(1));
  connection_.CreateConnectionIdManager();
  // Issue a second pair of connection ids (client_cid1/server_cid1) that the
  // alternative path will use.
  QuicConnectionId server_cid0 = connection_.connection_id();
  QuicConnectionId client_cid0 = connection_.client_connection_id();
  QuicConnectionId client_cid1 = TestConnectionId(2);
  QuicConnectionId server_cid1;
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        server_cid1 = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.MaybeSendConnectionIdToClient();
  QuicNewConnectionIdFrame new_cid_frame;
  new_cid_frame.connection_id = client_cid1;
  new_cid_frame.sequence_number = 1u;
  new_cid_frame.retire_prior_to = 0u;
  connection_.OnNewConnectionIdFrame(new_cid_frame);
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(), client_cid0);
  ASSERT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  peer_creator_.SetServerConnectionId(server_cid1);
  // PATH_CHALLENGE arrives from a new (IPv4) peer address: the server sends a
  // PATH_RESPONSE plus its own PATH_CHALLENGE on that path.
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  QuicFrames frames1;
  frames1.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  QuicPathFrameBuffer payload;
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .Times(AtLeast(1))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
        EXPECT_EQ(kPeerAddress, connection_.peer_address());
        EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
        EXPECT_FALSE(writer_->path_response_frames().empty());
        EXPECT_FALSE(writer_->path_challenge_frames().empty());
        payload = writer_->path_challenge_frames().front().data_buffer;
      }))
      .WillRepeatedly(DoDefault());
  ProcessFramesPacketWithAddresses(frames1, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  // The peer has not migrated yet; the alternative path carries the new cids.
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  const auto* alternative_path =
      QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_EQ(default_path->client_connection_id, client_cid0);
  EXPECT_EQ(default_path->server_connection_id, server_cid0);
  EXPECT_EQ(alternative_path->client_connection_id, client_cid1);
  EXPECT_EQ(alternative_path->server_connection_id, server_cid1);
  EXPECT_EQ(packet_creator->GetDestinationConnectionId(), client_cid0);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  // Now a non-probing packet from the new address triggers the migration; no
  // reverse path validation probe is sent because the path was pre-validated.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillOnce(Invoke([=, this]() {
    EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  }));
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .Times(0);
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  EXPECT_TRUE(writer_->path_challenge_frames().empty());
  // Migration installed a fresh send algorithm; replace it with a new mock so
  // later expectations can be set on it.
  EXPECT_NE(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  send_algorithm_ = new StrictMock<MockSendAlgorithm>();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
      .WillRepeatedly(Return(kDefaultTCPMSS));
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
      .Times(AnyNumber())
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_)).Times(AnyNumber());
  connection_.SetSendAlgorithm(send_algorithm_);
  // Default and alternative paths (and their cids) have been swapped.
  EXPECT_EQ(default_path->client_connection_id, client_cid1);
  EXPECT_EQ(default_path->server_connection_id, server_cid1);
  EXPECT_EQ(alternative_path->client_connection_id, client_cid0);
  EXPECT_EQ(alternative_path->server_connection_id, server_cid0);
  EXPECT_EQ(packet_creator->GetDestinationConnectionId(), client_cid1);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid1);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
            connection_.active_effective_peer_migration_type());
  EXPECT_EQ(1u, connection_.GetStats()
                    .num_peer_migration_to_proactively_validated_address);
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // The PATH_RESPONSE to the server's own challenge completes validation.
  QuicFrames frames3;
  frames3.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  EXPECT_CALL(visitor_, MaybeSendAddressToken());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(testing::AtLeast(1u));
  ProcessFramesPacketWithAddresses(frames3, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
  EXPECT_TRUE(alternative_path->client_connection_id.IsEmpty());
  EXPECT_TRUE(alternative_path->server_connection_id.IsEmpty());
  EXPECT_FALSE(alternative_path->stateless_reset_token.has_value());
  // The cids no longer in use get retired via the dedicated alarm.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  EXPECT_CALL(*send_algorithm_, PacingRate(_))
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  connection_.SendCryptoDataWithString(std::string(1200, 'a'), 0);
  EXPECT_EQ(1u, connection_.GetStats().num_validated_peer_migration);
  EXPECT_EQ(1u, connection_.num_unlinkable_client_migration());
}
// Server-side: path validation of the new address completes before the peer's
// non-probing packet arrives, so the later address change is treated as an
// already-validated migration (no NO_RETRANSMITTABLE_DATA probe is re-sent).
TEST_P(QuicConnectionTest,
       PathValidationSucceedsBeforePeerIpAddressChangeAtServer) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  connection_.CreateConnectionIdManager();
  // Issue a second server connection id for the alternative path.
  QuicConnectionId server_cid0 = connection_.connection_id();
  QuicConnectionId server_cid1;
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        server_cid1 = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.MaybeSendConnectionIdToClient();
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  peer_creator_.SetServerConnectionId(server_cid1);
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          23456);
  QuicPathFrameBuffer payload;
  // First write on the new path: PATH_RESPONSE plus the server's own
  // PATH_CHALLENGE. Later writes must not carry further challenges.
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
        EXPECT_EQ(kPeerAddress, connection_.peer_address());
        EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
        EXPECT_FALSE(writer_->path_response_frames().empty());
        EXPECT_FALSE(writer_->path_challenge_frames().empty());
        payload = writer_->path_challenge_frames().front().data_buffer;
      }))
      .WillRepeatedly(Invoke([&]() {
        EXPECT_TRUE(writer_->path_challenge_frames().empty());
      }));
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  QuicFrames frames1;
  frames1.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  ProcessFramesPacketWithAddresses(frames1, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  const auto* alternative_path =
      QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_EQ(default_path->server_connection_id, server_cid0);
  EXPECT_EQ(alternative_path->server_connection_id, server_cid1);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  // The PATH_RESPONSE completes validation of the alternative path.
  QuicFrames frames3;
  frames3.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  ProcessFramesPacketWithAddresses(frames3, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .Times(0);
  // The peer then migrates from yet another port of the validated address.
  const QuicSocketAddress kNewerPeerAddress(QuicIpAddress::Loopback4(),
                                            34567);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillOnce(Invoke([=, this]() {
    EXPECT_EQ(kNewerPeerAddress, connection_.peer_address());
  }));
  EXPECT_CALL(visitor_, MaybeSendAddressToken());
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewerPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewerPeerAddress, connection_.peer_address())(
      );
  EXPECT_EQ(kNewerPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(NO_CHANGE, connection_.active_effective_peer_migration_type());
  EXPECT_EQ(kNewerPeerAddress, writer_->last_write_peer_address());
  EXPECT_EQ(1u, connection_.GetStats()
                    .num_peer_migration_to_proactively_validated_address);
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  // Migration swapped in a new send algorithm and promoted server_cid1.
  EXPECT_NE(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  EXPECT_EQ(default_path->server_connection_id, server_cid1);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid1);
  EXPECT_TRUE(alternative_path->server_connection_id.IsEmpty());
  EXPECT_FALSE(alternative_path->stateless_reset_token.has_value());
  // Install a fresh mock send algorithm for the remaining expectations.
  send_algorithm_ = new StrictMock<MockSendAlgorithm>();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
      .WillRepeatedly(Return(kDefaultTCPMSS));
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
      .Times(AnyNumber())
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_)).Times(AnyNumber());
  connection_.SetSendAlgorithm(send_algorithm_);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _));
  connection_.SendCryptoDataWithString(std::string(1200, 'a'), 0);
  EXPECT_EQ(1u, connection_.GetStats().num_validated_peer_migration);
}
// While probing an alternative path, retransmittable (non-probing) frames —
// here an ACK that needs a retransmittable companion — must go out on the
// default path, never bundled into the probe on the alternative path.
TEST_P(QuicConnectionTest, NoNonProbingFrameOnAlternativePath) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  SetClientConnectionId(TestConnectionId(1));
  connection_.CreateConnectionIdManager();
  // Issue a second pair of connection ids for the alternative path.
  QuicConnectionId server_cid0 = connection_.connection_id();
  QuicConnectionId client_cid0 = connection_.client_connection_id();
  QuicConnectionId client_cid1 = TestConnectionId(2);
  QuicConnectionId server_cid1;
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke([&](const QuicConnectionId& cid) {
        server_cid1 = cid;
        return true;
      }));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.MaybeSendConnectionIdToClient();
  QuicNewConnectionIdFrame new_cid_frame;
  new_cid_frame.connection_id = client_cid1;
  new_cid_frame.sequence_number = 1u;
  new_cid_frame.retire_prior_to = 0u;
  connection_.OnNewConnectionIdFrame(new_cid_frame);
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(), client_cid0);
  ASSERT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  peer_creator_.SetServerConnectionId(server_cid1);
  // A PATH_CHALLENGE from a new address starts reverse path validation.
  const QuicSocketAddress kNewPeerAddress =
      QuicSocketAddress(QuicIpAddress::Loopback4(), 23456);
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  QuicFrames frames1;
  frames1.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .Times(AtLeast(1))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
        EXPECT_EQ(kPeerAddress, connection_.peer_address());
        EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
        EXPECT_FALSE(writer_->path_response_frames().empty());
        EXPECT_FALSE(writer_->path_challenge_frames().empty());
      }))
      .WillRepeatedly(DoDefault());
  ProcessFramesPacketWithAddresses(frames1, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  const auto* alternative_path =
      QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_EQ(default_path->client_connection_id, client_cid0);
  EXPECT_EQ(default_path->server_connection_id, server_cid0);
  EXPECT_EQ(alternative_path->client_connection_id, client_cid1);
  EXPECT_EQ(alternative_path->server_connection_id, server_cid1);
  EXPECT_EQ(packet_creator->GetDestinationConnectionId(), client_cid0);
  EXPECT_EQ(packet_creator->GetSourceConnectionId(), server_cid0);
  // Keep receiving data on the default path so an ACK gets queued.
  peer_creator_.SetServerConnectionId(server_cid0);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillRepeatedly(Invoke([=, this]() {
    EXPECT_EQ(kPeerAddress, connection_.peer_address());
  }));
  for (size_t i = 3; i <= 39; ++i) {
    ProcessDataPacket(i);
  }
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kPeerAddress, connection_.effective_peer_address());
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Advance past both the ack deadline and the probe retry deadline so both
  // fire back to back.
  QuicTime ack_time = connection_.GetAckAlarm()->deadline();
  QuicTime path_validation_retry_time =
      connection_.GetRetryTimeout(kNewPeerAddress, writer_.get());
  clock_.AdvanceTime(std::max(ack_time, path_validation_retry_time) -
                     clock_.ApproximateNow());
  EXPECT_CALL(visitor_, OnAckNeedsRetransmittableFrame())
      .WillOnce(Invoke([this]() {
        connection_.SendControlFrame(QuicFrame(QuicWindowUpdateFrame(1, 0, 0)));
      }));
  // First write: probe (challenge only, no ACK) to the alternative path.
  // Second write: ACK + WINDOW_UPDATE to the default path.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
        EXPECT_FALSE(writer_->path_challenge_frames().empty());
        EXPECT_TRUE(writer_->ack_frames().empty());
      }))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
        EXPECT_FALSE(writer_->ack_frames().empty());
        EXPECT_FALSE(writer_->window_update_frames().empty());
      }));
  static_cast<TestAlarmFactory::TestAlarm*>(
      QuicPathValidatorPeer::retry_timer(
          QuicConnectionPeer::path_validator(&connection_)))
      ->Fire();
}
// If the visitor refuses to reserve a freshly generated connection id, no
// NEW_CONNECTION_ID frame may be sent to the client.
// (Removes locals server_cid0, client_cid1 and server_cid1 from the original,
// which were declared but never used in this test.)
TEST_P(QuicConnectionTest, DoNotIssueNewCidIfVisitorSaysNo) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  // An id may still be generated; the reservation is vetoed below.
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_)).WillOnce(Return(false));
  EXPECT_CALL(visitor_, SendNewConnectionId(_)).Times(0);
  connection_.MaybeSendConnectionIdToClient();
}
// After the peer's IP change starts reverse path validation, a PATH_CHALLENGE
// arriving from yet another address gets only a PATH_RESPONSE — it neither
// starts a second validation nor cancels the one in progress.
TEST_P(QuicConnectionTest,
       ProbedOnAnotherPathAfterPeerIpAddressChangeAtServer) {
  PathProbeTestInit(Perspective::IS_SERVER);
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  // A non-probing packet from a new address triggers migration plus reverse
  // path validation of that address.
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          23456);
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE)).Times(1);
  EXPECT_CALL(*send_algorithm_,
              OnPacketSent(_, _, _, _, NO_RETRANSMITTABLE_DATA))
      .Times(0);
  EXPECT_CALL(visitor_, OnStreamFrame(_)).WillOnce(Invoke([=, this]() {
    EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  }));
  QuicFrames frames2;
  frames2.push_back(QuicFrame(frame2_));
  ProcessFramesPacketWithAddresses(frames2, kSelfAddress, kNewPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePathValidated(&connection_));
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  // Migration installed a new send algorithm; swap in a fresh mock.
  send_algorithm_ = new StrictMock<MockSendAlgorithm>();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
      .WillRepeatedly(Return(kDefaultTCPMSS));
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
      .Times(AnyNumber())
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_)).Times(AnyNumber());
  connection_.SetSendAlgorithm(send_algorithm_);
  // A PATH_CHALLENGE from a third address: only a PATH_RESPONSE goes back,
  // with no new PATH_CHALLENGE of our own.
  const QuicSocketAddress kNewerPeerAddress(QuicIpAddress::Loopback4(),
                                            34567);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(kNewerPeerAddress, writer_->last_write_peer_address());
        EXPECT_FALSE(writer_->path_response_frames().empty());
        EXPECT_TRUE(writer_->path_challenge_frames().empty());
      }));
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  QuicFrames frames1;
  frames1.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  ProcessFramesPacketWithAddresses(frames1, kSelfAddress, kNewerPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  // Peer address and the ongoing validation are unaffected.
  EXPECT_EQ(kNewPeerAddress, connection_.effective_peer_address());
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePathValidated(&connection_));
  EXPECT_TRUE(connection_.HasPendingPathValidation());
}
// Without a spare server connection id for the new path, starting path
// validation from a new self address must fail immediately.
TEST_P(QuicConnectionTest,
       PathValidationFailedOnClientDueToLackOfServerConnectionId) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  // Initialize probing without ever receiving a peer-issued connection id.
  PathProbeTestInit(Perspective::IS_CLIENT,
                    false);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Loopback4(), 34567);
  bool validation_succeeded;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), writer_.get()),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(),
          &validation_succeeded),
      PathValidationReason::kReasonUnknown);
  EXPECT_FALSE(validation_succeeded);
}
// After the first migration consumes the client's only spare connection id
// pair, a second path validation attempt must fail for lack of a client
// connection id even though a new server id (frame2) has been received.
TEST_P(QuicConnectionTest,
       PathValidationFailedOnClientDueToLackOfClientConnectionIdTheSecondTime) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT,
                    false);
  SetClientConnectionId(TestConnectionId(1));
  QuicConnectionId server_cid0 = connection_.connection_id();
  QuicConnectionId server_cid1 = TestConnectionId(2);
  QuicConnectionId server_cid2 = TestConnectionId(4);
  QuicConnectionId client_cid1;
  // Receive one spare server connection id.
  QuicNewConnectionIdFrame frame1;
  frame1.connection_id = server_cid1;
  frame1.sequence_number = 1u;
  frame1.retire_prior_to = 0u;
  frame1.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame1.connection_id);
  connection_.OnNewConnectionIdFrame(frame1);
  const auto* packet_creator =
      QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(), server_cid0);
  // The client issues its own new id (captured as client_cid1) when probing.
  EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
      .WillOnce(Return(TestConnectionId(456)));
  EXPECT_CALL(visitor_, SendNewConnectionId(_))
      .WillOnce(Invoke([&](const QuicNewConnectionIdFrame& frame) {
        client_cid1 = frame.connection_id;
      }));
  const QuicSocketAddress kSelfAddress1(QuicIpAddress::Any4(), 12345);
  ASSERT_NE(kSelfAddress1, connection_.self_address());
  bool success1;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kSelfAddress1, connection_.peer_address(), writer_.get()),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kSelfAddress1, connection_.peer_address(), &success1),
      PathValidationReason::kReasonUnknown);
  // Migrate onto the probed path; this consumes the spare id pair.
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  ASSERT_TRUE(connection_.MigratePath(kSelfAddress1, connection_.peer_address(),
                                      &new_writer, false));
  QuicConnectionPeer::RetirePeerIssuedConnectionIdsNoLongerOnPath(&connection_);
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  EXPECT_EQ(default_path->client_connection_id, client_cid1);
  EXPECT_EQ(default_path->server_connection_id, server_cid1);
  EXPECT_EQ(default_path->stateless_reset_token, frame1.stateless_reset_token);
  const auto* alternative_path =
      QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_TRUE(alternative_path->client_connection_id.IsEmpty());
  EXPECT_TRUE(alternative_path->server_connection_id.IsEmpty());
  EXPECT_FALSE(alternative_path->stateless_reset_token.has_value());
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(), server_cid1);
  // The id left behind on the old path gets retired.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // A new server id arrives, but the client has no spare id of its own...
  QuicNewConnectionIdFrame frame2;
  frame2.connection_id = server_cid2;
  frame2.sequence_number = 2u;
  frame2.retire_prior_to = 1u;
  frame2.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame2.connection_id);
  connection_.OnNewConnectionIdFrame(frame2);
  const QuicSocketAddress kSelfAddress2(QuicIpAddress::Loopback4(),
                                        45678);
  bool success2;
  // ...so the second validation attempt fails immediately.
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kSelfAddress2, connection_.peer_address(), writer_.get()),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kSelfAddress2, connection_.peer_address(), &success2),
      PathValidationReason::kReasonUnknown);
  EXPECT_FALSE(success2);
}
// When path validation is cancelled, the server connection id reserved for
// the alternative path is released and retired via RETIRE_CONNECTION_ID.
TEST_P(QuicConnectionTest, ServerConnectionIdRetiredUponPathValidationFailure) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT);
  // Receive a spare server connection id (sequence number 1).
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(2);
  frame.sequence_number = 1u;
  frame.retire_prior_to = 0u;
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  connection_.OnNewConnectionIdFrame(frame);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Loopback4(),
                                          34567);
  bool success;
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress, connection_.peer_address(), writer_.get()),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress, connection_.peer_address(), &success),
      PathValidationReason::kReasonUnknown);
  // Abort the validation; the alternative path state must be cleared.
  auto* path_validator = QuicConnectionPeer::path_validator(&connection_);
  path_validator->CancelPathValidation();
  QuicConnectionPeer::RetirePeerIssuedConnectionIdsNoLongerOnPath(&connection_);
  EXPECT_FALSE(success);
  const auto* alternative_path =
      QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_TRUE(alternative_path->client_connection_id.IsEmpty());
  EXPECT_TRUE(alternative_path->server_connection_id.IsEmpty());
  EXPECT_FALSE(alternative_path->stateless_reset_token.has_value());
  // The unused id (sequence number 1) is retired through the alarm.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(1u));
  retire_peer_issued_cid_alarm->Fire();
}
// Direct path migration must be rejected when the client holds no unused
// server connection ID to dedicate to the new path.
TEST_P(QuicConnectionTest,
       MigratePathDirectlyFailedDueToLackOfServerConnectionId) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  // Initialize without delivering any NEW_CONNECTION_ID frame from the peer.
  PathProbeTestInit(Perspective::IS_CLIENT, false);
  const QuicSocketAddress kNewSelfAddress(QuicIpAddress::Any4(), 12345);
  ASSERT_NE(kNewSelfAddress, connection_.self_address());
  TestPacketWriter alternate_writer(version(), &clock_, Perspective::IS_CLIENT);
  // With no spare server CID available, MigratePath must fail.
  ASSERT_FALSE(connection_.MigratePath(kNewSelfAddress,
                                       connection_.peer_address(),
                                       &alternate_writer, false));
}
// A first migration succeeds once both sides have issued spare CIDs, but a
// second migration must fail while the client has no further unused client
// connection ID to hand to the peer.
TEST_P(QuicConnectionTest,
       MigratePathDirectlyFailedDueToLackOfClientConnectionIdTheSecondTime) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_CLIENT,
                    false);
  SetClientConnectionId(TestConnectionId(1));
  // Peer supplies a spare server connection ID for the first migration.
  QuicNewConnectionIdFrame frame1;
  frame1.connection_id = TestConnectionId(2);
  frame1.sequence_number = 1u;
  frame1.retire_prior_to = 0u;
  frame1.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame1.connection_id);
  connection_.OnNewConnectionIdFrame(frame1);
  // Capture the fresh client CID the connection issues for the new path.
  QuicConnectionId new_client_connection_id;
  EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
      .WillOnce(Return(TestConnectionId(456)));
  EXPECT_CALL(visitor_, SendNewConnectionId(_))
      .WillOnce(Invoke([&](const QuicNewConnectionIdFrame& frame) {
        new_client_connection_id = frame.connection_id;
      }));
  const QuicSocketAddress kSelfAddress1(QuicIpAddress::Any4(), 12345);
  ASSERT_NE(kSelfAddress1, connection_.self_address());
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // First migration: both CIDs are available, so it should succeed.
  ASSERT_TRUE(connection_.MigratePath(kSelfAddress1, connection_.peer_address(),
                                      &new_writer,
                                      false));
  QuicConnectionPeer::RetirePeerIssuedConnectionIdsNoLongerOnPath(&connection_);
  // The default path now carries the newly issued/received CID pair.
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  EXPECT_EQ(default_path->client_connection_id, new_client_connection_id);
  EXPECT_EQ(default_path->server_connection_id, frame1.connection_id);
  EXPECT_EQ(default_path->stateless_reset_token, frame1.stateless_reset_token);
  // The server CID of the abandoned path (sequence 0) gets retired.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // Peer supplies another server CID, but the client has no spare client
  // CID left for a second migration.
  QuicNewConnectionIdFrame frame2;
  frame2.connection_id = TestConnectionId(4);
  frame2.sequence_number = 2u;
  frame2.retire_prior_to = 1u;
  frame2.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame2.connection_id);
  connection_.OnNewConnectionIdFrame(frame2);
  const QuicSocketAddress kSelfAddress2(QuicIpAddress::Loopback4(),
                                        45678);
  auto new_writer2 = std::make_unique<TestPacketWriter>(version(), &clock_,
                                                        Perspective::IS_CLIENT);
  // Second migration must fail: no unused client connection ID remains.
  ASSERT_FALSE(connection_.MigratePath(
      kSelfAddress2, connection_.peer_address(), new_writer2.release(),
      true));
}
// A peer that negotiated an empty (zero-length) connection ID must not send
// NEW_CONNECTION_ID frames; doing so is a protocol violation that closes the
// connection.
TEST_P(QuicConnectionTest,
       CloseConnectionAfterReceiveNewConnectionIdFromPeerUsingEmptyCID) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  // The client's connection ID is empty, so it cannot issue new CIDs.
  ASSERT_TRUE(connection_.client_connection_id().IsEmpty());
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  QuicNewConnectionIdFrame frame;
  frame.sequence_number = 1u;
  frame.connection_id = TestConnectionId(1);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  EXPECT_FALSE(connection_.OnNewConnectionIdFrame(frame));
  EXPECT_FALSE(connection_.connected());
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(IETF_QUIC_PROTOCOL_VIOLATION));
}
// A NEW_CONNECTION_ID frame that re-advertises the connection ID already in
// use is a protocol violation and must tear down the connection.
TEST_P(QuicConnectionTest, NewConnectionIdFrameResultsInError) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  ASSERT_FALSE(connection_.connection_id().IsEmpty());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // Build a frame that duplicates the connection ID currently in use.
  QuicNewConnectionIdFrame duplicate_cid_frame;
  duplicate_cid_frame.sequence_number = 1u;
  duplicate_cid_frame.retire_prior_to = 0u;
  duplicate_cid_frame.connection_id = connection_id_;
  duplicate_cid_frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(duplicate_cid_frame.connection_id);
  EXPECT_FALSE(connection_.OnNewConnectionIdFrame(duplicate_cid_frame));
  EXPECT_FALSE(connection_.connected());
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(IETF_QUIC_PROTOCOL_VIOLATION));
}
// When the server raises retire_prior_to, the client must retire the old
// server connection ID and switch the default path (and packet creator) to
// the newly issued one.
TEST_P(QuicConnectionTest,
       ClientRetirePeerIssuedConnectionIdTriggeredByNewConnectionIdFrame) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  connection_.CreateConnectionIdManager();
  // First NEW_CONNECTION_ID with retire_prior_to 0: nothing to retire yet.
  QuicNewConnectionIdFrame frame;
  frame.sequence_number = 1u;
  frame.connection_id = TestConnectionId(1);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_FALSE(retire_peer_issued_cid_alarm->IsSet());
  // Second frame retires everything before sequence 1, including the
  // original connection ID (sequence 0).
  frame.sequence_number = 2u;
  frame.connection_id = TestConnectionId(2);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 1u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  // The switch happens only when the alarm fires, not on frame receipt.
  EXPECT_EQ(connection_.connection_id(), connection_id_);
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  EXPECT_EQ(connection_.connection_id(), TestConnectionId(2));
  EXPECT_EQ(connection_.packet_creator().GetDestinationConnectionId(),
            TestConnectionId(2));
}
// Server-side counterpart: when the client raises retire_prior_to, the
// server retires the old client connection ID and switches to the new one.
TEST_P(QuicConnectionTest,
       ServerRetirePeerIssuedConnectionIdTriggeredByNewConnectionIdFrame) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  SetClientConnectionId(TestConnectionId(0));
  // First NEW_CONNECTION_ID with retire_prior_to 0: nothing to retire yet.
  QuicNewConnectionIdFrame frame;
  frame.sequence_number = 1u;
  frame.connection_id = TestConnectionId(1);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_FALSE(retire_peer_issued_cid_alarm->IsSet());
  // Second frame retires sequence 0 (the original client CID).
  frame.sequence_number = 2u;
  frame.connection_id = TestConnectionId(2);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 1u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  // The switch is deferred until the alarm fires.
  EXPECT_EQ(connection_.client_connection_id(), TestConnectionId(0));
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  EXPECT_EQ(connection_.client_connection_id(), TestConnectionId(2));
  EXPECT_EQ(connection_.packet_creator().GetDestinationConnectionId(),
            TestConnectionId(2));
}
// Retiring a client connection ID must replace it on BOTH the default path
// and any alternative path currently tracking that CID.
TEST_P(
    QuicConnectionTest,
    ReplacePeerIssuedConnectionIdOnBothPathsTriggeredByNewConnectionIdFrame) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  PathProbeTestInit(Perspective::IS_SERVER);
  SetClientConnectionId(TestConnectionId(0));
  // Receive a path probe from a new address so an alternative path exists
  // carrying the client's CID (sequence 0).
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  QuicIpAddress new_host;
  new_host.FromString("1.1.1.1");
  ProcessReceivedPacket(kSelfAddress,
                        QuicSocketAddress(new_host, 23456), *received);
  EXPECT_EQ(
      TestConnectionId(0),
      QuicConnectionPeer::GetClientConnectionIdOnAlternativePath(&connection_));
  // First NEW_CONNECTION_ID: no retirement yet.
  QuicNewConnectionIdFrame frame;
  frame.sequence_number = 1u;
  frame.connection_id = TestConnectionId(1);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_FALSE(retire_peer_issued_cid_alarm->IsSet());
  // Second frame retires sequence 0, which both paths currently use.
  frame.sequence_number = 2u;
  frame.connection_id = TestConnectionId(2);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 1u;
  EXPECT_TRUE(connection_.OnNewConnectionIdFrame(frame));
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_EQ(connection_.client_connection_id(), TestConnectionId(0));
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // Default path and packet creator switch to the replacement CID...
  EXPECT_EQ(connection_.client_connection_id(), TestConnectionId(2));
  EXPECT_EQ(connection_.packet_creator().GetDestinationConnectionId(),
            TestConnectionId(2));
  // ...and so does the alternative path.
  EXPECT_EQ(
      TestConnectionId(2),
      QuicConnectionPeer::GetClientConnectionIdOnAlternativePath(&connection_));
}
// A RETIRE_CONNECTION_ID frame that arrives before the server has issued any
// connection ID is a protocol violation and must close the connection.
TEST_P(QuicConnectionTest,
       CloseConnectionAfterReceiveRetireConnectionIdWhenNoCIDIssued) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  // Expect the connection to close itself with a saved close frame.
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  QuicRetireConnectionIdFrame retire_frame;
  retire_frame.sequence_number = 1u;
  EXPECT_FALSE(connection_.OnRetireConnectionIdFrame(retire_frame));
  EXPECT_FALSE(connection_.connected());
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(IETF_QUIC_PROTOCOL_VIOLATION));
}
// Retiring a sequence number the server never issued (here 2, when only 0
// and 1 exist) is a protocol violation and must close the connection.
TEST_P(QuicConnectionTest, RetireConnectionIdFrameResultsInError) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  connection_.CreateConnectionIdManager();
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  // Issue exactly one additional CID (sequence 1) to the client.
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_)).WillOnce(Return(true));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.MaybeSendConnectionIdToClient();
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, ConnectionCloseSource::FROM_SELF))
      .WillOnce(Invoke(this, &QuicConnectionTest::SaveConnectionCloseFrame));
  // Sequence 2 was never issued, so retiring it is invalid.
  QuicRetireConnectionIdFrame frame;
  frame.sequence_number = 2u;
  EXPECT_FALSE(connection_.OnRetireConnectionIdFrame(frame));
  EXPECT_FALSE(connection_.connected());
  EXPECT_THAT(saved_connection_close_frame_.quic_error_code,
              IsError(IETF_QUIC_PROTOCOL_VIOLATION));
}
// Retiring the server's initial CID (sequence 0) before any replacement was
// proactively sent should cause the server to mint replacement CIDs on the
// spot rather than fail.
TEST_P(QuicConnectionTest,
       ServerRetireSelfIssuedConnectionIdWithoutSendingNewConnectionIdBefore) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  connection_.CreateConnectionIdManager();
  auto* retire_self_issued_cid_alarm =
      connection_.GetRetireSelfIssuedConnectionIdAlarm();
  ASSERT_FALSE(retire_self_issued_cid_alarm->IsSet());
  QuicConnectionId cid0 = connection_id_;
  QuicRetireConnectionIdFrame frame;
  frame.sequence_number = 0u;
  if (!connection_.connection_id().IsEmpty()) {
    // Two replacement CIDs are generated back to back.
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(cid0))
        .WillOnce(Return(TestConnectionId(456)));
    EXPECT_CALL(connection_id_generator_,
                GenerateNextConnectionId(TestConnectionId(456)))
        .WillOnce(Return(TestConnectionId(789)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .Times(2)
      .WillRepeatedly(Return(true));
  EXPECT_CALL(visitor_, SendNewConnectionId(_)).Times(2);
  EXPECT_TRUE(connection_.OnRetireConnectionIdFrame(frame));
}
// End-to-end exercise of server-side self-issued CID retirement: the old CID
// stays usable for in-flight packets until the retirement alarm fires, after
// which only the newer CIDs remain active.
TEST_P(QuicConnectionTest, ServerRetireSelfIssuedConnectionId) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  connection_.CreateConnectionIdManager();
  // Records each CID the server reserves so the test can refer to it later.
  QuicConnectionId recorded_cid;
  auto cid_recorder = [&recorded_cid](const QuicConnectionId& cid) -> bool {
    recorded_cid = cid;
    return true;
  };
  QuicConnectionId cid0 = connection_id_;
  QuicConnectionId cid1;
  QuicConnectionId cid2;
  EXPECT_EQ(connection_.connection_id(), cid0);
  EXPECT_EQ(connection_.GetOneActiveServerConnectionId(), cid0);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  // Proactively issue cid1 to the client.
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke(cid_recorder));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  connection_.MaybeSendConnectionIdToClient();
  cid1 = recorded_cid;
  auto* retire_self_issued_cid_alarm =
      connection_.GetRetireSelfIssuedConnectionIdAlarm();
  ASSERT_FALSE(retire_self_issued_cid_alarm->IsSet());
  // Three peer packets: a ping on cid0, a RETIRE_CONNECTION_ID(0) on cid1,
  // and a later ping on cid1.
  char buffers[3][kMaxOutgoingPacketSize];
  auto packet1 =
      ConstructPacket({QuicFrame(QuicPingFrame())}, ENCRYPTION_FORWARD_SECURE,
                      buffers[0], kMaxOutgoingPacketSize);
  peer_creator_.SetServerConnectionId(cid1);
  auto retire_cid_frame = std::make_unique<QuicRetireConnectionIdFrame>();
  retire_cid_frame->sequence_number = 0u;
  auto packet2 = ConstructPacket({QuicFrame(retire_cid_frame.release())},
                                 ENCRYPTION_FORWARD_SECURE, buffers[1],
                                 kMaxOutgoingPacketSize);
  auto packet3 =
      ConstructPacket({QuicFrame(QuicPingFrame())}, ENCRYPTION_FORWARD_SECURE,
                      buffers[2], kMaxOutgoingPacketSize);
  // Processing the retire frame triggers issuance of a replacement (cid2).
  if (!connection_.connection_id().IsEmpty()) {
    EXPECT_CALL(connection_id_generator_, GenerateNextConnectionId(_))
        .WillOnce(Return(TestConnectionId(456)));
  }
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_))
      .WillOnce(Invoke(cid_recorder));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  peer_creator_.SetServerConnectionId(cid1);
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *packet2);
  cid2 = recorded_cid;
  // cid0 remains active until the retirement alarm fires.
  EXPECT_THAT(connection_.GetActiveServerConnectionIds(),
              ElementsAre(cid0, cid1, cid2));
  ASSERT_TRUE(retire_self_issued_cid_alarm->IsSet());
  EXPECT_EQ(connection_.connection_id(), cid1);
  EXPECT_TRUE(connection_.GetOneActiveServerConnectionId() == cid0 ||
              connection_.GetOneActiveServerConnectionId() == cid1 ||
              connection_.GetOneActiveServerConnectionId() == cid2);
  // A packet still addressed to cid0 is accepted before retirement.
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *packet1);
  EXPECT_EQ(connection_.connection_id(), cid0);
  EXPECT_TRUE(connection_.GetOneActiveServerConnectionId() == cid0 ||
              connection_.GetOneActiveServerConnectionId() == cid1 ||
              connection_.GetOneActiveServerConnectionId() == cid2);
  // Firing the alarm actually retires cid0.
  EXPECT_CALL(visitor_, OnServerConnectionIdRetired(cid0));
  retire_self_issued_cid_alarm->Fire();
  EXPECT_THAT(connection_.GetActiveServerConnectionIds(),
              ElementsAre(cid1, cid2));
  EXPECT_TRUE(connection_.GetOneActiveServerConnectionId() == cid1 ||
              connection_.GetOneActiveServerConnectionId() == cid2);
  // Subsequent packets on cid1 keep working.
  connection_.ProcessUdpPacket(kSelfAddress, kPeerAddress, *packet3);
  EXPECT_EQ(connection_.connection_id(), cid1);
  EXPECT_TRUE(connection_.GetOneActiveServerConnectionId() == cid1 ||
              connection_.GetOneActiveServerConnectionId() == cid2);
}
// If an alternative path was created before the client issued any spare CID,
// a later NEW_CONNECTION_ID frame should back-fill that path's client CID
// and stateless reset token.
TEST_P(QuicConnectionTest, PatchMissingClientConnectionIdOntoAlternativePath) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  connection_.CreateConnectionIdManager();
  connection_.set_client_connection_id(TestConnectionId(1));
  // Hand-build an alternative path that lacks a client CID.
  const auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  auto* alternative_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  QuicIpAddress new_host;
  new_host.FromString("12.12.12.12");
  alternative_path->self_address = default_path->self_address;
  alternative_path->peer_address = QuicSocketAddress(new_host, 12345);
  alternative_path->server_connection_id = TestConnectionId(3);
  ASSERT_TRUE(alternative_path->client_connection_id.IsEmpty());
  ASSERT_FALSE(alternative_path->stateless_reset_token.has_value());
  // Receiving a spare client CID should patch the alternative path.
  QuicNewConnectionIdFrame frame;
  frame.sequence_number = 1u;
  frame.connection_id = TestConnectionId(5);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  connection_.OnNewConnectionIdFrame(frame);
  ASSERT_EQ(alternative_path->client_connection_id, frame.connection_id);
  ASSERT_EQ(alternative_path->stateless_reset_token,
            frame.stateless_reset_token);
}
// Same as above, but the path missing a client CID is the DEFAULT path: a
// later NEW_CONNECTION_ID frame must patch the default path and the packet
// creator's destination CID.
TEST_P(QuicConnectionTest, PatchMissingClientConnectionIdOntoDefaultPath) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  connection_.CreateConnectionIdManager();
  connection_.set_client_connection_id(TestConnectionId(1));
  // Move the fully-formed path aside and rebuild the default path without a
  // client connection ID.
  auto* default_path = QuicConnectionPeer::GetDefaultPath(&connection_);
  auto* alternative_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  *alternative_path = std::move(*default_path);
  QuicIpAddress new_host;
  new_host.FromString("12.12.12.12");
  // NOTE(review): self-assignment is a no-op — presumably intended to keep
  // the original self address after the move above; confirm.
  default_path->self_address = default_path->self_address;
  default_path->peer_address = QuicSocketAddress(new_host, 12345);
  default_path->server_connection_id = TestConnectionId(3);
  packet_creator->SetDefaultPeerAddress(default_path->peer_address);
  packet_creator->SetServerConnectionId(default_path->server_connection_id);
  packet_creator->SetClientConnectionId(default_path->client_connection_id);
  ASSERT_FALSE(default_path->validated);
  ASSERT_TRUE(default_path->client_connection_id.IsEmpty());
  ASSERT_FALSE(default_path->stateless_reset_token.has_value());
  // Receiving a spare client CID should patch the default path in place.
  QuicNewConnectionIdFrame frame;
  frame.sequence_number = 1u;
  frame.connection_id = TestConnectionId(5);
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  connection_.OnNewConnectionIdFrame(frame);
  ASSERT_EQ(default_path->client_connection_id, frame.connection_id);
  ASSERT_EQ(default_path->stateless_reset_token, frame.stateless_reset_token);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(), frame.connection_id);
}
// While the packet creator is scoped to a peer address with empty connection
// IDs, packet generation must be blocked; it resumes once the scope ends.
TEST_P(QuicConnectionTest, ShouldGeneratePacketBlockedByMissingConnectionId) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  connection_.set_client_connection_id(TestConnectionId(1));
  connection_.CreateConnectionIdManager();
  if (version().SupportsAntiAmplificationLimit()) {
    // Lift the anti-amplification limit so it doesn't block sending itself.
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  ASSERT_TRUE(
      connection_.ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA, NOT_HANDSHAKE));
  QuicPacketCreator* packet_creator =
      QuicConnectionPeer::GetPacketCreator(&connection_);
  QuicIpAddress peer_host1;
  peer_host1.FromString("12.12.12.12");
  QuicSocketAddress peer_address1(peer_host1, 1235);
  {
    // Scope the packet creator to a different peer with no CIDs available.
    QuicPacketCreator::ScopedPeerAddressContext context(
        packet_creator, peer_address1, EmptyQuicConnectionId(),
        EmptyQuicConnectionId());
    ASSERT_FALSE(connection_.ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA,
                                                  NOT_HANDSHAKE));
  }
  // Outside the scoped context, generation is allowed again.
  ASSERT_TRUE(
      connection_.ShouldGeneratePacket(NO_RETRANSMITTABLE_DATA, NOT_HANDSHAKE));
}
// Regression-style test: an ACK arriving in a packet that also triggers a
// connection migration must not cause a mid-packet-processing write
// (guarded by a QUIC_BUG).
TEST_P(QuicConnectionTest, LostDataThenGetAcknowledged) {
  set_perspective(Perspective::IS_SERVER);
  if (!version().SupportsAntiAmplificationLimit() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  QuicPacketCreatorPeer::SetSendVersionInPacket(creator_, false);
  if (version().SupportsAntiAmplificationLimit()) {
    QuicConnectionPeer::SetAddressValidated(&connection_);
  }
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Put four stream packets in flight.
  QuicPacketNumber last_packet;
  SendStreamDataToPeer(3, "foo", 0, NO_FIN, &last_packet);
  SendStreamDataToPeer(3, "foo", 3, NO_FIN, &last_packet);
  SendStreamDataToPeer(3, "foo", 6, NO_FIN, &last_packet);
  SendStreamDataToPeer(3, "foo", 9, NO_FIN, &last_packet);
  ProcessFramePacket(QuicFrame(QuicPingFrame()));
  // Build a packet carrying a stream frame plus an ACK of packets 1-4.
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1_));
  QuicAckFrame ack = InitAckFrame({{QuicPacketNumber(1), QuicPacketNumber(5)}});
  frames.push_back(QuicFrame(&ack));
  // Deliver it from a brand-new peer address to force a migration.
  QuicIpAddress ip_address;
  ASSERT_TRUE(ip_address.FromString("127.0.52.223"));
  EXPECT_QUIC_BUG(
      {
        EXPECT_CALL(visitor_, OnConnectionMigration(_)).Times(1);
        EXPECT_CALL(visitor_, OnStreamFrame(_))
            .WillOnce(InvokeWithoutArgs(&notifier_,
                                        &SimpleSessionNotifier::OnCanWrite));
        ProcessFramesPacketWithAddresses(frames, kSelfAddress,
                                         QuicSocketAddress(ip_address, 1000),
                                         ENCRYPTION_FORWARD_SECURE);
        // Path validation starts, but no stream data is written mid-process.
        EXPECT_EQ(1u, writer_->path_challenge_frames().size());
        EXPECT_TRUE(writer_->stream_frames().empty());
      },
      "Try to write mid packet processing");
}
// When the PTO fires while the congestion controller blocks sending, the
// probe should still go out carrying (1-RTT) stream data.
TEST_P(QuicConnectionTest, PtoSendStreamData) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  if (QuicVersionUsesCryptoFrames(connection_.transport_version())) {
    EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  }
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Walk the encryption levels up through handshake to forward secure.
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Block sending so the stream data stays pending until the PTO.
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(false));
  connection_.SendStreamDataWithString(2, std::string(1500, 'a'), 0, NO_FIN);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  connection_.GetRetransmissionAlarm()->Fire();
  // The probe packet's tag shows it was sent at the INITIAL level's tag.
  EXPECT_EQ(0x01010101u, writer_->final_bytes_of_last_packet());
}
// Sending 0-RTT data must not push back the handshake's PTO deadline.
TEST_P(QuicConnectionTest, SendingZeroRttPacketsDoesNotPostponePTO) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  // Send CHLO; this arms the retransmission (PTO) alarm.
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  // Install 0-RTT keys.
  connection_.SetEncrypter(ENCRYPTION_ZERO_RTT,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  // Receive an ACK of the CHLO; the PTO is re-armed.
  QuicAckFrame frame1 = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessFramePacketAtLevel(1, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicTime pto_deadline = connection_.GetRetransmissionAlarm()->deadline();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  connection_.SetEncrypter(ENCRYPTION_ZERO_RTT,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  // Sending 0-RTT stream data must leave the PTO deadline unchanged.
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  EXPECT_EQ(pto_deadline, connection_.GetRetransmissionAlarm()->deadline());
}
// Buffering an undecryptable packet arms an earlier deadline once, but
// subsequent undecryptable packets must not keep postponing the PTO.
TEST_P(QuicConnectionTest, QueueingUndecryptablePacketsDoesntPostponePTO) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  config.set_max_undecryptable_packets(3);
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  // No 1-RTT decrypter yet, so forward-secure packets get queued.
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE);
  connection_.SendCryptoStreamData();
  connection_.SetEncrypter(ENCRYPTION_ZERO_RTT,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  QuicAckFrame frame1 = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessFramePacketAtLevel(1, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicTime pto_deadline = connection_.GetRetransmissionAlarm()->deadline();
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  peer_framer_.SetEncrypter(ENCRYPTION_FORWARD_SECURE,
                            std::make_unique<TaggingEncrypter>(0xFF));
  // First undecryptable packet: the alarm moves earlier (key-arrival probe).
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  EXPECT_GT(pto_deadline, connection_.GetRetransmissionAlarm()->deadline());
  pto_deadline = connection_.GetRetransmissionAlarm()->deadline();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  clock_.AdvanceTime(pto_deadline - clock_.ApproximateNow());
  connection_.GetRetransmissionAlarm()->Fire();
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  pto_deadline = connection_.GetRetransmissionAlarm()->deadline();
  // A second undecryptable packet must not move the deadline again.
  ProcessDataPacketAtLevel(4, !kHasStopWaiting, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(pto_deadline, connection_.GetRetransmissionAlarm()->deadline());
}
// HANDSHAKE packets arriving before handshake keys are installed must be
// queued as undecryptable rather than dropped.
TEST_P(QuicConnectionTest, QueueUndecryptableHandshakePackets) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  config.set_max_undecryptable_packets(3);
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  // Remove handshake keys so the incoming packet cannot be decrypted.
  connection_.RemoveDecrypter(ENCRYPTION_HANDSHAKE);
  connection_.SendCryptoStreamData();
  connection_.SetEncrypter(ENCRYPTION_ZERO_RTT,
                           std::make_unique<TaggingEncrypter>(0x02));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  EXPECT_EQ(0u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  peer_framer_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                            std::make_unique<TaggingEncrypter>(0xFF));
  // The undecryptable HANDSHAKE packet should be buffered, not discarded.
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_HANDSHAKE);
  EXPECT_EQ(1u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
}
// A PTO probe should not be sent at the 0-RTT level while INITIAL data is
// still retransmittable.
TEST_P(QuicConnectionTest, PingNotSentAt0RTTLevelWhenInitialAvailable) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  connection_.SendStreamDataWithString(2, "foo", 0, NO_FIN);
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  // ACK the CHLO so the PTO is re-armed for the remaining data.
  QuicAckFrame frame1 = InitAckFrame(1);
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  ProcessFramePacketAtLevel(1, QuicFrame(&frame1), ENCRYPTION_INITIAL);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  QuicTime pto_deadline = connection_.GetRetransmissionAlarm()->deadline();
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(1);
  clock_.AdvanceTime(pto_deadline - clock_.ApproximateNow());
  connection_.GetRetransmissionAlarm()->Fire();
  // The probe's tag must not be the 0-RTT encrypter's tag (0x02).
  EXPECT_NE(0x02020202u, writer_->final_bytes_of_last_packet());
}
// Iterates over every QUIC frame type, delivers one instance of each, and
// verifies that QuicUtils::IsAckElicitingFrame agrees with whether the
// connection actually schedules an ACK in response.
TEST_P(QuicConnectionTest, AckElicitingFrames) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  config.SetReliableStreamReset(true);
  connection_.SetFromConfig(config);
  // Expectations for CID issuance triggered below by MaybeSendNewConnectionIds.
  EXPECT_CALL(connection_id_generator_,
              GenerateNextConnectionId(TestConnectionId(12)))
      .WillOnce(Return(TestConnectionId(456)));
  EXPECT_CALL(connection_id_generator_,
              GenerateNextConnectionId(TestConnectionId(456)))
      .WillOnce(Return(TestConnectionId(789)));
  EXPECT_CALL(visitor_, SendNewConnectionId(_)).Times(2);
  // One visitor callback is expected per frame type delivered in the loop.
  EXPECT_CALL(visitor_, OnRstStream(_));
  EXPECT_CALL(visitor_, OnResetStreamAt(_));
  EXPECT_CALL(visitor_, OnWindowUpdateFrame(_));
  EXPECT_CALL(visitor_, OnBlockedFrame(_));
  EXPECT_CALL(visitor_, OnHandshakeDoneReceived());
  EXPECT_CALL(visitor_, OnStreamFrame(_));
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  EXPECT_CALL(visitor_, OnMaxStreamsFrame(_));
  EXPECT_CALL(visitor_, OnStreamsBlockedFrame(_));
  EXPECT_CALL(visitor_, OnStopSendingFrame(_));
  EXPECT_CALL(visitor_, OnMessageReceived(""));
  EXPECT_CALL(visitor_, OnNewTokenReceived(""));
  SetClientConnectionId(TestConnectionId(12));
  connection_.CreateConnectionIdManager();
  QuicConnectionPeer::GetSelfIssuedConnectionIdManager(&connection_)
      ->MaybeSendNewConnectionIds();
  connection_.set_can_receive_ack_frequency_frame();
  // Pre-built frame instances reused across loop iterations.
  QuicAckFrame ack_frame = InitAckFrame(1);
  QuicRstStreamFrame rst_stream_frame;
  QuicWindowUpdateFrame window_update_frame;
  QuicPathChallengeFrame path_challenge_frame;
  QuicNewConnectionIdFrame new_connection_id_frame;
  new_connection_id_frame.sequence_number = 1u;
  QuicRetireConnectionIdFrame retire_connection_id_frame;
  retire_connection_id_frame.sequence_number = 1u;
  QuicStopSendingFrame stop_sending_frame;
  QuicPathResponseFrame path_response_frame;
  QuicMessageFrame message_frame;
  QuicNewTokenFrame new_token_frame;
  QuicAckFrequencyFrame ack_frequency_frame;
  QuicResetStreamAtFrame reset_stream_at_frame;
  QuicBlockedFrame blocked_frame;
  size_t packet_number = 1;
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  QuicFramer* framer = const_cast<QuicFramer*>(&connection_.framer());
  framer->set_process_reset_stream_at(true);
  peer_framer_.set_process_reset_stream_at(true);
  for (uint8_t i = 0; i < NUM_FRAME_TYPES; ++i) {
    QuicFrameType frame_type = static_cast<QuicFrameType>(i);
    // Frame types that cannot be exercised here are skipped below.
    bool skipped = false;
    QuicFrame frame;
    QuicFrames frames;
    // Pad every test packet so it is large enough to process.
    frames.push_back(QuicFrame(QuicPaddingFrame(10)));
    switch (frame_type) {
      case PADDING_FRAME:
        frame = QuicFrame(QuicPaddingFrame(10));
        break;
      case MTU_DISCOVERY_FRAME:
        frame = QuicFrame(QuicMtuDiscoveryFrame());
        break;
      case PING_FRAME:
        frame = QuicFrame(QuicPingFrame());
        break;
      case MAX_STREAMS_FRAME:
        frame = QuicFrame(QuicMaxStreamsFrame());
        break;
      case STOP_WAITING_FRAME:
        skipped = true;
        break;
      case STREAMS_BLOCKED_FRAME:
        frame = QuicFrame(QuicStreamsBlockedFrame());
        break;
      case STREAM_FRAME:
        frame = QuicFrame(QuicStreamFrame());
        break;
      case HANDSHAKE_DONE_FRAME:
        frame = QuicFrame(QuicHandshakeDoneFrame());
        break;
      case ACK_FRAME:
        frame = QuicFrame(&ack_frame);
        break;
      case RST_STREAM_FRAME:
        frame = QuicFrame(&rst_stream_frame);
        break;
      case CONNECTION_CLOSE_FRAME:
        skipped = true;
        break;
      case GOAWAY_FRAME:
        skipped = true;
        break;
      case BLOCKED_FRAME:
        frame = QuicFrame(blocked_frame);
        break;
      case WINDOW_UPDATE_FRAME:
        frame = QuicFrame(window_update_frame);
        break;
      case PATH_CHALLENGE_FRAME:
        frame = QuicFrame(path_challenge_frame);
        break;
      case STOP_SENDING_FRAME:
        frame = QuicFrame(stop_sending_frame);
        break;
      case NEW_CONNECTION_ID_FRAME:
        frame = QuicFrame(&new_connection_id_frame);
        break;
      case RETIRE_CONNECTION_ID_FRAME:
        frame = QuicFrame(&retire_connection_id_frame);
        break;
      case PATH_RESPONSE_FRAME:
        frame = QuicFrame(path_response_frame);
        break;
      case MESSAGE_FRAME:
        frame = QuicFrame(&message_frame);
        break;
      case CRYPTO_FRAME:
        skipped = true;
        break;
      case NEW_TOKEN_FRAME:
        frame = QuicFrame(&new_token_frame);
        break;
      case ACK_FREQUENCY_FRAME:
        frame = QuicFrame(&ack_frequency_frame);
        break;
      case RESET_STREAM_AT_FRAME:
        frame = QuicFrame(&reset_stream_at_frame);
        break;
      case NUM_FRAME_TYPES:
        skipped = true;
        break;
    }
    if (skipped) {
      continue;
    }
    ASSERT_EQ(frame_type, frame.type);
    frames.push_back(frame);
    EXPECT_FALSE(connection_.HasPendingAcks());
    // Deliver the frame and check whether an ACK is scheduled, matching
    // the classification from QuicUtils::IsAckElicitingFrame.
    ProcessFramesPacketAtLevel(packet_number++, frames,
                               ENCRYPTION_FORWARD_SECURE);
    if (QuicUtils::IsAckElicitingFrame(frame_type)) {
      ASSERT_TRUE(connection_.HasPendingAcks()) << frame;
      clock_.AdvanceTime(DefaultDelayedAckTime());
      connection_.GetAckAlarm()->Fire();
    }
    EXPECT_FALSE(connection_.HasPendingAcks());
    ASSERT_TRUE(connection_.connected());
  }
}
// A single INITIAL packet carrying both a CHLO (CRYPTO frame) and an ACK:
// responding to the CHLO mid-processing leads to connection close, which the
// test expects to be handled cleanly via the visitor callbacks.
TEST_P(QuicConnectionTest, ReceivedChloAndAck) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicFrames frames;
  QuicAckFrame ack_frame = InitAckFrame(1);
  frames.push_back(MakeCryptoFrame());
  frames.push_back(QuicFrame(&ack_frame));
  // Receiving the CHLO triggers sending crypto stream data in response.
  EXPECT_CALL(visitor_, OnCryptoFrame(_))
      .WillOnce(IgnoreResult(InvokeWithoutArgs(
          &connection_, &TestConnection::SendCryptoStreamData)));
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_INITIAL);
}
// Verifies coalesced-packet assembly when the delayed-ACK alarm fires with
// pending INITIAL ack+crypto, HANDSHAKE crypto, and 1-RTT stream data: the
// three levels must be coalesced into one datagram in order.
TEST_P(QuicConnectionTest, FailedToRetransmitShlo) {
  if (!version().SupportsAntiAmplificationLimit() ||
      GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Install keys for 0-RTT receive and handshake/1-RTT send.
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  {
    // Queue data at all three send levels inside one flush.
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
    EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_HANDSHAKE);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    connection_.SendStreamDataWithString(0, std::string(100 * 1024, 'a'), 0,
                                         NO_FIN);
  }
  ProcessCryptoPacketAtLevel(2, ENCRYPTION_INITIAL);
  ASSERT_TRUE(connection_.HasPendingAcks());
  EXPECT_EQ(clock_.Now() + kAlarmGranularity,
            connection_.GetAckAlarm()->deadline());
  // Firing the ACK alarm flushes three coalesced packets in one write.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(3);
  clock_.AdvanceTime(kAlarmGranularity);
  connection_.GetAckAlarm()->Fire();
  // Outermost (INITIAL) part: ACK + crypto retransmission.
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  EXPECT_EQ(1u, writer_->ack_frames().size());
  EXPECT_EQ(1u, writer_->crypto_frames().size());
  // Next coalesced part: HANDSHAKE crypto data.
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  auto packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(0u, writer_->ack_frames().size());
  EXPECT_EQ(1u, writer_->crypto_frames().size());
  // Final coalesced part: 1-RTT stream data.
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(0u, writer_->crypto_frames().size());
  EXPECT_EQ(1u, writer_->stream_frames().size());
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(3, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
}
// Regression test: after crypto data at ENCRYPTION_HANDSHAKE is sent and the
// packet size then shrinks, the retransmission (fired from the retransmission
// alarm) must still produce a valid coalesced packet: a HANDSHAKE crypto
// retransmission followed by a coalesced 1-RTT stream retransmission.
TEST_P(QuicConnectionTest, FailedToConsumeCryptoData) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  EXPECT_TRUE(connection_.HasPendingAcks());
  // Install keys for 0-RTT, HANDSHAKE and 1-RTT; the tag passed to the
  // tagging encrypter/decrypter is the encryption-level value.
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  ProcessDataPacketAtLevel(1, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  // Send data at three encryption levels inside one flusher so they can be
  // coalesced into a single write.
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
    EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString(std::string(200, 'a'), 0,
                                         ENCRYPTION_HANDSHAKE);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    connection_.SendStreamDataWithString(0, std::string(40, 'a'), 0, NO_FIN);
  }
  peer_framer_.SetEncrypter(ENCRYPTION_HANDSHAKE,
                            std::make_unique<TaggingEncrypter>(0x03));
  // Initial keys are discarded once a HANDSHAKE packet is processed.
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.NeuterUnencryptedPackets();
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_HANDSHAKE);
  clock_.AdvanceTime(kAlarmGranularity);
  // Send a near-full-size 1-RTT packet so the later retransmission has less
  // room available.
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    connection_.SendStreamDataWithString(0, std::string(1395, 'a'), 40, NO_FIN);
  }
  // Fire the retransmission (PTO) alarm at its deadline.
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  const QuicTime retransmission_time =
      connection_.GetRetransmissionAlarm()->deadline();
  clock_.AdvanceTime(retransmission_time - clock_.Now());
  connection_.GetRetransmissionAlarm()->Fire();
  // Outer packet: HANDSHAKE crypto retransmission; coalesced inner packet:
  // the 1-RTT stream retransmission. No third coalesced layer.
  EXPECT_EQ(0x03030303u, writer_->final_bytes_of_last_packet());
  EXPECT_EQ(1u, writer_->crypto_frames().size())
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  auto packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(1u, writer_->stream_frames().size());
  ASSERT_TRUE(writer_->coalesced_packet() == nullptr);
  ASSERT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
}
// Verifies that when an ACK arrives before its keys are available and is
// buffered as undecryptable, the queuing time between receipt and decryption
// is excluded from the RTT sample: the measured RTT stays at kTestRTT.
TEST_P(QuicConnectionTest,
       RTTSampleDoesNotIncludeQueuingDelayWithPostponedAckProcessing) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // Allow buffering of undecryptable packets.
  config.set_max_undecryptable_packets(3);
  connection_.SetFromConfig(config);
  // Seed the RTT estimator with a known value so the final check is exact.
  const QuicTime::Delta kTestRTT = QuicTime::Delta::FromMilliseconds(30);
  RttStats* rtt_stats = const_cast<RttStats*>(manager_->GetRttStats());
  rtt_stats->UpdateRtt(kTestRTT, QuicTime::Delta::Zero(), QuicTime::Zero());
  // Drop 1-RTT read keys so the peer's ACK cannot be decrypted on arrival.
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE)
  connection_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_ZERO_RTT);
  connection_.SendStreamDataWithString(0, std::string(10, 'a'), 0, FIN);
  // Let kTestRTT plus the peer's ack delay elapse before the ACK "arrives".
  clock_.AdvanceTime(kTestRTT + QuicTime::Delta::FromMilliseconds(
                                    GetDefaultDelayedAckTimeMs()));
  EXPECT_EQ(0u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // Build a 1-RTT ACK of packet 1 whose ack_delay_time matches the elapsed
  // delayed-ack time, so the RTT sample should come out to exactly kTestRTT.
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  QuicAckFrame ack_frame = InitAckFrame(1);
  ack_frame.ack_delay_time =
      QuicTime::Delta::FromMilliseconds(GetDefaultDelayedAckTimeMs());
  QuicFrames frames;
  frames.push_back(QuicFrame(&ack_frame));
  QuicPacketHeader header =
      ConstructPacketHeader(30, ENCRYPTION_FORWARD_SECURE);
  std::unique_ptr<QuicPacket> packet(ConstructPacket(header, frames));
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length = peer_framer_.EncryptPayload(
      ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(30), *packet, buffer,
      kMaxOutgoingPacketSize);
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false));
  if (connection_.GetSendAlarm()->IsSet()) {
    connection_.GetSendAlarm()->Fire();
  }
  // The ACK is undecryptable and gets buffered.
  ASSERT_EQ(1u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // Simulate queuing delay before the keys become available.
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  EXPECT_FALSE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  // Installing the 1-RTT decrypter arms the alarm that replays the buffered
  // packet.
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  ASSERT_TRUE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _));
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  // The 10ms queuing delay must not inflate the RTT sample.
  EXPECT_EQ(rtt_stats->latest_rtt(), kTestRTT);
}
// Regression test: when an Initial packet is re-serialized for coalescing,
// no extra padding should be added. Verified indirectly by checking that the
// debug visitor saw exactly one OnPacketSent per sent packet number, and that
// pending padding was observed after serializing the second 1-RTT packet.
TEST_P(QuicConnectionTest, NoExtraPaddingInReserializedInitial) {
  if (!IsDefaultTestConfiguration() ||
      !connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  // Count every packet the debug visitor is told about.
  MockQuicConnectionDebugVisitor debug_visitor;
  connection_.set_debug_visitor(&debug_visitor);
  uint64_t debug_visitor_sent_count = 0;
  EXPECT_CALL(debug_visitor, OnPacketSent(_, _, _, _, _, _, _, _, _))
      .WillRepeatedly([&]() { debug_visitor_sent_count++; });
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  // Install keys for 0-RTT, HANDSHAKE and 1-RTT (tag == encryption level).
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  ProcessDataPacketAtLevel(2, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  // Queue data at three encryption levels in a single coalesced flush.
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
    EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString(std::string(200, 'a'), 0,
                                         ENCRYPTION_HANDSHAKE);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    connection_.SendStreamDataWithString(0, std::string(400, 'b'), 0, NO_FIN);
  }
  // When the connection becomes writable, push enough stream data to force
  // multiple 1-RTT packets (data8 spans several packets).
  const std::string data4(1000, '4');
  const std::string data8(3000, '8');
  EXPECT_CALL(visitor_, OnCanWrite()).WillOnce([&]() {
    connection_.producer()->SaveStreamData(4, data4);
    connection_.producer()->SaveStreamData(8, data8);
    notifier_.WriteOrBufferData(4, data4.size(), FIN_AND_PADDING);
    notifier_.WriteOrBufferData(8, data8.size(), FIN);
  });
  // Snapshot the creator's pending padding right after the second 1-RTT
  // packet is serialized, then delegate to the real OnSerializedPacket.
  QuicByteCount pending_padding_after_serialize_2nd_1rtt_packet = 0;
  QuicPacketCount num_1rtt_packets_serialized = 0;
  EXPECT_CALL(connection_, OnSerializedPacket(_))
      .WillRepeatedly([&](SerializedPacket packet) {
        if (packet.encryption_level == ENCRYPTION_FORWARD_SECURE) {
          num_1rtt_packets_serialized++;
          if (num_1rtt_packets_serialized == 2) {
            pending_padding_after_serialize_2nd_1rtt_packet =
                connection_.packet_creator().pending_padding_bytes();
          }
        }
        connection_.QuicConnection::OnSerializedPacket(std::move(packet));
      });
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_INITIAL);
  // One debug-visitor callback per sent packet => no hidden extra packets.
  EXPECT_EQ(
      debug_visitor_sent_count,
      connection_.sent_packet_manager().GetLargestSentPacket().ToUint64());
  EXPECT_GT(pending_padding_after_serialize_2nd_1rtt_packet, 0u);
  EXPECT_TRUE(connection_.connected());
}
// Verifies the ack_delay_time reported in an outgoing ACK for a packet that
// was buffered as undecryptable: the delayed-ack deadline is anchored at the
// original receipt time, and the reported ack delay equals the default
// delayed-ack time (i.e. the alarm-to-receipt span, which here includes the
// time the packet spent queued waiting for keys).
TEST_P(QuicConnectionTest, ReportedAckDelayIncludesQueuingDelay) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // Allow buffering of undecryptable packets.
  config.set_max_undecryptable_packets(3);
  connection_.SetFromConfig(config);
  // Drop 1-RTT read keys so the incoming packet cannot be decrypted yet.
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE);
  peer_framer_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  // An ack-eliciting (PING) 1-RTT packet from the peer.
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPingFrame()));
  frames.push_back(QuicFrame(QuicPaddingFrame(100)));
  QuicPacketHeader header =
      ConstructPacketHeader(30, ENCRYPTION_FORWARD_SECURE);
  std::unique_ptr<QuicPacket> packet(ConstructPacket(header, frames));
  char buffer[kMaxOutgoingPacketSize];
  size_t encrypted_length = peer_framer_.EncryptPayload(
      ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(30), *packet, buffer,
      kMaxOutgoingPacketSize);
  EXPECT_EQ(0u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // Remember when the packet was actually received.
  const QuicTime packet_receipt_time = clock_.Now();
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(buffer, encrypted_length, clock_.Now(), false));
  if (connection_.GetSendAlarm()->IsSet()) {
    connection_.GetSendAlarm()->Fire();
  }
  ASSERT_EQ(1u, QuicConnectionPeer::NumUndecryptablePackets(&connection_));
  // Simulate queuing delay before keys arrive.
  const QuicTime::Delta kQueuingDelay = QuicTime::Delta::FromMilliseconds(10);
  clock_.AdvanceTime(kQueuingDelay);
  EXPECT_FALSE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Installing the decrypter arms the replay alarm for buffered packets.
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  ASSERT_TRUE(connection_.GetProcessUndecryptablePacketsAlarm()->IsSet());
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  ASSERT_TRUE(connection_.HasPendingAcks());
  // The delayed-ack deadline is based on the original receipt time, not the
  // (later) decryption time.
  EXPECT_EQ(packet_receipt_time + DefaultDelayedAckTime(),
            connection_.GetAckAlarm()->deadline());
  clock_.AdvanceTime(packet_receipt_time + DefaultDelayedAckTime() -
                     clock_.Now());
  connection_.GetAckAlarm()->Fire();
  ASSERT_EQ(1u, writer_->ack_frames().size());
  EXPECT_EQ(DefaultDelayedAckTime(), writer_->ack_frames()[0].ack_delay_time);
}
// Verifies that a server can coalesce an INITIAL (ACK + crypto), a HANDSHAKE
// (crypto) and a 1-RTT (stream) packet into a single UDP write, and that each
// layer parses correctly when the coalesced packet is peeled apart.
TEST_P(QuicConnectionTest, CoalesceOneRTTPacketWithInitialAndHandshakePackets) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  // Install keys for 0-RTT, HANDSHAKE and 1-RTT (tag == encryption level).
  peer_framer_.SetEncrypter(
      ENCRYPTION_ZERO_RTT,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_ZERO_RTT,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_ZERO_RTT));
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  ProcessDataPacketAtLevel(2, !kHasStopWaiting, ENCRYPTION_ZERO_RTT);
  // Queue data at three encryption levels within one flusher. The 2000-byte
  // stream write spans more than one 1-RTT packet, hence two write attempts.
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
    EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString(std::string(200, 'a'), 0,
                                         ENCRYPTION_HANDSHAKE);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    connection_.SendStreamDataWithString(0, std::string(2000, 'b'), 0, FIN);
  }
  EXPECT_EQ(2u, writer_->packets_write_attempts());
  // A new INITIAL from the peer triggers a third write: a coalesced packet
  // whose outer layer is INITIAL (ACK + crypto).
  ProcessDataPacketAtLevel(3, !kHasStopWaiting, ENCRYPTION_INITIAL);
  EXPECT_EQ(3u, writer_->packets_write_attempts());
  EXPECT_EQ(1u, writer_->ack_frames().size());
  EXPECT_EQ(1u, writer_->crypto_frames().size());
  // Second layer: HANDSHAKE crypto.
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  auto packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(1u, writer_->crypto_frames().size());
  // Third layer: 1-RTT stream data.
  ASSERT_TRUE(writer_->coalesced_packet() != nullptr);
  packet = writer_->coalesced_packet()->Clone();
  writer_->framer()->ProcessPacket(*packet);
  EXPECT_EQ(1u, writer_->stream_frames().size());
}
// Regression test: sending a CONNECTION_CLOSE and then firing the blackhole
// detector alarm results in a second close attempt. BeforeConnectionCloseSent
// must be invoked for both (Times(2)) without crashing.
TEST_P(QuicConnectionTest, SendMultipleConnectionCloses) {
  if (!version().HasIetfQuicFrames() ||
      !GetQuicReloadableFlag(quic_default_enable_5rto_blackhole_detection2)) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  QuicConnectionPeer::SetAddressValidated(&connection_);
  // Complete the handshake so blackhole detection can be armed.
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  notifier_.NeuterUnencryptedData();
  connection_.NeuterUnencryptedPackets();
  connection_.OnHandshakeComplete();
  connection_.RemoveEncrypter(ENCRYPTION_INITIAL);
  connection_.RemoveEncrypter(ENCRYPTION_HANDSHAKE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Sending retransmittable data starts blackhole detection.
  SendStreamDataToPeer(1, "foo", 0, NO_FIN, nullptr);
  ASSERT_TRUE(connection_.BlackholeDetectionInProgress());
  // Two close attempts => two BeforeConnectionCloseSent callbacks.
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent()).Times(2);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  QuicConnectionPeer::SendConnectionClosePacket(
      &connection_, INTERNAL_ERROR, QUIC_INTERNAL_ERROR, "internal error");
  // Firing the detector after the first close triggers the second close.
  connection_.GetBlackholeDetectorAlarm()->Fire();
}
// Regression test: after a coalesced packet acknowledges all outstanding data
// in both the INITIAL and HANDSHAKE packet-number spaces, the retransmission
// (PTO) alarm must not remain armed.
TEST_P(QuicConnectionTest, EarliestSentTimeNotInitializedWhenPtoFires) {
  if (!connection_.SupportsMultiplePacketNumberSpaces()) {
    return;
  }
  set_perspective(Perspective::IS_SERVER);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(AnyNumber());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(AnyNumber());
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  // Install HANDSHAKE and 1-RTT keys (tag == encryption level).
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  SetDecrypter(ENCRYPTION_HANDSHAKE,
               std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  // Send packet 1 (INITIAL crypto), packet 2 (HANDSHAKE crypto) and 1-RTT
  // stream data in a single coalesced flush.
  {
    QuicConnection::ScopedPacketFlusher flusher(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
    connection_.SendCryptoDataWithString("foo", 0, ENCRYPTION_INITIAL);
    EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    connection_.SendCryptoDataWithString(std::string(200, 'a'), 0,
                                         ENCRYPTION_HANDSHAKE);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    connection_.SendStreamDataWithString(0, std::string(2000, 'b'), 0, FIN);
  }
  EXPECT_CALL(*send_algorithm_, OnCongestionEvent(_, _, _, _, _, _, _))
      .Times(AnyNumber());
  // Peer's coalesced packet: INITIAL ack of packet 1, HANDSHAKE ack of
  // packet 2 — everything ack-eliciting in those spaces is now acked.
  QuicFrames frames1;
  QuicAckFrame ack_frame1 = InitAckFrame(1);
  frames1.push_back(QuicFrame(&ack_frame1));
  QuicFrames frames2;
  QuicAckFrame ack_frame2 =
      InitAckFrame({{QuicPacketNumber(2), QuicPacketNumber(3)}});
  frames2.push_back(QuicFrame(&ack_frame2));
  ProcessCoalescedPacket(
      {{2, frames1, ENCRYPTION_INITIAL}, {3, frames2, ENCRYPTION_HANDSHAKE}});
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
}
// Spot-checks the static QuicConnection::CalculateNetworkBlackholeDelay
// helper with two fixed input triples. NOTE(review): the exact combination
// formula is not visible here; these assertions simply pin the two observed
// outputs (4s stays 4s; the second triple yields 5s).
TEST_P(QuicConnectionTest, CalculateNetworkBlackholeDelay) {
  if (!IsDefaultTestConfiguration()) {
    return;
  }
  // Shorthand for building second-granularity deltas.
  const auto secs = [](int s) { return QuicTime::Delta::FromSeconds(s); };
  // (4s, 1s, 1s) -> unchanged 4s.
  EXPECT_EQ(
      QuicConnection::CalculateNetworkBlackholeDelay(secs(4), secs(1),
                                                     secs(1)),
      secs(4));
  // (4s, 1s, 2s) -> extended to 5s.
  EXPECT_EQ(
      QuicConnection::CalculateNetworkBlackholeDelay(secs(4), secs(1),
                                                     secs(2)),
      secs(5));
}
// Regression test: when the writer is blocked and a coalesced packet is
// buffered, bytes_sent must be accounted exactly once — it should equal one
// max-length packet, not be double-counted for the buffered copy.
TEST_P(QuicConnectionTest, FixBytesAccountingForBufferedCoalescedPackets) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(AnyNumber());
  // Block the writer so the PING ends up buffered rather than written.
  writer_->SetWriteBlocked();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  QuicConnectionPeer::SendPing(&connection_);
  const QuicConnectionStats& stats = connection_.GetStats();
  // Exactly one full-size packet's worth of bytes is recorded as sent.
  EXPECT_EQ(stats.bytes_sent, connection_.max_packet_length());
}
// Exercises the server-side anti-amplification limit: before the client
// address is validated, the server may send at most
// `quic_anti_amplification_factor` times the bytes it has received, and the
// `quic_enforce_strict_amplification_factor` flag tightens how the last
// (partially blocked) packet and the CONNECTION_CLOSE are handled.
TEST_P(QuicConnectionTest, StrictAntiAmplificationLimit) {
  if (!connection_.version().SupportsAntiAmplificationLimit()) {
    return;
  }
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(AnyNumber());
  set_perspective(Perspective::IS_SERVER);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  // Nothing has been received yet, so nothing may be sent.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  connection_.SendCryptoDataWithString("foo", 0);
  EXPECT_FALSE(connection_.CanWrite(HAS_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.CanWrite(NO_RETRANSMITTABLE_DATA));
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  const size_t anti_amplification_factor =
      GetQuicFlag(quic_anti_amplification_factor);
  // Receiving one packet unblocks up to `anti_amplification_factor` sends
  // (the queued crypto data flushes on receipt).
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(anti_amplification_factor);
  ForceWillingAndAbleToWriteOnceForDeferSending();
  ProcessCryptoPacketAtLevel(1, ENCRYPTION_INITIAL);
  // Use the encryption-level value as the tag, matching the TaggingEncrypter
  // convention used throughout this file (previously the numerically
  // identical literals 0x02/0x03).
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  connection_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  for (size_t i = 1; i < anti_amplification_factor - 1; ++i) {
    connection_.SendCryptoDataWithString("foo", i * 3);
  }
  // Shrink the packet size so the final allowed write would exceed the
  // remaining byte budget.
  connection_.SetMaxPacketLength(connection_.max_packet_length() - 1);
  connection_.SendCryptoDataWithString("bar",
                                       (anti_amplification_factor - 1) * 3);
  EXPECT_LT(writer_->total_bytes_written(),
            anti_amplification_factor *
                QuicConnectionPeer::BytesReceivedOnDefaultPath(&connection_));
  // The strict flag changes whether that last constrained send goes out and
  // whether the retransmission alarm stays armed.
  if (GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(3);
    EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  } else {
    EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(4);
    EXPECT_TRUE(connection_.GetRetransmissionAlarm()->IsSet());
  }
  connection_.SetMaxPacketLength(connection_.max_packet_length() + 1);
  connection_.SendCryptoDataWithString("bar", anti_amplification_factor * 3);
  EXPECT_FALSE(connection_.GetRetransmissionAlarm()->IsSet());
  // Close the connection; the CONNECTION_CLOSE is also subject to the limit
  // under the strict flag, and slightly over-budget otherwise.
  EXPECT_CALL(visitor_, BeforeConnectionCloseSent());
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _));
  connection_.CloseConnection(
      QUIC_INTERNAL_ERROR, "error",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
  EXPECT_EQ(0u, connection_.NumQueuedPackets());
  if (GetQuicFlag(quic_enforce_strict_amplification_factor)) {
    EXPECT_LT(writer_->total_bytes_written(),
              anti_amplification_factor *
                  QuicConnectionPeer::BytesReceivedOnDefaultPath(&connection_));
  } else {
    EXPECT_LT(writer_->total_bytes_written(),
              (anti_amplification_factor + 2) *
                  QuicConnectionPeer::BytesReceivedOnDefaultPath(&connection_));
    EXPECT_GT(writer_->total_bytes_written(),
              (anti_amplification_factor + 1) *
                  QuicConnectionPeer::BytesReceivedOnDefaultPath(&connection_));
  }
}
// Verifies tracking of the original destination connection ID on the server:
// before any override it equals the current connection ID; after an explicit
// override it is retained, and for TLS versions it is retired (with visitor
// notification) when the discard-0-RTT-keys alarm fires.
TEST_P(QuicConnectionTest, OriginalConnectionId) {
  set_perspective(Perspective::IS_SERVER);
  EXPECT_FALSE(connection_.GetDiscardZeroRttDecryptionKeysAlarm()->IsSet());
  // Default: the original destination CID is the current CID.
  EXPECT_EQ(connection_.GetOriginalDestinationConnectionId(),
            connection_.connection_id());
  QuicConnectionId original({0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08});
  connection_.SetOriginalDestinationConnectionId(original);
  EXPECT_EQ(original, connection_.GetOriginalDestinationConnectionId());
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  // A 1-RTT packet arms the discard-0-RTT-keys alarm under TLS.
  ProcessDataPacketAtLevel(1, false, ENCRYPTION_FORWARD_SECURE);
  if (connection_.version().UsesTls()) {
    EXPECT_TRUE(connection_.GetDiscardZeroRttDecryptionKeysAlarm()->IsSet());
    // Firing the alarm retires the original CID and notifies the visitor;
    // afterwards the "original" CID reported is the current CID again.
    EXPECT_CALL(visitor_, OnServerConnectionIdRetired(original));
    connection_.GetDiscardZeroRttDecryptionKeysAlarm()->Fire();
    EXPECT_EQ(connection_.GetOriginalDestinationConnectionId(),
              connection_.connection_id());
  } else {
    // Non-TLS versions keep reporting the overridden original CID.
    EXPECT_EQ(connection_.GetOriginalDestinationConnectionId(), original);
  }
}
// gMock action: installs tagging encrypter/decrypter for `level` on `conn`
// and makes it the default encryption level. The tag is 0x03 for
// ENCRYPTION_FORWARD_SECURE and 0x02 otherwise (the only other level this is
// used with in this file is ENCRYPTION_HANDSHAKE).
ACTION_P2(InstallKeys, conn, level) {
  uint8_t crypto_input = (level == ENCRYPTION_FORWARD_SECURE) ? 0x03 : 0x02;
  conn->SetEncrypter(level, std::make_unique<TaggingEncrypter>(crypto_input));
  conn->InstallDecrypter(
      level, std::make_unique<StrictTaggingDecrypter>(crypto_input));
  conn->SetDefaultEncryptionLevel(level);
}
// The server's Initial arrives after its HANDSHAKE and 1-RTT packets (which
// carry a new server connection ID). The early packets are buffered as
// undecryptable; processing the late Initial installs keys (via InstallKeys),
// drains the buffer, and adopts the new connection ID.
TEST_P(QuicConnectionTest, ServerConnectionIdChangeWithLateInitial) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(1);
  QuicConfig config;
  connection_.SetFromConfig(config);
  // Drop 1-RTT keys so higher-level packets cannot be decrypted yet.
  connection_.RemoveEncrypter(ENCRYPTION_FORWARD_SECURE);
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // The peer switches to a new server connection ID.
  QuicConnectionId old_id = connection_id_;
  connection_id_ = TestConnectionId(2);
  // Tag == encryption-level value, matching the TaggingEncrypter convention
  // used throughout this file (previously the numerically identical literals
  // 0x02/0x03) and the tags InstallKeys expects.
  peer_creator_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  ProcessCryptoPacketAtLevel(0, ENCRYPTION_HANDSHAKE);
  // No HANDSHAKE keys yet: packet buffered, old CID retained.
  EXPECT_EQ(QuicConnectionPeer::NumUndecryptablePackets(&connection_), 1u);
  EXPECT_EQ(connection_.connection_id(), old_id);
  peer_creator_.SetEncrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
  ProcessDataPacket(0);
  EXPECT_EQ(QuicConnectionPeer::NumUndecryptablePackets(&connection_), 2u);
  // The late Initial installs HANDSHAKE then 1-RTT keys, draining both
  // buffered packets.
  EXPECT_CALL(visitor_, OnCryptoFrame(_))
      .Times(2)
      .WillOnce(InstallKeys(&connection_, ENCRYPTION_HANDSHAKE))
      .WillOnce(InstallKeys(&connection_, ENCRYPTION_FORWARD_SECURE));
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  ProcessCryptoPacketAtLevel(0, ENCRYPTION_INITIAL);
  EXPECT_EQ(QuicConnectionPeer::NumUndecryptablePackets(&connection_), 0u);
  // The new server connection ID is now in effect.
  EXPECT_EQ(connection_.connection_id(), connection_id_);
}
// Like ServerConnectionIdChangeWithLateInitial, but the server connection ID
// changes twice: the late Initial carries yet another CID (back to
// TestConnectionId(1)), which is the one the connection must end up using.
TEST_P(QuicConnectionTest, ServerConnectionIdChangeTwiceWithLateInitial) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _)).Times(1);
  QuicConfig config;
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_INITIAL);
  connection_.SendCryptoStreamData();
  EXPECT_EQ(1u, writer_->packets_write_attempts());
  // First CID change, carried by a HANDSHAKE packet we cannot decrypt yet.
  QuicConnectionId old_id = connection_id_;
  connection_id_ = TestConnectionId(2);
  // Tag == encryption-level value, matching the TaggingEncrypter convention
  // used throughout this file (previously the numerically identical literal
  // 0x02) and the tag InstallKeys expects for ENCRYPTION_HANDSHAKE.
  peer_creator_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  ProcessCryptoPacketAtLevel(0, ENCRYPTION_HANDSHAKE);
  // Packet buffered as undecryptable; old CID retained for now.
  EXPECT_EQ(QuicConnectionPeer::NumUndecryptablePackets(&connection_), 1u);
  EXPECT_EQ(connection_.connection_id(), old_id);
  EXPECT_CALL(visitor_, OnCryptoFrame(_))
      .WillOnce(InstallKeys(&connection_, ENCRYPTION_HANDSHAKE));
  // Second CID change: the late Initial uses TestConnectionId(1).
  connection_id_ = TestConnectionId(1);
  ProcessCryptoPacketAtLevel(0, ENCRYPTION_INITIAL);
  EXPECT_EQ(QuicConnectionPeer::NumUndecryptablePackets(&connection_), 0u);
  // The Initial's connection ID wins.
  EXPECT_EQ(connection_.connection_id(), connection_id_);
}
// Client-side server-preferred-address (SPA) migration, happy path: after the
// handshake completes, the client validates the preferred address on a new
// socket/writer; once the PATH_RESPONSE arrives, the connection migrates to
// the new path, switches to the SPA-issued connection ID and stateless reset
// token, and retires the peer-issued CID for the old path.
TEST_P(QuicConnectionTest, ClientValidatedServerPreferredAddress) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  QuicConfig config;
  ServerPreferredAddressInit(config);
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Token associated with the SPA's connection ID (TestConnectionId(17)).
  const StatelessResetToken kNewStatelessResetToken =
      QuicUtils::GenerateStatelessResetToken(TestConnectionId(17));
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // When the SPA becomes available, kick off path validation from the new
  // self address using the new writer.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, kServerPreferredAddress));
  // The PATH_CHALLENGE to the SPA uses the SPA-issued connection ID.
  EXPECT_EQ(TestConnectionId(17),
            new_writer.last_packet_header().destination_connection_id);
  EXPECT_EQ(kServerPreferredAddress, new_writer.last_write_peer_address());
  ASSERT_FALSE(new_writer.path_challenge_frames().empty());
  QuicPathFrameBuffer payload =
      new_writer.path_challenge_frames().front().data_buffer;
  // While validation is pending, application data still goes over the
  // original path with the original CID and reset token.
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  ASSERT_FALSE(writer_->stream_frames().empty());
  EXPECT_EQ(TestConnectionId(),
            writer_->last_packet_header().destination_connection_id);
  EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
  EXPECT_TRUE(connection_.IsValidStatelessResetToken(kTestStatelessResetToken));
  EXPECT_FALSE(connection_.IsValidStatelessResetToken(kNewStatelessResetToken));
  // PATH_RESPONSE (echoing the challenge payload) completes validation.
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _)).Times(0);
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress,
                                   kServerPreferredAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  ASSERT_FALSE(connection_.HasPendingPathValidation());
  // The connection migrated: SPA path is now the default path, and data is
  // sent there with the new CID.
  EXPECT_TRUE(QuicConnectionPeer::IsDefaultPath(&connection_, kNewSelfAddress,
                                                kServerPreferredAddress));
  ASSERT_FALSE(new_writer.stream_frames().empty());
  EXPECT_EQ(TestConnectionId(17),
            new_writer.last_packet_header().destination_connection_id);
  EXPECT_EQ(kServerPreferredAddress, new_writer.last_write_peer_address());
  // The reset token swapped from the old one to the SPA's.
  EXPECT_FALSE(
      connection_.IsValidStatelessResetToken(kTestStatelessResetToken));
  EXPECT_TRUE(connection_.IsValidStatelessResetToken(kNewStatelessResetToken));
  // The CID used on the abandoned path is retired.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  EXPECT_TRUE(connection_.GetStats().server_preferred_address_validated);
  EXPECT_FALSE(
      connection_.GetStats().failed_to_validate_server_preferred_address);
}
// Server-preferred-address validation variant: the PATH_RESPONSE arrives from
// the *original* peer address (not the preferred address). Validation still
// succeeds, the connection migrates, and packets from the original self/peer
// address pair are still accepted afterwards.
TEST_P(QuicConnectionTest, ClientValidatedServerPreferredAddress2) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  QuicConfig config;
  ServerPreferredAddressInit(config);
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Kick off SPA path validation when the address becomes available.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  ASSERT_FALSE(new_writer.path_challenge_frames().empty());
  QuicPathFrameBuffer payload =
      new_writer.path_challenge_frames().front().data_buffer;
  // Data still flows over the original path during validation.
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  ASSERT_FALSE(writer_->stream_frames().empty());
  EXPECT_EQ(TestConnectionId(),
            writer_->last_packet_header().destination_connection_id);
  EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
  // PATH_RESPONSE delivered from the ORIGINAL peer address still validates.
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  ASSERT_FALSE(connection_.HasPendingPathValidation());
  // Migration happened: data now uses the new writer/CID toward the SPA.
  ASSERT_FALSE(new_writer.stream_frames().empty());
  EXPECT_EQ(TestConnectionId(17),
            new_writer.last_packet_header().destination_connection_id);
  EXPECT_EQ(kServerPreferredAddress, new_writer.last_write_peer_address());
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // A packet on the original self/peer address pair is still processed.
  EXPECT_CALL(visitor_, OnStreamFrame(_)).Times(1);
  frames.clear();
  frames.push_back(QuicFrame(frame1_));
  ProcessFramesPacketWithAddresses(frames, kSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(connection_.GetStats().server_preferred_address_validated);
  EXPECT_FALSE(
      connection_.GetStats().failed_to_validate_server_preferred_address);
}
// Server-preferred-address failure path: the PATH_RESPONSE payload does not
// match the challenge, and all validation retries time out. The alternative
// path is abandoned, traffic stays on the original path, the SPA's CID
// (sequence 1) is retired, and the failure is recorded in stats.
TEST_P(QuicConnectionTest, ClientFailedToValidateServerPreferredAddress) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  QuicConfig config;
  ServerPreferredAddressInit(config);
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  // Start validating the server preferred address on a new path.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.IsValidatingServerPreferredAddress());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, kServerPreferredAddress));
  ASSERT_FALSE(new_writer.path_challenge_frames().empty());
  // A PATH_RESPONSE with the WRONG payload must not complete validation.
  QuicFrames frames;
  frames.push_back(
      QuicFrame(QuicPathResponseFrame(99, {0, 1, 2, 3, 4, 5, 6, 7})));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  ASSERT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, kServerPreferredAddress));
  // Exhaust every retry of the path validator's retry timer.
  for (size_t i = 0; i < QuicPathValidator::kMaxRetryTimes + 1; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
    static_cast<TestAlarmFactory::TestAlarm*>(
        QuicPathValidatorPeer::retry_timer(
            QuicConnectionPeer::path_validator(&connection_)))
        ->Fire();
  }
  // Validation gave up and the alternative path was dropped.
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress, kServerPreferredAddress));
  // Data keeps flowing on the original path with the original CID.
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  ASSERT_FALSE(writer_->stream_frames().empty());
  EXPECT_EQ(TestConnectionId(),
            writer_->last_packet_header().destination_connection_id);
  EXPECT_EQ(kPeerAddress, writer_->last_write_peer_address());
  // The SPA-issued CID (sequence number 1) gets retired.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(1u));
  retire_peer_issued_cid_alarm->Fire();
  // The original reset token remains valid; failure recorded in stats.
  EXPECT_TRUE(connection_.IsValidStatelessResetToken(kTestStatelessResetToken));
  EXPECT_FALSE(connection_.GetStats().server_preferred_address_validated);
  EXPECT_TRUE(
      connection_.GetStats().failed_to_validate_server_preferred_address);
}
// With the kSPA2 connection option, the client "optimistically" duplicates
// data to both the original and the preferred address while validation is in
// progress, then sticks to one path after the handshake completes (here the
// original writer carries the post-handshake PING).
TEST_P(QuicConnectionTest, OptimizedServerPreferredAddress) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Validation starts as soon as the SPA is announced (during config init).
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  QuicConfig config;
  // kSPA2 enables the optimized (duplicate-to-both-paths) SPA mode.
  config.SetClientConnectionOptions(QuicTagVector{kSPA2});
  ServerPreferredAddressInit(config);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  ASSERT_FALSE(new_writer.path_challenge_frames().empty());
  // While validating, stream data is written on BOTH writers.
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_FALSE(writer_->stream_frames().empty());
  EXPECT_FALSE(new_writer.stream_frames().empty());
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  // After handshake completion, duplication stops: the PING goes only to the
  // original writer.
  SendPing();
  EXPECT_FALSE(writer_->ping_frames().empty());
  EXPECT_TRUE(new_writer.ping_frames().empty());
}
// Same kSPA2 duplication setup as OptimizedServerPreferredAddress, but here
// the preferred-address validation is retried to exhaustion and fails; the
// connection then sends only on the original default path.
TEST_P(QuicConnectionTest, OptimizedServerPreferredAddress2) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Start validating the preferred-address path as soon as it is announced.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kSPA2});
  ServerPreferredAddressInit(config);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  ASSERT_FALSE(new_writer.path_challenge_frames().empty());
  // Data is duplicated onto both paths while validation is pending.
  connection_.SendStreamDataWithString(3, "foo", 0, NO_FIN);
  EXPECT_FALSE(writer_->stream_frames().empty());
  EXPECT_FALSE(new_writer.stream_frames().empty());
  // Fire the retry timer past the retry limit so validation fails.
  for (size_t i = 0; i < QuicPathValidator::kMaxRetryTimes + 1; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3 * kInitialRttMs));
    static_cast<TestAlarmFactory::TestAlarm*>(
        QuicPathValidatorPeer::retry_timer(
            QuicConnectionPeer::path_validator(&connection_)))
        ->Fire();
  }
  EXPECT_FALSE(connection_.HasPendingPathValidation());
  // With validation failed, traffic stays on the default writer only.
  SendPing();
  EXPECT_FALSE(writer_->ping_frames().empty());
  EXPECT_TRUE(new_writer.ping_frames().empty());
}
// Verifies that kSPA2 duplication onto the unvalidated preferred-address path
// is capped at kMaxDuplicatedPacketsSentToServerPreferredAddress packets;
// packets beyond the cap go only to the default path.
TEST_P(QuicConnectionTest, MaxDuplicatedPacketsSentToServerPreferredAddress) {
  if (!connection_.version().HasIetfQuicFrames()) {
    return;
  }
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Begin validating the preferred-address path when it is announced.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kSPA2});
  ServerPreferredAddressInit(config);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  ASSERT_FALSE(new_writer.path_challenge_frames().empty());
  // Track write attempts on each writer so every send can be checked to have
  // produced exactly one packet per path.
  size_t write_limit = writer_->packets_write_attempts();
  size_t new_write_limit = new_writer.packets_write_attempts();
  // Up to the cap, each stream send is written on both paths.
  for (size_t i = 0; i < kMaxDuplicatedPacketsSentToServerPreferredAddress;
       ++i) {
    connection_.SendStreamDataWithString(3, "foo", i * 3, NO_FIN);
    ASSERT_EQ(write_limit + 1, writer_->packets_write_attempts());
    ASSERT_EQ(new_write_limit + 1, new_writer.packets_write_attempts());
    ++write_limit;
    ++new_write_limit;
    EXPECT_FALSE(writer_->stream_frames().empty());
    EXPECT_FALSE(new_writer.stream_frames().empty());
  }
  // Past the cap, only the default writer sees the packet.
  SendPing();
  ASSERT_EQ(write_limit + 1, writer_->packets_write_attempts());
  ASSERT_EQ(new_write_limit, new_writer.packets_write_attempts());
  EXPECT_FALSE(writer_->ping_frames().empty());
  EXPECT_TRUE(new_writer.ping_frames().empty());
}
// After the client migrates to the server's preferred address, a multi-port
// (kMPQC) alternative path can still be created from a NEW_CONNECTION_ID
// frame and validated independently.
TEST_P(QuicConnectionTest, MultiPortCreationAfterServerMigration) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  QuicConfig config;
  config.SetClientConnectionOptions(QuicTagVector{kMPQC});
  ServerPreferredAddressInit(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  QuicConnectionId cid_for_preferred_address = TestConnectionId(17);
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), 23456);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Validate the preferred-address path as soon as it is announced.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.ValidatePath(
            std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress, kServerPreferredAddress, &new_writer),
            std::make_unique<ServerPreferredAddressTestResultDelegate>(
                &connection_),
            PathValidationReason::kReasonUnknown);
      }));
  // Capture the PATH_CHALLENGE payload so the matching PATH_RESPONSE can be
  // injected below.
  QuicPathFrameBuffer payload;
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(testing::AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
        payload = new_writer.path_challenge_frames().front().data_buffer;
        EXPECT_EQ(kServerPreferredAddress,
                  new_writer.last_write_peer_address());
      }));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  EXPECT_TRUE(connection_.IsValidatingServerPreferredAddress());
  // Deliver the PATH_RESPONSE: validation succeeds and the connection migrates
  // to the preferred address, switching to the server-issued CID.
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_FALSE(connection_.IsValidatingServerPreferredAddress());
  EXPECT_EQ(kServerPreferredAddress, connection_.effective_peer_address());
  EXPECT_EQ(kNewSelfAddress, connection_.self_address());
  EXPECT_EQ(connection_.connection_id(), cid_for_preferred_address);
  // The CID used before migration (sequence 0) gets retired.
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // A fresh NEW_CONNECTION_ID frame enables creation of a multi-port path
  // from a second self address.
  const QuicSocketAddress kNewSelfAddress2(kNewSelfAddress.host(),
                                           kNewSelfAddress.port() + 1);
  EXPECT_NE(kNewSelfAddress2, kNewSelfAddress);
  TestPacketWriter new_writer2(version(), &clock_, Perspective::IS_CLIENT);
  QuicNewConnectionIdFrame frame;
  frame.connection_id = TestConnectionId(789);
  ASSERT_NE(frame.connection_id, connection_.connection_id());
  frame.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(frame.connection_id);
  frame.retire_prior_to = 0u;
  frame.sequence_number = 2u;
  EXPECT_CALL(visitor_, CreateContextForMultiPortPath)
      .WillOnce(testing::WithArgs<0>([&](auto&& observer) {
        observer->OnMultiPortPathContextAvailable(
            std::move(std::make_unique<TestQuicPathValidationContext>(
                kNewSelfAddress2, connection_.peer_address(), &new_writer2)));
      }));
  connection_.OnNewConnectionIdFrame(frame);
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  // NOTE(review): the multi-port probe is asserted on `new_writer` (the
  // post-migration default-path writer) even though the context above was
  // built with `new_writer2` — confirm which writer is expected to carry the
  // multi-port PATH_CHALLENGE.
  EXPECT_EQ(1u, new_writer.path_challenge_frames().size());
  payload = new_writer.path_challenge_frames().front().data_buffer;
  EXPECT_EQ(kServerPreferredAddress, new_writer.last_write_peer_address());
  EXPECT_EQ(kNewSelfAddress2.host(), new_writer.last_write_source_address());
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress2, connection_.peer_address()));
  auto* alt_path = QuicConnectionPeer::GetAlternativePath(&connection_);
  EXPECT_FALSE(alt_path->validated);
  // Responding to the multi-port challenge validates the alternative path.
  QuicFrames frames2;
  frames2.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  ProcessFramesPacketWithAddresses(frames2, kNewSelfAddress2, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(alt_path->validated);
}
// After the client has migrated to the server's preferred address, an incoming
// PATH_CHALLENGE must be answered with a PATH_RESPONSE sent on the migrated
// (preferred-address) path.
TEST_P(QuicConnectionTest, ClientReceivePathChallengeAfterServerMigration) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  QuicConfig config;
  ServerPreferredAddressInit(config);
  QuicConnectionId cid_for_preferred_address = TestConnectionId(17);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Here the client only records the preferred address as a known server
  // address instead of probing it.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.AddKnownServerAddress(kServerPreferredAddress);
      }));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), kTestPort + 1);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Force migration directly by declaring the preferred address validated
  // (second arg false: not validated via a probe).
  auto context = std::make_unique<TestQuicPathValidationContext>(
      kNewSelfAddress, kServerPreferredAddress, &new_writer);
  connection_.OnServerPreferredAddressValidated(*context, false);
  EXPECT_EQ(kServerPreferredAddress, connection_.effective_peer_address());
  EXPECT_EQ(kServerPreferredAddress, connection_.peer_address());
  EXPECT_EQ(kNewSelfAddress, connection_.self_address());
  EXPECT_EQ(connection_.connection_id(), cid_for_preferred_address);
  // Migration installed a new send algorithm; re-mock it so expectations
  // below can be set on the active instance.
  EXPECT_NE(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  send_algorithm_ = new StrictMock<MockSendAlgorithm>();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
      .WillRepeatedly(Return(kDefaultTCPMSS));
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
      .Times(AnyNumber())
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_)).Times(AnyNumber());
  connection_.SetSendAlgorithm(send_algorithm_);
  // Retire the CID that was used on the pre-migration path.
  QuicConnectionPeer::RetirePeerIssuedConnectionIdsNoLongerOnPath(&connection_);
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // Receive a PATH_CHALLENGE on the migrated path; the PATH_RESPONSE must
  // echo the payload and go out via the preferred-address writer.
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  QuicFrames frames1;
  frames1.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1))
      .WillOnce(Invoke([&]() {
        ASSERT_FALSE(new_writer.path_response_frames().empty());
        EXPECT_EQ(
            0, memcmp(&path_challenge_payload,
                      &(new_writer.path_response_frames().front().data_buffer),
                      sizeof(path_challenge_payload)));
        EXPECT_EQ(kServerPreferredAddress,
                  new_writer.last_write_peer_address());
        EXPECT_EQ(kNewSelfAddress.host(),
                  new_writer.last_write_source_address());
      }));
  ProcessFramesPacketWithAddresses(frames1, kNewSelfAddress, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
}
// After migrating to the server's preferred address, the client can probe a
// further self address; the probe targets the preferred address and a
// PATH_CHALLENGE received there is answered on the probing path.
TEST_P(QuicConnectionTest, ClientProbesAfterServerMigration) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  QuicConfig config;
  ServerPreferredAddressInit(config);
  QuicConnectionId cid_for_preferred_address = TestConnectionId(17);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Record (rather than probe) the announced preferred address.
  EXPECT_CALL(visitor_,
              OnServerPreferredAddressAvailable(kServerPreferredAddress))
      .WillOnce(Invoke([&]() {
        connection_.AddKnownServerAddress(kServerPreferredAddress);
      }));
  EXPECT_CALL(visitor_, GetHandshakeState())
      .WillRepeatedly(Return(HANDSHAKE_CONFIRMED));
  connection_.OnHandshakeComplete();
  const QuicSocketAddress kNewSelfAddress =
      QuicSocketAddress(QuicIpAddress::Loopback6(), kTestPort + 1);
  TestPacketWriter new_writer(version(), &clock_, Perspective::IS_CLIENT);
  // Migrate directly by declaring the preferred address validated.
  auto context = std::make_unique<TestQuicPathValidationContext>(
      kNewSelfAddress, kServerPreferredAddress, &new_writer);
  connection_.OnServerPreferredAddressValidated(*context, false);
  EXPECT_EQ(kServerPreferredAddress, connection_.effective_peer_address());
  EXPECT_EQ(kServerPreferredAddress, connection_.peer_address());
  EXPECT_EQ(kNewSelfAddress, connection_.self_address());
  EXPECT_EQ(connection_.connection_id(), cid_for_preferred_address);
  // Migration replaced the send algorithm; install a fresh mock.
  EXPECT_NE(connection_.sent_packet_manager().GetSendAlgorithm(),
            send_algorithm_);
  send_algorithm_ = new StrictMock<MockSendAlgorithm>();
  EXPECT_CALL(*send_algorithm_, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm_, GetCongestionWindow())
      .WillRepeatedly(Return(kDefaultTCPMSS));
  EXPECT_CALL(*send_algorithm_, OnApplicationLimited(_)).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, BandwidthEstimate())
      .Times(AnyNumber())
      .WillRepeatedly(Return(QuicBandwidth::Zero()));
  EXPECT_CALL(*send_algorithm_, InSlowStart()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, InRecovery()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm_, PopulateConnectionStats(_)).Times(AnyNumber());
  connection_.SetSendAlgorithm(send_algorithm_);
  // Receiving data on the migrated path keeps the preferred peer address.
  EXPECT_CALL(visitor_, OnCryptoFrame(_));
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kNewSelfAddress,
                                  kPeerAddress, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kServerPreferredAddress, connection_.effective_peer_address());
  EXPECT_EQ(kServerPreferredAddress, connection_.peer_address());
  // Retire the pre-migration CID (sequence 0).
  auto* retire_peer_issued_cid_alarm =
      connection_.GetRetirePeerIssuedConnectionIdAlarm();
  ASSERT_TRUE(retire_peer_issued_cid_alarm->IsSet());
  EXPECT_CALL(visitor_, SendRetireConnectionId(0u));
  retire_peer_issued_cid_alarm->Fire();
  // Provide a fresh CID so a new path can be probed.
  QuicNewConnectionIdFrame new_cid_frame1;
  new_cid_frame1.connection_id = TestConnectionId(456);
  ASSERT_NE(new_cid_frame1.connection_id, connection_.connection_id());
  new_cid_frame1.stateless_reset_token =
      QuicUtils::GenerateStatelessResetToken(new_cid_frame1.connection_id);
  new_cid_frame1.retire_prior_to = 0u;
  new_cid_frame1.sequence_number = 2u;
  connection_.OnNewConnectionIdFrame(new_cid_frame1);
  const QuicSocketAddress kNewSelfAddress2 =
      QuicSocketAddress(QuicIpAddress::Loopback4(), kTestPort + 2);
  TestPacketWriter new_writer2(version(), &clock_, Perspective::IS_CLIENT);
  bool success;
  QuicPathFrameBuffer payload;
  // The probe's PATH_CHALLENGE goes out on new_writer2 toward the preferred
  // address; capture its payload for the response below.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(testing::AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, new_writer2.path_challenge_frames().size());
        payload = new_writer2.path_challenge_frames().front().data_buffer;
        EXPECT_EQ(kServerPreferredAddress,
                  new_writer2.last_write_peer_address());
        EXPECT_EQ(kNewSelfAddress2.host(),
                  new_writer2.last_write_source_address());
      }));
  connection_.ValidatePath(
      std::make_unique<TestQuicPathValidationContext>(
          kNewSelfAddress2, connection_.peer_address(), &new_writer2),
      std::make_unique<TestValidationResultDelegate>(
          &connection_, kNewSelfAddress2, connection_.peer_address(), &success),
      PathValidationReason::kServerPreferredAddressMigration);
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(
      &connection_, kNewSelfAddress2, kServerPreferredAddress));
  // Deliver the peer's PATH_CHALLENGE together with its PATH_RESPONSE; the
  // client's response must be echoed on the probing path.
  QuicPathFrameBuffer path_challenge_payload{0, 1, 2, 3, 4, 5, 6, 7};
  QuicFrames frames;
  frames.push_back(
      QuicFrame(QuicPathChallengeFrame(0, path_challenge_payload)));
  frames.push_back(QuicFrame(QuicPathResponseFrame(99, payload)));
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1))
      .WillOnce(Invoke([&]() {
        EXPECT_FALSE(new_writer2.path_response_frames().empty());
        EXPECT_EQ(
            0, memcmp(&path_challenge_payload,
                      &(new_writer2.path_response_frames().front().data_buffer),
                      sizeof(path_challenge_payload)));
        EXPECT_EQ(kServerPreferredAddress,
                  new_writer2.last_write_peer_address());
        EXPECT_EQ(kNewSelfAddress2.host(),
                  new_writer2.last_write_source_address());
      }));
  ProcessFramesPacketWithAddresses(frames, kNewSelfAddress2, kPeerAddress,
                                   ENCRYPTION_FORWARD_SECURE);
  EXPECT_TRUE(success);
}
// Receiving one ECT(0)-marked packet must increment the ECN counters reported
// in the outgoing ACK frame and the connection stats exactly once.
TEST_P(QuicConnectionTest, EcnMarksCorrectlyRecorded) {
  set_perspective(Perspective::IS_SERVER);
  QuicFrames frames;
  frames.push_back(QuicFrame(QuicPingFrame()));
  frames.push_back(QuicFrame(QuicPaddingFrame(7)));
  // Before any ECN-marked packet arrives, the ACK frame carries no counters.
  QuicAckFrame ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(APPLICATION_DATA)
          : connection_.received_packet_manager().ack_frame();
  EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  // Receive one packet marked ECT(0).
  ProcessFramesPacketAtLevelWithEcn(1, frames, ENCRYPTION_FORWARD_SECURE,
                                    ECN_ECT0);
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(APPLICATION_DATA)
          : connection_.received_packet_manager().ack_frame();
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // For IETF versions, send pings so the ACK with counters actually goes out.
  if (connection_.version().HasIetfQuicFrames()) {
    QuicConnectionPeer::SendPing(&connection_);
    QuicConnectionPeer::SendPing(&connection_);
  }
  QuicConnectionStats stats = connection_.GetStats();
  ASSERT_TRUE(ack_frame.ecn_counters.has_value());
  EXPECT_EQ(ack_frame.ecn_counters->ect0, 1);
  EXPECT_EQ(stats.num_ack_frames_sent_with_ecn,
            connection_.version().HasIetfQuicFrames() ? 1 : 0);
  EXPECT_EQ(stats.num_ecn_marks_received.ect0, 1);
  EXPECT_EQ(stats.num_ecn_marks_received.ect1, 0);
  EXPECT_EQ(stats.num_ecn_marks_received.ce, 0);
}
// A coalesced UDP datagram carrying packets in two packet-number spaces must
// attribute its single ECT(0) mark once per contained packet: one count in
// the handshake space and one in the application space (or both in the single
// shared space for pre-multi-space versions).
TEST_P(QuicConnectionTest, EcnMarksCoalescedPacket) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  // Build one HANDSHAKE and one 1-RTT crypto packet to coalesce.
  QuicCryptoFrame crypto_frame1{ENCRYPTION_HANDSHAKE, 0, "foo"};
  QuicFrames frames1;
  frames1.push_back(QuicFrame(&crypto_frame1));
  QuicFrames frames2;
  QuicCryptoFrame crypto_frame2{ENCRYPTION_FORWARD_SECURE, 0, "bar"};
  frames2.push_back(QuicFrame(&crypto_frame2));
  std::vector<PacketInfo> packets = {{2, frames1, ENCRYPTION_HANDSHAKE},
                                     {3, frames2, ENCRYPTION_FORWARD_SECURE}};
  // No ECN counters exist in either space before the packet arrives.
  QuicAckFrame ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(APPLICATION_DATA)
          : connection_.received_packet_manager().ack_frame();
  EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(HANDSHAKE_DATA)
          : connection_.received_packet_manager().ack_frame();
  EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  connection_.SetEncrypter(
      ENCRYPTION_HANDSHAKE,
      std::make_unique<TaggingEncrypter>(ENCRYPTION_HANDSHAKE));
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(2);
  // Deliver the coalesced datagram with an ECT(0) mark.
  ProcessCoalescedPacket(packets, ECN_ECT0);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // For IETF versions, send a ping in each space to flush ECN-bearing ACKs.
  if (connection_.version().HasIetfQuicFrames()) {
    EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_HANDSHAKE);
    QuicConnectionPeer::SendPing(&connection_);
    connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    QuicConnectionPeer::SendPing(&connection_);
  }
  QuicConnectionStats stats = connection_.GetStats();
  // Handshake space: one count with multiple spaces, two in the shared space.
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(HANDSHAKE_DATA)
          : connection_.received_packet_manager().ack_frame();
  ASSERT_TRUE(ack_frame.ecn_counters.has_value());
  EXPECT_EQ(ack_frame.ecn_counters->ect0,
            connection_.SupportsMultiplePacketNumberSpaces() ? 1 : 2);
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    // Application space gets its own single count.
    ack_frame = connection_.SupportsMultiplePacketNumberSpaces()
                    ? connection_.received_packet_manager().GetAckFrame(
                          APPLICATION_DATA)
                    : connection_.received_packet_manager().ack_frame();
    EXPECT_TRUE(ack_frame.ecn_counters.has_value());
    EXPECT_EQ(ack_frame.ecn_counters->ect0, 1);
  }
  EXPECT_EQ(stats.num_ecn_marks_received.ect0, 2);
  EXPECT_EQ(stats.num_ack_frames_sent_with_ecn,
            connection_.version().HasIetfQuicFrames() ? 2 : 0);
  EXPECT_EQ(stats.num_ecn_marks_received.ect1, 0);
  EXPECT_EQ(stats.num_ecn_marks_received.ce, 0);
}
// When part of a coalesced datagram is temporarily undecryptable, its ECN mark
// must be recorded only when the buffered packet is eventually decrypted —
// not while it sits in the undecryptable queue.
TEST_P(QuicConnectionTest, EcnMarksUndecryptableCoalescedPacket) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  // Allow the connection to buffer undecryptable packets.
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  config.set_max_undecryptable_packets(100);
  connection_.SetFromConfig(config);
  // Build one HANDSHAKE and one 1-RTT crypto packet.
  QuicCryptoFrame crypto_frame1{ENCRYPTION_HANDSHAKE, 0, "foo"};
  QuicFrames frames1;
  frames1.push_back(QuicFrame(&crypto_frame1));
  QuicFrames frames2;
  QuicCryptoFrame crypto_frame2{ENCRYPTION_FORWARD_SECURE, 0, "bar"};
  frames2.push_back(QuicFrame(&crypto_frame2));
  std::vector<PacketInfo> packets = {{2, frames1, ENCRYPTION_HANDSHAKE},
                                     {3, frames2, ENCRYPTION_FORWARD_SECURE}};
  // Manually encrypt and concatenate both packets into one UDP payload.
  char coalesced_buffer[kMaxOutgoingPacketSize];
  size_t coalesced_size = 0;
  for (const auto& packet : packets) {
    QuicPacketHeader header =
        ConstructPacketHeader(packet.packet_number, packet.level);
    peer_creator_.set_encryption_level(packet.level);
    peer_framer_.SetEncrypter(packet.level,
                              std::make_unique<TaggingEncrypter>(packet.level));
    if (packet.level == ENCRYPTION_HANDSHAKE) {
      // Only install handshake keys locally; 1-RTT keys arrive later.
      connection_.SetEncrypter(
          packet.level, std::make_unique<TaggingEncrypter>(packet.level));
      connection_.SetDefaultEncryptionLevel(packet.level);
      SetDecrypter(packet.level,
                   std::make_unique<StrictTaggingDecrypter>(packet.level));
    }
    std::unique_ptr<QuicPacket> constructed_packet(
        ConstructPacket(header, packet.frames));
    char buffer[kMaxOutgoingPacketSize];
    size_t encrypted_length = peer_framer_.EncryptPayload(
        packet.level, QuicPacketNumber(packet.packet_number),
        *constructed_packet, buffer, kMaxOutgoingPacketSize);
    QUICHE_DCHECK_LE(coalesced_size + encrypted_length, kMaxOutgoingPacketSize);
    memcpy(coalesced_buffer + coalesced_size, buffer, encrypted_length);
    coalesced_size += encrypted_length;
  }
  // No ECN counters yet in either space.
  QuicAckFrame ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(APPLICATION_DATA)
          : connection_.received_packet_manager().ack_frame();
  EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(HANDSHAKE_DATA)
          : connection_.received_packet_manager().ack_frame();
  EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  // Make the 1-RTT half of the datagram undecryptable.
  connection_.RemoveDecrypter(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  EXPECT_CALL(visitor_, OnHandshakePacketSent()).Times(1);
  connection_.ProcessUdpPacket(
      kSelfAddress, kPeerAddress,
      QuicReceivedPacket(coalesced_buffer, coalesced_size, clock_.Now(), false,
                         0, true, nullptr, 0, true, ECN_ECT0));
  if (connection_.GetSendAlarm()->IsSet()) {
    connection_.GetSendAlarm()->Fire();
  }
  // Only the decrypted handshake packet's ECT(0) mark is counted so far.
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(HANDSHAKE_DATA)
          : connection_.received_packet_manager().ack_frame();
  ASSERT_TRUE(ack_frame.ecn_counters.has_value());
  EXPECT_EQ(ack_frame.ecn_counters->ect0, 1);
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    ack_frame = connection_.SupportsMultiplePacketNumberSpaces()
                    ? connection_.received_packet_manager().GetAckFrame(
                          APPLICATION_DATA)
                    : connection_.received_packet_manager().ack_frame();
    EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  }
  // A CE-marked handshake ping is counted immediately in the handshake space.
  ProcessFramePacketAtLevelWithEcn(4, QuicFrame(QuicPingFrame()),
                                   ENCRYPTION_HANDSHAKE, ECN_CE);
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(HANDSHAKE_DATA)
          : connection_.received_packet_manager().ack_frame();
  ASSERT_TRUE(ack_frame.ecn_counters.has_value());
  EXPECT_EQ(ack_frame.ecn_counters->ect0, 1);
  EXPECT_EQ(ack_frame.ecn_counters->ce, 1);
  if (connection_.SupportsMultiplePacketNumberSpaces()) {
    ack_frame = connection_.SupportsMultiplePacketNumberSpaces()
                    ? connection_.received_packet_manager().GetAckFrame(
                          APPLICATION_DATA)
                    : connection_.received_packet_manager().ack_frame();
    EXPECT_FALSE(ack_frame.ecn_counters.has_value());
  }
  // Install 1-RTT keys and drain the undecryptable queue: the buffered
  // packet's ECT(0) mark is now recorded in the application space.
  EXPECT_CALL(visitor_, OnCryptoFrame(_)).Times(1);
  SetDecrypter(
      ENCRYPTION_FORWARD_SECURE,
      std::make_unique<StrictTaggingDecrypter>(ENCRYPTION_FORWARD_SECURE));
  connection_.GetProcessUndecryptablePacketsAlarm()->Fire();
  ack_frame =
      connection_.SupportsMultiplePacketNumberSpaces()
          ? connection_.received_packet_manager().GetAckFrame(APPLICATION_DATA)
          : connection_.received_packet_manager().ack_frame();
  ASSERT_TRUE(ack_frame.ecn_counters.has_value());
  EXPECT_EQ(ack_frame.ecn_counters->ect0,
            connection_.SupportsMultiplePacketNumberSpaces() ? 1 : 2);
  QuicConnectionStats stats = connection_.GetStats();
  EXPECT_EQ(stats.num_ecn_marks_received.ect0, 2);
  EXPECT_EQ(stats.num_ecn_marks_received.ect1, 0);
  EXPECT_EQ(stats.num_ecn_marks_received.ce, 1);
}
// Sanity check (via the peer helper) that a default-constructed
// ReceivedPacketInfo has the expected default field values.
TEST_P(QuicConnectionTest, ReceivedPacketInfoDefaults) {
  EXPECT_TRUE(QuicConnectionPeer::TestLastReceivedPacketInfoDefaults());
}
// Server side: when the client starts sending to the preferred address (using
// the CID issued for it), the server keeps its original self address for
// writes and does not treat the switch as a peer migration.
TEST_P(QuicConnectionTest, DetectMigrationToPreferredAddress) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  ServerHandlePreferredAddressInit();
  // Issue the CID the client must use on the preferred address.
  QuicConnectionId server_issued_cid_for_preferred_address =
      TestConnectionId(17);
  EXPECT_CALL(connection_id_generator_,
              GenerateNextConnectionId(connection_id_))
      .WillOnce(Return(server_issued_cid_for_preferred_address));
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_)).WillOnce(Return(true));
  std::optional<QuicNewConnectionIdFrame> frame =
      connection_.MaybeIssueNewConnectionIdForPreferredAddress();
  ASSERT_TRUE(frame.has_value());
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(),
            connection_.client_connection_id());
  ASSERT_EQ(packet_creator->GetSourceConnectionId(), connection_id_);
  // The peer now addresses packets to the preferred-address CID.
  peer_creator_.SetServerConnectionId(server_issued_cid_for_preferred_address);
  EXPECT_CALL(visitor_, OnCryptoFrame(_));
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kServerPreferredAddress,
                                  kPeerAddress, ENCRYPTION_FORWARD_SECURE);
  // Peer/self addresses are unchanged; replies still use the original source.
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kSelfAddress.host(), writer_->last_write_source_address());
  EXPECT_EQ(kSelfAddress, connection_.self_address());
  // The client retires the original CID (sequence 0); the server issues a
  // replacement.
  QuicRetireConnectionIdFrame retire_cid_frame;
  retire_cid_frame.sequence_number = 0u;
  EXPECT_CALL(connection_id_generator_,
              GenerateNextConnectionId(server_issued_cid_for_preferred_address))
      .WillOnce(Return(TestConnectionId(456)));
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_)).WillOnce(Return(true));
  EXPECT_CALL(visitor_, SendNewConnectionId(_));
  EXPECT_TRUE(connection_.OnRetireConnectionIdFrame(retire_cid_frame));
  // Further packets at the preferred address still leave addresses unchanged.
  EXPECT_CALL(visitor_, OnCryptoFrame(_));
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kServerPreferredAddress,
                                  kPeerAddress, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kSelfAddress.host(), writer_->last_write_source_address());
  EXPECT_EQ(kSelfAddress, connection_.self_address());
}
// Server side: the client simultaneously moves to the server's preferred
// address AND changes its own address, first probing, then sending data.
// The server reverse-validates the new client address while serving from the
// preferred address.
// NOTE(review): "Simutanuous" is a typo for "Simultaneous" in the registered
// test name; left unchanged so test filters keep matching.
TEST_P(QuicConnectionTest,
       DetectSimutanuousServerAndClientAddressChangeWithProbe) {
  if (!GetParam().version.HasIetfQuicFrames()) {
    return;
  }
  ServerHandlePreferredAddressInit();
  // Issue the CID tied to the preferred address.
  QuicConnectionId server_issued_cid_for_preferred_address =
      TestConnectionId(17);
  EXPECT_CALL(connection_id_generator_,
              GenerateNextConnectionId(connection_id_))
      .WillOnce(Return(server_issued_cid_for_preferred_address));
  EXPECT_CALL(visitor_, MaybeReserveConnectionId(_)).WillOnce(Return(true));
  std::optional<QuicNewConnectionIdFrame> frame =
      connection_.MaybeIssueNewConnectionIdForPreferredAddress();
  ASSERT_TRUE(frame.has_value());
  auto* packet_creator = QuicConnectionPeer::GetPacketCreator(&connection_);
  ASSERT_EQ(packet_creator->GetSourceConnectionId(), connection_id_);
  ASSERT_EQ(packet_creator->GetDestinationConnectionId(),
            connection_.client_connection_id());
  peer_creator_.SetServerConnectionId(server_issued_cid_for_preferred_address);
  // A probing packet arrives at the preferred address from a brand-new
  // client address (IPv6 -> IPv4 change).
  const QuicSocketAddress kNewPeerAddress(QuicIpAddress::Loopback4(),
                                          34567);
  std::unique_ptr<SerializedPacket> probing_packet = ConstructProbingPacket();
  std::unique_ptr<QuicReceivedPacket> received(ConstructReceivedPacket(
      QuicEncryptedPacket(probing_packet->encrypted_buffer,
                          probing_packet->encrypted_length),
      clock_.Now()));
  uint64_t num_probing_received =
      connection_.GetStats().num_connectivity_probing_received;
  // The reply carries both a PATH_RESPONSE (to the probe) and a
  // PATH_CHALLENGE (reverse validation), sourced from the preferred address.
  EXPECT_CALL(*send_algorithm_, OnPacketSent(_, _, _, _, _))
      .Times(AtLeast(1u))
      .WillOnce(Invoke([&]() {
        EXPECT_EQ(1u, writer_->path_response_frames().size());
        EXPECT_EQ(1u, writer_->path_challenge_frames().size());
        EXPECT_EQ(kServerPreferredAddress.host(),
                  writer_->last_write_source_address());
        EXPECT_EQ(kNewPeerAddress, writer_->last_write_peer_address());
      }))
      .WillRepeatedly(DoDefault());
  ProcessReceivedPacket(kServerPreferredAddress, kNewPeerAddress, *received);
  EXPECT_EQ(num_probing_received + 1,
            connection_.GetStats().num_connectivity_probing_received);
  // The probe only creates an alternative path; the default path is intact.
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(&connection_, kSelfAddress,
                                                    kNewPeerAddress));
  EXPECT_LT(0u, QuicConnectionPeer::BytesSentOnAlternativePath(&connection_));
  EXPECT_EQ(received->length(),
            QuicConnectionPeer::BytesReceivedOnAlternativePath(&connection_));
  EXPECT_EQ(kPeerAddress, connection_.peer_address());
  EXPECT_EQ(kSelfAddress, connection_.self_address());
  // Non-probing data from the new client address triggers the migration.
  EXPECT_CALL(visitor_, OnConnectionMigration(IPV6_TO_IPV4_CHANGE));
  EXPECT_CALL(visitor_, OnCryptoFrame(_));
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kServerPreferredAddress,
                                  kNewPeerAddress, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kSelfAddress.host(), writer_->last_write_source_address());
  EXPECT_EQ(kSelfAddress, connection_.self_address());
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  // The new default path still awaits reverse validation; the old path is
  // kept as the alternative. The preferred-address CID is now in use.
  EXPECT_TRUE(connection_.HasPendingPathValidation());
  EXPECT_FALSE(QuicConnectionPeer::GetDefaultPath(&connection_)->validated);
  EXPECT_TRUE(QuicConnectionPeer::IsAlternativePath(&connection_, kSelfAddress,
                                                    kPeerAddress));
  EXPECT_EQ(packet_creator->GetSourceConnectionId(),
            server_issued_cid_for_preferred_address);
  // Subsequent traffic is written from the preferred address.
  EXPECT_CALL(visitor_, OnCryptoFrame(_));
  ProcessFramePacketWithAddresses(MakeCryptoFrame(), kServerPreferredAddress,
                                  kNewPeerAddress, ENCRYPTION_FORWARD_SECURE);
  EXPECT_EQ(kNewPeerAddress, connection_.peer_address());
  EXPECT_EQ(kServerPreferredAddress.host(),
            writer_->last_write_source_address());
  EXPECT_EQ(kSelfAddress, connection_.self_address());
}
// If the congestion controller declines ECT(0)/ECT(1) (EnableECT* returns
// false), set_ecn_codepoint() must fail for every marked codepoint, and all
// outgoing packets stay unmarked. ECN_NOT_ECT itself is always accepted.
TEST_P(QuicConnectionTest, EcnCodepointsRejected) {
  SetQuicRestartFlag(quic_support_ect1, true);
  for (QuicEcnCodepoint codepoint : {ECN_NOT_ECT, ECN_ECT0, ECN_ECT1, ECN_CE}) {
    // The controller refuses to enable the corresponding ECT codepoint.
    switch (codepoint) {
      case ECN_ECT0:
        EXPECT_CALL(*send_algorithm_, EnableECT0()).WillOnce(Return(false));
        break;
      case ECN_ECT1:
        EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(false));
        break;
      default:
        break;
    }
    const bool expect_accepted = (codepoint == ECN_NOT_ECT);
    EXPECT_EQ(expect_accepted, connection_.set_ecn_codepoint(codepoint));
    // Regardless of the request, the effective codepoint stays ECN_NOT_ECT.
    EXPECT_EQ(connection_.ecn_codepoint(), ECN_NOT_ECT);
    EXPECT_CALL(connection_, OnSerializedPacket(_));
    SendPing();
    EXPECT_EQ(writer_->last_ecn_sent(), ECN_NOT_ECT);
  }
}
// If the congestion controller accepts ECT(0)/ECT(1), set_ecn_codepoint()
// succeeds and packets are sent with that mark. Requesting CE is rejected and
// leaves the previously set codepoint (ECT(1) from the prior iteration) in
// effect.
TEST_P(QuicConnectionTest, EcnCodepointsAccepted) {
  SetQuicRestartFlag(quic_support_ect1, true);
  for (QuicEcnCodepoint ecn : {ECN_NOT_ECT, ECN_ECT0, ECN_ECT1, ECN_CE}) {
    // The controller agrees to enable the corresponding ECT codepoint.
    if (ecn == ECN_ECT0) {
      EXPECT_CALL(*send_algorithm_, EnableECT0()).WillOnce(Return(true));
    } else if (ecn == ECN_ECT1) {
      EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
    }
    // CE can never be set directly; everything else succeeds.
    if (ecn == ECN_CE) {
      EXPECT_FALSE(connection_.set_ecn_codepoint(ecn));
    } else {
      EXPECT_TRUE(connection_.set_ecn_codepoint(ecn));
    }
    EXPECT_CALL(connection_, OnSerializedPacket(_));
    SendPing();
    // On the CE iteration the codepoint remains ECT(1) from the previous pass.
    QuicEcnCodepoint expected_codepoint = ecn;
    if (ecn == ECN_CE) {
      expected_codepoint = ECN_ECT1;
    }
    EXPECT_EQ(connection_.ecn_codepoint(), expected_codepoint);
    EXPECT_EQ(writer_->last_ecn_sent(), expected_codepoint);
  }
}
// With the quic_support_ect1 restart flag disabled, every requested codepoint
// is rejected and packets continue to be sent without ECN marks.
TEST_P(QuicConnectionTest, EcnCodepointsRejectedIfFlagIsFalse) {
  SetQuicRestartFlag(quic_support_ect1, false);
  for (QuicEcnCodepoint requested : {ECN_NOT_ECT, ECN_ECT0, ECN_ECT1, ECN_CE}) {
    EXPECT_FALSE(connection_.set_ecn_codepoint(requested));
    EXPECT_CALL(connection_, OnSerializedPacket(_));
    SendPing();
    // Both the connection state and the wire stay unmarked.
    EXPECT_EQ(ECN_NOT_ECT, connection_.ecn_codepoint());
    EXPECT_EQ(ECN_NOT_ECT, writer_->last_ecn_sent());
  }
}
// With codepoint validation disabled via the test peer, every codepoint —
// including CE — is accepted verbatim and shows up on sent packets.
TEST_P(QuicConnectionTest, EcnValidationDisabled) {
  SetQuicRestartFlag(quic_support_ect1, true);
  QuicConnectionPeer::DisableEcnCodepointValidation(&connection_);
  for (QuicEcnCodepoint requested : {ECN_NOT_ECT, ECN_ECT0, ECN_ECT1, ECN_CE}) {
    EXPECT_TRUE(connection_.set_ecn_codepoint(requested));
    EXPECT_CALL(connection_, OnSerializedPacket(_));
    SendPing();
    // The requested codepoint is applied unchanged.
    EXPECT_EQ(requested, connection_.ecn_codepoint());
    EXPECT_EQ(requested, writer_->last_ecn_sent());
  }
}
// With no ECN-marked packet ever acked, the first retransmission timeout sends
// unmarked while keeping the codepoint; the second timeout permanently clears
// the codepoint back to ECN_NOT_ECT.
TEST_P(QuicConnectionTest, RtoDisablesEcnMarking) {
  SetQuicRestartFlag(quic_support_ect1, true);
  EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT1));
  QuicPacketCreatorPeer::SetPacketNumber(
      QuicConnectionPeer::GetPacketCreator(&connection_), 1);
  SendPing();
  // First RTO: retransmission is sent unmarked, codepoint still ECT(1).
  connection_.OnRetransmissionAlarm();
  EXPECT_EQ(writer_->last_ecn_sent(), ECN_NOT_ECT);
  EXPECT_EQ(connection_.ecn_codepoint(), ECN_ECT1);
  // Second RTO: ECN marking is abandoned entirely.
  connection_.OnRetransmissionAlarm();
  EXPECT_EQ(writer_->last_ecn_sent(), ECN_NOT_ECT);
  EXPECT_EQ(connection_.ecn_codepoint(), ECN_NOT_ECT);
}
// Once an in-flight ECN-marked packet has been acked, retransmission timeouts
// no longer disable ECN marking: packets stay marked ECT(1) across RTOs.
TEST_P(QuicConnectionTest, RtoDoesntDisableEcnMarkingIfEcnAcked) {
  SetQuicRestartFlag(quic_support_ect1, true);
  EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT1));
  QuicPacketCreatorPeer::SetPacketNumber(
      QuicConnectionPeer::GetPacketCreator(&connection_), 1);
  // Record that the peer has acked an ECN-marked packet.
  connection_.OnInFlightEcnPacketAcked();
  SendPing();
  // First RTO: marking persists.
  connection_.OnRetransmissionAlarm();
  QuicEcnCodepoint expected_codepoint = ECN_ECT1;
  EXPECT_EQ(writer_->last_ecn_sent(), expected_codepoint);
  EXPECT_EQ(connection_.ecn_codepoint(), expected_codepoint);
  // Second RTO: still persists.
  connection_.OnRetransmissionAlarm();
  EXPECT_EQ(writer_->last_ecn_sent(), expected_codepoint);
  EXPECT_EQ(connection_.ecn_codepoint(), expected_codepoint);
}
// Invalid ECN feedback from the peer must immediately cancel ECN marking,
// reverting the connection's codepoint to ECN_NOT_ECT.
TEST_P(QuicConnectionTest, InvalidFeedbackCancelsEcn) {
  SetQuicRestartFlag(quic_support_ect1, true);
  EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT1));
  EXPECT_EQ(ECN_ECT1, connection_.ecn_codepoint());
  // Report bogus feedback; marking is abandoned.
  connection_.OnInvalidEcnFeedback();
  EXPECT_EQ(ECN_NOT_ECT, connection_.ecn_codepoint());
}
// The sent-packet manager's per-space ECT(1) counter must track what actually
// went out on the wire: one ECT(1)-marked packet in the INITIAL space.
TEST_P(QuicConnectionTest, StateMatchesSentEcn) {
  SetQuicRestartFlag(quic_support_ect1, true);
  EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT1));
  SendPing();
  QuicSentPacketManager* sent_packet_manager =
      QuicConnectionPeer::GetSentPacketManager(&connection_);
  EXPECT_EQ(writer_->last_ecn_sent(), ECN_ECT1);
  EXPECT_EQ(
      QuicSentPacketManagerPeer::GetEct1Sent(sent_packet_manager, INITIAL_DATA),
      1);
}
// If the ECN codepoint changes between two packets that would otherwise be
// coalesced, the coalesced packet is split into two UDP writes so each carries
// its own mark; the last write uses the newer codepoint.
TEST_P(QuicConnectionTest, CoalescedPacketSplitsEcn) {
  if (!connection_.version().CanSendCoalescedPackets()) {
    return;
  }
  SetQuicRestartFlag(quic_support_ect1, true);
  EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT1));
  // Queue an INITIAL ping while the codepoint is ECT(1).
  char buffer[1000];
  creator_->set_encryption_level(ENCRYPTION_INITIAL);
  QuicFrames frames;
  QuicPingFrame ping;
  frames.emplace_back(QuicFrame(ping));
  SerializedPacket packet1 = QuicPacketCreatorPeer::SerializeAllFrames(
      creator_, frames, buffer, sizeof(buffer));
  connection_.SendOrQueuePacket(std::move(packet1));
  // Switch to ECT(0) before the 1-RTT ping that would be coalesced with it.
  creator_->set_encryption_level(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*send_algorithm_, EnableECT0()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT0));
  EXPECT_EQ(writer_->packets_write_attempts(), 0);
  SendPing();
  // Two separate UDP writes instead of one coalesced write.
  EXPECT_EQ(writer_->packets_write_attempts(), 2);
  EXPECT_EQ(writer_->last_ecn_sent(), ECN_ECT0);
}
// A packet buffered while the writer was blocked keeps the ECN codepoint it
// was created with (ECT(1)), even if the connection's codepoint changes to
// ECT(0) before the buffered packet is finally written.
TEST_P(QuicConnectionTest, BufferedPacketRetainsOldEcn) {
  SetQuicRestartFlag(quic_support_ect1, true);
  EXPECT_CALL(*send_algorithm_, EnableECT1()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT1));
  // Block the writer so the ping below is buffered rather than sent.
  writer_->SetWriteBlocked();
  EXPECT_CALL(visitor_, OnWriteBlocked()).Times(2);
  SendPing();
  // Change the codepoint while the packet sits in the buffer.
  EXPECT_CALL(*send_algorithm_, EnableECT0()).WillOnce(Return(true));
  EXPECT_TRUE(connection_.set_ecn_codepoint(ECN_ECT0));
  writer_->SetWritable();
  connection_.OnCanWrite();
  // The buffered packet still goes out with the original ECT(1) marking.
  EXPECT_EQ(writer_->last_ecn_sent(), ECN_ECT1);
}
// set_ecn_codepoint() must fail, leaving the connection at Not-ECT, when the
// packet writer reports that it cannot set ECN bits.
TEST_P(QuicConnectionTest, RejectEcnIfWriterDoesNotSupport) {
  SetQuicRestartFlag(quic_support_ect1, true);
  MockPacketWriter non_ecn_writer;
  QuicConnectionPeer::SetWriter(&connection_, &non_ecn_writer, false);
  EXPECT_CALL(non_ecn_writer, SupportsEcn()).WillOnce(Return(false));
  EXPECT_FALSE(connection_.set_ecn_codepoint(ECN_ECT1));
  EXPECT_EQ(connection_.ecn_codepoint(), ECN_NOT_ECT);
}
// Receiving a RESET_STREAM_AT frame when reliable stream reset was NOT
// negotiated must close the connection.
TEST_P(QuicConnectionTest, RejectResetStreamAtIfNotNegotiated) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  // Explicitly disable the reliable-stream-reset extension.
  config.SetReliableStreamReset(false);
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(visitor_, OnConnectionClosed(_, _)).Times(1);
  connection_.OnResetStreamAtFrame(QuicResetStreamAtFrame());
}
// With reliable stream reset negotiated, an incoming RESET_STREAM_AT frame
// is forwarded to the visitor unchanged.
TEST_P(QuicConnectionTest, ResetStreamAt) {
  if (!version().HasIetfQuicFrames()) {
    return;
  }
  EXPECT_CALL(*send_algorithm_, SetFromConfig(_, _));
  QuicConfig config;
  config.SetReliableStreamReset(true);
  connection_.SetFromConfig(config);
  connection_.SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  // Error code 0 should surface as QUIC_STREAM_NO_ERROR on the visitor side.
  EXPECT_CALL(visitor_, OnResetStreamAt(QuicResetStreamAtFrame(
                            0, 0, QUIC_STREAM_NO_ERROR, 20, 10)))
      .Times(1);
  connection_.OnResetStreamAtFrame(QuicResetStreamAtFrame(0, 0, 0, 20, 10));
}
// The connection relays parsed ClientHello details to an attached debug
// visitor verbatim.
TEST_P(QuicConnectionTest, OnParsedClientHelloInfoWithDebugVisitor) {
  const ParsedClientHello chlo_info{.sni = "sni",
                                    .uaid = "uiad",
                                    .supported_groups = {1, 2, 3},
                                    .cert_compression_algos = {4, 5, 6},
                                    .alpns = {"h2", "http/1.1"},
                                    .retry_token = "retry_token"};
  MockQuicConnectionDebugVisitor mock_visitor;
  connection_.set_debug_visitor(&mock_visitor);
  EXPECT_CALL(mock_visitor, OnParsedClientHelloInfo(chlo_info)).Times(1);
  connection_.OnParsedClientHelloInfo(chlo_info);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_connection.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_connection_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f99744f1-007e-423e-8328-acb078b852c0 | cpp | google/quiche | chlo_extractor | quiche/quic/core/chlo_extractor.cc | quiche/quic/core/chlo_extractor_test.cc | #include "quiche/quic/core/chlo_extractor.h"
#include <memory>
#include <optional>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_framer.h"
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/frames/quic_ack_frequency_frame.h"
#include "quiche/quic/core/frames/quic_reset_stream_at_frame.h"
#include "quiche/quic/core/quic_framer.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
namespace quic {
namespace {
// Framer visitor that scans a single packet for a QUIC crypto ClientHello
// (CHLO). It parses stream/crypto frames at offset 0 that begin with "CHLO",
// records whether one was found and whether it contains any of the
// caller-supplied "create session" tags, and forwards complete handshake
// messages to the ChloExtractor delegate. All other frame callbacks are
// accepting no-ops so framing can proceed.
class ChloFramerVisitor : public QuicFramerVisitorInterface,
                          public CryptoFramerVisitorInterface {
 public:
  // `framer`, `create_session_tag_indicators`, and `delegate` must outlive
  // this visitor; none are owned.
  ChloFramerVisitor(QuicFramer* framer,
                    const QuicTagVector& create_session_tag_indicators,
                    ChloExtractor::Delegate* delegate);
  ~ChloFramerVisitor() override = default;
  // QuicFramerVisitorInterface implementation. Packet-level callbacks that
  // carry no CHLO-relevant information are inline no-ops.
  void OnError(QuicFramer* ) override {}
  bool OnProtocolVersionMismatch(ParsedQuicVersion version) override;
  void OnPacket() override {}
  void OnVersionNegotiationPacket(
      const QuicVersionNegotiationPacket& ) override {}
  void OnRetryPacket(QuicConnectionId ,
                     QuicConnectionId ,
                     absl::string_view ,
                     absl::string_view ,
                     absl::string_view ) override {}
  bool OnUnauthenticatedPublicHeader(const QuicPacketHeader& header) override;
  bool OnUnauthenticatedHeader(const QuicPacketHeader& header) override;
  void OnDecryptedPacket(size_t ,
                         EncryptionLevel ) override {}
  bool OnPacketHeader(const QuicPacketHeader& header) override;
  void OnCoalescedPacket(const QuicEncryptedPacket& packet) override;
  void OnUndecryptablePacket(const QuicEncryptedPacket& packet,
                             EncryptionLevel decryption_level,
                             bool has_decryption_key) override;
  // Frame callbacks. Only stream/crypto frames are inspected for a CHLO;
  // the rest simply return true to continue parsing.
  bool OnStreamFrame(const QuicStreamFrame& frame) override;
  bool OnCryptoFrame(const QuicCryptoFrame& frame) override;
  bool OnAckFrameStart(QuicPacketNumber largest_acked,
                       QuicTime::Delta ack_delay_time) override;
  bool OnAckRange(QuicPacketNumber start, QuicPacketNumber end) override;
  bool OnAckTimestamp(QuicPacketNumber packet_number,
                      QuicTime timestamp) override;
  bool OnAckFrameEnd(QuicPacketNumber start,
                     const std::optional<QuicEcnCounts>& ecn_counts) override;
  bool OnStopWaitingFrame(const QuicStopWaitingFrame& frame) override;
  bool OnPingFrame(const QuicPingFrame& frame) override;
  bool OnRstStreamFrame(const QuicRstStreamFrame& frame) override;
  bool OnConnectionCloseFrame(const QuicConnectionCloseFrame& frame) override;
  bool OnNewConnectionIdFrame(const QuicNewConnectionIdFrame& frame) override;
  bool OnRetireConnectionIdFrame(
      const QuicRetireConnectionIdFrame& frame) override;
  bool OnNewTokenFrame(const QuicNewTokenFrame& frame) override;
  bool OnStopSendingFrame(const QuicStopSendingFrame& frame) override;
  bool OnPathChallengeFrame(const QuicPathChallengeFrame& frame) override;
  bool OnPathResponseFrame(const QuicPathResponseFrame& frame) override;
  bool OnGoAwayFrame(const QuicGoAwayFrame& frame) override;
  bool OnMaxStreamsFrame(const QuicMaxStreamsFrame& frame) override;
  bool OnStreamsBlockedFrame(const QuicStreamsBlockedFrame& frame) override;
  bool OnWindowUpdateFrame(const QuicWindowUpdateFrame& frame) override;
  bool OnBlockedFrame(const QuicBlockedFrame& frame) override;
  bool OnPaddingFrame(const QuicPaddingFrame& frame) override;
  bool OnMessageFrame(const QuicMessageFrame& frame) override;
  bool OnHandshakeDoneFrame(const QuicHandshakeDoneFrame& frame) override;
  bool OnAckFrequencyFrame(const QuicAckFrequencyFrame& farme) override;
  bool OnResetStreamAtFrame(const QuicResetStreamAtFrame& frame) override;
  void OnPacketComplete() override {}
  bool IsValidStatelessResetToken(
      const StatelessResetToken& token) const override;
  void OnAuthenticatedIetfStatelessResetPacket(
      const QuicIetfStatelessResetPacket& ) override {}
  // Key-update related callbacks; this visitor never advances keys.
  void OnKeyUpdate(KeyUpdateReason ) override;
  void OnDecryptedFirstPacketInKeyPhase() override;
  std::unique_ptr<QuicDecrypter> AdvanceKeysAndCreateCurrentOneRttDecrypter()
      override;
  std::unique_ptr<QuicEncrypter> CreateCurrentOneRttEncrypter() override;
  // CryptoFramerVisitorInterface implementation.
  void OnError(CryptoFramer* framer) override;
  void OnHandshakeMessage(const CryptoHandshakeMessage& message) override;
  // Feeds `data` (which starts with "CHLO") through a CryptoFramer; returns
  // false on parse failure.
  bool OnHandshakeData(absl::string_view data);
  // True once a complete CHLO handshake message was parsed.
  bool found_chlo() { return found_chlo_; }
  // True if the (possibly partial) CHLO contains any create-session tag.
  bool chlo_contains_tags() { return chlo_contains_tags_; }
 private:
  QuicFramer* framer_;                                  // not owned
  const QuicTagVector& create_session_tag_indicators_;  // not owned
  ChloExtractor::Delegate* delegate_;                   // not owned; may be null
  bool found_chlo_;
  bool chlo_contains_tags_;
  // Destination connection ID of the packet being parsed.
  QuicConnectionId connection_id_;
};
// Stores the (non-owned) collaborators and starts with "nothing found yet"
// state; the connection ID is filled in from the packet header later.
ChloFramerVisitor::ChloFramerVisitor(
    QuicFramer* framer, const QuicTagVector& create_session_tag_indicators,
    ChloExtractor::Delegate* delegate)
    : framer_(framer),
      create_session_tag_indicators_(create_session_tag_indicators),
      delegate_(delegate),
      found_chlo_(false),
      chlo_contains_tags_(false),
      connection_id_(EmptyQuicConnectionId()) {}
// If the packet's version is one we support, switch the framer to it and
// keep parsing; otherwise abort.
bool ChloFramerVisitor::OnProtocolVersionMismatch(ParsedQuicVersion version) {
  if (framer_->IsSupportedVersion(version)) {
    framer_->set_version(version);
    return true;
  }
  return false;
}
// Captures the destination connection ID (reported to the delegate later)
// and keys the initial obfuscators so the packet payload can be decrypted.
bool ChloFramerVisitor::OnUnauthenticatedPublicHeader(
    const QuicPacketHeader& header) {
  connection_id_ = header.destination_connection_id;
  framer_->SetInitialObfuscators(header.destination_connection_id);
  return true;
}
// Header callbacks carry nothing CHLO-relevant; always continue parsing.
bool ChloFramerVisitor::OnUnauthenticatedHeader(
    const QuicPacketHeader& ) {
  return true;
}
bool ChloFramerVisitor::OnPacketHeader(const QuicPacketHeader& ) {
  return true;
}
// Coalesced and undecryptable packets are ignored: only the first
// (initially-obfuscated) packet can carry the CHLO this visitor looks for.
void ChloFramerVisitor::OnCoalescedPacket(
    const QuicEncryptedPacket& ) {}
void ChloFramerVisitor::OnUndecryptablePacket(
    const QuicEncryptedPacket& , EncryptionLevel ,
    bool ) {}
bool ChloFramerVisitor::OnStreamFrame(const QuicStreamFrame& frame) {
if (QuicVersionUsesCryptoFrames(framer_->transport_version())) {
return false;
}
absl::string_view data(frame.data_buffer, frame.data_length);
if (QuicUtils::IsCryptoStreamId(framer_->transport_version(),
frame.stream_id) &&
frame.offset == 0 && absl::StartsWith(data, "CHLO")) {
return OnHandshakeData(data);
}
return true;
}
bool ChloFramerVisitor::OnCryptoFrame(const QuicCryptoFrame& frame) {
if (!QuicVersionUsesCryptoFrames(framer_->transport_version())) {
return false;
}
absl::string_view data(frame.data_buffer, frame.data_length);
if (frame.offset == 0 && absl::StartsWith(data, "CHLO")) {
return OnHandshakeData(data);
}
return true;
}
// Runs the CHLO bytes through a CryptoFramer (which calls back into
// OnHandshakeMessage on a complete message), then checks whether any of the
// create-session indicator tags are present. If so and a delegate exists,
// the partially-parsed message is force-delivered.
bool ChloFramerVisitor::OnHandshakeData(absl::string_view data) {
  CryptoFramer parser;
  parser.set_visitor(this);
  if (!parser.ProcessInput(data)) {
    return false;
  }
  for (const QuicTag indicator : create_session_tag_indicators_) {
    if (parser.HasTag(indicator)) {
      chlo_contains_tags_ = true;
      break;  // One matching tag is enough.
    }
  }
  if (chlo_contains_tags_ && delegate_) {
    // Deliver the message even if it is incomplete.
    parser.ForceHandshake();
  }
  return true;
}
// The remaining frame callbacks are accepting no-ops: none of these frame
// types can carry CHLO data, so each returns true to let parsing continue.
bool ChloFramerVisitor::OnAckFrameStart(QuicPacketNumber ,
                                        QuicTime::Delta ) {
  return true;
}
bool ChloFramerVisitor::OnResetStreamAtFrame(
    const QuicResetStreamAtFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnAckRange(QuicPacketNumber ,
                                   QuicPacketNumber ) {
  return true;
}
bool ChloFramerVisitor::OnAckTimestamp(QuicPacketNumber ,
                                       QuicTime ) {
  return true;
}
bool ChloFramerVisitor::OnAckFrameEnd(
    QuicPacketNumber ,
    const std::optional<QuicEcnCounts>& ) {
  return true;
}
bool ChloFramerVisitor::OnStopWaitingFrame(
    const QuicStopWaitingFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnPingFrame(const QuicPingFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnRstStreamFrame(const QuicRstStreamFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnConnectionCloseFrame(
    const QuicConnectionCloseFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnStopSendingFrame(
    const QuicStopSendingFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnPathChallengeFrame(
    const QuicPathChallengeFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnPathResponseFrame(
    const QuicPathResponseFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnGoAwayFrame(const QuicGoAwayFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnWindowUpdateFrame(
    const QuicWindowUpdateFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnBlockedFrame(const QuicBlockedFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnNewConnectionIdFrame(
    const QuicNewConnectionIdFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnRetireConnectionIdFrame(
    const QuicRetireConnectionIdFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnNewTokenFrame(const QuicNewTokenFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnPaddingFrame(const QuicPaddingFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnMessageFrame(const QuicMessageFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnHandshakeDoneFrame(
    const QuicHandshakeDoneFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnAckFrequencyFrame(
    const QuicAckFrequencyFrame& ) {
  return true;
}
// This visitor has no stateless reset tokens, so no token is ever valid.
bool ChloFramerVisitor::IsValidStatelessResetToken(
    const StatelessResetToken& ) const {
  return false;
}
bool ChloFramerVisitor::OnMaxStreamsFrame(
    const QuicMaxStreamsFrame& ) {
  return true;
}
bool ChloFramerVisitor::OnStreamsBlockedFrame(
    const QuicStreamsBlockedFrame& ) {
  return true;
}
// Key updates never happen while extracting a CHLO (only initial keys are
// used); all key-related callbacks are stubs that hand back nothing.
void ChloFramerVisitor::OnKeyUpdate(KeyUpdateReason ) {}
void ChloFramerVisitor::OnDecryptedFirstPacketInKeyPhase() {}
std::unique_ptr<QuicDecrypter>
ChloFramerVisitor::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
  return nullptr;
}
std::unique_ptr<QuicEncrypter>
ChloFramerVisitor::CreateCurrentOneRttEncrypter() {
  return nullptr;
}
// CryptoFramer errors are ignored; ProcessInput's return value already
// signals failure to OnHandshakeData.
void ChloFramerVisitor::OnError(CryptoFramer* ) {}
// Called by the CryptoFramer when a complete handshake message has been
// parsed: forward it to the delegate (if any) and remember that a CHLO was
// found.
void ChloFramerVisitor::OnHandshakeMessage(
    const CryptoHandshakeMessage& message) {
  if (delegate_) {
    delegate_->OnChlo(framer_->transport_version(), connection_id_, message);
  }
  found_chlo_ = true;
}
}
bool ChloExtractor::Extract(const QuicEncryptedPacket& packet,
ParsedQuicVersion version,
const QuicTagVector& create_session_tag_indicators,
Delegate* delegate, uint8_t connection_id_length) {
QUIC_DVLOG(1) << "Extracting CHLO using version " << version;
QuicFramer framer({version}, QuicTime::Zero(), Perspective::IS_SERVER,
connection_id_length);
ChloFramerVisitor visitor(&framer, create_session_tag_indicators, delegate);
framer.set_visitor(&visitor);
if (!framer.ProcessPacket(packet)) {
return false;
}
return visitor.found_chlo() || visitor.chlo_contains_tags();
}
} | #include "quiche/quic/core/chlo_extractor.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_framer.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/first_flight.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
// Delegate that records everything ChloExtractor reports so tests can
// assert on it: the transport version, the destination connection ID, the
// CHLO's debug string, and (when present) its ALPN value.
class TestDelegate : public ChloExtractor::Delegate {
 public:
  TestDelegate() = default;
  ~TestDelegate() override = default;
  void OnChlo(QuicTransportVersion version, QuicConnectionId connection_id,
              const CryptoHandshakeMessage& chlo) override {
    version_ = version;
    connection_id_ = connection_id;
    chlo_ = chlo.DebugString();
    absl::string_view alpn_value;
    // ALPN is optional in the CHLO; only record it when present.
    if (chlo.GetStringPiece(kALPN, &alpn_value)) {
      alpn_ = std::string(alpn_value);
    }
  }
  // Accessors for the values captured by the last OnChlo() call.
  QuicConnectionId connection_id() const { return connection_id_; }
  QuicTransportVersion transport_version() const { return version_; }
  const std::string& chlo() const { return chlo_; }
  const std::string& alpn() const { return alpn_; }
 private:
  QuicConnectionId connection_id_;
  QuicTransportVersion version_;
  std::string chlo_;
  std::string alpn_;
};
// Fixture parameterized on QUIC version. MakePacket() builds an encrypted
// packet carrying `data` on the crypto stream (or in a CRYPTO frame for
// versions that use them) and stores it in `packet_`. The `munge_*` flags
// deliberately corrupt the placement so extraction should fail:
//   munge_offset    - shift the payload off offset 0
//   munge_stream_id - put the payload on a non-crypto stream
class ChloExtractorTest : public QuicTestWithParam<ParsedQuicVersion> {
 public:
  ChloExtractorTest() : version_(GetParam()) {}
  void MakePacket(absl::string_view data, bool munge_offset,
                  bool munge_stream_id) {
    QuicPacketHeader header;
    header.destination_connection_id = TestConnectionId();
    header.destination_connection_id_included = CONNECTION_ID_PRESENT;
    header.version_flag = true;
    header.version = version_;
    header.reset_flag = false;
    header.packet_number_length = PACKET_4BYTE_PACKET_NUMBER;
    header.packet_number = QuicPacketNumber(1);
    if (version_.HasLongHeaderLengths()) {
      header.retry_token_length_length =
          quiche::VARIABLE_LENGTH_INTEGER_LENGTH_1;
      header.length_length = quiche::VARIABLE_LENGTH_INTEGER_LENGTH_2;
    }
    QuicFrames frames;
    size_t offset = 0;
    if (munge_offset) {
      offset++;
    }
    // Client-perspective framer: the test builds what a client would send.
    QuicFramer framer(SupportedVersions(version_), QuicTime::Zero(),
                      Perspective::IS_CLIENT, kQuicDefaultConnectionIdLength);
    framer.SetInitialObfuscators(TestConnectionId());
    if (!version_.UsesCryptoFrames() || munge_stream_id) {
      QuicStreamId stream_id =
          QuicUtils::GetCryptoStreamId(version_.transport_version);
      if (munge_stream_id) {
        stream_id++;
      }
      frames.push_back(
          QuicFrame(QuicStreamFrame(stream_id, false, offset, data)));
    } else {
      frames.push_back(
          QuicFrame(new QuicCryptoFrame(ENCRYPTION_INITIAL, offset, data)));
    }
    std::unique_ptr<QuicPacket> packet(
        BuildUnsizedDataPacket(&framer, header, frames));
    EXPECT_TRUE(packet != nullptr);
    size_t encrypted_length =
        framer.EncryptPayload(ENCRYPTION_INITIAL, header.packet_number, *packet,
                              buffer_, ABSL_ARRAYSIZE(buffer_));
    ASSERT_NE(0u, encrypted_length);
    // packet_ aliases buffer_; both live as long as the fixture.
    packet_ = std::make_unique<QuicEncryptedPacket>(buffer_, encrypted_length);
    EXPECT_TRUE(packet_ != nullptr);
    DeleteFrames(&frames);
  }
 protected:
  ParsedQuicVersion version_;
  TestDelegate delegate_;
  std::unique_ptr<QuicEncryptedPacket> packet_;
  char buffer_[kMaxOutgoingPacketSize];
};
// Instantiate the suite for every supported version that uses QUIC crypto;
// PrintToStringParamName() makes each test's name include its version.
INSTANTIATE_TEST_SUITE_P(
    ChloExtractorTests, ChloExtractorTest,
    ::testing::ValuesIn(AllSupportedVersionsWithQuicCrypto()),
    ::testing::PrintToStringParamName());
// A well-placed CHLO (crypto stream, offset 0) is extracted, and the
// delegate sees the version, connection ID, and message contents.
TEST_P(ChloExtractorTest, FindsValidChlo) {
  CryptoHandshakeMessage chlo;
  chlo.set_tag(kCHLO);
  std::string serialized_chlo(chlo.GetSerialized().AsStringPiece());
  MakePacket(serialized_chlo, false,
             false);
  EXPECT_TRUE(ChloExtractor::Extract(*packet_, version_, {}, &delegate_,
                                     kQuicDefaultConnectionIdLength));
  EXPECT_EQ(version_.transport_version, delegate_.transport_version());
  EXPECT_EQ(TestConnectionId(), delegate_.connection_id());
  EXPECT_EQ(chlo.DebugString(), delegate_.chlo());
}
// A CHLO placed on a stream other than the crypto stream must not be found.
TEST_P(ChloExtractorTest, DoesNotFindValidChloOnWrongStream) {
  if (version_.UsesCryptoFrames()) {
    // Versions that carry the handshake in CRYPTO frames have no notion of
    // a "wrong" crypto stream, so this scenario does not apply.
    return;
  }
  CryptoHandshakeMessage chlo;
  chlo.set_tag(kCHLO);
  std::string serialized_chlo(chlo.GetSerialized().AsStringPiece());
  MakePacket(serialized_chlo,
             false, true);
  EXPECT_FALSE(ChloExtractor::Extract(*packet_, version_, {}, &delegate_,
                                      kQuicDefaultConnectionIdLength));
}
// A CHLO that does not start at offset 0 of the crypto data must not be
// found.
TEST_P(ChloExtractorTest, DoesNotFindValidChloOnWrongOffset) {
  CryptoHandshakeMessage chlo;
  chlo.set_tag(kCHLO);
  std::string serialized_chlo(chlo.GetSerialized().AsStringPiece());
  MakePacket(serialized_chlo, true,
             false);
  EXPECT_FALSE(ChloExtractor::Extract(*packet_, version_, {}, &delegate_,
                                      kQuicDefaultConnectionIdLength));
}
// A payload that is not a CHLO at all ("foo") yields no extraction.
TEST_P(ChloExtractorTest, DoesNotFindInvalidChlo) {
  MakePacket("foo", false,
             false);
  EXPECT_FALSE(ChloExtractor::Extract(*packet_, version_, {}, &delegate_,
                                      kQuicDefaultConnectionIdLength));
}
// Extraction also works on a realistic client first flight produced by the
// test utilities, including recovering the expected ALPN.
TEST_P(ChloExtractorTest, FirstFlight) {
  std::vector<std::unique_ptr<QuicReceivedPacket>> first_flight =
      GetFirstFlightOfPackets(version_);
  ASSERT_EQ(first_flight.size(), 1u);
  EXPECT_TRUE(ChloExtractor::Extract(*first_flight[0], version_, {},
                                     &delegate_,
                                     kQuicDefaultConnectionIdLength));
  EXPECT_EQ(version_.transport_version, delegate_.transport_version());
  EXPECT_EQ(TestConnectionId(), delegate_.connection_id());
  EXPECT_EQ(AlpnForVersion(version_), delegate_.alpn());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/chlo_extractor.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/chlo_extractor_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
eab7bed0-da11-471e-bab9-a472e9e04c08 | cpp | tensorflow/tensorflow | remap_plan | third_party/xla/xla/python/ifrt/remap_plan.cc | third_party/xla/xla/python/ifrt/remap_plan_test.cc | #include "xla/python/ifrt/remap_plan.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/remap_plan.pb.h"
#include "xla/status_macros.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
// Deserializes one RemapPlan::Mapping from its proto form. All six repeated
// interval fields must have the same length; each index i contributes one
// `from` and one `to` interval.
absl::StatusOr<RemapPlan::Mapping> MappingFromProto(
    const RemapPlanProto::MappingProto& mapping_proto) {
  RemapPlan::Mapping mapping;
  mapping.in_array = mapping_proto.in_array();
  mapping.out_array = mapping_proto.out_array();
  const int64_t interval_count = mapping_proto.from_start_size();
  // Reject protos whose parallel arrays disagree in length.
  TF_RET_CHECK(mapping_proto.from_end_size() == interval_count);
  TF_RET_CHECK(mapping_proto.from_step_size() == interval_count);
  TF_RET_CHECK(mapping_proto.to_start_size() == interval_count);
  TF_RET_CHECK(mapping_proto.to_end_size() == interval_count);
  TF_RET_CHECK(mapping_proto.to_step_size() == interval_count);
  mapping.from.reserve(interval_count);
  mapping.to.reserve(interval_count);
  for (int64_t idx = 0; idx < interval_count; ++idx) {
    mapping.from.push_back(RemapPlan::Interval{mapping_proto.from_start(idx),
                                               mapping_proto.from_end(idx),
                                               mapping_proto.from_step(idx)});
    mapping.to.push_back(RemapPlan::Interval{mapping_proto.to_start(idx),
                                             mapping_proto.to_end(idx),
                                             mapping_proto.to_step(idx)});
  }
  return mapping;
}
// Serializes one RemapPlan::Mapping into its proto form: the `from`/`to`
// interval lists are flattened into six parallel repeated fields. Requires
// `from` and `to` to have the same length.
absl::StatusOr<RemapPlanProto::MappingProto> MappingToProto(
    const RemapPlan::Mapping& mapping) {
  TF_RET_CHECK(mapping.from.size() == mapping.to.size());
  RemapPlanProto::MappingProto proto;
  proto.set_in_array(mapping.in_array);
  proto.set_out_array(mapping.out_array);
  const int64_t num_intervals = mapping.from.size();
  // Pre-size every repeated field to avoid repeated reallocation.
  proto.mutable_from_start()->Reserve(num_intervals);
  proto.mutable_from_end()->Reserve(num_intervals);
  proto.mutable_from_step()->Reserve(num_intervals);
  proto.mutable_to_start()->Reserve(num_intervals);
  proto.mutable_to_end()->Reserve(num_intervals);
  proto.mutable_to_step()->Reserve(num_intervals);
  // Use the precomputed signed bound instead of re-evaluating
  // mapping.from.size(): the original `i < mapping.from.size()` compared a
  // signed int64_t against an unsigned size_t on every iteration.
  for (int64_t i = 0; i < num_intervals; ++i) {
    proto.add_from_start(mapping.from[i].start);
    proto.add_from_end(mapping.from[i].end);
    proto.add_from_step(mapping.from[i].step);
    proto.add_to_start(mapping.to[i].start);
    proto.add_to_end(mapping.to[i].end);
    proto.add_to_step(mapping.to[i].step);
  }
  return proto;
}
// Validates that `interval` describes a legal half-open slice over
// `num_shards` shards: start in [0, num_shards), end in [0, num_shards],
// step strictly positive. Checks are ordered so the first violated
// condition determines the error message.
absl::Status CheckRange(int64_t num_shards,
                        const RemapPlan::Interval& interval) {
  if (interval.start < 0 || interval.start >= num_shards) {
    return InvalidArgument("start must be in [0, %d], but is %d",
                           num_shards - 1, interval.start);
  }
  if (interval.end < 0 || interval.end > num_shards) {
    return InvalidArgument("end must be in [0, %d], but is %d", num_shards,
                           interval.end);
  }
  if (interval.step <= 0) {
    return InvalidArgument("step must be positive, but is %d", interval.step);
  }
  return absl::OkStatus();
}
// Number of shard indices visited by `for (i = start; i < end; i += step)`,
// i.e. ceil((end - start) / step) for a positive step.
int64_t GetNumberOfSteps(const RemapPlan::Interval& interval) {
  const int64_t span = interval.end - interval.start;
  return (span + interval.step - 1) / interval.step;
}
}
// Renders the interval in Python-slice style, e.g. "[0:4:2]".
std::string RemapPlan::Interval::DebugString() const {
  return absl::StrCat("[", start, ":", end, ":", step, "]");
}
// Renders the mapping as
// "Mapping(in_array=I,out_array=O,from=[...],to=[...])", with each interval
// list shown in slice notation.
std::string RemapPlan::Mapping::DebugString() const {
  const auto intervals_to_string =
      [](absl::Span<const RemapPlan::Interval> intervals) {
        return absl::StrCat(
            "[",
            absl::StrJoin(
                intervals, ",",
                [](std::string* out, const RemapPlan::Interval& interval) {
                  absl::StrAppend(out, interval.DebugString());
                }),
            "]");
      };
  return absl::StrCat("Mapping(in_array=", in_array, ",",
                      "out_array=", out_array,
                      ",from=", intervals_to_string(from),
                      ",to=", intervals_to_string(to), ")");
}
// Checks that this plan is internally consistent:
//   * at least one input; all inputs and outputs share one dtype;
//   * every mapping references valid arrays, uses in-range intervals with
//     matching step counts, and never reads an input shard or assigns an
//     output shard twice;
//   * every output shard is assigned exactly once, and the devices implied
//     by the mappings match each output sharding's device list.
absl::Status RemapPlan::Validate() const {
  const int num_inputs = input_specs.size();
  if (num_inputs == 0) {
    return InvalidArgument("Must have at least one input");
  }
  // All inputs must agree with input 0's dtype.
  for (int i = 0; i < num_inputs; ++i) {
    if (input_specs[i].dtype != input_specs.front().dtype) {
      return InvalidArgument(
          "Input must have the same dtype: %s (input 0) vs. %s (input "
          "%d)",
          input_specs.front().dtype.DebugString(),
          input_specs[i].dtype.DebugString(), i);
    }
  }
  // Outputs must agree with the (now uniform) input dtype.
  const int num_outputs = output_specs.size();
  for (int i = 0; i < num_outputs; ++i) {
    if (output_specs[i].dtype != input_specs.front().dtype) {
      return InvalidArgument(
          "Input and output must have the same dtype: %s (input 0) vs. %s "
          "(output %d)",
          output_specs.front().dtype.DebugString(),
          output_specs[i].dtype.DebugString(), i);
    }
  }
  // in_used_buffers_list[i][s] == true once input i's shard s is consumed.
  std::vector<std::vector<bool>> in_used_buffers_list(num_inputs);
  for (int i = 0; i < num_inputs; ++i) {
    in_used_buffers_list[i].resize(
        input_specs[i].sharding->devices()->size(),
        false);
  }
  // out_assigned_devices_list[i][s] records which device feeds output i's
  // shard s; nullptr means not yet assigned.
  std::vector<BasicDeviceList::Devices> out_assigned_devices_list(num_outputs);
  for (int i = 0; i < num_outputs; ++i) {
    out_assigned_devices_list[i].resize(
        output_specs[i].sharding->devices()->size(),
        nullptr);
  }
  for (int64_t i = 0; i < mappings->size(); ++i) {
    const RemapPlan::Mapping& mapping = (*mappings)[i];
    if (mapping.in_array < 0 || mapping.in_array >= num_inputs) {
      return InvalidArgument(
          "mappings[%d].in_array must be in [0, %d], but is %d", i,
          num_inputs - 1, mapping.in_array);
    }
    if (mapping.out_array < 0 || mapping.out_array >= num_outputs) {
      return InvalidArgument(
          "mappings[%d].out_array must be in [0, %d], but is %d", i,
          num_outputs - 1, mapping.out_array);
    }
    if (mapping.from.size() != mapping.to.size()) {
      return InvalidArgument(
          "mappings[%d].from and mappings[%d].to must have the same number of "
          "intervals, but has %d and %d intervals",
          i, i, mapping.from.size(), mapping.to.size());
    }
    std::vector<bool>& in_used_buffers = in_used_buffers_list[mapping.in_array];
    absl::Span<Device* const> in_devices =
        input_specs[mapping.in_array].sharding->devices()->devices();
    BasicDeviceList::Devices& out_assigned_devices =
        out_assigned_devices_list[mapping.out_array];
    const int64_t in_shards_count = in_used_buffers.size();
    const int64_t out_shards_count = out_assigned_devices.size();
    for (int s = 0; s < mapping.from.size(); ++s) {
      const RemapPlan::Interval& in_interval = mapping.from[s];
      const RemapPlan::Interval& out_interval = mapping.to[s];
      TF_RETURN_IF_ERROR(CheckRange(in_shards_count, in_interval));
      TF_RETURN_IF_ERROR(CheckRange(out_shards_count, out_interval));
      // Paired intervals must cover the same number of shards.
      if (GetNumberOfSteps(in_interval) != GetNumberOfSteps(out_interval)) {
        return InvalidArgument(
            "mappings[%d].from[%d] and mappings[%d].to[%d] must have the same "
            "number of steps, but were %d and %d "
            "(%s vs. %s)",
            i, s, i, s, GetNumberOfSteps(in_interval),
            GetNumberOfSteps(out_interval), in_interval.DebugString(),
            out_interval.DebugString());
      }
      // Walk both intervals in lockstep, marking input shards consumed and
      // recording the device that feeds each output shard.
      int64_t in_shard = in_interval.start;
      int64_t out_shard = out_interval.start;
      while (in_shard < in_interval.end) {
        if (in_used_buffers[in_shard]) {
          return InvalidArgument("Input array %d shard %d is already used",
                                 mapping.in_array, in_shard);
        }
        in_used_buffers[in_shard] = true;
        if (out_assigned_devices[out_shard] != nullptr) {
          return InvalidArgument("Output array %d shard %d is already assigned",
                                 mapping.out_array, out_shard);
        }
        out_assigned_devices[out_shard] = in_devices[in_shard];
        in_shard += in_interval.step;
        out_shard += out_interval.step;
      }
    }
  }
  // Every output shard must be assigned, and the assigned devices must
  // exactly match the output sharding's device list (same order).
  for (int i = 0; i < num_outputs; ++i) {
    for (int out_shard = 0;
         out_shard < output_specs[i].sharding->devices()->size(); ++out_shard) {
      if (out_assigned_devices_list[i][out_shard] == nullptr) {
        return InvalidArgument("Output array %d shard %d is unassigned", i,
                               out_shard);
      }
    }
    if (out_assigned_devices_list[i] !=
        output_specs[i].sharding->devices()->devices()) {
      return InvalidArgument(
          "Output array %d devices and sharding devices do not match: "
          "Expected %v, but got %v",
          i, *output_specs[i].sharding->devices(),
          *BasicDeviceList::Create(std::move(out_assigned_devices_list[i])));
    }
  }
  return absl::OkStatus();
}
// Deserializes a RemapPlan from `proto`, resolving device references via
// `lookup_device`. Fails if any embedded ArraySpec or mapping proto is
// malformed.
absl::StatusOr<RemapPlan> RemapPlan::FromProto(
    DeviceList::LookupDeviceFunc lookup_device, const RemapPlanProto& proto) {
  RemapPlan plan;
  plan.input_specs.reserve(proto.input_specs_size());
  for (const auto& spec_proto : proto.input_specs()) {
    TF_ASSIGN_OR_RETURN(ArraySpec spec,
                        ArraySpec::FromProto(lookup_device, spec_proto));
    plan.input_specs.push_back(std::move(spec));
  }
  plan.output_specs.reserve(proto.output_specs_size());
  for (const auto& spec_proto : proto.output_specs()) {
    TF_ASSIGN_OR_RETURN(ArraySpec spec,
                        ArraySpec::FromProto(lookup_device, spec_proto));
    plan.output_specs.push_back(std::move(spec));
  }
  // Mappings are shared via shared_ptr so plans can be copied cheaply.
  plan.mappings = std::make_shared<std::vector<Mapping>>();
  plan.mappings->reserve(proto.mappings_size());
  for (const auto& mapping_proto : proto.mappings()) {
    TF_ASSIGN_OR_RETURN(auto mapping, MappingFromProto(mapping_proto));
    plan.mappings->push_back(std::move(mapping));
  }
  return plan;
}
// Serializes this plan to a RemapPlanProto; the inverse of FromProto().
absl::StatusOr<RemapPlanProto> RemapPlan::ToProto() const {
  RemapPlanProto plan_proto;
  plan_proto.mutable_input_specs()->Reserve(input_specs.size());
  for (const auto& input_spec : input_specs) {
    TF_ASSIGN_OR_RETURN(*plan_proto.add_input_specs(), input_spec.ToProto());
  }
  plan_proto.mutable_output_specs()->Reserve(output_specs.size());
  for (const auto& output_spec : output_specs) {
    TF_ASSIGN_OR_RETURN(*plan_proto.add_output_specs(), output_spec.ToProto());
  }
  plan_proto.mutable_mappings()->Reserve(mappings->size());
  for (const auto& mapping : *mappings) {
    TF_ASSIGN_OR_RETURN(*plan_proto.add_mappings(), MappingToProto(mapping));
  }
  return plan_proto;
}
std::string RemapPlan::DebugString() const {
auto format_array_specs = [](absl::Span<const ArraySpec> array_specs) {
return absl::StrCat(
"[",
absl::StrJoin(array_specs, ",",
[](std::string* out, const ArraySpec& spec) {
absl::StrAppend(out, spec.DebugString());
}),
"]");
};
auto format_mappings = [](absl::Span<const Mapping> mappings) {
return absl::StrCat(
"[",
absl::StrJoin(mappings, ",",
[](std::string* out, const Mapping& mapping) {
absl::StrAppend(out, mapping.DebugString());
}),
"]");
};
return absl::StrCat("RemapPlan(input_specs=", format_array_specs(input_specs),
",output_specs=", format_array_specs(output_specs), ",",
"mappings=", format_mappings(*mappings), ")");
}
}
} | #include "xla/python/ifrt/remap_plan.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
// Fixture providing the device helpers (GetDevices(), client()) used below.
class RemapPlanTest : public test_util::DeviceTest {};
// Round-trips a two-input/two-output plan through ToProto()/FromProto() and
// verifies that the mappings and every spec (dtype, shape, sharding) survive
// unchanged.
TEST_P(RemapPlanTest, ToFromProto) {
  RemapPlan plan;
  Shape shape({20, 20});
  Shape shard_shape({5, 20});
  tsl::RCReference<DeviceList> devices = GetDevices({0, 1, 2, 3});
  std::shared_ptr<const Sharding> sharding =
      ConcreteEvenSharding::Create(devices, MemoryKind(), shape,
                                   shard_shape);
  plan.input_specs.reserve(2);
  plan.input_specs.push_back(ArraySpec{DType(DType::kF32),
                                       shape, sharding});
  plan.input_specs.push_back(ArraySpec{DType(DType::kF32),
                                       shape, sharding});
  plan.output_specs.reserve(2);
  plan.output_specs.push_back(ArraySpec{
      DType(DType::kF32), shape, sharding});
  plan.output_specs.push_back(ArraySpec{
      DType(DType::kF32), shape, sharding});
  // Two cross mappings (0->1 and 1->0), each with two strided intervals.
  plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
  plan.mappings->reserve(2);
  plan.mappings->push_back(RemapPlan::Mapping{
      0, 1,
      {RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{2, 4, 1}},
      {RemapPlan::Interval{1, 4, 2}, RemapPlan::Interval{0, 4, 2}}});
  plan.mappings->push_back(RemapPlan::Mapping{
      1, 0,
      {RemapPlan::Interval{0, 4, 2}, RemapPlan::Interval{1, 4, 2}},
      {RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{2, 4, 1}}});
  TF_ASSERT_OK_AND_ASSIGN(RemapPlanProto plan_proto, plan.ToProto());
  TF_ASSERT_OK_AND_ASSIGN(
      RemapPlan plan_copy,
      RemapPlan::FromProto(absl::bind_front(&Client::LookupDevice, client()),
                           plan_proto));
  EXPECT_THAT(*plan_copy.mappings, ElementsAreArray(*plan.mappings));
  EXPECT_THAT(plan_copy.output_specs, SizeIs(2));
  // Each reconstructed spec must match the original dtype/shape/sharding.
  for (const auto& spec : plan_copy.input_specs) {
    EXPECT_EQ(spec.dtype, DType(DType::kF32));
    EXPECT_EQ(spec.shape, shape);
    const auto* sharding_copy =
        llvm::dyn_cast<ConcreteEvenSharding>(spec.sharding.get());
    ASSERT_NE(sharding_copy, nullptr);
    EXPECT_EQ(*sharding_copy->devices(), *devices);
    EXPECT_EQ(sharding_copy->shape(), shape);
    EXPECT_EQ(sharding_copy->shard_shape(), shard_shape);
  }
  for (const auto& spec : plan_copy.output_specs) {
    EXPECT_EQ(spec.dtype, DType(DType::kF32));
    EXPECT_EQ(spec.shape, shape);
    const auto* sharding_copy =
        llvm::dyn_cast<ConcreteEvenSharding>(spec.sharding.get());
    ASSERT_NE(sharding_copy, nullptr);
    EXPECT_EQ(*sharding_copy->devices(), *devices);
    EXPECT_EQ(sharding_copy->shape(), shape);
    EXPECT_EQ(sharding_copy->shard_shape(), shard_shape);
  }
}
TEST_P(RemapPlanTest, InvalidInputDtype) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.input_specs.push_back(
ArraySpec{DType(DType::kF32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input must have the same dtype")));
}
TEST_P(RemapPlanTest, InvalidOutputDtype) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kF32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input and output must have the same dtype")));
}
TEST_P(RemapPlanTest, InvalidInputArrayIndex) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{1,
0,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].in_array must be in [0, 0], but is 1")));
}
TEST_P(RemapPlanTest, InvalidOutputArrayIndex) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
1,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].out_array must be in [0, 0], but is 1")));
}
TEST_P(RemapPlanTest, InvalidIntervalCount) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("mappings[0].from and mappings[0].to must have the same "
"number of intervals, but has 2 and 1 intervals")));
}
TEST_P(RemapPlanTest, InvalidShardIndex) {
auto run = [&](RemapPlan::Interval from, RemapPlan::Interval to) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{0, 0,
{from},
{to}});
return plan.Validate();
};
EXPECT_THAT(run(RemapPlan::Interval{-1, 1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{1, 1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is 1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{-1, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("start must be in [0, 0], but is 1")));
EXPECT_THAT(run(RemapPlan::Interval{0, -1, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 2, 1}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is 2")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, -1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is -1")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 2, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("end must be in [0, 1], but is 2")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 0}, RemapPlan::Interval{0, 1, 1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("step must be positive, but is 0")));
EXPECT_THAT(run(RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, -1}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("step must be positive, but is -1")));
}
TEST_P(RemapPlanTest, AlreadyUsedInputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 2, 1}}});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Input array 0 shard 0 is already used")));
}
TEST_P(RemapPlanTest, UnassignedOutputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
0,
{RemapPlan::Interval{0, 1, 1}},
{RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output array 0 shard 1 is unassigned")));
}
TEST_P(RemapPlanTest, AlreadyAssignedOutputShard) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({2, 3}),
ConcreteEvenSharding::Create(GetDevices({0}), MemoryKind(),
Shape({2, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(RemapPlan::Mapping{
0,
0,
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{1, 2, 1}},
{RemapPlan::Interval{0, 1, 1}, RemapPlan::Interval{0, 1, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Output array 0 shard 0 is already assigned")));
}
TEST_P(RemapPlanTest, InvalidOutputDevices) {
RemapPlan plan;
plan.input_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({0, 1}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.output_specs.push_back(
ArraySpec{DType(DType::kS32),
Shape({4, 3}),
ConcreteEvenSharding::Create(GetDevices({1, 0}), MemoryKind(),
Shape({4, 3}),
Shape({2, 3}))});
plan.mappings = std::make_shared<std::vector<RemapPlan::Mapping>>();
plan.mappings->push_back(
RemapPlan::Mapping{0,
0,
{RemapPlan::Interval{0, 2, 1}},
{RemapPlan::Interval{0, 2, 1}}});
EXPECT_THAT(
plan.Validate(),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"Output array 0 devices and sharding devices do not match")));
}
INSTANTIATE_TEST_SUITE_P(NumDevices, RemapPlanTest,
testing::Values(test_util::DeviceTestParam{
4,
4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/remap_plan.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/remap_plan_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
23ee1f36-b69d-4cd2-82cb-48f9dc42b1dc | cpp | tensorflow/tensorflow | numa | tensorflow/core/platform/numa.h | third_party/xla/third_party/tsl/tsl/platform/numa_test.cc | #ifndef TENSORFLOW_CORE_PLATFORM_NUMA_H_
#define TENSORFLOW_CORE_PLATFORM_NUMA_H_
#include "tensorflow/core/platform/platform.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/numa.h"
namespace tensorflow {
namespace port {
using tsl::port::kNUMANoAffinity;
using tsl::port::NUMAEnabled;
using tsl::port::NUMAFree;
using tsl::port::NUMAGetMemAffinity;
using tsl::port::NUMAGetThreadNodeAffinity;
using tsl::port::NUMAMalloc;
using tsl::port::NUMANumNodes;
using tsl::port::NUMASetThreadNodeAffinity;
}
}
#endif | #include "tsl/platform/numa.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace internal {
TEST(Numa, NumNodes) {
if (port::NUMAEnabled()) {
EXPECT_GE(port::NUMANumNodes(), 1);
}
}
TEST(Numa, Malloc) {
if (port::NUMAEnabled()) {
int num_nodes = port::NUMANumNodes();
for (int request_node = 0; request_node < num_nodes; ++request_node) {
void* ptr = port::NUMAMalloc(request_node, 8, 0);
EXPECT_NE(ptr, nullptr);
*(reinterpret_cast<int*>(ptr)) = 0;
int affinity_node = port::NUMAGetMemAffinity(ptr);
EXPECT_EQ(affinity_node, request_node);
port::NUMAFree(ptr, 8);
}
}
}
TEST(Numa, SetNodeAffinity) {
EXPECT_EQ(-1, port::NUMAGetThreadNodeAffinity());
if (port::NUMAEnabled()) {
int num_nodes = port::NUMANumNodes();
for (int request_node = 0; request_node < num_nodes; ++request_node) {
port::NUMASetThreadNodeAffinity(request_node);
int affinity_node = port::NUMAGetThreadNodeAffinity();
EXPECT_EQ(affinity_node, request_node);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/numa.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/numa_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
28edffeb-45c6-42b1-ad17-93c134919c8e | cpp | tensorflow/tensorflow | big_little_affinity | tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.h"
#include <algorithm>
#include <cstdint>
#include <map>
#include <set>
#include "include/cpuinfo.h"
namespace tflite {
namespace acceleration {
namespace {
bool IsInOrderArch(cpuinfo_uarch arch) {
switch (arch) {
case cpuinfo_uarch_cortex_a53:
case cpuinfo_uarch_cortex_a55r0:
case cpuinfo_uarch_cortex_a55:
case cpuinfo_uarch_cortex_a57:
return true;
default:
return false;
}
return false;
}
}
BigLittleAffinity GetAffinity() {
BigLittleAffinity affinity;
if (!cpuinfo_initialize()) {
return affinity;
}
std::map<uint32_t, uint64_t> cluster_to_max_frequency;
uint64_t smallest_max_frequency = UINT64_MAX;
uint64_t largest_max_frequency = 0;
uint64_t processors_count = cpuinfo_get_processors_count();
for (auto i = 0; i < processors_count; i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
if (processor->core->frequency > 0) {
cluster_to_max_frequency[processor->cluster->cluster_id] =
processor->core->frequency;
smallest_max_frequency =
std::min(smallest_max_frequency, processor->core->frequency);
largest_max_frequency =
std::max(largest_max_frequency, processor->core->frequency);
}
}
int count_of_processors_with_largest_max_frequency = 0;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
uint64_t max_frequency =
cluster_to_max_frequency[processor->cluster->cluster_id];
if (max_frequency == largest_max_frequency) {
++count_of_processors_with_largest_max_frequency;
}
}
std::set<cpuinfo_uarch> archs;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
uint64_t max_frequency =
cluster_to_max_frequency[processor->cluster->cluster_id];
bool is_little;
archs.insert(processor->core->uarch);
if (count_of_processors_with_largest_max_frequency ==
cpuinfo_get_processors_count()) {
is_little = IsInOrderArch(processor->core->uarch);
} else if (count_of_processors_with_largest_max_frequency == 2) {
is_little = (max_frequency != largest_max_frequency);
} else {
is_little = (max_frequency == smallest_max_frequency);
}
#ifdef __ANDROID__
if (is_little) {
affinity.little_core_affinity |= (0x1 << processor->linux_id);
} else {
affinity.big_core_affinity |= (0x1 << processor->linux_id);
}
#endif
}
if (cluster_to_max_frequency.size() == 1) {
affinity.big_core_affinity = affinity.little_core_affinity =
std::max(affinity.big_core_affinity, affinity.little_core_affinity);
} else if (count_of_processors_with_largest_max_frequency ==
cpuinfo_get_processors_count() &&
archs.size() == 1) {
affinity.big_core_affinity = affinity.little_core_affinity =
std::max(affinity.big_core_affinity, affinity.little_core_affinity);
}
return affinity;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.h"
#include <cstdint>
#include <map>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "include/cpuinfo.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
namespace tflite {
namespace acceleration {
namespace {
TEST(BigLittle, CheckBasics) {
ASSERT_TRUE(cpuinfo_initialize());
auto processors_count = cpuinfo_get_processors_count();
ASSERT_GT(processors_count, 0);
#if defined(__ANDROID__)
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
if (android_info.is_emulator) {
std::cout << "Running on emulator\n";
return;
} else {
std::cout << "Running on hardware\n";
}
ASSERT_TRUE(status.ok());
std::map<uint32_t, uint64_t> cluster_to_max_frequency;
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
if (processor->core->frequency > 0) {
cluster_to_max_frequency[processor->cluster->cluster_id] =
processor->core->frequency;
}
}
EXPECT_GT(cluster_to_max_frequency.size(), 0);
EXPECT_LE(cluster_to_max_frequency.size(), 3);
for (auto i = 0; i < cpuinfo_get_processors_count(); i++) {
const struct cpuinfo_processor* processor = cpuinfo_get_processor(i);
EXPECT_TRUE(cluster_to_max_frequency.find(processor->cluster->cluster_id) !=
cluster_to_max_frequency.end());
}
BigLittleAffinity affinity = GetAffinity();
EXPECT_GT(affinity.little_core_affinity, 0);
EXPECT_GT(affinity.big_core_affinity, 0);
std::cout << "Little core affinity: " << std::hex
<< affinity.little_core_affinity << std::endl;
std::cout << "Big core affinity: " << std::hex << affinity.big_core_affinity
<< std::endl;
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/big_little_affinity_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ce41f9f8-1f4d-4324-9dbd-5d7e74c1ec3e | cpp | tensorflow/tensorflow | gpu_compilation_environment | third_party/xla/xla/service/gpu_compilation_environment.cc | third_party/xla/xla/service/gpu_compilation_environment_test.cc | #include "xla/service/gpu_compilation_environment.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/util/command_line_flags.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
void InitializeFlagsForGpuCompEnv(std::vector<tsl::Flag>* flag_list,
GpuCompilationEnvironment* gpu_comp_env) {
auto int64_setter_for =
[gpu_comp_env](
void (GpuCompilationEnvironment::*member_setter)(int64_t)) {
return [gpu_comp_env, member_setter](int64_t value) {
(gpu_comp_env->*member_setter)(value);
return true;
};
};
flag_list->push_back(tsl::Flag(
"dummy_flag",
int64_setter_for(&GpuCompilationEnvironment::set_dummy_flag),
gpu_comp_env->dummy_flag(), "Dummy flag to demonstrate the flow"));
}
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromFlagStrings(
std::vector<std::string>& flags, bool strict) {
GpuCompilationEnvironment gpu_comp_env;
std::vector<tsl::Flag> flag_objects;
InitializeFlagsForGpuCompEnv(&flag_objects, &gpu_comp_env);
bool result = tsl::Flags::Parse(flags, flag_objects);
if (!result || (strict && !flags.empty())) {
return InvalidArgument("Could not parse flags: %s",
absl::StrJoin(flags, ", "));
}
return gpu_comp_env;
}
absl::StatusOr<GpuCompilationEnvironment> CreateGpuCompEnvFromEnvVar() {
GpuCompilationEnvironment env;
std::vector<tsl::Flag> flag_objects;
InitializeFlagsForGpuCompEnv(&flag_objects, &env);
ParseFlagsFromEnvAndIgnoreUnknown("XLA_FLAGS", flag_objects);
return env;
}
GpuCompilationEnvironment CreateGpuCompEnvWithDefaultValues() {
GpuCompilationEnvironment env;
env.set_dummy_flag(1);
return env;
}
absl::Status InitializeMissingFieldsFromXLAFlags(
GpuCompilationEnvironment& env) {
TF_ASSIGN_OR_RETURN(GpuCompilationEnvironment from_env,
CreateGpuCompEnvFromEnvVar());
auto default_env = CreateGpuCompEnvWithDefaultValues();
auto reflection = env.GetReflection();
auto reflection_from_env = from_env.GetReflection();
auto descriptor = GpuCompilationEnvironment::descriptor();
std::vector<const tsl::protobuf::FieldDescriptor*> missing_fields;
for (int j = 0; j < descriptor->field_count(); ++j) {
const tsl::protobuf::FieldDescriptor* field = descriptor->field(j);
if (reflection->HasField(env, field) &&
reflection_from_env->HasField(from_env, field)) {
return InvalidArgument(
"Flag %s is set in both XLA_FLAGS env var and "
"GpuCompilationEnvironment.",
field->name());
} else if (!reflection->HasField(env, field) &&
!reflection_from_env->HasField(from_env, field)) {
missing_fields.push_back(field);
}
}
env.MergeFrom(from_env);
if (!missing_fields.empty()) {
reflection->SwapFields(&env, &default_env, missing_fields);
}
return absl::OkStatus();
}
namespace {
absl::StatusOr<std::unique_ptr<tsl::protobuf::Message>>
ProcessNewGpuCompilationEnvironment(
std::unique_ptr<tsl::protobuf::Message> env) {
if (!env) {
env = std::make_unique<GpuCompilationEnvironment>();
}
return env;
}
}
}
static bool InitModule() {
xla::CompilationEnvironments::RegisterProcessNewEnvFn(
xla::GpuCompilationEnvironment::descriptor(),
xla::ProcessNewGpuCompilationEnvironment);
return true;
}
static bool module_initialized = InitModule(); | #include "xla/service/gpu_compilation_environment.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/parse_flags_from_env.h"
#include "xla/service/compilation_environments.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::StatusIs;
void set_xla_flags_env_var(const std::string& xla_flags) {
int* pargc;
std::vector<char*>* pargv;
ResetFlagsFromEnvForTesting("XLA_FLAGS", &pargc, &pargv);
tsl::setenv("XLA_FLAGS", xla_flags.c_str(), true );
}
TEST(CreateGpuCompEnvFromFlagStringsTest, ValidFlags) {
std::vector<std::string> flags = {"--dummy_flag=2"};
TF_ASSERT_OK_AND_ASSIGN(
GpuCompilationEnvironment gpu_comp_env,
CreateGpuCompEnvFromFlagStrings(flags, true));
ASSERT_EQ(gpu_comp_env.dummy_flag(), 2);
ASSERT_TRUE(flags.empty());
}
TEST(CreateGpuCompEnvFromFlagStringsTest, EmptyFlags) {
std::vector<std::string> flags;
TF_ASSERT_OK_AND_ASSIGN(
GpuCompilationEnvironment gpu_comp_env,
CreateGpuCompEnvFromFlagStrings(flags, true));
}
TEST(CreateGpuCompEnvFromFlagStringsTest, InvalidFlagName) {
std::vector<std::string> flags = {"--xla_gpu_invalid_flag=2"};
EXPECT_THAT(CreateGpuCompEnvFromFlagStrings(flags, true),
StatusIs(tsl::error::INVALID_ARGUMENT));
TF_ASSERT_OK_AND_ASSIGN(
GpuCompilationEnvironment gpu_comp_env,
CreateGpuCompEnvFromFlagStrings(flags, false));
ASSERT_EQ(flags.size(), 1);
}
TEST(CreateGpuCompEnvFromEnvVarTest, ValidFlags) {
set_xla_flags_env_var("--dummy_flag=4");
TF_ASSERT_OK_AND_ASSIGN(GpuCompilationEnvironment gpu_comp_env,
CreateGpuCompEnvFromEnvVar());
ASSERT_EQ(gpu_comp_env.dummy_flag(), 4);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest, BothProtoAndEnvVarUnset) {
set_xla_flags_env_var("");
GpuCompilationEnvironment env;
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 1);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoSetButEnvVarUnset) {
set_xla_flags_env_var("");
GpuCompilationEnvironment env;
env.set_dummy_flag(2);
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 2);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest, ProtoUnsetButEnvVarSet) {
set_xla_flags_env_var("--dummy_flag=4");
GpuCompilationEnvironment env;
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 4);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest,
BothProtoAndEnvVarSetButNoConflict) {
set_xla_flags_env_var("--dummy_flag=4");
CompilationEnvironments envs;
GpuCompilationEnvironment env;
TF_ASSERT_OK(InitializeMissingFieldsFromXLAFlags(env));
EXPECT_EQ(env.dummy_flag(), 4);
}
TEST(InitializeMissingFieldsFromXLAFlagsTest,
BothProtoAndEnvVarSetWithConflict) {
set_xla_flags_env_var("--dummy_flag=4");
CompilationEnvironments envs;
GpuCompilationEnvironment env;
env.set_dummy_flag(2);
EXPECT_THAT(InitializeMissingFieldsFromXLAFlags(env),
StatusIs(tsl::error::INVALID_ARGUMENT));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu_compilation_environment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu_compilation_environment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8be0f844-9f6f-4412-9f03-1e5bca387c25 | cpp | tensorflow/tensorflow | if | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/if.cc | tensorflow/lite/kernels/if_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/if.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
class LegalizeIfOp : public OpConversionPattern<mhlo::IfOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::IfOp if_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
auto new_op = rewriter.create<TFL::IfOp>(
if_op.getLoc(), if_op.getResultTypes(), if_op.getPred());
new_op.getThenRegion().takeBody(if_op.getTrueBranch());
new_op.getElseRegion().takeBody(if_op.getFalseBranch());
ReplaceTerminatorWithYield(new_op.getThenRegion(), rewriter);
ReplaceTerminatorWithYield(new_op.getElseRegion(), rewriter);
rewriter.replaceOp(if_op, new_op.getResults());
return success();
}
};
}
void PopulateIfPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeIfOp>(ctx);
target.addIllegalOp<mhlo::IfOp>();
}
} | #include <stdint.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
namespace tflite {
using subgraph_test_util::CheckIntTensor;
using subgraph_test_util::CheckScalarStringTensor;
using subgraph_test_util::CheckStringTensor;
using subgraph_test_util::ControlFlowOpTest;
using subgraph_test_util::FillIntTensor;
using subgraph_test_util::FillScalarStringTensor;
namespace {
class SimpleIfTest : public ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildMulSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
}
};
TEST_F(SimpleIfTest, TestIfTrue) {
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {6, 9});
}
TEST_F(SimpleIfTest, TestIfFalse) {
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
CheckIntTensor(output, {1, 2}, {5, 14});
}
TEST_F(SimpleIfTest, TestIfTrueWithLargeInputsTwice) {
const size_t kNumLargeTensors = 100000;
interpreter_->ResizeInputTensor(interpreter_->inputs()[1],
{kNumLargeTensors});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
const std::vector<int> input_vector(kNumLargeTensors, 1);
interpreter_->typed_input_tensor<bool>(0)[0] = true;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), input_vector);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {9});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected(kNumLargeTensors, 10);
CheckIntTensor(output, {kNumLargeTensors}, expected);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {19});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected2(kNumLargeTensors, 20);
CheckIntTensor(output, {kNumLargeTensors}, expected2);
}
TEST_F(SimpleIfTest, TestIfFalseWithLargeInputsTwice) {
const size_t kNumLargeTensors = 100000;
interpreter_->ResizeInputTensor(interpreter_->inputs()[1],
{kNumLargeTensors});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
const std::vector<int> input_vector(kNumLargeTensors, 1);
interpreter_->typed_input_tensor<bool>(0)[0] = false;
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), input_vector);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {0});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected(kNumLargeTensors, 0);
CheckIntTensor(output, {kNumLargeTensors}, expected);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {7});
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
output = interpreter_->tensor(interpreter_->outputs()[0]);
const std::vector<int> expected2(kNumLargeTensors, 7);
CheckIntTensor(output, {kNumLargeTensors}, expected2);
}
class DynamicSubgraphIfTest : public ControlFlowOpTest {
protected:
void SetUp() override {
AddSubgraphs(2);
builder_->BuildAddSubgraph(interpreter_->subgraph(1));
builder_->BuildPadSubgraph(interpreter_->subgraph(2));
builder_->BuildIfSubgraph(&interpreter_->primary_subgraph());
interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {2});
interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1, 2});
ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {5, 7});
FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2});
}
};
TEST_F(DynamicSubgraphIfTest, TestIfTrue) {
interpreter_->typed_input_tensor<bool>(0)[0] = true;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
EXPECT_TRUE(IsDynamicTensor(output));
CheckIntTensor(output, {1, 2}, {6, 9});
}
TEST_F(DynamicSubgraphIfTest, TestIfFalse) {
interpreter_->typed_input_tensor<bool>(0)[0] = false;
ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
TfLiteTensor* output = interpreter_->tensor(interpreter_->outputs()[0]);
EXPECT_TRUE(IsDynamicTensor(output));
CheckIntTensor(output, {5}, {0, 5, 7, 0, 0});
}
class IfTest : public ControlFlowOpTest {};
// Applies the XNNPACK delegate to a float If graph while marking the primary
// subgraph and subgraph(1) as delegation-skippable, then checks results and
// repeated invocation with both condition values.
TEST_F(IfTest, TestWithXNNPACK) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(1));
  builder_->BuildXNNPACKSubgraph(interpreter_->subgraph(2));
  builder_->BuildFloatIfSubgraph(&interpreter_->primary_subgraph(), 3);
  const auto opt = TfLiteXNNPackDelegateOptionsDefault();
  TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&opt);
  // Skip delegation for the primary subgraph and subgraph(1) — presumably
  // leaving subgraph(2) as the only delegation candidate; confirm against
  // MarkAsDelegationSkippable semantics.
  interpreter_->primary_subgraph().MarkAsDelegationSkippable();
  interpreter_->subgraph(1)->MarkAsDelegationSkippable();
  ASSERT_EQ(interpreter_->ModifyGraphWithDelegate(xnnpack_delegate), kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  float* input0 =
      GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[1]));
  input0[0] = 1;
  float* input1 =
      GetTensorData<float>(interpreter_->tensor(interpreter_->inputs()[2]));
  input1[0] = 1;
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  // Both outputs are expected to be 4 for inputs of 1 and 1.
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  float* output0_data = GetTensorData<float>(output0);
  ASSERT_EQ(output0_data[0], 4);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  float* output1_data = GetTensorData<float>(output1);
  ASSERT_EQ(output1_data[0], 4);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  // Flip the condition and make sure re-invocation still succeeds.
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  // The delegate must stay alive for all invocations; delete it last.
  TfLiteXNNPackDelegateDelete(xnnpack_delegate);
}
// Branch bodies where an input tensor is forwarded directly as an output;
// verifies results for the true branch and stability of repeated invocation
// after switching to the false branch.
TEST_F(IfTest, TestInputIsOutput) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(1));
  builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 4);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {1});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {2});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {1}, {2});
  // Both branches produce the same values here; re-check after flipping.
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  CheckIntTensor(output0, {1}, {2});
  CheckIntTensor(output1, {1}, {2});
}
// Branch bodies that forward an input as an output but in a different output
// slot; checks the computed values once, then re-invokes for stability.
TEST_F(IfTest, TestInputIsOutputButDifferent) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildInputIsDifferentOutputSubgraph(interpreter_->subgraph(1));
  builder_->BuildInputIsDifferentOutputSubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {2});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {1}, {3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Branch bodies with a flexible-size output: input 2 has shape {2} and the
// second output follows it, while the first output stays a scalar-like {1}.
TEST_F(IfTest, TestFlexOutput) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildFlexOutputSubgraph(interpreter_->subgraph(1));
  builder_->BuildFlexOutputSubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2, 3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {2});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {2}, {3, 4});
  // Repeated invocation must keep succeeding with the same allocations.
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// An If whose branch bodies only increment a counter: a single int input is
// threaded through and bumped from 1 to 2, then the graph is re-invoked to
// make sure repeated runs stay healthy.
TEST_F(IfTest, TestCounterOnly) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildCounterOnlySubgraph(interpreter_->subgraph(1));
  builder_->BuildCounterOnlySubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 2);
  // Condition (input 0) and counter (input 1) are both single-element.
  for (int i = 0; i < 2; ++i) {
    ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[i], {1}),
              kTfLiteOk);
  }
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* counter_out = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(counter_out, {1}, {2});
  for (int i = 0; i < 3; ++i) {
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  }
}
// Exercises the combined in-place scenarios subgraph through If with six
// inputs, checking all five outputs and then re-invoking for stability.
TEST_F(IfTest, TestAllCases) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildAllInplaceScenariosSubgraph(interpreter_->subgraph(1));
  builder_->BuildAllInplaceScenariosSubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 6);
  // All six inputs (condition + five data tensors) start as single elements.
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[5], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[5]), {1});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {3});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {1}, {3});
  // Outputs 2 and 3 grow to two elements; output 4 stays a single element.
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
  CheckIntTensor(output2, {2}, {2, 2});
  TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
  CheckIntTensor(output3, {2}, {3, 3});
  TfLiteTensor* output4 = interpreter_->tensor(interpreter_->outputs()[4]);
  CheckIntTensor(output4, {1}, {1});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// If subgraph with an output that no downstream op consumes; runs the same
// scenario twice, once with default allocation and once with
// OptimizeMemoryForLargeTensors (which enables dynamic allocation paths).
TEST_F(IfTest, TestStaticUnconsumedOutputs) {
  for (bool dynamic_tensors : {true, false}) {
    interpreter_ = std::make_unique<Interpreter>();
    AddSubgraphs(2);
    builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(1));
    builder_->BuildInputIsOutputSubgraph(interpreter_->subgraph(2));
    builder_->BuildMultiInputIfSubgraphWithUnconsumedOutput(
        &interpreter_->primary_subgraph(), 4);
    InterpreterOptions options;
    if (dynamic_tensors) {
      // Threshold of 1 forces the large-tensor memory optimization on.
      options.OptimizeMemoryForLargeTensors(1);
      interpreter_->ApplyOptions(&options);
    }
    ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
              kTfLiteOk);
    ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
              kTfLiteOk);
    ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
              kTfLiteOk);
    ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
              kTfLiteOk);
    ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
    interpreter_->typed_input_tensor<bool>(0)[0] = true;
    FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
    FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
    FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
    TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
    CheckIntTensor(output0, {1}, {2});
    TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
    CheckIntTensor(output1, {1}, {4});
    // Resize input 3 to two elements and re-run; output 1 must follow.
    ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {2}),
              kTfLiteOk);
    ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
    FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2, 2});
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
    CheckIntTensor(output1, {2}, {4, 4});
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
    interpreter_->typed_input_tensor<bool>(0)[0] = false;
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  }
}
// Regression-style test: a dynamic op inside the branch body must trigger
// allocation of an otherwise-unused input (builder name keeps the original
// "Unsed" spelling of the helper).
TEST_F(IfTest, TestDynamicOpTriggersAllocationOfUnsedInput) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
      interpreter_->subgraph(1));
  builder_->BuildDynamicOpTriggersAllocationOfUnsedInputSubgraph(
      interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 4);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {3});
  // Outputs 1 and 2 grow to two elements through the dynamic op.
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {2}, {4, 4});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
  CheckIntTensor(output2, {2}, {2, 2});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Static-shape branch body ("deep body") that can run its ops in place;
// checks both outputs and then re-invokes once.
TEST_F(IfTest, TestStaticInPlace) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildDeepBodySubgraph(interpreter_->subgraph(1));
  builder_->BuildDeepBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {0});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {1});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {1}, {3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Static in-place If over a large (size-element) tensor: the "large body"
// branch consumes a scalar counter plus the big vector and produces a scalar
// sum plus a transformed vector.
TEST_F(IfTest, TestStaticInPlaceLarge) {
  // Single source of truth for the tensor length; the output expectations
  // below are derived from it instead of repeating the literal.
  const int size = 10000;
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildLargeBodySubgraph(interpreter_->subgraph(1));
  builder_->BuildLargeBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  // Inputs 0 and 1 are scalars (shape {}); input 2 is the large vector.
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {size}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]),
                std::vector<int>(size, 1));
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  // Was a hard-coded {10000}; use `size` so the expectation tracks the
  // configured tensor length.
  CheckIntTensor(output1, {}, {size});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {size}, std::vector<int>(size, 6));
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Accumulate branch body (counter + running sum); also verifies before the
// first Invoke that the body subgraph's second input already uses a
// kTfLiteCustom allocation (i.e. it is mapped onto the caller's buffer).
TEST_F(IfTest, TestTriangularNumberSequence) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(1));
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1}),
            kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
  auto body_subgraph = interpreter_->subgraph(2);
  TfLiteTensor* subgraph_input2 =
      body_subgraph->tensor(body_subgraph->inputs()[1]);
  EXPECT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {2});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {1}, {3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Same accumulate body as above, but with a large input sized to trigger
// OptimizeMemoryForLargeTensors: after Invoke the body subgraph's second
// input must be a shallow copy (kTfLiteCustom) of the caller's buffer.
TEST_F(IfTest, TestTriangularNumberSequenceWithShallowCopy) {
  // One named constant instead of five copies of the literal 1000000.
  constexpr int kLargeTensorSize = 1000000;
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(1));
  builder_->BuildAccumulateLoopBodySubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {kLargeTensorSize});
  InterpreterOptions options;
  options.OptimizeMemoryForLargeTensors(kLargeTensorSize);
  ASSERT_EQ(interpreter_->ApplyOptions(&options), kTfLiteOk);
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  const std::vector<int> input_vector(kLargeTensorSize, 1);
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), input_vector);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  auto body_subgraph = interpreter_->subgraph(2);
  TfLiteTensor* subgraph_input2 =
      body_subgraph->tensor(body_subgraph->inputs()[1]);
  ASSERT_EQ(subgraph_input2->allocation_type, kTfLiteCustom);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {2});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  const std::vector<int> expected2(kLargeTensorSize, 3);
  CheckIntTensor(output2, {kLargeTensorSize}, expected2);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Pad-loop branch body with padding {1, 2}: a 2-element input becomes a
// 5-element zero-padded output.
TEST_F(IfTest, TestPadLoop) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(1), {1, 2});
  builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {2});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {5, 7});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {2});
  // {5, 7} padded with 1 leading and 2 trailing zeros... plus the extra
  // leading zero from the body — shape {5}.
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output2, {5}, {0, 5, 7, 0, 0});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Dynamic increasing-size branch body with buffer sharing, driven through an
// If that has an extra large input (10000 elements) that the checked outputs
// do not depend on. After the first verified run, the graph is re-invoked
// several times to ensure the dynamic/sharing path stays stable.
TEST_F(IfTest, TestDynamicBodyWithSharingEarlyExit) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(1));
  builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 5);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {3});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {10000});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2, 3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* counter_out = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(counter_out, {1}, {2});
  TfLiteTensor* data_out = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(data_out, {3}, {2, 3, 4});
  for (int i = 0; i < 7; ++i) {
    ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  }
}
// Dynamic increasing-size branch body with buffer sharing and two extra
// million-element inputs that are passed straight through; only the shapes of
// those large outputs are checked, not their contents.
TEST_F(IfTest, TestDynamicBodyWithSharing) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(1));
  builder_->BuildDynamicIncreasingSizeSubgraph(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 5);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {3});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1000000});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1000000});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1, 2, 3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {2});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {3}, {2, 3, 4});
  // Large pass-through outputs: only verify they kept their 1M-element shape.
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
  EXPECT_EQ(output2->dims->data[0], 1000000);
  TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[3]);
  EXPECT_EQ(output3->dims->data[0], 1000000);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Dynamic branch body whose outputs alias its inputs; feeds five int inputs
// 0..4 and checks five outputs.
TEST_F(IfTest, TestDynamicBodyWithSharingAndAliases) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildDynamicBodySubgraphWithAliases(interpreter_->subgraph(1));
  builder_->BuildDynamicBodySubgraphWithAliases(interpreter_->subgraph(2));
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 6);
  // NOTE(review): inputs()[5] is filled below but never resized here —
  // presumably its default shape is already {1}; confirm against the builder.
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[4], {1});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {0});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[4]), {3});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[5]), {4});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {1});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {1}, {11});
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
  CheckIntTensor(output2, {1}, {12});
  // NOTE(review): output3 reads outputs()[4] — identical to output4 two lines
  // below. This looks like a copy-paste slip for outputs()[3]; confirm the
  // intended index and expected value before changing.
  TfLiteTensor* output3 = interpreter_->tensor(interpreter_->outputs()[4]);
  CheckIntTensor(output3, {1}, {13});
  TfLiteTensor* output4 = interpreter_->tensor(interpreter_->outputs()[4]);
  CheckIntTensor(output4, {1}, {13});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Branch body with an output that the outer graph never consumes; only the
// single consumed output is checked.
TEST_F(IfTest, TestOutputNotConsumed) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildOutputNotConsumedSubgraph(*interpreter_->subgraph(1));
  builder_->BuildOutputNotConsumedSubgraph(*interpreter_->subgraph(2));
  builder_->BuildOutputNotConsumedIfSubgraph(&interpreter_->primary_subgraph());
  // NOTE(review): inputs()[3] is filled below but only inputs 0-2 are resized
  // — presumably its default shape is already {1}; confirm against the
  // builder.
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = true;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {3});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Large pad branch body with padding {1, 2} and buffer sharing across four
// inputs; checks a counter output, an added vector, and the padded result.
TEST_F(IfTest, TestPadLoopWithSharing) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildLargePadSubgraph(interpreter_->subgraph(1), {1, 2});
  builder_->BuildLargePadSubgraph(interpreter_->subgraph(2), {1, 2});
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 4);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {2});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), {2});
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {3, 4});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output0 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output0, {1}, {3});
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[1]);
  CheckIntTensor(output1, {2}, {5, 6});
  // {5, 6} zero-padded to five elements by the {1, 2} padding.
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[2]);
  CheckIntTensor(output2, {5}, {0, 5, 6, 0, 0});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Pad branch body over a million-element input, exercising the shallow-copy
// path for large tensors; the padded output grows by three elements.
TEST_F(IfTest, TestPadLoopWithShallowCopy) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(1), {1, 2});
  builder_->BuildPadLoopBodySubgraph(interpreter_->subgraph(2), {1, 2});
  builder_->BuildMultiInputIfSubgraph(&interpreter_->primary_subgraph(), 3);
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {1000000});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[1]), {1});
  // Only the first two elements are non-zero; the rest stay 0.
  std::vector<int> input_vector(1000000, 0);
  input_vector[0] = 5;
  input_vector[1] = 7;
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[2]), input_vector);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* output1 = interpreter_->tensor(interpreter_->outputs()[0]);
  CheckIntTensor(output1, {1}, {2});
  // Padding {1, 2} grows the output to 1000003 elements, shifting the data
  // right by one.
  TfLiteTensor* output2 = interpreter_->tensor(interpreter_->outputs()[1]);
  std::vector<int> output_vector(1000003, 0);
  output_vector[1] = 5;
  output_vector[2] = 7;
  CheckIntTensor(output2, {1000003}, output_vector);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
// Branch body that mixes string tensors (inherently dynamic) with an int
// counter; checks a scalar string, a concatenated string vector, and the
// incremented counter.
TEST_F(IfTest, TestIfLoopWithDynamicTensor) {
  interpreter_ = std::make_unique<Interpreter>();
  AddSubgraphs(2);
  builder_->BuildBodySubgraphWithDynamicTensor(interpreter_->subgraph(1));
  builder_->BuildBodySubgraphWithDynamicTensor(interpreter_->subgraph(2));
  builder_->BuildIfSubgraphWithDynamicTensor(&interpreter_->primary_subgraph());
  // Inputs 1 and 2 are scalar strings (shape {}); input 3 is the counter.
  interpreter_->ResizeInputTensor(interpreter_->inputs()[0], {1});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[1], {});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[2], {});
  interpreter_->ResizeInputTensor(interpreter_->inputs()[3], {1});
  ASSERT_EQ(interpreter_->AllocateTensors(), kTfLiteOk);
  interpreter_->typed_input_tensor<bool>(0)[0] = false;
  FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[1]), "A");
  FillScalarStringTensor(interpreter_->tensor(interpreter_->inputs()[2]), "A");
  FillIntTensor(interpreter_->tensor(interpreter_->inputs()[3]), {1});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  TfLiteTensor* string_output1 =
      interpreter_->tensor(interpreter_->outputs()[0]);
  CheckScalarStringTensor(string_output1, "A");
  TfLiteTensor* string_output2 =
      interpreter_->tensor(interpreter_->outputs()[1]);
  CheckStringTensor(string_output2, {2}, {"A", "A"});
  TfLiteTensor* integer_output =
      interpreter_->tensor(interpreter_->outputs()[2]);
  CheckIntTensor(integer_output, {1}, {2});
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
  ASSERT_EQ(interpreter_->Invoke(), kTfLiteOk);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/if.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/if_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f8d5d5e-32e1-4d9c-88f8-05cfbfcd8c32 | cpp | google/quiche | headers_payload_decoder | quiche/http2/decoder/payload_decoders/headers_payload_decoder.cc | quiche/http2/decoder/payload_decoders/headers_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/headers_payload_decoder.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
// Streams the enumerator name of a HeadersPayloadDecoder::PayloadState; an
// out-of-range value triggers QUICHE_BUG and is printed numerically.
std::ostream& operator<<(std::ostream& out,
                         HeadersPayloadDecoder::PayloadState v) {
  const char* name = nullptr;
  switch (v) {
    case HeadersPayloadDecoder::PayloadState::kReadPadLength:
      name = "kReadPadLength";
      break;
    case HeadersPayloadDecoder::PayloadState::kStartDecodingPriorityFields:
      name = "kStartDecodingPriorityFields";
      break;
    case HeadersPayloadDecoder::PayloadState::kResumeDecodingPriorityFields:
      name = "kResumeDecodingPriorityFields";
      break;
    case HeadersPayloadDecoder::PayloadState::kReadPayload:
      name = "kReadPayload";
      break;
    case HeadersPayloadDecoder::PayloadState::kSkipPadding:
      name = "kSkipPadding";
      break;
  }
  if (name != nullptr) {
    return out << name;
  }
  int unknown = static_cast<int>(v);
  QUICHE_BUG(http2_bug_189_1)
      << "Invalid HeadersPayloadDecoder::PayloadState: " << unknown;
  return out << "HeadersPayloadDecoder::PayloadState(" << unknown << ")";
}
// Begins decoding a HEADERS frame payload.  Fast path: if the frame has
// neither PADDED nor PRIORITY flags and the whole payload is already in the
// buffer, the HPACK fragment is delivered in one shot.  Otherwise the
// payload state is selected and decoding is delegated to
// ResumeDecodingPayload.
DecodeStatus HeadersPayloadDecoder::StartDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  const Http2FrameHeader& frame_header = state->frame_header();
  const uint32_t total_length = frame_header.payload_length;
  QUICHE_DVLOG(2) << "HeadersPayloadDecoder::StartDecodingPayload: "
                  << frame_header;
  QUICHE_DCHECK_EQ(Http2FrameType::HEADERS, frame_header.type);
  QUICHE_DCHECK_LE(db->Remaining(), total_length);
  // Only the four flags defined for HEADERS may be set at this point.
  QUICHE_DCHECK_EQ(
      0, frame_header.flags &
             ~(Http2FrameFlag::END_STREAM | Http2FrameFlag::END_HEADERS |
               Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY));
  const auto payload_flags = Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY;
  if (!frame_header.HasAnyFlags(payload_flags)) {
    QUICHE_DVLOG(2) << "StartDecodingPayload !IsPadded && !HasPriority";
    if (db->Remaining() == total_length) {
      // Entire unpadded, priority-free payload is available: emit the whole
      // HPACK fragment and finish immediately.
      QUICHE_DVLOG(2) << "StartDecodingPayload all present";
      state->listener()->OnHeadersStart(frame_header);
      if (total_length > 0) {
        state->listener()->OnHpackFragment(db->cursor(), total_length);
        db->AdvanceCursor(total_length);
      }
      state->listener()->OnHeadersEnd();
      return DecodeStatus::kDecodeDone;
    }
    payload_state_ = PayloadState::kReadPayload;
  } else if (frame_header.IsPadded()) {
    // Pad length comes first when PADDED is set (priority fields, if any,
    // are handled after it in ResumeDecodingPayload).
    payload_state_ = PayloadState::kReadPadLength;
  } else {
    QUICHE_DCHECK(frame_header.HasPriority()) << frame_header;
    payload_state_ = PayloadState::kStartDecodingPriorityFields;
  }
  state->InitializeRemainders();
  state->listener()->OnHeadersStart(frame_header);
  return ResumeDecodingPayload(state, db);
}
// Continues decoding a HEADERS payload across buffer boundaries.  The switch
// is a fall-through state machine: pad length -> priority fields (if
// PRIORITY) -> HPACK fragment -> trailing padding.  Each state returns early
// when the buffer runs dry, recording where to resume.
DecodeStatus HeadersPayloadDecoder::ResumeDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2) << "HeadersPayloadDecoder::ResumeDecodingPayload "
                  << "remaining_payload=" << state->remaining_payload()
                  << "; db->Remaining=" << db->Remaining();
  const Http2FrameHeader& frame_header = state->frame_header();
  QUICHE_DCHECK_EQ(Http2FrameType::HEADERS, frame_header.type);
  QUICHE_DCHECK_LE(state->remaining_payload_and_padding(),
                   frame_header.payload_length);
  QUICHE_DCHECK_LE(db->Remaining(), state->remaining_payload_and_padding());
  DecodeStatus status;
  size_t avail;
  while (true) {
    QUICHE_DVLOG(2)
        << "HeadersPayloadDecoder::ResumeDecodingPayload payload_state_="
        << payload_state_;
    switch (payload_state_) {
      case PayloadState::kReadPadLength:
        // ReadPadLength handles the case where the pad length byte hasn't
        // arrived yet; `true` reports the pad length to the listener.
        status = state->ReadPadLength(db, true);
        if (status != DecodeStatus::kDecodeDone) {
          return status;
        }
        if (!frame_header.HasPriority()) {
          // No priority fields: jump straight to the HPACK fragment.
          payload_state_ = PayloadState::kReadPayload;
          continue;
        }
        ABSL_FALLTHROUGH_INTENDED;
      case PayloadState::kStartDecodingPriorityFields:
        status = state->StartDecodingStructureInPayload(&priority_fields_, db);
        if (status != DecodeStatus::kDecodeDone) {
          // Priority struct split across buffers; resume it next time.
          payload_state_ = PayloadState::kResumeDecodingPriorityFields;
          return status;
        }
        state->listener()->OnHeadersPriority(priority_fields_);
        ABSL_FALLTHROUGH_INTENDED;
      case PayloadState::kReadPayload:
        // Deliver as much of the HPACK fragment as is buffered.
        avail = state->AvailablePayload(db);
        if (avail > 0) {
          state->listener()->OnHpackFragment(db->cursor(), avail);
          db->AdvanceCursor(avail);
          state->ConsumePayload(avail);
        }
        if (state->remaining_payload() > 0) {
          payload_state_ = PayloadState::kReadPayload;
          return DecodeStatus::kDecodeInProgress;
        }
        ABSL_FALLTHROUGH_INTENDED;
      case PayloadState::kSkipPadding:
        // SkipPadding returns true once all trailing padding is consumed.
        if (state->SkipPadding(db)) {
          state->listener()->OnHeadersEnd();
          return DecodeStatus::kDecodeDone;
        }
        payload_state_ = PayloadState::kSkipPadding;
        return DecodeStatus::kDecodeInProgress;
      case PayloadState::kResumeDecodingPriorityFields:
        status = state->ResumeDecodingStructureInPayload(&priority_fields_, db);
        if (status != DecodeStatus::kDecodeDone) {
          return status;
        }
        state->listener()->OnHeadersPriority(priority_fields_);
        payload_state_ = PayloadState::kReadPayload;
        continue;
    }
    // Unreachable for valid states: every case above returns or continues.
    QUICHE_BUG(http2_bug_189_2) << "PayloadState: " << payload_state_;
  }
}
} | #include "quiche/http2/decoder/payload_decoders/headers_payload_decoder.h"
#include <stddef.h>
#include <string>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class HeadersPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::HEADERS;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() {
return Http2FrameFlag::PADDED | Http2FrameFlag::PRIORITY;
}
};
namespace {
struct Listener : public FramePartsCollector {
void OnHeadersStart(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnHeadersStart: " << header;
StartFrame(header)->OnHeadersStart(header);
}
void OnHeadersPriority(const Http2PriorityFields& priority) override {
QUICHE_VLOG(1) << "OnHeadersPriority: " << priority;
CurrentFrame()->OnHeadersPriority(priority);
}
void OnHpackFragment(const char* data, size_t len) override {
QUICHE_VLOG(1) << "OnHpackFragment: len=" << len;
CurrentFrame()->OnHpackFragment(data, len);
}
void OnHeadersEnd() override {
QUICHE_VLOG(1) << "OnHeadersEnd";
EndFrame()->OnHeadersEnd();
}
void OnPadLength(size_t pad_length) override {
QUICHE_VLOG(1) << "OnPadLength: " << pad_length;
CurrentFrame()->OnPadLength(pad_length);
}
void OnPadding(const char* padding, size_t skipped_length) override {
QUICHE_VLOG(1) << "OnPadding: " << skipped_length;
CurrentFrame()->OnPadding(padding, skipped_length);
}
void OnPaddingTooLong(const Http2FrameHeader& header,
size_t missing_length) override {
QUICHE_VLOG(1) << "OnPaddingTooLong: " << header
<< "; missing_length: " << missing_length;
FrameError(header)->OnPaddingTooLong(header, missing_length);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class HeadersPayloadDecoderTest
: public AbstractPaddablePayloadDecoderTest<
HeadersPayloadDecoder, HeadersPayloadDecoderPeer, Listener> {};
INSTANTIATE_TEST_SUITE_P(VariousPadLengths, HeadersPayloadDecoderTest,
::testing::Values(0, 1, 2, 3, 4, 254, 255, 256));
TEST_P(HeadersPayloadDecoderTest, VariousHpackPayloadSizes) {
for (size_t hpack_size : {0, 1, 2, 3, 255, 256, 1024}) {
QUICHE_LOG(INFO) << "########### hpack_size = " << hpack_size
<< " ###########";
Http2PriorityFields priority(RandStreamId(), 1 + Random().Rand8(),
Random().OneIn(2));
for (bool has_priority : {false, true}) {
Reset();
ASSERT_EQ(IsPadded() ? 1u : 0u, frame_builder_.size());
uint8_t flags = RandFlags();
if (has_priority) {
flags |= Http2FrameFlag::PRIORITY;
frame_builder_.Append(priority);
}
std::string hpack_payload = Random().RandString(hpack_size);
frame_builder_.Append(hpack_payload);
MaybeAppendTrailingPadding();
Http2FrameHeader frame_header(frame_builder_.size(),
Http2FrameType::HEADERS, flags,
RandStreamId());
set_frame_header(frame_header);
ScrubFlagsOfHeader(&frame_header);
FrameParts expected(frame_header, hpack_payload, total_pad_length_);
if (has_priority) {
expected.SetOptPriority(priority);
}
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(frame_builder_.buffer(),
expected));
}
}
}
TEST_P(HeadersPayloadDecoderTest, Truncated) {
auto approve_size = [](size_t size) {
return size != Http2PriorityFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(Http2PriorityFields(RandStreamId(), 1 + Random().Rand8(),
Random().OneIn(2)));
EXPECT_TRUE(VerifyDetectsMultipleFrameSizeErrors(
Http2FrameFlag::PRIORITY, fb.buffer(), approve_size, total_pad_length_));
}
TEST_P(HeadersPayloadDecoderTest, PaddingTooLong) {
EXPECT_TRUE(VerifyDetectsPaddingTooLong());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/headers_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/headers_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d19434c1-ac20-4e52-9b0a-725424f6da46 | cpp | google/cel-cpp | parsed_json_list_value | common/values/parsed_json_list_value.cc | common/values/parsed_json_list_value_test.cc | #include "common/values/parsed_json_list_value.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/parsed_json_value.h"
#include "internal/json.h"
#include "internal/number.h"
#include "internal/status_macros.h"
#include "internal/well_known_types.h"
#include "google/protobuf/message.h"
namespace cel {
namespace common_internal {
absl::Status CheckWellKnownListValueMessage(const google::protobuf::Message& message) {
return internal::CheckJsonList(message);
}
}
std::string ParsedJsonListValue::DebugString() const {
if (value_ == nullptr) {
return "[]";
}
return internal::JsonListDebugString(*value_);
}
absl::Status ParsedJsonListValue::SerializeTo(AnyToJsonConverter& converter,
absl::Cord& value) const {
if (value_ == nullptr) {
value.Clear();
return absl::OkStatus();
}
if (!value_->SerializePartialToCord(&value)) {
return absl::UnknownError("failed to serialize protocol buffer message");
}
return absl::OkStatus();
}
absl::StatusOr<Json> ParsedJsonListValue::ConvertToJson(
AnyToJsonConverter& converter) const {
if (value_ == nullptr) {
return JsonArray();
}
return internal::ProtoJsonListToNativeJsonList(*value_);
}
absl::Status ParsedJsonListValue::Equal(ValueManager& value_manager,
const Value& other,
Value& result) const {
if (auto other_value = other.AsParsedJsonList(); other_value) {
result = BoolValue(*this == *other_value);
return absl::OkStatus();
}
if (auto other_value = other.AsList(); other_value) {
return common_internal::ListValueEqual(value_manager, ListValue(*this),
*other_value, result);
}
result = BoolValue(false);
return absl::OkStatus();
}
absl::StatusOr<Value> ParsedJsonListValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
size_t ParsedJsonListValue::Size() const {
if (value_ == nullptr) {
return 0;
}
return static_cast<size_t>(
well_known_types::GetListValueReflectionOrDie(value_->GetDescriptor())
.ValuesSize(*value_));
}
absl::Status ParsedJsonListValue::Get(ValueManager& value_manager, size_t index,
Value& result) const {
if (value_ == nullptr) {
return absl::InvalidArgumentError("index out of bounds");
}
const auto reflection =
well_known_types::GetListValueReflectionOrDie(value_->GetDescriptor());
if (ABSL_PREDICT_FALSE(index >=
static_cast<size_t>(reflection.ValuesSize(*value_)))) {
return absl::InvalidArgumentError("index out of bounds");
}
result = common_internal::ParsedJsonValue(
value_manager.GetMemoryManager().arena(),
Borrowed(value_, &reflection.Values(*value_, static_cast<int>(index))));
return absl::OkStatus();
}
absl::StatusOr<Value> ParsedJsonListValue::Get(ValueManager& value_manager,
size_t index) const {
Value result;
CEL_RETURN_IF_ERROR(Get(value_manager, index, result));
return result;
}
absl::Status ParsedJsonListValue::ForEach(ValueManager& value_manager,
ForEachCallback callback) const {
return ForEach(value_manager,
[callback = std::move(callback)](size_t, const Value& value)
-> absl::StatusOr<bool> { return callback(value); });
}
absl::Status ParsedJsonListValue::ForEach(
ValueManager& value_manager, ForEachWithIndexCallback callback) const {
if (value_ == nullptr) {
return absl::OkStatus();
}
Value scratch;
const auto reflection =
well_known_types::GetListValueReflectionOrDie(value_->GetDescriptor());
const int size = reflection.ValuesSize(*value_);
for (int i = 0; i < size; ++i) {
scratch = common_internal::ParsedJsonValue(
value_manager.GetMemoryManager().arena(),
Borrowed(value_, &reflection.Values(*value_, i)));
CEL_ASSIGN_OR_RETURN(auto ok, callback(static_cast<size_t>(i), scratch));
if (!ok) {
break;
}
}
return absl::OkStatus();
}
namespace {
class ParsedJsonListValueIterator final : public ValueIterator {
public:
explicit ParsedJsonListValueIterator(Owned<const google::protobuf::Message> message)
: message_(std::move(message)),
reflection_(well_known_types::GetListValueReflectionOrDie(
message_->GetDescriptor())),
size_(reflection_.ValuesSize(*message_)) {}
bool HasNext() override { return index_ < size_; }
absl::Status Next(ValueManager& value_manager, Value& result) override {
if (ABSL_PREDICT_FALSE(index_ >= size_)) {
return absl::FailedPreconditionError(
"`ValueIterator::Next` called after `ValueIterator::HasNext` "
"returned false");
}
result = common_internal::ParsedJsonValue(
value_manager.GetMemoryManager().arena(),
Borrowed(message_, &reflection_.Values(*message_, index_)));
++index_;
return absl::OkStatus();
}
private:
const Owned<const google::protobuf::Message> message_;
const well_known_types::ListValueReflection reflection_;
const int size_;
int index_ = 0;
};
}
absl::StatusOr<absl::Nonnull<std::unique_ptr<ValueIterator>>>
ParsedJsonListValue::NewIterator(ValueManager& value_manager) const {
if (value_ == nullptr) {
return NewEmptyValueIterator();
}
return std::make_unique<ParsedJsonListValueIterator>(value_);
}
namespace {
absl::optional<internal::Number> AsNumber(const Value& value) {
if (auto int_value = value.AsInt(); int_value) {
return internal::Number::FromInt64(*int_value);
}
if (auto uint_value = value.AsUint(); uint_value) {
return internal::Number::FromUint64(*uint_value);
}
if (auto double_value = value.AsDouble(); double_value) {
return internal::Number::FromDouble(*double_value);
}
return absl::nullopt;
}
}
absl::Status ParsedJsonListValue::Contains(ValueManager& value_manager,
const Value& other,
Value& result) const {
if (value_ == nullptr) {
result = BoolValue(false);
return absl::OkStatus();
}
if (ABSL_PREDICT_FALSE(other.IsError() || other.IsUnknown())) {
result = other;
return absl::OkStatus();
}
const auto reflection =
well_known_types::GetListValueReflectionOrDie(value_->GetDescriptor());
if (reflection.ValuesSize(*value_) > 0) {
const auto value_reflection = well_known_types::GetValueReflectionOrDie(
reflection.GetValueDescriptor());
if (other.IsNull()) {
for (const auto& element : reflection.Values(*value_)) {
const auto element_kind_case = value_reflection.GetKindCase(element);
if (element_kind_case == google::protobuf::Value::KIND_NOT_SET ||
element_kind_case == google::protobuf::Value::kNullValue) {
result = BoolValue(true);
return absl::OkStatus();
}
}
} else if (const auto other_value = other.AsBool(); other_value) {
for (const auto& element : reflection.Values(*value_)) {
if (value_reflection.GetKindCase(element) ==
google::protobuf::Value::kBoolValue &&
value_reflection.GetBoolValue(element) == *other_value) {
result = BoolValue(true);
return absl::OkStatus();
}
}
} else if (const auto other_value = AsNumber(other); other_value) {
for (const auto& element : reflection.Values(*value_)) {
if (value_reflection.GetKindCase(element) ==
google::protobuf::Value::kNumberValue &&
internal::Number::FromDouble(
value_reflection.GetNumberValue(element)) == *other_value) {
result = BoolValue(true);
return absl::OkStatus();
}
}
} else if (const auto other_value = other.AsString(); other_value) {
std::string scratch;
for (const auto& element : reflection.Values(*value_)) {
if (value_reflection.GetKindCase(element) ==
google::protobuf::Value::kStringValue &&
absl::visit(
[&](const auto& alternative) -> bool {
return *other_value == alternative;
},
well_known_types::AsVariant(
value_reflection.GetStringValue(element, scratch)))) {
result = BoolValue(true);
return absl::OkStatus();
}
}
} else if (const auto other_value = other.AsList(); other_value) {
for (const auto& element : reflection.Values(*value_)) {
if (value_reflection.GetKindCase(element) ==
google::protobuf::Value::kListValue) {
CEL_RETURN_IF_ERROR(other_value->Equal(
value_manager,
ParsedJsonListValue(Owned(
Owner(value_), &value_reflection.GetListValue(element))),
result));
if (result.IsTrue()) {
return absl::OkStatus();
}
}
}
} else if (const auto other_value = other.AsMap(); other_value) {
for (const auto& element : reflection.Values(*value_)) {
if (value_reflection.GetKindCase(element) ==
google::protobuf::Value::kStructValue) {
CEL_RETURN_IF_ERROR(other_value->Equal(
value_manager,
ParsedJsonMapValue(Owned(
Owner(value_), &value_reflection.GetStructValue(element))),
result));
if (result.IsTrue()) {
return absl::OkStatus();
}
}
}
}
}
result = BoolValue(false);
return absl::OkStatus();
}
absl::StatusOr<Value> ParsedJsonListValue::Contains(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Contains(value_manager, other, result));
return result;
}
bool operator==(const ParsedJsonListValue& lhs,
const ParsedJsonListValue& rhs) {
if (cel::to_address(lhs.value_) == cel::to_address(rhs.value_)) {
return true;
}
if (cel::to_address(lhs.value_) == nullptr) {
return rhs.IsEmpty();
}
if (cel::to_address(rhs.value_) == nullptr) {
return lhs.IsEmpty();
}
return internal::JsonListEquals(*lhs.value_, *rhs.value_);
}
} | #include <cstddef>
#include <vector>
#include "google/protobuf/struct.pb.h"
#include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/json.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "common/value_manager.h"
#include "common/value_testing.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::cel::test::BoolValueIs;
using ::cel::test::IsNullValue;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::PrintToStringParamName;
using ::testing::TestWithParam;
using ::testing::VariantWith;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
class ParsedJsonListValueTest : public TestWithParam<AllocatorKind> {
public:
void SetUp() override {
switch (GetParam()) {
case AllocatorKind::kArena:
arena_.emplace();
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::Pooling(arena()),
NewThreadCompatibleTypeReflector(MemoryManager::Pooling(arena())));
break;
case AllocatorKind::kNewDelete:
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::ReferenceCounting(),
NewThreadCompatibleTypeReflector(
MemoryManager::ReferenceCounting()));
break;
}
}
void TearDown() override {
value_manager_.reset();
arena_.reset();
}
Allocator<> allocator() {
return arena_ ? ArenaAllocator(&*arena_) : NewDeleteAllocator();
}
absl::Nullable<google::protobuf::Arena*> arena() { return allocator().arena(); }
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
return GetTestingDescriptorPool();
}
absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
return GetTestingMessageFactory();
}
ValueManager& value_manager() { return **value_manager_; }
template <typename T>
auto GeneratedParseTextProto(absl::string_view text) {
return ::cel::internal::GeneratedParseTextProto<T>(
allocator(), text, descriptor_pool(), message_factory());
}
template <typename T>
auto DynamicParseTextProto(absl::string_view text) {
return ::cel::internal::DynamicParseTextProto<T>(
allocator(), text, descriptor_pool(), message_factory());
}
private:
absl::optional<google::protobuf::Arena> arena_;
absl::optional<Shared<ValueManager>> value_manager_;
};
TEST_P(ParsedJsonListValueTest, Kind) {
EXPECT_EQ(ParsedJsonListValue::kind(), ParsedJsonListValue::kKind);
EXPECT_EQ(ParsedJsonListValue::kind(), ValueKind::kList);
}
TEST_P(ParsedJsonListValueTest, GetTypeName) {
EXPECT_EQ(ParsedJsonListValue::GetTypeName(), ParsedJsonListValue::kName);
EXPECT_EQ(ParsedJsonListValue::GetTypeName(), "google.protobuf.ListValue");
}
TEST_P(ParsedJsonListValueTest, GetRuntimeType) {
EXPECT_EQ(ParsedJsonListValue::GetRuntimeType(), JsonListType());
}
TEST_P(ParsedJsonListValueTest, DebugString_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
EXPECT_EQ(valid_value.DebugString(), "[]");
}
TEST_P(ParsedJsonListValueTest, IsZeroValue_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
EXPECT_TRUE(valid_value.IsZeroValue());
}
TEST_P(ParsedJsonListValueTest, SerializeTo_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
absl::Cord serialized;
EXPECT_THAT(valid_value.SerializeTo(value_manager(), serialized), IsOk());
EXPECT_THAT(serialized, IsEmpty());
}
TEST_P(ParsedJsonListValueTest, ConvertToJson_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
EXPECT_THAT(valid_value.ConvertToJson(value_manager()),
IsOkAndHolds(VariantWith<JsonArray>(JsonArray())));
}
TEST_P(ParsedJsonListValueTest, Equal_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
EXPECT_THAT(valid_value.Equal(value_manager(), BoolValue()),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(
valid_value.Equal(
value_manager(),
ParsedJsonListValue(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"))),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Equal(value_manager(), ListValue()),
IsOkAndHolds(BoolValueIs(true)));
}
TEST_P(ParsedJsonListValueTest, Empty_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
EXPECT_TRUE(valid_value.IsEmpty());
}
TEST_P(ParsedJsonListValueTest, Size_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(R"pb()pb"));
EXPECT_EQ(valid_value.Size(), 0);
}
TEST_P(ParsedJsonListValueTest, Get_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(
R"pb(values {}
values { bool_value: true })pb"));
EXPECT_THAT(valid_value.Get(value_manager(), 0), IsOkAndHolds(IsNullValue()));
EXPECT_THAT(valid_value.Get(value_manager(), 1),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Get(value_manager(), 2),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST_P(ParsedJsonListValueTest, ForEach_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(
R"pb(values {}
values { bool_value: true })pb"));
{
std::vector<Value> values;
EXPECT_THAT(
valid_value.ForEach(value_manager(),
[&](const Value& element) -> absl::StatusOr<bool> {
values.push_back(element);
return true;
}),
IsOk());
EXPECT_THAT(values, ElementsAre(IsNullValue(), BoolValueIs(true)));
}
{
std::vector<Value> values;
EXPECT_THAT(valid_value.ForEach(
value_manager(),
[&](size_t, const Value& element) -> absl::StatusOr<bool> {
values.push_back(element);
return true;
}),
IsOk());
EXPECT_THAT(values, ElementsAre(IsNullValue(), BoolValueIs(true)));
}
}
TEST_P(ParsedJsonListValueTest, NewIterator_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(
R"pb(values {}
values { bool_value: true })pb"));
ASSERT_OK_AND_ASSIGN(auto iterator, valid_value.NewIterator(value_manager()));
ASSERT_TRUE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()), IsOkAndHolds(IsNullValue()));
ASSERT_TRUE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()), IsOkAndHolds(BoolValueIs(true)));
ASSERT_FALSE(iterator->HasNext());
EXPECT_THAT(iterator->Next(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(ParsedJsonListValueTest, Contains_Dynamic) {
ParsedJsonListValue valid_value(
DynamicParseTextProto<google::protobuf::ListValue>(
R"pb(values {}
values { bool_value: true }
values { number_value: 1.0 }
values { string_value: "foo" }
values { list_value: {} }
values { struct_value: {} })pb"));
EXPECT_THAT(valid_value.Contains(value_manager(), BytesValue()),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Contains(value_manager(), NullValue()),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Contains(value_manager(), BoolValue(false)),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Contains(value_manager(), BoolValue(true)),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Contains(value_manager(), DoubleValue(0.0)),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Contains(value_manager(), DoubleValue(1.0)),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Contains(value_manager(), StringValue("bar")),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Contains(value_manager(), StringValue("foo")),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(valid_value.Contains(
value_manager(),
ParsedJsonListValue(
DynamicParseTextProto<google::protobuf::ListValue>(
R"pb(values {}
values { bool_value: true }
values { number_value: 1.0 }
values { string_value: "foo" }
values { list_value: {} }
values { struct_value: {} })pb"))),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Contains(value_manager(), ListValue()),
IsOkAndHolds(BoolValueIs(true)));
EXPECT_THAT(
valid_value.Contains(
value_manager(),
ParsedJsonMapValue(DynamicParseTextProto<google::protobuf::Struct>(
R"pb(fields {
key: "foo"
value: { bool_value: true }
})pb"))),
IsOkAndHolds(BoolValueIs(false)));
EXPECT_THAT(valid_value.Contains(value_manager(), MapValue()),
IsOkAndHolds(BoolValueIs(true)));
}
INSTANTIATE_TEST_SUITE_P(ParsedJsonListValueTest, ParsedJsonListValueTest,
::testing::Values(AllocatorKind::kArena,
AllocatorKind::kNewDelete),
PrintToStringParamName());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_json_list_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_json_list_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
ce3f8a43-50fd-4413-8f1e-68a60566350c | cpp | tensorflow/tensorflow | name_utils | tensorflow/compiler/mlir/utils/name_utils.cc | tensorflow/core/data/name_utils_test.cc | #include "tensorflow/compiler/mlir/utils/name_utils.h"
#include <cctype>
#include <string>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/LLVM.h"
namespace mlir {
namespace {
bool IsLegalChar(char c, bool first_char) {
if (isalpha(c)) return true;
if (isdigit(c)) return true;
if (c == '.') return true;
if (c == '_') return true;
if (first_char) return false;
if (c == '/') return true;
if (c == '-') return true;
return false;
}
}
void LegalizeNodeName(std::string& name) {
if (name.empty()) return;
if (!IsLegalChar(name[0], true)) name[0] = '.';
for (char& c : llvm::drop_begin(name, 1))
if (!IsLegalChar(c, false)) c = '.';
}
std::string GetNameFromLoc(Location loc) {
llvm::SmallVector<llvm::StringRef, 8> loc_names;
llvm::SmallVector<Location, 8> locs;
locs.push_back(loc);
bool names_is_nonempty = false;
while (!locs.empty()) {
Location curr_loc = locs.pop_back_val();
if (auto name_loc = mlir::dyn_cast<NameLoc>(curr_loc)) {
auto name = name_loc.getName().strref().split('@').first;
if (!name.ends_with(":")) {
loc_names.push_back(name);
if (!name.empty()) names_is_nonempty = true;
}
continue;
} else if (auto call_loc = mlir::dyn_cast<CallSiteLoc>(curr_loc)) {
locs.push_back(call_loc.getCallee());
continue;
} else if (auto fused_loc = mlir::dyn_cast<FusedLoc>(curr_loc)) {
auto reversed_fused_locs = llvm::reverse(fused_loc.getLocations());
locs.append(reversed_fused_locs.begin(), reversed_fused_locs.end());
continue;
}
loc_names.push_back(llvm::StringRef());
}
if (names_is_nonempty)
return llvm::join(loc_names.begin(), loc_names.end(), ";");
return "";
}
} | #include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace data {
namespace {
TEST(DeviceNameUtils, ArgsToString) {
EXPECT_EQ(name_utils::ArgsToString({}), "");
EXPECT_EQ(name_utils::ArgsToString({"a"}), "(a)");
EXPECT_EQ(name_utils::ArgsToString({"1", "2", "3"}), "(1, 2, 3)");
}
TEST(NameUtilsTest, DatasetDebugString) {
EXPECT_EQ(name_utils::DatasetDebugString("Concatenate"),
"ConcatenateDatasetOp::Dataset");
name_utils::DatasetDebugStringParams range_params;
range_params.set_args(0, 10, 3);
EXPECT_EQ(name_utils::DatasetDebugString("Range", range_params),
"RangeDatasetOp(0, 10, 3)::Dataset");
name_utils::DatasetDebugStringParams shuffle_params;
shuffle_params.dataset_prefix = "FixedSeed";
shuffle_params.set_args(10, 1, 2);
EXPECT_EQ(name_utils::DatasetDebugString("Shuffle", shuffle_params),
"ShuffleDatasetOp(10, 1, 2)::FixedSeedDataset");
name_utils::DatasetDebugStringParams parallel_interleave_params;
parallel_interleave_params.op_version = 2;
EXPECT_EQ(name_utils::DatasetDebugString("ParallelInterleave",
parallel_interleave_params),
"ParallelInterleaveDatasetV2Op::Dataset");
}
TEST(NameUtilsTest, OpName) {
EXPECT_EQ(name_utils::OpName("Range"), "RangeDataset");
EXPECT_EQ(name_utils::OpName("Concatenate", name_utils::OpNameParams()),
"ConcatenateDataset");
name_utils::OpNameParams params;
params.op_version = 2;
EXPECT_EQ(name_utils::OpName("ParallelInterleave", params),
"ParallelInterleaveDatasetV2");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/utils/name_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/name_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2f985559-39e3-4126-a682-992bb1d338c5 | cpp | tensorflow/tensorflow | uniform_dequantize_quantize | tensorflow/lite/experimental/shlo/legacy/src/uniform_dequantize_quantize.cc | tensorflow/lite/experimental/shlo/legacy/test/uniform_dequantize_quantize_test.cc | #include <cstddef>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
absl::Status CheckDequantizeParameters(const QuantizedTensor& operand,
Tensor& result) {
if (operand.shape() != result.shape()) {
return absl::InvalidArgumentError("Inconsistent input/output shapes");
} else if (operand.expressed_type() != result.element_type()) {
return absl::InvalidArgumentError("Inconsistent element types");
} else if (!operand.is_per_tensor_quantized()) {
return absl::InvalidArgumentError("Unsupported input quantization");
}
if (operand.layout().has_strides() || result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
template <ElementType storage_type, ElementType expressed_type>
absl::Status UniformDequantize(const QuantizedTensor& operand, Tensor& result) {
if (auto check = CheckDequantizeParameters(operand, result); !check.ok()) {
return check;
}
const QuantizedParameter& operand_quant_param =
operand.type().element_type().parameters(0);
size_t n = operand.num_elements();
using S = Storage<storage_type>;
using E = Storage<expressed_type>;
auto operand_buffer = operand.buffer();
auto result_buffer = result.buffer();
for (size_t i = 0; i < n; ++i) {
auto operand_storage = S::Get(operand_buffer, i);
auto operand_expressed = Dequantize<storage_type, expressed_type>(
operand_storage, operand_quant_param);
auto result_expressed = operand_expressed;
E::Set(result_buffer, i, result_expressed);
}
return absl::OkStatus();
}
absl::Status CheckQuantizeParameters(const Tensor& operand,
QuantizedTensor& result) {
if (operand.shape() != result.shape()) {
return absl::InvalidArgumentError("Inconsistent input/output shapes");
} else if (operand.element_type() != result.expressed_type()) {
return absl::InvalidArgumentError("Inconsistent element types");
} else if (!result.is_per_tensor_quantized()) {
return absl::InvalidArgumentError("Unsupported output quantization");
}
return absl::OkStatus();
}
// Element-wise quantization: converts every expressed-type element of
// `operand` into the result's storage type using the result's per-tensor
// scale / zero point, then runs CompleteQuantization over the produced
// storage values (its status is the function's final status).
template <ElementType storage_type, ElementType expressed_type>
absl::Status UniformQuantize(const Tensor& operand, QuantizedTensor& result) {
  if (auto status = CheckQuantizeParameters(operand, result); !status.ok()) {
    return status;
  }
  using S = Storage<storage_type>;
  using E = Storage<expressed_type>;
  using ET = typename E::Type;
  const QuantizedParameter& quant_param =
      result.type().element_type().parameters(0);
  // Hoist 1/scale out of the loop; QuantizePartial takes the inverse scale.
  const ET scale_inv = ET(1.0) / static_cast<ET>(quant_param.scale);
  auto src = operand.buffer();
  auto dst = result.buffer();
  const size_t num_elements = operand.num_elements();
  for (size_t i = 0; i < num_elements; ++i) {
    S::Set(dst, i,
           QuantizePartial<storage_type, expressed_type>(
               E::Get(src, i), scale_inv, quant_param.zero_point));
  }
  // Equivalent to the original "if (!status.ok()) return status; return Ok".
  return CompleteQuantization<storage_type>(result);
}
}
// Public entry point. Dispatches to the templated UniformDequantize
// specialization selected by the operand's (storage_type, expressed_type)
// pair. NOTE(review): DISPATCH_QUANTIZED is assumed to return on every
// path (no statement follows it) — confirm against its definition.
absl::Status UniformDequantize(const QuantizedTensor& operand, Tensor& result) {
  DISPATCH_QUANTIZED(UniformDequantize, operand.storage_type(),
                     operand.expressed_type(), operand, result);
}
// Public entry point. Dispatches to the templated UniformQuantize
// specialization; note the type pair comes from the *result* tensor here,
// since the quantized side is the output. NOTE(review): DISPATCH_QUANTIZED
// is assumed to return on every path — confirm against its definition.
absl::Status UniformQuantize(const Tensor& operand, QuantizedTensor& result) {
  DISPATCH_QUANTIZED(UniformQuantize, result.storage_type(),
                     result.expressed_type(), operand, result);
}
} | #include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/matchers.h"
namespace stablehlo {
namespace testing {
// Round-trip helper: quantizes `input_values` into a freshly-built quantized
// tensor, dequantizes it back, and checks the result is approximately equal
// to the input. Buffers for the quantized and dequantized tensors are sized
// from input_values.size().
// NOTE(review): callers pass shape {4} together with 5 values; only
// Shape(shape).num_elements() entries appear to be processed, so the last
// value would be ignored — confirm whether the shapes should be {5}.
template <ElementType storage_type, ElementType expressed_type>
void test(std::initializer_list<DimensionSize>&& shape,
          QuantizedParameter&& quantized_parameter,
          std::vector<typename Storage<expressed_type>::Type>&& input_values) {
  Tensor input(TensorType(Shape(shape), expressed_type), input_values.data());
  std::vector<typename Storage<storage_type>::Type> quant_values(
      input_values.size());
  QuantizedTensorElementType element_type(storage_type, expressed_type,
                                          std::move(quantized_parameter));
  QuantizedTensor quant(
      QuantizedTensorType(Shape(shape), std::move(element_type)),
      quant_values.data());
  std::vector<typename Storage<expressed_type>::Type> result_values(
      input_values.size());
  Tensor result(TensorType(Shape(shape), expressed_type), result_values.data());
  ASSERT_OK(UniformQuantize(input, quant));
  ASSERT_OK(UniformDequantize(quant, result));
  EXPECT_THAT(result, IsAlmostSame(input));
}
// Quantize -> dequantize round trips over all supported storage/expressed
// type pairs, with a range of scales and positive/negative zero points.
// NOTE(review): the first two cases are byte-identical duplicates, and every
// case passes shape {4} with 5 values — likely copy/paste slips; confirm
// whether the shapes were meant to be {5}.
TEST(QuantizeDequantize, All) {
  test<ElementType::kSI8, ElementType::kBF16>(
      {4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI8, ElementType::kBF16>(
      {4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI8, ElementType::kBF16>(
      {4}, {.scale = 1e-1, .zero_point = -5}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI8, ElementType::kF16>({4}, {.scale = 1, .zero_point = 5},
                                             {-2, -1, 0, 1, 2});
  test<ElementType::kSI8, ElementType::kF16>(
      {4}, {.scale = 1e-1, .zero_point = -10}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI8, ElementType::kF32>({4}, {.scale = 1, .zero_point = 5},
                                             {-2, -1, 0, 1, 2});
  test<ElementType::kSI8, ElementType::kF32>(
      {4}, {.scale = 1e-1, .zero_point = +10}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI16, ElementType::kBF16>(
      {4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI16, ElementType::kBF16>(
      {4}, {.scale = 1e-1, .zero_point = 5}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI16, ElementType::kBF16>(
      {4}, {.scale = 1e-2, .zero_point = -5}, {-2.22, -1.11, 0, 1.11, 2.22});
  test<ElementType::kSI16, ElementType::kF16>(
      {4}, {.scale = 1, .zero_point = 0}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI16, ElementType::kF16>(
      {4}, {.scale = 1e-1, .zero_point = -10}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI16, ElementType::kF16>(
      {4}, {.scale = 1e-2, .zero_point = 10}, {-2.22, -1.11, 0, 1.11, 2.22});
  test<ElementType::kSI32, ElementType::kBF16>(
      {4}, {.scale = 1, .zero_point = +7}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI32, ElementType::kBF16>(
      {4}, {.scale = 1e-1, .zero_point = -7}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI32, ElementType::kBF16>(
      {4}, {.scale = 1e-2, .zero_point = 0}, {-2.22, -1.11, 0, 1.11, 2.22});
  test<ElementType::kSI32, ElementType::kBF16>(
      {4}, {.scale = 1e-3, .zero_point = 0}, {-2.222, -1.111, 0, 1.111, 2.222});
  test<ElementType::kSI32, ElementType::kF16>(
      {4}, {.scale = 1, .zero_point = +7}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI32, ElementType::kF16>(
      {4}, {.scale = 1e-1, .zero_point = -7}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI32, ElementType::kF16>(
      {4}, {.scale = 1e-2, .zero_point = 10}, {-2.22, -1.11, 0, 1.11, 2.22});
  test<ElementType::kSI32, ElementType::kF16>(
      {4}, {.scale = 1e-3, .zero_point = -0},
      {-2.222, -1.111, 0, 1.111, 2.222});
  test<ElementType::kSI32, ElementType::kF32>(
      {4}, {.scale = 1, .zero_point = +7}, {-2, -1, 0, 1, 2});
  test<ElementType::kSI32, ElementType::kF32>(
      {4}, {.scale = 1e-1, .zero_point = -7}, {-2.2, -1.1, 0, 1.1, 2.2});
  test<ElementType::kSI32, ElementType::kF32>(
      {4}, {.scale = 1e-2, .zero_point = 10}, {-2.22, -1.11, 0, 1.11, 2.22});
  test<ElementType::kSI32, ElementType::kF32>(
      {4}, {.scale = 1e-3, .zero_point = -0},
      {-2.222, -1.111, 0, 1.111, 2.222});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/uniform_dequantize_quantize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/uniform_dequantize_quantize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1008aca8-d9ec-47ce-bf94-ea245a28e353 | cpp | google/arolla | group_op | arolla/array/group_op.h | arolla/array/group_op_test.cc | #ifndef AROLLA_ARRAY_GROUP_OP_H_
#define AROLLA_ARRAY_GROUP_OP_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/array/id_filter.h"
#include "arolla/array/ops_util.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/ops/dense_group_ops.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/optional_value.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/binary_search.h"
#include "arolla/util/meta.h"
#include "arolla/util/view_types.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace array_ops_internal {
// Primary template, declared only; the partial specialization below (which
// unpacks ParentTypes/ChildTypes as meta::type_list's) provides the
// implementation. ForwardId forwards the child row id into the accumulator;
// UseDenseGroupOps enables the DenseGroupOps fast path for dense inputs.
template <class Accumulator, class ParentTypes, class ChildTypes,
          bool ForwardId = false, bool UseDenseGroupOps = true>
class ArrayGroupOpImpl;
// Applies an Accumulator-based group operation over Array arguments along an
// ArrayEdge (parent <-> child mapping or split points) or an
// ArrayGroupScalarEdge (a single group covering all children).
//
// The accumulator is one of three kinds, selected at compile time:
//  - aggregator: one result per parent group;
//  - partial:    one result per child row, available as rows are added;
//  - full:       one result per child row, finalized after the whole group.
// Dense inputs are delegated to DenseGroupOpsImpl when UseDenseGroupOps is
// set; otherwise sparsity-aware code paths below are chosen from
// PresentCountUpperEstimate() heuristics.
template <class Accumulator, class... ParentTs, class... ChildTs,
          bool ForwardId, bool UseDenseGroupOps>
class ArrayGroupOpImpl<Accumulator, meta::type_list<ParentTs...>,
                       meta::type_list<ChildTs...>, ForwardId,
                       UseDenseGroupOps> {
  // Iteration helpers over the parent-side / child-side argument arrays.
  using ParentUtil =
      ArrayOpsUtil<true, meta::type_list<ParentTs...>>;
  using ChildUtil =
      ArrayOpsUtil<false, meta::type_list<ChildTs...>>;
  // Iterates the edge mapping together with the child arguments.
  using MappingAndChildUtil =
      ArrayOpsUtil<false,
                   meta::type_list<int64_t, ChildTs...>>;
  using ResT = strip_optional_t<typename Accumulator::result_type>;
  // Dense fast path implementation (same accumulator, DenseArray inputs).
  using DenseGroupOp = dense_ops_internal::DenseGroupOpsImpl<
      Accumulator, meta::type_list<ParentTs...>, meta::type_list<ChildTs...>,
      ForwardId>;
  static constexpr bool kIsAggregator = Accumulator::IsAggregator();
  static constexpr bool kIsPartial = Accumulator::IsPartial();
  static constexpr bool kIsFull = Accumulator::IsFull();
  // If the estimated fraction of non-empty groups is below this threshold,
  // aggregation switches to the very-sparse (hash-map / sparse-result) path.
  static constexpr double kNonEmptyGroupProbLimit = 0.25;
 public:
  // `empty_accumulator` is copied whenever a fresh accumulator is needed;
  // it lets stateful accumulators carry construction-time configuration.
  explicit ArrayGroupOpImpl(RawBufferFactory* buffer_factory,
                            Accumulator empty_accumulator = Accumulator())
      : buffer_factory_(buffer_factory),
        empty_accumulator_(std::move(empty_accumulator)) {}
  // Applies the operation along `edge`. `p_args` are per-parent arrays,
  // `c_args` per-child arrays; sizes must match the edge's parent/child
  // sizes. Returns a per-parent array for aggregators, per-child otherwise.
  absl::StatusOr<Array<ResT>> Apply(const ArrayEdge& edge,
                                    const AsArray<ParentTs>&... p_args,
                                    const AsArray<ChildTs>&... c_args) const {
    if constexpr (UseDenseGroupOps) {
      // Fast path: everything dense -> delegate to DenseGroupOps.
      if (edge.edge_values().IsDenseForm() &&
          (p_args.IsDenseForm() && ... && true) &&
          (c_args.IsDenseForm() && ... && true)) {
        auto op = [this](const auto&... args) ABSL_ATTRIBUTE_NOINLINE {
          return DenseGroupOp(buffer_factory_, empty_accumulator_)
              .Apply(args...);
        };
        ASSIGN_OR_RETURN(DenseArray<ResT> res,
                         op(edge.ToDenseArrayEdge(), p_args.dense_data()...,
                            c_args.dense_data()...));
        return Array<ResT>(res);
      }
    }
    if (((p_args.size() != edge.parent_size()) || ... || false)) {
      return SizeMismatchError({edge.parent_size(), p_args.size()...});
    }
    if (((c_args.size() != edge.child_size()) || ... || false)) {
      return SizeMismatchError({edge.child_size(), c_args.size()...});
    }
    switch (edge.edge_type()) {
      case ArrayEdge::SPLIT_POINTS: {
        const Buffer<int64_t>& splits = edge.edge_values().dense_data().values;
        ChildUtil child_util(edge.child_size(), c_args..., buffer_factory_);
        if constexpr (kIsAggregator) {
          if constexpr (sizeof...(ParentTs) == 0) {
            // Very sparse children and no parent args: produce a sparse
            // result directly instead of touching every group.
            if (child_util.PresentCountUpperEstimate() <
                kNonEmptyGroupProbLimit * edge.parent_size()) {
              return ApplyAggregatorWithSplitPointsOnVerySparseData(
                  edge.parent_size(), child_util, splits.span());
            }
          }
          ParentUtil parent_util(edge.parent_size(), p_args...,
                                 buffer_factory_);
          return ApplyAggregatorWithSplitPoints(parent_util, child_util,
                                                splits);
        } else {
          ParentUtil parent_util(edge.parent_size(), p_args...,
                                 buffer_factory_);
          // Choose dense vs sparse result builder by estimated density.
          if (child_util.PresentCountUpperEstimate() >
              edge.child_size() * IdFilter::kDenseSparsityLimit) {
            return ApplyDenseWithSplitPoints(parent_util, child_util, splits);
          } else {
            return ApplySparseWithSplitPoints(parent_util, child_util, splits);
          }
        }
      }
      case ArrayEdge::MAPPING: {
        MappingAndChildUtil mapchild_util(edge.child_size(), edge.edge_values(),
                                          c_args..., buffer_factory_);
        if constexpr (kIsAggregator && sizeof...(ParentTs) == 0) {
          if (mapchild_util.PresentCountUpperEstimate() <
              kNonEmptyGroupProbLimit * edge.parent_size()) {
            return ApplyAggregatorWithMappingOnVerySparseData(
                edge.parent_size(), mapchild_util);
          }
        }
        ParentUtil parent_util(edge.parent_size(), p_args..., buffer_factory_);
        return ApplyWithMapping(parent_util, mapchild_util);
      }
      default:
        return absl::InvalidArgumentError("unsupported edge type");
    }
  }
  // Applies the operation with all children forming one group. Parent
  // arguments are scalars here. Aggregators return a single (optional)
  // value; partial/full accumulators return a per-child Array.
  absl::StatusOr<
      std::conditional_t<Accumulator::IsAggregator(),
                         typename Accumulator::result_type, Array<ResT>>>
  Apply(const ArrayGroupScalarEdge& edge, view_type_t<ParentTs>... p_args,
        const AsArray<ChildTs>&... c_args) const {
    if constexpr (UseDenseGroupOps) {
      if ((c_args.IsDenseForm() && ... && true)) {
        auto op = [this](const auto&... args) ABSL_ATTRIBUTE_NOINLINE {
          return DenseGroupOp(buffer_factory_, empty_accumulator_)
              .Apply(args...);
        };
        ASSIGN_OR_RETURN(auto res, op(edge.ToDenseArrayGroupScalarEdge(),
                                      p_args..., c_args.dense_data()...));
        if constexpr (Accumulator::IsAggregator()) {
          return res;
        } else {
          return Array<ResT>(res);
        }
      }
    }
    if (((c_args.size() != edge.child_size()) || ... || false)) {
      return SizeMismatchError({edge.child_size(), c_args.size()...});
    }
    ChildUtil util(edge.child_size(), c_args..., buffer_factory_);
    Accumulator accumulator = empty_accumulator_;
    accumulator.Reset(p_args...);
    if constexpr (kIsAggregator) {
      AggregateSingleGroup(accumulator, util, 0, edge.child_size());
      auto res = accumulator.GetResult();
      RETURN_IF_ERROR(accumulator.GetStatus());
      return typename Accumulator::result_type(std::move(res));
    } else {
      const int64_t max_present_count = util.PresentCountUpperEstimate();
      // Dense-output path for partial accumulators on dense-ish input.
      if (kIsPartial && max_present_count >
                            edge.child_size() * IdFilter::kDenseSparsityLimit) {
        DenseArrayBuilder<ResT> builder(edge.child_size(), buffer_factory_);
        auto fn = [&](int64_t child_id, view_type_t<ChildTs>... args) {
          Add(accumulator, child_id, args...);
          builder.Set(child_id, accumulator.GetResult());
        };
        util.Iterate(0, edge.child_size(), fn);
        RETURN_IF_ERROR(accumulator.GetStatus());
        return Array<ResT>(std::move(builder).Build());
      }
      SparseArrayBuilder<ResT> builder(edge.child_size(), max_present_count,
                                       buffer_factory_);
      auto fn = [&](int64_t child_id, view_type_t<ChildTs>... args) {
        Add(accumulator, child_id, args...);
        if constexpr (kIsPartial) {
          builder.SetByOffset(builder.NextOffset(), accumulator.GetResult());
        }
        builder.AddId(child_id);
      };
      util.Iterate(0, edge.child_size(), fn);
      if constexpr (kIsFull) {
        // Full accumulators only have results after the whole group was
        // seen; back-fill every collected offset now.
        accumulator.FinalizeFullGroup();
        for (int64_t offset = 0; offset < builder.NextOffset(); ++offset) {
          builder.SetByOffset(offset, accumulator.GetResult());
        }
      }
      RETURN_IF_ERROR(accumulator.GetStatus());
      return std::move(builder).Build();
    }
  }
 private:
  // MAPPING-edge implementation: one accumulator per parent group, reset
  // from the parent args; children are routed via the mapping column.
  absl::StatusOr<Array<ResT>> ApplyWithMapping(
      ParentUtil& parent_util, MappingAndChildUtil& mapchild_util) const {
    std::vector<Accumulator> accumulators(parent_util.size(),
                                          empty_accumulator_);
    // Groups whose parent args are missing stay invalid and are skipped.
    std::vector<bool> valid_parents(parent_util.size(), false);
    parent_util.IterateSimple(
        [&](int64_t parent, view_type_t<ParentTs>... args) {
          accumulators[parent].Reset(args...);
          valid_parents[parent] = true;
        });
    const int64_t child_row_count = mapchild_util.size();
    const int64_t max_present_count = mapchild_util.PresentCountUpperEstimate();
    if (kIsAggregator ||
        (kIsPartial &&
         max_present_count > child_row_count * IdFilter::kDenseSparsityLimit)) {
      return ApplyAggregatorOrDensePartialWithMapping(
          parent_util, mapchild_util, accumulators, valid_parents);
    }
    DCHECK(kIsFull || kIsPartial);
    SparseArrayBuilder<ResT> builder(child_row_count, max_present_count,
                                     buffer_factory_);
    // For full accumulators, remember which parent produced each offset so
    // results can be back-filled after finalization.
    std::vector<int64_t> parent_ids;
    if constexpr (kIsFull) {
      parent_ids.reserve(max_present_count);
    }
    auto fn = [&](int64_t child_id, int64_t parent_id,
                  view_type_t<ChildTs>... args) {
      if (!valid_parents[parent_id]) return;
      auto& accumulator = accumulators[parent_id];
      Add(accumulator, child_id, args...);
      if constexpr (kIsPartial) {
        builder.SetByOffset(builder.NextOffset(), accumulator.GetResult());
      } else {
        DCHECK(kIsFull);
        parent_ids.push_back(parent_id);
      }
      builder.AddId(child_id);
    };
    mapchild_util.IterateSimple(fn);
    if constexpr (kIsFull) {
      int64_t parent_id = 0;
      for (bool valid : valid_parents) {
        if (valid) accumulators[parent_id].FinalizeFullGroup();
        parent_id++;
      }
      for (int64_t offset = 0; offset < builder.NextOffset(); ++offset) {
        builder.SetByOffset(offset,
                            accumulators[parent_ids[offset]].GetResult());
      }
    }
    int64_t parent_id = 0;
    for (bool valid : valid_parents) {
      if (valid) {
        RETURN_IF_ERROR(accumulators[parent_id].GetStatus());
      }
      parent_id++;
    }
    return std::move(builder).Build();
  }
  // Very-sparse MAPPING aggregation (no parent args): accumulate only the
  // groups actually hit, via a hash map, and emit a sparse result whose
  // missing_id_value is the empty-group result when some groups were empty.
  absl::StatusOr<Array<ResT>> ApplyAggregatorWithMappingOnVerySparseData(
      size_t parent_size, MappingAndChildUtil& mapchild_util) const {
    static_assert(kIsAggregator);
    static_assert(sizeof...(ParentTs) == 0);
    absl::flat_hash_map<int64_t, Accumulator> accumulators;
    mapchild_util.IterateSimple(
        [&](int64_t child_id, int64_t parent_id, view_type_t<ChildTs>... args) {
          auto it = accumulators.find(parent_id);
          if (it == accumulators.end()) {
            it = accumulators.emplace(parent_id, empty_accumulator_).first;
            it->second.Reset();
          }
          Add(it->second, child_id, args...);
        });
    std::vector<std::pair<
        int64_t, decltype(Accumulator(empty_accumulator_).GetResult())>>
        results;
    results.reserve(accumulators.size());
    for (auto& [parent_id, accumulator] : accumulators) {
      results.emplace_back(parent_id, accumulator.GetResult());
      RETURN_IF_ERROR(accumulator.GetStatus());
    }
    // IdFilter ids must be ordered; the hash map iterates unordered.
    std::sort(results.begin(), results.end(),
              [](const auto& a, const auto& b) { return a.first < b.first; });
    Buffer<int64_t>::Builder ids_bldr(accumulators.size(), buffer_factory_);
    DenseArrayBuilder<ResT> dense_builder(accumulators.size(), buffer_factory_);
    int64_t offset = 0;
    for (auto& [parent_id, value] : results) {
      ids_bldr.Set(offset, parent_id);
      dense_builder.Set(offset, value);
      offset++;
    }
    OptionalValue<ResT> missing_id_value;
    if (accumulators.size() < parent_size) {
      // Some groups received no rows: their value is the result of an
      // accumulator that saw no input.
      Accumulator acc(empty_accumulator_);
      acc.Reset();
      missing_id_value = OptionalValue<ResT>(acc.GetResult());
      RETURN_IF_ERROR(acc.GetStatus());
    }
    IdFilter id_filter(parent_size, std::move(ids_bldr).Build());
    return Array<ResT>(parent_size, std::move(id_filter),
                       std::move(dense_builder).Build(), missing_id_value);
  }
  // MAPPING path producing a dense result: used for all aggregators and for
  // partial accumulators on dense-ish children.
  absl::StatusOr<Array<ResT>> ApplyAggregatorOrDensePartialWithMapping(
      ParentUtil& parent_util, MappingAndChildUtil& mapchild_util,
      std::vector<Accumulator>& accumulators,
      std::vector<bool>& valid_parents) const {
    DCHECK(kIsAggregator || kIsPartial);
    // Aggregators produce one row per parent; partial, one row per child.
    DenseArrayBuilder<ResT> builder(
        kIsAggregator ? parent_util.size() : mapchild_util.size(),
        buffer_factory_);
    auto fn = [&](int64_t child_id, int64_t parent_id,
                  view_type_t<ChildTs>... args) {
      auto& accumulator = accumulators[parent_id];
      if constexpr (kIsAggregator) {
        Add(accumulator, child_id, args...);
      } else {
        if (valid_parents[parent_id]) {
          Add(accumulator, child_id, args...);
          builder.Set(child_id, accumulator.GetResult());
        }
      }
    };
    mapchild_util.IterateSimple(fn);
    int64_t parent_id = 0;
    for (bool valid : valid_parents) {
      if (valid) {
        if constexpr (kIsAggregator) {
          builder.Set(parent_id, accumulators[parent_id].GetResult());
        }
        RETURN_IF_ERROR(accumulators[parent_id].GetStatus());
      }
      parent_id++;
    }
    return Array<ResT>(std::move(builder).Build());
  }
  // SPLIT_POINTS aggregation: groups are contiguous child ranges, so a
  // single accumulator is reset and reused per group.
  absl::StatusOr<Array<ResT>> ApplyAggregatorWithSplitPoints(
      ParentUtil& parent_util, ChildUtil& child_util,
      const Buffer<int64_t>& splits) const {
    static_assert(kIsAggregator);
    DCHECK_EQ(splits.size(), parent_util.size() + 1);
    Accumulator accumulator = empty_accumulator_;
    DenseArrayBuilder<ResT> builder(parent_util.size(), buffer_factory_);
    auto process_group = [&](int64_t parent_id, view_type_t<ParentTs>... args) {
      accumulator.Reset(args...);
      int64_t child_from = splits[parent_id];
      int64_t child_to = splits[parent_id + 1];
      AggregateSingleGroup(accumulator, child_util, child_from, child_to);
      builder.Set(parent_id, accumulator.GetResult());
    };
    parent_util.IterateSimple(process_group);
    RETURN_IF_ERROR(accumulator.GetStatus());
    return Array<ResT>(std::move(builder).Build());
  }
  // Feeds children [child_from, child_to) into `accumulator`, using the
  // bulk AddN callback for runs of identical values.
  void AggregateSingleGroup(Accumulator& accumulator, ChildUtil& child_util,
                            int64_t child_from, int64_t child_to) const {
    static_assert(kIsAggregator);
    auto fn = [&](int64_t child_id, view_type_t<ChildTs>... args) {
      Add(accumulator, child_id, args...);
    };
    auto repeated_fn = [&](int64_t first_child_id, int64_t count,
                           view_type_t<ChildTs>... args) {
      AddN(accumulator, first_child_id, count, args...);
    };
    child_util.Iterate(child_from, child_to, fn, empty_missing_fn, repeated_fn);
  }
  // Very-sparse SPLIT_POINTS aggregation (no parent args): iterates only
  // present children, locating each one's group by galloping search over the
  // split points, and emits a sparse result.
  absl::StatusOr<Array<ResT>> ApplyAggregatorWithSplitPointsOnVerySparseData(
      int64_t parent_size, ChildUtil& child_util,
      absl::Span<const int64_t> splits) const {
    static_assert(kIsAggregator);
    static_assert(sizeof...(ParentTs) == 0);
    DCHECK_EQ(splits.size(), parent_size + 1);
    const int64_t max_res_dense_count =
        std::min(parent_size, child_util.PresentCountUpperEstimate());
    Buffer<int64_t>::Builder ids_bldr(max_res_dense_count, buffer_factory_);
    DenseArrayBuilder<ResT> dense_builder(max_res_dense_count, buffer_factory_);
    int64_t res_offset = 0;
    int64_t next_parent_id = 0;
    Accumulator accumulator = empty_accumulator_;
    accumulator.Reset();
    absl::Status status = absl::OkStatus();
    // Flushes the group that was being accumulated (if any) into the
    // result builders and resets the accumulator for the next group.
    auto add_previous_to_results = [&]() {
      if (next_parent_id > 0 && status.ok()) {
        ids_bldr.Set(res_offset, next_parent_id - 1);
        dense_builder.Set(res_offset, accumulator.GetResult());
        status = accumulator.GetStatus();
        accumulator.Reset();
        res_offset++;
      }
    };
    child_util.IterateSimple([&](int64_t child_id,
                                 view_type_t<ChildTs>... args) {
      if (child_id >= splits[next_parent_id]) {
        add_previous_to_results();
        next_parent_id = GallopingLowerBound(splits.begin() + next_parent_id,
                                             splits.end(), child_id + 1) -
                         splits.begin();
      }
      Add(accumulator, child_id, args...);
    });
    add_previous_to_results();
    RETURN_IF_ERROR(status);
    OptionalValue<ResT> missing_id_value;
    if (res_offset < parent_size) {
      // Empty groups share a single "no input" result value.
      missing_id_value = OptionalValue<ResT>(accumulator.GetResult());
      RETURN_IF_ERROR(accumulator.GetStatus());
    }
    IdFilter id_filter(parent_size, std::move(ids_bldr).Build(res_offset));
    return Array<ResT>(parent_size, std::move(id_filter),
                       std::move(dense_builder).Build(res_offset),
                       missing_id_value);
  }
  // SPLIT_POINTS partial/full path with a dense per-child result.
  absl::StatusOr<Array<ResT>> ApplyDenseWithSplitPoints(
      ParentUtil& parent_util, ChildUtil& child_util,
      const Buffer<int64_t>& splits) const {
    static_assert(kIsPartial || kIsFull);
    DenseArrayBuilder<ResT> builder(child_util.size(), buffer_factory_);
    // Only used by full accumulators, to back-fill after finalization.
    std::vector<int64_t> processed_rows;
    Accumulator accumulator = empty_accumulator_;
    auto fn = [&](int64_t child_id, view_type_t<ChildTs>... args) {
      Add(accumulator, child_id, args...);
      if constexpr (kIsPartial) {
        builder.Set(child_id, accumulator.GetResult());
      } else {
        processed_rows.push_back(child_id);
      }
    };
    auto process_group = [&](int64_t parent_id, view_type_t<ParentTs>... args) {
      accumulator.Reset(args...);
      int64_t child_from = splits[parent_id];
      int64_t child_to = splits[parent_id + 1];
      child_util.Iterate(child_from, child_to, fn);
      if constexpr (kIsFull) {
        accumulator.FinalizeFullGroup();
        for (int64_t row_id : processed_rows) {
          builder.Set(row_id, accumulator.GetResult());
        }
        processed_rows.clear();
      }
    };
    parent_util.IterateSimple(process_group);
    RETURN_IF_ERROR(accumulator.GetStatus());
    return Array<ResT>(std::move(builder).Build());
  }
  // SPLIT_POINTS partial/full path with a sparse per-child result.
  absl::StatusOr<Array<ResT>> ApplySparseWithSplitPoints(
      ParentUtil& parent_util, ChildUtil& child_util,
      const Buffer<int64_t>& splits) const {
    static_assert(kIsPartial || kIsFull);
    SparseArrayBuilder<ResT> builder(child_util.size(),
                                     child_util.PresentCountUpperEstimate(),
                                     buffer_factory_);
    Accumulator accumulator = empty_accumulator_;
    auto fn = [&](int64_t child_id, view_type_t<ChildTs>... args) {
      Add(accumulator, child_id, args...);
      if constexpr (kIsPartial) {
        builder.SetByOffset(builder.NextOffset(), accumulator.GetResult());
      }
      builder.AddId(child_id);
    };
    auto process_group = [&](int64_t parent_id, view_type_t<ParentTs>... args) {
      accumulator.Reset(args...);
      int64_t child_from = splits[parent_id];
      int64_t child_to = splits[parent_id + 1];
      int64_t offset = builder.NextOffset();
      child_util.Iterate(child_from, child_to, fn);
      if constexpr (kIsFull) {
        accumulator.FinalizeFullGroup();
        while (offset < builder.NextOffset()) {
          builder.SetByOffset(offset++, accumulator.GetResult());
        }
      }
    };
    parent_util.IterateSimple(process_group);
    RETURN_IF_ERROR(accumulator.GetStatus());
    return std::move(builder).Build();
  }
  // Forwards one row to the accumulator, including the child id only when
  // ForwardId is set (accumulator signatures differ accordingly).
  void Add(Accumulator& accumulator, int64_t child_id,
           view_type_t<ChildTs>... args) const {
    if constexpr (ForwardId) {
      accumulator.Add(child_id, args...);
    } else {
      (void)child_id;
      accumulator.Add(args...);
    }
  }
  // Forwards N identical rows. With ForwardId each row needs its own id, so
  // the bulk AddN cannot be used and rows are added one by one.
  void AddN(Accumulator& accumulator, int64_t first_child_id, int64_t N,
            view_type_t<ChildTs>... args) const {
    if constexpr (ForwardId) {
      for (int64_t i = 0; i < N; ++i) {
        accumulator.Add(first_child_id + i, args...);
      }
    } else {
      (void)first_child_id;
      accumulator.AddN(N, args...);
    }
  }
  RawBufferFactory* buffer_factory_;
  const Accumulator empty_accumulator_;
};
}
// Public alias: group op whose accumulator declares its own parent/child
// argument types; child ids are not forwarded to the accumulator.
template <class Accumulator>
using ArrayGroupOp =
    array_ops_internal::ArrayGroupOpImpl<Accumulator,
                                         typename Accumulator::parent_types,
                                         typename Accumulator::child_types>;
// Public alias with ForwardId=true: the accumulator's first child type is
// the child row id, which the op supplies itself, so it is stripped
// (meta::tail_t) from the Apply() argument list.
template <class Accumulator>
using ArrayGroupOpWithId = array_ops_internal::ArrayGroupOpImpl<
    Accumulator, typename Accumulator::parent_types,
    meta::tail_t<typename Accumulator::child_types>, true>;
}
#endif | #include "arolla/array/group_op.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/strings/string_view.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/array/id_filter.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qexpr/operators/testing/accumulators.h"
#include "arolla/util/meta.h"
#include "arolla/util/text.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Test;
// Like ArrayGroupOp, but with the last template argument
// (UseDenseGroupOps) set to false so tests exercise the generic Array code
// paths instead of the DenseGroupOps shortcut.
template <class Accumulator>
using ArrayGroupOpNoDense = array_ops_internal::ArrayGroupOpImpl<
    Accumulator, typename Accumulator::parent_types,
    typename Accumulator::child_types, false,
    false>;
// Like ArrayGroupOpWithId (ForwardId=true, id type stripped via
// meta::tail_t), but with UseDenseGroupOps=false to force the generic
// Array code paths in tests.
template <class Accumulator>
using ArrayGroupOpWithIdNoDense = array_ops_internal::ArrayGroupOpImpl<
    Accumulator, typename Accumulator::parent_types,
    meta::tail_t<typename Accumulator::child_types>, true,
    false>;
// Sum aggregation over fully-present values via both edge kinds; group 0 is
// empty, so its result is missing.
TEST(ArrayGroupOp, FullArrayGroupSum) {
  auto values = CreateArray<float>({5.0f, 8.0f, 3.0f, 6.0f});
  auto detail_to_group = CreateArray<int64_t>({1, 1, 2, 3});
  auto splits = CreateArray<int64_t>({0, 0, 2, 3, 4});
  ArrayGroupOpNoDense<testing::AggSumAccumulator<float>> agg(
      GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromMapping(
                                            detail_to_group, 4));
  EXPECT_THAT(*agg.Apply(edge1, values),
              ElementsAre(std::nullopt, 13.0f, 3.0f, 6.0f));
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromSplitPoints(splits));
  EXPECT_THAT(*agg.Apply(edge2, values),
              ElementsAre(std::nullopt, 13.0f, 3.0f, 6.0f));
}
// Same scenario as FullArrayGroupSum, but with UseDenseGroupOps enabled so
// dense inputs take the DenseGroupOps delegation path; results must match.
TEST(ArrayGroupOp, DenseGroupOpShortcut) {
  auto values = CreateArray<float>({5.0f, 8.0f, 3.0f, 6.0f});
  auto detail_to_group = CreateArray<int64_t>({1, 1, 2, 3});
  auto splits = CreateArray<int64_t>({0, 0, 2, 3, 4});
  ArrayGroupOp<testing::AggSumAccumulator<float>> agg(GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromMapping(
                                            detail_to_group, 4));
  EXPECT_THAT(*agg.Apply(edge1, values),
              ElementsAre(std::nullopt, 13.0f, 3.0f, 6.0f));
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromSplitPoints(splits));
  EXPECT_THAT(*agg.Apply(edge2, values),
              ElementsAre(std::nullopt, 13.0f, 3.0f, 6.0f));
}
// With ForwardId=true the op must pass each child row id to the
// accumulator; CollectIdsAccumulator records them for verification.
TEST(ArrayGroupOp, ForwardId) {
  auto splits = CreateArray<int64_t>({0, 0, 2, 3, 4});
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge, ArrayEdge::FromSplitPoints(splits));
  std::vector<int64_t> ids;
  ArrayGroupOpWithIdNoDense<testing::CollectIdsAccumulator> op(
      GetHeapBufferFactory(), testing::CollectIdsAccumulator(&ids));
  EXPECT_OK(op.Apply(edge).status());
  EXPECT_THAT(ids, ElementsAre(0, 1, 2, 3));
}
// An accumulator status error (AverageAccumulator rejects empty groups —
// group 0 here is empty) must be propagated by Apply for both edge kinds.
TEST(ArrayGroupOp, FullArrayAverageWithErrorStatus) {
  auto values = CreateArray<float>({5.0f, 8.0f, 3.0f, 6.0f});
  auto detail_to_group = CreateArray<int64_t>({1, 1, 2, 3});
  auto splits = CreateArray<int64_t>({0, 0, 2, 3, 4});
  ArrayGroupOpNoDense<testing::AverageAccumulator> agg(GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromMapping(
                                            detail_to_group, 4));
  EXPECT_THAT(
      agg.Apply(edge1, values),
      StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group")));
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromSplitPoints(splits));
  EXPECT_THAT(
      agg.Apply(edge2, values),
      StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group")));
}
// Exercises the very-sparse aggregation paths: 100 groups of 5 children,
// with only a handful of present values, so results come back in sparse
// form. The mapping edge routes child i to group i/5 but rotated by half the
// child range, which is why mapping-result group ids differ from the
// split-points ones.
TEST(ArrayGroupOp, AggregationOnVerySparseArray) {
  int parent_size = 100;
  int group_size = 5;
  int child_size = parent_size * group_size;
  Buffer<int64_t>::Builder splits_bldr(parent_size + 1);
  for (int i = 0; i < parent_size + 1; ++i) {
    splits_bldr.Set(i, i * group_size);
  }
  auto splits = Array<int64_t>(std::move(splits_bldr).Build());
  Buffer<int64_t>::Builder mapping_bldr(child_size);
  for (int i = 0; i < child_size; ++i) {
    mapping_bldr.Set((i + child_size / 2) % child_size, i / group_size);
  }
  auto mapping = Array<int64_t>(std::move(mapping_bldr).Build());
  ASSERT_OK_AND_ASSIGN(ArrayEdge splits_edge,
                       ArrayEdge::FromSplitPoints(splits));
  ASSERT_OK_AND_ASSIGN(ArrayEdge mapping_edge,
                       ArrayEdge::FromMapping(mapping, parent_size));
  {
    // Sum: only groups that received values appear; no missing_id_value
    // because AggSum has no result for empty groups.
    ArrayGroupOpNoDense<testing::AggSumAccumulator<float>> agg(
        GetHeapBufferFactory());
    auto ids = CreateBuffer<int64_t>({3, 25, 438, 439});
    auto values = CreateBuffer<float>({1.0f, 2.0f, 3.0f, 4.0f});
    Array<float> array(child_size, IdFilter(child_size, ids), {values});
    ASSERT_OK_AND_ASSIGN(Array<float> res_splits,
                         agg.Apply(splits_edge, array));
    ASSERT_TRUE(res_splits.IsSparseForm());
    EXPECT_FALSE(res_splits.HasMissingIdValue());
    EXPECT_THAT(res_splits.id_filter().ids(), ElementsAre(0, 5, 87));
    EXPECT_THAT(res_splits.dense_data(), ElementsAre(1, 2, 7));
    ASSERT_OK_AND_ASSIGN(Array<float> res_mapping,
                         agg.Apply(mapping_edge, array));
    ASSERT_TRUE(res_mapping.IsSparseForm());
    EXPECT_FALSE(res_mapping.HasMissingIdValue());
    EXPECT_THAT(res_mapping.id_filter().ids(), ElementsAre(37, 50, 55));
    EXPECT_THAT(res_mapping.dense_data(), ElementsAre(7, 1, 2));
  }
  {
    // Same as above, but the first present id falls in a different group.
    ArrayGroupOpNoDense<testing::AggSumAccumulator<float>> agg(
        GetHeapBufferFactory());
    auto ids = CreateBuffer<int64_t>({23, 25, 438, 439});
    auto values = CreateBuffer<float>({1.0f, 2.0f, 3.0f, 4.0f});
    Array<float> array(child_size, IdFilter(child_size, ids), {values});
    ASSERT_OK_AND_ASSIGN(Array<float> res_splits,
                         agg.Apply(splits_edge, array));
    ASSERT_TRUE(res_splits.IsSparseForm());
    EXPECT_FALSE(res_splits.HasMissingIdValue());
    EXPECT_THAT(res_splits.id_filter().ids(), ElementsAre(4, 5, 87));
    EXPECT_THAT(res_splits.dense_data(), ElementsAre(1, 2, 7));
    ASSERT_OK_AND_ASSIGN(Array<float> res_mapping,
                         agg.Apply(mapping_edge, array));
    ASSERT_TRUE(res_mapping.IsSparseForm());
    EXPECT_FALSE(res_mapping.HasMissingIdValue());
    EXPECT_THAT(res_mapping.id_filter().ids(), ElementsAre(37, 54, 55));
    EXPECT_THAT(res_mapping.dense_data(), ElementsAre(7, 1, 2));
  }
  {
    // Count: empty groups have a defined result (0), exposed through
    // missing_id_value rather than per-group entries.
    ArrayGroupOpNoDense<testing::AggCountAccumulator<float>> agg(
        GetHeapBufferFactory());
    auto ids = CreateBuffer<int64_t>({3, 25, 438, 439});
    auto values = CreateBuffer<float>({1.0f, 2.0f, 3.0f, 4.0f});
    Array<float> array(child_size, IdFilter(child_size, ids), {values});
    ASSERT_OK_AND_ASSIGN(Array<int64_t> res_splits,
                         agg.Apply(splits_edge, array));
    ASSERT_TRUE(res_splits.IsSparseForm());
    EXPECT_TRUE(res_splits.HasMissingIdValue());
    EXPECT_EQ(res_splits.missing_id_value().value, 0);
    EXPECT_THAT(res_splits.id_filter().ids(), ElementsAre(0, 5, 87));
    EXPECT_THAT(res_splits.dense_data(), ElementsAre(1, 1, 2));
    ASSERT_OK_AND_ASSIGN(Array<int64_t> res_mapping,
                         agg.Apply(mapping_edge, array));
    ASSERT_TRUE(res_mapping.IsSparseForm());
    EXPECT_TRUE(res_mapping.HasMissingIdValue());
    EXPECT_EQ(res_mapping.missing_id_value().value, 0);
    EXPECT_THAT(res_mapping.id_filter().ids(), ElementsAre(37, 50, 55));
    EXPECT_THAT(res_mapping.dense_data(), ElementsAre(2, 1, 1));
  }
  {
    // Average fails on empty groups even on the very-sparse paths.
    ArrayGroupOpNoDense<testing::AverageAccumulator> agg(
        GetHeapBufferFactory());
    auto ids = CreateBuffer<int64_t>({3, 25, 38, 39});
    auto values = CreateBuffer<float>({1.0f, 2.0f, 3.0f, 4.0f});
    Array<float> array(child_size, IdFilter(child_size, ids), {values});
    EXPECT_THAT(
        agg.Apply(splits_edge, array),
        StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group")));
    EXPECT_THAT(
        agg.Apply(mapping_edge, array),
        StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group")));
  }
}
// Scalar-edge aggregation: average of all children; empty input surfaces
// the accumulator's "empty group" error.
TEST(ArrayGroupOp, AverageToScalar) {
  ArrayGroupOpNoDense<testing::AverageAccumulator> agg(GetHeapBufferFactory());
  EXPECT_THAT(agg.Apply(ArrayGroupScalarEdge(3),
                        CreateArray<float>({1.0f, 3.0f, 8.0f})),
              IsOkAndHolds(4.0f));
  EXPECT_THAT(
      agg.Apply(ArrayGroupScalarEdge(0), Array<float>()),
      StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("empty group")));
}
// Scalar-edge sum over every Array representation: dense, constant, sparse
// with missing values, and sparse with a non-missing default value.
TEST(ArrayGroupOp, AggregationToScalar) {
  ArrayGroupOpNoDense<testing::AggSumAccumulator<float>> agg(
      GetHeapBufferFactory());
  {
    // Dense form.
    auto values = CreateArray<float>({5.0f, 8.0f, 3.0f, 6.0f});
    ArrayGroupScalarEdge edge(values.size());
    EXPECT_EQ(*agg.Apply(edge, values), 22.0f);
  }
  {
    // Constant form: 10 copies of 5.0.
    auto values = Array<float>(10, 5.0f);
    ArrayGroupScalarEdge edge(values.size());
    EXPECT_EQ(*agg.Apply(edge, values), 50.0f);
  }
  {
    // Sparse form with missing values (missing rows are skipped).
    auto values = CreateArray<float>({5.0f, std::nullopt, std::nullopt, 6.0f})
                      .ToSparseForm();
    ArrayGroupScalarEdge edge(values.size());
    EXPECT_EQ(*agg.Apply(edge, values), 11.0f);
  }
  {
    // Sparse form with default value 3.0 (default rows still count).
    auto values =
        CreateArray<float>({5.0f, 3.0f, 3.0f, 6.0f}).ToSparseForm(3.0f);
    ArrayGroupScalarEdge edge(values.size());
    EXPECT_EQ(*agg.Apply(edge, values), 17.0f);
  }
}
// Full accumulator (per-child result finalized per group): ranks values
// within each group for mapping, split-points, and scalar edges.
TEST(ArrayGroupOp, RankValues) {
  auto values = CreateArray<float>({3.0f, 5.0f, 2.0f, 1.0f, 3.1f, 7.0f});
  auto detail_to_group = CreateArray<int64_t>({0, 0, 0, 0, 1, 1});
  auto splits = CreateArray<int64_t>({0, 4, 6});
  ArrayGroupOpNoDense<testing::RankValuesAccumulator<float>> agg(
      GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromMapping(
                                            detail_to_group, 2));
  EXPECT_THAT(*agg.Apply(edge1, values), ElementsAre(1, 0, 2, 3, 1, 0));
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromSplitPoints(splits));
  EXPECT_THAT(*agg.Apply(edge2, values), ElementsAre(1, 0, 2, 3, 1, 0));
  EXPECT_THAT(*agg.Apply(ArrayGroupScalarEdge(values.size()), values),
              ElementsAre(5, 1, 4, 0, 2, 3));
}
// Same ranking scenario on sparse inputs: results stay in sparse form,
// preserving only the ids present in the input.
TEST(ArrayGroupOp, RankValuesSparse) {
  auto values = CreateArray<float>(100, {0, 10, 20, 30, 40, 50},
                                  {3.0f, 5.0f, 2.0f, 1.0f, 3.1f, 7.0f});
  auto detail_to_group =
      CreateArray<int64_t>(100, {0, 10, 20, 30, 40, 50}, {0, 0, 0, 0, 1, 1});
  auto splits = CreateArray<int64_t>({0, 40, 100});
  ArrayGroupOpNoDense<testing::RankValuesAccumulator<float>> agg(
      GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromMapping(
                                            detail_to_group, 2));
  {
    ASSERT_OK_AND_ASSIGN(auto res, agg.Apply(edge1, values));
    EXPECT_TRUE(res.IsSparseForm());
    EXPECT_THAT(res.id_filter().ids(), ElementsAre(0, 10, 20, 30, 40, 50));
    EXPECT_THAT(res.dense_data(), ElementsAre(1, 0, 2, 3, 1, 0));
  }
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromSplitPoints(splits));
  {
    ASSERT_OK_AND_ASSIGN(auto res, agg.Apply(edge2, values));
    EXPECT_TRUE(res.IsSparseForm());
    EXPECT_THAT(res.id_filter().ids(), ElementsAre(0, 10, 20, 30, 40, 50));
    EXPECT_THAT(res.dense_data(), ElementsAre(1, 0, 2, 3, 1, 0));
  }
  {
    // Scalar edge: one group spanning all 100 child rows.
    ASSERT_OK_AND_ASSIGN(
        auto res, agg.Apply(ArrayGroupScalarEdge(values.size()), values));
    EXPECT_TRUE(res.IsSparseForm());
    EXPECT_THAT(res.id_filter().ids(), ElementsAre(0, 10, 20, 30, 40, 50));
    EXPECT_THAT(res.dense_data(), ElementsAre(5, 1, 4, 0, 2, 3));
  }
}
// Weighted-sum aggregation where both the detail data and the
// detail-to-group mapping are sparse; the result keeps a sparse form whose
// id filter covers only positions present in both inputs.
TEST(ArrayGroupOp, PartialSparseMapping) {
  auto a = CreateArray<float>({2.0f, 1.0f, 1.0f});
  auto b = CreateArray<float>({2.0f, 2.0f, 1.0f});
  auto c = CreateArray<float>({0.0f, -1.0f, -1.0f});
  IdFilter ids(100, CreateBuffer<int64_t>({5, 10, 15, 20, 25, 30}));
  auto x = Array<float>(
      100, ids, CreateDenseArray<float>({1.0, 1.0, 1.0, 1.0, 1.0, 1.0}));
  auto y = Array<float>(
      100, ids, CreateDenseArray<float>({1.0, 2.0, 3.0, 1.0, 3.0, 2.0}));
  auto z = Array<float>(
      100, ids, CreateDenseArray<float>({1.0, 2.0, 1.0, 2.0, 1.0, 2.0}));
  // Note: the mapping ids {0..25} and the data ids {5..30} only overlap on
  // {5, 10, 15, 20, 25}.
  IdFilter edge_ids(100, CreateBuffer<int64_t>({0, 5, 10, 15, 20, 25}));
  auto detail_to_group = Array<int64_t>(
      100, edge_ids, CreateDenseArray<int64_t>({0, 0, 1, 1, 2, 2}));
  ArrayGroupOpNoDense<testing::WeightedSumAccumulator> agg(
      GetHeapBufferFactory());
  {
    ASSERT_OK_AND_ASSIGN(ArrayEdge edge,
                         ArrayEdge::FromMapping(detail_to_group,
                                                3));
    ASSERT_OK_AND_ASSIGN(auto res, agg.Apply(edge, a, b, c, x, y, z));
    EXPECT_TRUE(res.IsSparseForm());
    EXPECT_FALSE(res.HasMissingIdValue());
    EXPECT_THAT(res.id_filter().ids(), ElementsAre(5, 10, 15, 20, 25));
    EXPECT_THAT(res.dense_data(), ElementsAre(4.f, 3.f, 6.f, 0.f, 3.f));
  }
  {
    // Split-point edge covers the full 0..100 range, so all data ids
    // (including 30) appear in the result.
    ASSERT_OK_AND_ASSIGN(
        ArrayEdge edge,
        ArrayEdge::FromSplitPoints(CreateArray<int64_t>({0, 10, 20, 100})));
    ASSERT_OK_AND_ASSIGN(auto res, agg.Apply(edge, a, b, c, x, y, z));
    EXPECT_TRUE(res.IsSparseForm());
    EXPECT_FALSE(res.HasMissingIdValue());
    EXPECT_THAT(res.id_filter().ids(), ElementsAre(5, 10, 15, 20, 25, 30));
    EXPECT_THAT(res.dense_data(), ElementsAre(4.f, 3.f, 6.f, 0.f, 3.f, 1.0f));
  }
}
// Dense inputs containing missing values: positions with a missing operand
// produce a missing output, for both edge kinds.
TEST(ArrayGroupOp, PartialDenseMapping) {
  auto a = CreateArray<float>({2.0f, 1.0f, 1.0f});
  auto b = CreateArray<float>({2.0f, 2.0f, std::nullopt});
  auto c = CreateArray<float>({0.0f, -1.0f, -1.0f});
  auto x = CreateArray<float>({1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
  auto y = CreateArray<float>({1.0f, 2.0f, 3.0f, 1.0f, 3.0f, 2.0f});
  auto z = CreateArray<float>({1.f, 2.f, 1.f, std::nullopt, 1.f, 2.f});
  auto splits = CreateArray<int64_t>({0, 2, 5, 6});
  auto detail_to_group = CreateArray<int64_t>({0, 0, 1, 1, 1, 2});
  ArrayGroupOpNoDense<testing::WeightedSumAccumulator> agg(
      GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromSplitPoints(splits));
  EXPECT_THAT(*agg.Apply(edge1, a, b, c, x, y, z),
              ElementsAre(4.f, 6.f, 6.f, std::nullopt, 6.f, std::nullopt));
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromMapping(
                                            detail_to_group, 3));
  EXPECT_THAT(*agg.Apply(edge2, a, b, c, x, y, z),
              ElementsAre(4.f, 6.f, 6.f, std::nullopt, 6.f, std::nullopt));
}
// Scalar group edge with scalar weights: dense and sparse detail inputs
// must agree, and missing detail values stay missing in the output.
TEST(ArrayGroupOp, PartialGroupScalarEdge) {
  auto x = CreateArray<float>({1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
  auto y = CreateArray<float>({1.0f, 2.0f, 3.0f, 1.0f, 3.0f, 2.0f});
  auto z = CreateArray<float>({1.f, 2.f, 1.f, std::nullopt, 1.f, 2.f});
  ArrayGroupOpNoDense<testing::WeightedSumAccumulator> agg(
      GetHeapBufferFactory());
  EXPECT_THAT(*agg.Apply(ArrayGroupScalarEdge(6), 2.0f, 2.0f, 0.0f, x, y, z),
              ElementsAre(4.f, 6.f, 8.f, std::nullopt, 8.f, 6.f));
  EXPECT_THAT(
      *agg.Apply(ArrayGroupScalarEdge(6), 2.0f, 2.0f, 0.0f,
                 x.ToSparseForm(1.0f), y.ToSparseForm(1.0f), z.ToSparseForm()),
      ElementsAre(4.f, 6.f, 8.f, std::nullopt, 8.f, 6.f));
}
// Text aggregation with optional (possibly missing) prefixes, values, and
// comments; mapping and split-point edges must produce identical results.
TEST(ArrayGroupOp, OptionalText) {
  auto detail_to_group = CreateArray<int64_t>({1, 1, 2, 3, 3});
  auto splits = CreateArray<int64_t>({0, 0, 2, 3, 5});
  auto prefixes = CreateArray<Text>(
      {Text("empty"), Text("some:\n"), Text("prefix:\n"), std::nullopt});
  auto values = CreateArray<Text>(
      {Text("w1"), std::nullopt, Text("w3"), Text("w4"), Text("w5")});
  auto comments =
      CreateArray<Text>({std::nullopt, Text("it is word #2"), std::nullopt,
                         Text("it is word #4"), std::nullopt});
  ArrayGroupOpNoDense<testing::AggTextAccumulator> agg(GetHeapBufferFactory());
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge1, ArrayEdge::FromMapping(
                                            detail_to_group, 4));
  ASSERT_OK_AND_ASSIGN(Array<Text> res1,
                       agg.Apply(edge1, prefixes, values, comments));
  ASSERT_OK_AND_ASSIGN(ArrayEdge edge2, ArrayEdge::FromSplitPoints(splits));
  ASSERT_OK_AND_ASSIGN(Array<Text> res2,
                       agg.Apply(edge2, prefixes, values, comments));
  using V = absl::string_view;
  // Group 0 has no details, so only its prefix survives; a missing value
  // (w2) is skipped while its comment is also dropped.
  EXPECT_THAT(res1,
              ElementsAre(V("empty"), V("some:\nw1\n"), V("prefix:\nw3\n"),
                          V("w4 (it is word #4)\nw5\n")));
  // The split-points result must match the mapping result element-wise.
  EXPECT_EQ(res1.size(), res2.size());
  for (int64_t i = 0; i < res1.size(); ++i) {
    EXPECT_EQ(res1[i], res2[i]);
  }
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/array/group_op.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/array/group_op_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9ba49551-4fc5-4137-807f-a64223fef3f8 | cpp | google/quiche | quic_spdy_server_stream_base | quiche/quic/core/http/quic_spdy_server_stream_base.cc | quiche/quic/core/http/quic_spdy_server_stream_base_test.cc | #include "quiche/quic/core/http/quic_spdy_server_stream_base.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/platform/api/quiche_flag_utils.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
// Constructs the stream from an explicit stream id; all state lives in the
// QuicSpdyStream base class.
QuicSpdyServerStreamBase::QuicSpdyServerStreamBase(QuicStreamId id,
                                                   QuicSpdySession* session,
                                                   StreamType type)
    : QuicSpdyStream(id, session, type) {}
// Constructs the stream by adopting a PendingStream.
QuicSpdyServerStreamBase::QuicSpdyServerStreamBase(PendingStream* pending,
                                                   QuicSpdySession* session)
    : QuicSpdyStream(pending, session) {}
// Closes the write side. When reading has been abandoned locally (the
// sequencer ignores incoming data) but the peer has not yet sent FIN or RST
// and we have not reset the stream ourselves, also tell the peer to stop
// sending so it can release the stream early.
void QuicSpdyServerStreamBase::CloseWriteSide() {
  if (!fin_received() && !rst_received() && sequencer()->ignore_read_data() &&
      !rst_sent()) {
    // Closing the write side implies the response FIN already went out,
    // unless the whole connection is gone.
    QUICHE_DCHECK(fin_sent() || !session()->connection()->connected());
    QUIC_DVLOG(1) << " Server: Send QUIC_STREAM_NO_ERROR on stream " << id();
    MaybeSendStopSending(QUIC_STREAM_NO_ERROR);
  }
  QuicSpdyStream::CloseWriteSide();
}
// Stops reading request data. Mirrors CloseWriteSide(): if the write side is
// already closed and the peer has neither finished nor reset, notify the
// peer with STOP_SENDING(QUIC_STREAM_NO_ERROR) before delegating.
void QuicSpdyServerStreamBase::StopReading() {
  if (!fin_received() && !rst_received() && write_side_closed() &&
      !rst_sent()) {
    // A closed write side on this path means the response FIN was sent.
    QUICHE_DCHECK(fin_sent());
    QUIC_DVLOG(1) << " Server: Send QUIC_STREAM_NO_ERROR on stream " << id();
    MaybeSendStopSending(QUIC_STREAM_NO_ERROR);
  }
  QuicSpdyStream::StopReading();
}
// Validates that the received request headers form a legal request:
//  - base-class validation must pass;
//  - no header name other than the recognized pseudo headers may contain ':';
//  - a "host" header, when present, must equal ":authority" (only checked
//    behind the quic_allow_host_in_request2 flag);
//  - the pseudo header set must match the request form:
//      * extended CONNECT (":method: CONNECT" together with ":protocol")
//        requires :scheme, :path and :authority, and the session must have
//        extended CONNECT enabled;
//      * vanilla CONNECT must NOT carry :path or :scheme;
//      * any other request requires :method, :authority, :path and :scheme
//        and must not carry :protocol.
// On failure, records a human-readable reason via
// set_invalid_request_details() and returns false.
bool QuicSpdyServerStreamBase::ValidateReceivedHeaders(
    const QuicHeaderList& header_list) {
  if (!QuicSpdyStream::ValidateReceivedHeaders(header_list)) {
    return false;
  }
  // Presence flags for the pseudo headers seen so far.
  bool saw_connect = false;
  bool saw_protocol = false;
  bool saw_path = false;
  bool saw_scheme = false;
  bool saw_method = false;
  std::optional<std::string> authority;
  std::optional<std::string> host;
  // Set once both ":method: CONNECT" and ":protocol" have been seen,
  // regardless of their relative order in the header list.
  bool is_extended_connect = false;
  for (const std::pair<std::string, std::string>& pair : header_list) {
    if (pair.first == ":method") {
      saw_method = true;
      if (pair.second == "CONNECT") {
        saw_connect = true;
        if (saw_protocol) {
          is_extended_connect = true;
        }
      }
    } else if (pair.first == ":protocol") {
      saw_protocol = true;
      if (saw_connect) {
        is_extended_connect = true;
      }
    } else if (pair.first == ":scheme") {
      saw_scheme = true;
    } else if (pair.first == ":path") {
      saw_path = true;
    } else if (pair.first == ":authority") {
      authority = pair.second;
    } else if (absl::StrContains(pair.first, ":")) {
      // ':' is only legal inside the recognized pseudo header names above.
      set_invalid_request_details(
          absl::StrCat("Unexpected ':' in header ", pair.first, "."));
      QUIC_DLOG(ERROR) << invalid_request_details();
      return false;
    } else if (pair.first == "host") {
      host = pair.second;
    }
    // Checked inside the loop so the request is rejected as soon as an
    // invalid combination becomes detectable.
    if (is_extended_connect) {
      if (!spdy_session()->allow_extended_connect()) {
        set_invalid_request_details(
            "Received extended-CONNECT request while it is disabled.");
        QUIC_DLOG(ERROR) << invalid_request_details();
        return false;
      }
    } else if (saw_method && !saw_connect) {
      if (saw_protocol) {
        set_invalid_request_details(
            "Received non-CONNECT request with :protocol header.");
        QUIC_DLOG(ERROR) << "Receive non-CONNECT request with :protocol.";
        return false;
      }
    }
  }
  if (GetQuicReloadableFlag(quic_allow_host_in_request2)) {
    QUICHE_RELOADABLE_FLAG_COUNT_N(quic_allow_host_in_request2, 2, 3);
    // A "host" header is only accepted when it repeats ":authority" exactly.
    if (host && (!authority || *authority != *host)) {
      QUIC_CODE_COUNT(http3_host_header_does_not_match_authority);
      set_invalid_request_details("Host header does not match authority");
      return false;
    }
  }
  if (is_extended_connect) {
    if (saw_scheme && saw_path && authority) {
      return true;
    }
    set_invalid_request_details(
        "Missing required pseudo headers for extended-CONNECT.");
    QUIC_DLOG(ERROR) << invalid_request_details();
    return false;
  }
  if (saw_connect) {
    // Vanilla CONNECT must not carry :path or :scheme.
    if (saw_path || saw_scheme) {
      set_invalid_request_details(
          "Received invalid CONNECT request with disallowed pseudo header.");
      QUIC_DLOG(ERROR) << invalid_request_details();
      return false;
    }
    return true;
  }
  // Ordinary request: all four pseudo headers are mandatory.
  if (saw_method && authority && saw_path && saw_scheme) {
    return true;
  }
  set_invalid_request_details("Missing required pseudo headers.");
  QUIC_DLOG(ERROR) << invalid_request_details();
  return false;
}
} | #include "quiche/quic/core/http/quic_spdy_server_stream_base.h"
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/core/qpack/value_splitting_header_list.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/http/http_header_block.h"
using testing::_;
namespace quic {
namespace test {
namespace {
// Concrete test subclass; OnBodyAvailable is stubbed out because these tests
// never deliver request body data to the stream.
class TestQuicSpdyServerStream : public QuicSpdyServerStreamBase {
 public:
  TestQuicSpdyServerStream(QuicStreamId id, QuicSpdySession* session,
                           StreamType type)
      : QuicSpdyServerStreamBase(id, session, type) {}
  void OnBodyAvailable() override {}
};
// Fixture: a server-perspective mock session hosting a single
// client-initiated bidirectional stream under test. ActivateStream()
// transfers ownership of `stream_` to the session, so the raw pointer stays
// valid for the fixture's lifetime.
class QuicSpdyServerStreamBaseTest : public QuicTest {
 protected:
  QuicSpdyServerStreamBaseTest()
      : session_(new MockQuicConnection(&helper_, &alarm_factory_,
                                        Perspective::IS_SERVER)) {
    session_.Initialize();
    session_.connection()->SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<NullEncrypter>(session_.perspective()));
    stream_ =
        new TestQuicSpdyServerStream(GetNthClientInitiatedBidirectionalStreamId(
                                         session_.transport_version(), 0),
                                     &session_, BIDIRECTIONAL);
    session_.ActivateStream(absl::WrapUnique(stream_));
    // Advance past time zero so timestamps in the tests are non-trivial.
    helper_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
  }
  QuicSpdyServerStreamBase* stream_ = nullptr;  // owned by session_
  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  MockQuicSpdySession session_;
};
// An early response (reading stopped, FIN sent, then write side closed)
// should signal QUIC_STREAM_NO_ERROR to the peer: STOP_SENDING on HTTP/3,
// RST_STREAM otherwise.
TEST_F(QuicSpdyServerStreamBaseTest,
       SendQuicRstStreamNoErrorWithEarlyResponse) {
  stream_->StopReading();
  if (session_.version().UsesHttp3()) {
    EXPECT_CALL(session_,
                MaybeSendStopSendingFrame(_, QuicResetStreamError::FromInternal(
                                                 QUIC_STREAM_NO_ERROR)))
        .Times(1);
  } else {
    EXPECT_CALL(
        session_,
        MaybeSendRstStreamFrame(
            _, QuicResetStreamError::FromInternal(QUIC_STREAM_NO_ERROR), _))
        .Times(1);
  }
  QuicStreamPeer::SetFinSent(stream_);
  stream_->CloseWriteSide();
}
// After the peer resets the stream, no extra QUIC_STREAM_NO_ERROR signal
// must be sent; only the cancellation/acknowledgement frame is expected.
TEST_F(QuicSpdyServerStreamBaseTest,
       DoNotSendQuicRstStreamNoErrorWithRstReceived) {
  EXPECT_FALSE(stream_->reading_stopped());
  EXPECT_CALL(session_,
              MaybeSendRstStreamFrame(
                  _,
                  QuicResetStreamError::FromInternal(
                      VersionHasIetfQuicFrames(session_.transport_version())
                          ? QUIC_STREAM_CANCELLED
                          : QUIC_RST_ACKNOWLEDGEMENT),
                  _))
      .Times(1);
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_->id(),
                               QUIC_STREAM_CANCELLED, 1234);
  stream_->OnStreamReset(rst_frame);
  if (VersionHasIetfQuicFrames(session_.transport_version())) {
    // In IETF QUIC, both RESET_STREAM and STOP_SENDING are needed to fully
    // close the stream in both directions.
    QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream_->id(),
                                      QUIC_STREAM_CANCELLED);
    session_.OnStopSendingFrame(stop_sending);
  }
  EXPECT_TRUE(stream_->reading_stopped());
  EXPECT_TRUE(stream_->write_side_closed());
}
// A complete extended-CONNECT request is reset only when the session has
// extended CONNECT disabled and the invalid-header flag is on.
TEST_F(QuicSpdyServerStreamBaseTest, AllowExtendedConnect) {
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "CONNECT");
  header_list.OnHeader(":protocol", "webtransport");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeaderBlockEnd(128, 128);
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_EQ(GetQuicReloadableFlag(quic_act_upon_invalid_header) &&
                !session_.allow_extended_connect(),
            stream_->rst_sent());
}
// Same as above, but :protocol arrives before :method — order must not
// matter for extended-CONNECT detection.
TEST_F(QuicSpdyServerStreamBaseTest, AllowExtendedConnectProtocolFirst) {
  QuicHeaderList header_list;
  header_list.OnHeader(":protocol", "webtransport");
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "CONNECT");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeaderBlockEnd(128, 128);
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_EQ(GetQuicReloadableFlag(quic_act_upon_invalid_header) &&
                !session_.allow_extended_connect(),
            stream_->rst_sent());
}
// Extended CONNECT missing :path must be reset with
// QUIC_BAD_APPLICATION_PAYLOAD (HTTP/3 only).
TEST_F(QuicSpdyServerStreamBaseTest, InvalidExtendedConnect) {
  if (!session_.version().UsesHttp3()) {
    return;
  }
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "CONNECT");
  header_list.OnHeader(":protocol", "webtransport");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// Vanilla CONNECT with only :method and :authority is accepted.
TEST_F(QuicSpdyServerStreamBaseTest, VanillaConnectAllowed) {
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "CONNECT");
  header_list.OnHeaderBlockEnd(128, 128);
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_FALSE(stream_->rst_sent());
}
// Vanilla CONNECT carrying :scheme is malformed and must be reset.
TEST_F(QuicSpdyServerStreamBaseTest, InvalidVanillaConnect) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "CONNECT");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// A non-CONNECT request must not carry :protocol.
TEST_F(QuicSpdyServerStreamBaseTest, InvalidNonConnectWithProtocol) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "GET");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeader(":protocol", "webtransport");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// Each of the four mandatory pseudo headers (:scheme, :authority, :method,
// :path) is dropped in turn; every variant must be reset with
// QUIC_BAD_APPLICATION_PAYLOAD.
TEST_F(QuicSpdyServerStreamBaseTest, InvalidRequestWithoutScheme) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":method", "GET");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
TEST_F(QuicSpdyServerStreamBaseTest, InvalidRequestWithoutAuthority) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":method", "GET");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
TEST_F(QuicSpdyServerStreamBaseTest, InvalidRequestWithoutMethod) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
TEST_F(QuicSpdyServerStreamBaseTest, InvalidRequestWithoutPath) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":method", "POST");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// A non-pseudo header name containing ':' is rejected.
TEST_F(QuicSpdyServerStreamBaseTest, InvalidRequestHeader) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":method", "POST");
  header_list.OnHeader("invalid:header", "value");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// A "host" header with no ":authority" is rejected (behind
// quic_allow_host_in_request2, host must mirror authority).
TEST_F(QuicSpdyServerStreamBaseTest, HostHeaderWithoutAuthority) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  SetQuicReloadableFlag(quic_allow_host_in_request2, true);
  QuicHeaderList header_list;
  header_list.OnHeader("host", "www.google.com:4433");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":method", "POST");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// "host" differing from ":authority" is rejected.
TEST_F(QuicSpdyServerStreamBaseTest, HostHeaderWitDifferentAuthority) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  SetQuicReloadableFlag(quic_allow_host_in_request2, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":method", "POST");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeader("host", "mail.google.com:4433");
  header_list.OnHeaderBlockEnd(128, 128);
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_TRUE(stream_->rst_sent());
}
// "host" matching ":authority" exactly is accepted.
TEST_F(QuicSpdyServerStreamBaseTest, ValidHostHeader) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  SetQuicReloadableFlag(quic_allow_host_in_request2, true);
  QuicHeaderList header_list;
  header_list.OnHeader(":authority", "www.google.com:4433");
  header_list.OnHeader(":scheme", "http");
  header_list.OnHeader(":method", "POST");
  header_list.OnHeader(":path", "/path");
  header_list.OnHeader("host", "www.google.com:4433");
  header_list.OnHeaderBlockEnd(128, 128);
  stream_->OnStreamHeaderList(false, 0, header_list);
  EXPECT_FALSE(stream_->rst_sent());
}
// A QPACK-encoded HEADERS frame with an empty header block (no pseudo
// headers at all) must be reset with QUIC_BAD_APPLICATION_PAYLOAD.
TEST_F(QuicSpdyServerStreamBaseTest, EmptyHeaders) {
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  quiche::HttpHeaderBlock empty_header;
  quic::test::NoopQpackStreamSenderDelegate encoder_stream_sender_delegate;
  NoopDecoderStreamErrorDelegate decoder_stream_error_delegate;
  auto qpack_encoder = std::make_unique<quic::QpackEncoder>(
      &decoder_stream_error_delegate, HuffmanEncoding::kEnabled,
      CookieCrumbling::kEnabled);
  qpack_encoder->set_qpack_stream_sender_delegate(
      &encoder_stream_sender_delegate);
  std::string payload =
      qpack_encoder->EncodeHeaderList(stream_->id(), empty_header, nullptr);
  std::string headers_frame_header =
      quic::HttpEncoder::SerializeHeadersFrameHeader(payload.length());
  EXPECT_CALL(
      session_,
      MaybeSendRstStreamFrame(
          _, QuicResetStreamError::FromInternal(QUIC_BAD_APPLICATION_PAYLOAD),
          _));
  stream_->OnStreamFrame(QuicStreamFrame(
      stream_->id(), true, 0, absl::StrCat(headers_frame_header, payload)));
  EXPECT_TRUE(stream_->rst_sent());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_spdy_server_stream_base.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_spdy_server_stream_base_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b811e6c8-d09e-4d7a-93b9-742eb93dce0e | cpp | tensorflow/tensorflow | stochastic_cast_op | tensorflow/compiler/tf2xla/kernels/stochastic_cast_op.cc | tensorflow/core/ops/stochastic_cast_op_test.cc | #include "tensorflow/core/kernels/stochastic_cast_op.h"
#include <string>
#include "absl/status/statusor.h"
#include "tensorflow/compiler/tf2xla/kernels/random_ops_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
namespace tensorflow {
namespace {
// Lowers the StochasticCastToInt op to XLA: converts floating-point input to
// an integer type using stochastic rounding (one uniform random value per
// element decides whether to round up or down).
class StochasticCastToInt : public XlaOpKernel {
  // Index of the value operand; the remaining operands feed the random
  // number generation (see BuildUniformRandoms).
  static constexpr int kInputIndex = 0;
 public:
  explicit StochasticCastToInt(OpKernelConstruction* ctx)
      : XlaOpKernel(ctx),
        device_type_string_(ctx->device_type().type_string()) {
    OP_REQUIRES_OK(ctx, ctx->GetAttr("Tin", &from_type_));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("Tout", &to_type_));
  }
  void Compile(XlaOpKernelContext* ctx) override {
    TensorShape shape;
    shape = ctx->InputShape(kInputIndex);
    // Uniform randoms bounded by 0 and 1, one per input element.
    absl::StatusOr<xla::XlaOp> randoms_or = BuildUniformRandoms(
        ctx, from_type_, device_type_string_, shape, xla::Zero, xla::One);
    OP_REQUIRES_OK(ctx, randoms_or.status());
    xla::XlaOp input = ctx->Input(kInputIndex);
    if (from_type_ == DT_BFLOAT16) {
      // bfloat16 is widened to f32 before the rounding arithmetic.
      input = xla::ConvertElementType(input, xla::F32);
    }
    // Stochastic rounding with random r: for x < 0 use floor(x + r),
    // otherwise floor(x + 1 - r).
    xla::XlaOp result = xla::Select(
        xla::Lt(input, xla::ScalarLike(input, 0)),
        xla::Floor(xla::Add(input, randoms_or.value())),
        xla::Floor(xla::Sub(xla::Add(input, xla::ScalarLike(input, 1)),
                            randoms_or.value())));
    // Values that are already exact integers are passed through unchanged.
    result = xla::Select(xla::Eq(input, xla::Floor(input)), input, result);
    xla::PrimitiveType to_type;
    OP_REQUIRES_OK(ctx, DataTypeToPrimitiveType(to_type_, &to_type));
    result = xla::ConvertElementType(result, to_type);
    ctx->SetOutput(0, result);
  }
 private:
  DataType from_type_;  // source floating-point dtype (attr "Tin")
  DataType to_type_;    // destination integer dtype (attr "Tout")
  std::string device_type_string_;
};
REGISTER_XLA_OP(Name("StochasticCastToInt")
                    .CompileTimeConstantInput("alg")
                    .TypeConstraint("Tin",
                                    {DT_DOUBLE, DT_FLOAT, DT_HALF, DT_BFLOAT16})
                    .TypeConstraint("Tout", {DT_INT32, DT_INT16, DT_INT8}),
                StochasticCastToInt);
}
} | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Shape inference: the output matches the value input's shape; the key and
// counter operands must be rank-1 and the alg operand must be a scalar.
TEST(StochasticCastOpTest, StochasticCastToIntShapeInference) {
  ShapeInferenceTestOp op("StochasticCastToInt");
  INFER_OK(op, "[4,2];[1];[1];[]", "in0");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[4,2];[1,2];[1];[]");
  INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[4,2];[1];[1,2];[]");
  INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[4,2];[1];[1];[1]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/stochastic_cast_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/stochastic_cast_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8e0291d-db3d-484b-b823-43c245721dd8 | cpp | tensorflow/tensorflow | resource_loader | third_party/xla/third_party/tsl/tsl/platform/resource_loader.cc | tensorflow/core/platform/resource_loader_test.cc | #include "tsl/platform/resource_loader.h"
#include <cstdlib>
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/test.h"
namespace tsl {
// Resolves a Bazel data dependency to an absolute path using the
// TEST_SRCDIR and TEST_WORKSPACE environment variables that the test runner
// provides. Aborts the process if either variable is missing. In non-OSS
// builds the workspace-relative path gains a "third_party" prefix.
std::string GetDataDependencyFilepath(const std::string& relative_path) {
  const char* test_srcdir = std::getenv("TEST_SRCDIR");
  if (test_srcdir == nullptr) {
    LOG(FATAL) << "Environment variable TEST_SRCDIR unset!";
  }
  const char* test_workspace = std::getenv("TEST_WORKSPACE");
  if (test_workspace == nullptr) {
    LOG(FATAL) << "Environment variable TEST_WORKSPACE unset!";
  }
  if (kIsOpenSource) {
    return io::JoinPath(test_srcdir, test_workspace, relative_path);
  }
  return io::JoinPath(test_srcdir, test_workspace, "third_party",
                      relative_path);
}
} | #include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Workspace-relative path of a file that is known to exist in the source
// tree (this library's own header).
string DataDependencyPath() {
  return io::JoinPath("tensorflow", "core", "platform", "resource_loader.h");
}
// GetDataDependencyFilepath must resolve to an existing file on disk.
TEST(ResourceLoaderTest, FindsAndOpensFile) {
  string filepath = GetDataDependencyFilepath(DataDependencyPath());
  Status s = Env::Default()->FileExists(filepath);
  EXPECT_TRUE(s.ok()) << "No file found at this location: " << filepath;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/resource_loader.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/resource_loader_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9aff0930-974e-4a79-88e7-15f021c37799 | cpp | tensorflow/tensorflow | verifier | tensorflow/lite/core/tools/verifier.cc | tensorflow/lite/core/tools/verifier_test.cc | #include "tensorflow/lite/core/tools/verifier.h"
#include <stdarg.h>
#include <algorithm>
#include <climits>
#include <complex>
#include <cstdint>
#include <cstring>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/types/optional.h"
#include "flatbuffers/string.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/tools/verifier_internal.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
// Returns the flatbuffer string's C string, or "" when the string object or
// its payload is absent.
const char* NameOrEmptyString(const flatbuffers::String* str) {
  if (str != nullptr && str->c_str() != nullptr) {
    return str->c_str();
  }
  return "";
}
// True when the flatbuffer string is missing or empty.
bool IsNullOrEmptyString(const flatbuffers::String* str) {
  const char* name = NameOrEmptyString(str);
  return *name == '\0';
}
// Forwards a printf-style message to `error_reporter`; a null reporter makes
// this a no-op. The collected va_list resolves to the reporter's
// va_list-taking Report overload through TF_LITE_REPORT_ERROR.
void ReportError(ErrorReporter* error_reporter, const char* format, ...) {
  if (error_reporter == nullptr) {
    return;
  }
  va_list args;
  va_start(args, format);
  TF_LITE_REPORT_ERROR(error_reporter, format, args);
  va_end(args);
}
// Reads a uint32 stored in little-endian byte order at `ptr` (the layout
// flatbuffer string-tensor buffers use for counts and offsets).
uint32_t GetIntPtr(const char* ptr) {
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
  // Big-endian hosts must byte-swap the raw value.
  return flatbuffers::EndianScalar(*reinterpret_cast<const uint32_t*>(ptr));
#else
  return *reinterpret_cast<const uint32_t*>(ptr);
#endif
}
// Upper bound on the declared string count: the offset table of
// (num_strings + 2) int32 entries must fit in a uint32-sized byte count
// without overflowing.
const uint32_t kMaxNumString = UINT_MAX / sizeof(int32_t) - 2;
// Verifies the layout of a string tensor buffer:
//   entry 0:                int32 num_strings
//   entries 1..num_strings+1: int32 byte offsets, each within
//                           [header size, buffer size], with the first equal
//                           to the header size and the last equal to the
//                           total buffer size
//   remainder:              the string payload bytes
// Reports a diagnostic via `error_reporter` and returns false on any
// malformed field.
bool VerifyStringTensorBuffer(const Tensor& tensor, const Buffer& buffer,
                              ErrorReporter* error_reporter) {
  uint32_t buffer_size = buffer.data()->size();
  if (buffer_size < sizeof(uint32_t)) {
    ReportError(error_reporter, "String tensor %s is invalid (empty)",
                NameOrEmptyString(tensor.name()));
    return false;
  }
  const char* buffer_ptr = reinterpret_cast<const char*>(buffer.data()->data());
  uint32_t num_strings = GetIntPtr(buffer_ptr);
  if (num_strings > kMaxNumString) {
    ReportError(error_reporter,
                "String tensor %s has invalid num of string set: %d",
                NameOrEmptyString(tensor.name()), num_strings);
    return false;
  }
  // Size in bytes of the count word plus the offset table.
  uint32_t header_offsets =
      static_cast<uint32_t>(num_strings + 2) * sizeof(int32_t);
  if (buffer_size < header_offsets) {
    ReportError(error_reporter,
                "String tensor %s buffer requires at least %d bytes, but is "
                "allocated with %d bytes",
                NameOrEmptyString(tensor.name()), header_offsets, buffer_size);
    return false;
  }
  uint32_t prev_ptr = header_offsets;
  uint32_t offset = sizeof(int32_t);
  // The first offset entry must point immediately past the header.
  if (GetIntPtr(buffer_ptr + offset) != header_offsets) {
    ReportError(error_reporter,
                "String tensor %s buffer initial offset must be: %d",
                NameOrEmptyString(tensor.name()), header_offsets);
    return false;
  }
  offset += sizeof(int32_t);
  // Every remaining offset must lie within [header end, buffer end].
  for (int i = 1, end = num_strings; i <= end; i++, offset += sizeof(int32_t)) {
    int string_offset = GetIntPtr(buffer_ptr + offset);
    if (string_offset < static_cast<int>(prev_ptr) ||
        string_offset > static_cast<int>(buffer_size)) {
      ReportError(error_reporter,
                  "String tensor %s buffer is invalid: index %d",
                  NameOrEmptyString(tensor.name()), i);
      return false;
    }
  }
  // The last offset must equal the total buffer size, so the final string
  // ends exactly at the end of the buffer.
  if (GetIntPtr(buffer_ptr + offset - sizeof(int32_t)) != buffer_size) {
    ReportError(error_reporter,
                "String tensor %s buffer last offset must be %d",
                NameOrEmptyString(tensor.name()), buffer_size);
    return false;
  }
  return true;
}
// Returns true iff the dimension metadata carries an array_segments vector
// of a supported sparse index type with a non-null values payload.
bool CheckArraySegments(const DimensionMetadata* dim_metadata) {
  if (dim_metadata->array_segments() == nullptr) {
    return false;
  }
  const auto segments_type = dim_metadata->array_segments_type();
  if (segments_type == SparseIndexVector_Int32Vector) {
    return dim_metadata->array_segments_as_Int32Vector()->values() != nullptr;
  }
  if (segments_type == SparseIndexVector_Uint16Vector) {
    return dim_metadata->array_segments_as_Uint16Vector()->values() != nullptr;
  }
  if (segments_type == SparseIndexVector_Uint8Vector) {
    return dim_metadata->array_segments_as_Uint8Vector()->values() != nullptr;
  }
  // Any other union member is unsupported.
  return false;
}
// Returns the element count of array_segments, or -1 when the stored sparse
// index vector type is unsupported.
int GetSizeOfSegments(const DimensionMetadata* dim_metadata) {
  const auto segments_type = dim_metadata->array_segments_type();
  if (segments_type == SparseIndexVector_Int32Vector) {
    return dim_metadata->array_segments_as_Int32Vector()->values()->size();
  }
  if (segments_type == SparseIndexVector_Uint16Vector) {
    return dim_metadata->array_segments_as_Uint16Vector()->values()->size();
  }
  if (segments_type == SparseIndexVector_Uint8Vector) {
    return dim_metadata->array_segments_as_Uint8Vector()->values()->size();
  }
  return -1;
}
// Reads entry `i` of the array_segments vector as an int, independent of the
// storage width (int32/uint16/uint8). Returns -1 for unsupported variants.
int GetValueOfSegmentsAt(const DimensionMetadata* dim_metadata, const int i) {
  const auto segments_type = dim_metadata->array_segments_type();
  if (segments_type == SparseIndexVector_Int32Vector) {
    return static_cast<int>(
        dim_metadata->array_segments_as_Int32Vector()->values()->Get(i));
  }
  if (segments_type == SparseIndexVector_Uint16Vector) {
    return static_cast<int>(
        dim_metadata->array_segments_as_Uint16Vector()->values()->Get(i));
  }
  if (segments_type == SparseIndexVector_Uint8Vector) {
    return static_cast<int>(
        dim_metadata->array_segments_as_Uint8Vector()->values()->Get(i));
  }
  return -1;
}
// Returns true iff `dim_metadata` carries a usable array_indices union: the
// union field is present and the typed values vector inside the selected
// variant is non-null.
bool CheckArrayIndices(const DimensionMetadata* dim_metadata) {
  if (dim_metadata->array_indices() == nullptr) {
    return false;
  }
  const auto indices_type = dim_metadata->array_indices_type();
  if (indices_type == SparseIndexVector_Int32Vector) {
    return dim_metadata->array_indices_as_Int32Vector()->values() != nullptr;
  }
  if (indices_type == SparseIndexVector_Uint16Vector) {
    return dim_metadata->array_indices_as_Uint16Vector()->values() != nullptr;
  }
  if (indices_type == SparseIndexVector_Uint8Vector) {
    return dim_metadata->array_indices_as_Uint8Vector()->values() != nullptr;
  }
  // NONE or an unknown variant: treat the metadata as malformed.
  return false;
}
// Returns the number of entries in the array_indices vector, independent of
// its storage width, or -1 when the union holds an unsupported variant.
int GetSizeOfIndices(const DimensionMetadata* dim_metadata) {
  const auto indices_type = dim_metadata->array_indices_type();
  if (indices_type == SparseIndexVector_Int32Vector) {
    return dim_metadata->array_indices_as_Int32Vector()->values()->size();
  }
  if (indices_type == SparseIndexVector_Uint16Vector) {
    return dim_metadata->array_indices_as_Uint16Vector()->values()->size();
  }
  if (indices_type == SparseIndexVector_Uint8Vector) {
    return dim_metadata->array_indices_as_Uint8Vector()->values()->size();
  }
  return -1;
}
// Reads entry `i` of the array_indices vector as an int, independent of the
// storage width (int32/uint16/uint8).
//
// Returns -1 when the union holds an unsupported or NONE variant. The
// unreachable duplicate `return -1;` that previously followed the switch has
// been removed: every case (including `default`) already returns, which also
// makes this function consistent with GetValueOfSegmentsAt above.
int GetValueOfIndicesAt(const DimensionMetadata* dim_metadata, const int i) {
  switch (dim_metadata->array_indices_type()) {
    case SparseIndexVector_Int32Vector:
      return static_cast<int>(
          dim_metadata->array_indices_as_Int32Vector()->values()->Get(i));
    case SparseIndexVector_Uint16Vector:
      return static_cast<int>(
          dim_metadata->array_indices_as_Uint16Vector()->values()->Get(i));
    case SparseIndexVector_Uint8Vector:
      return static_cast<int>(
          dim_metadata->array_indices_as_Uint8Vector()->values()->Get(i));
    default:
      return -1;
  }
}
// Validates the per-dimension sparsity metadata in traversal order and
// returns the number of physically stored values, or absl::nullopt if any
// dimension's metadata is inconsistent.
//
// `dim_sizes` holds the expanded (block-split) size of each dimension,
// indexed by original dimension id.
absl::optional<uint64_t> VerifyAndCountElements(
    const SparsityParameters& sparsity, const std::vector<int>& dim_sizes) {
  const int total_level = sparsity.traversal_order()->size();
  uint64_t num_elements = 1;
  for (int i = 0; i < total_level; i++) {
    const int original_dim = sparsity.traversal_order()->Get(i);
    const auto* dim_metadata = sparsity.dim_metadata()->Get(i);
    if (dim_metadata->format() == DimensionType_DENSE) {
      // Dense level: the declared size must match the expanded dim size.
      if (dim_metadata->dense_size() != dim_sizes[original_dim]) {
        return absl::nullopt;
      }
      num_elements *= dim_metadata->dense_size();
    } else {
      // Sparse (segments/indices) level: both unions must be present and
      // well-formed before they are dereferenced below.
      if (!CheckArraySegments(dim_metadata) ||
          !CheckArrayIndices(dim_metadata)) {
        return absl::nullopt;
      }
      int array_segments_size = GetSizeOfSegments(dim_metadata);
      int array_indices_size = GetSizeOfIndices(dim_metadata);
      // Segment offsets must be non-negative and non-decreasing.
      for (int j = 0; j < array_segments_size - 1; j++) {
        if (GetValueOfSegmentsAt(dim_metadata, j) < 0 ||
            GetValueOfSegmentsAt(dim_metadata, j + 1) < 0 ||
            GetValueOfSegmentsAt(dim_metadata, j) >
                GetValueOfSegmentsAt(dim_metadata, j + 1)) {
          return absl::nullopt;
        }
      }
      // One segment range per element produced by the outer levels.
      if (static_cast<int>(num_elements) != array_segments_size - 1) {
        return absl::nullopt;
      }
      // The final segment offset must equal the total number of indices.
      if (array_indices_size !=
          GetValueOfSegmentsAt(dim_metadata, array_segments_size - 1)) {
        return absl::nullopt;
      }
      // Every index must address a valid position in this dimension.
      for (int j = 0; j < array_indices_size; j++) {
        if (GetValueOfIndicesAt(dim_metadata, j) < 0 ||
            GetValueOfIndicesAt(dim_metadata, j) >= dim_sizes[original_dim]) {
          return absl::nullopt;
        }
      }
      // After a sparse level, the live element count is the index count.
      num_elements = array_indices_size;
    }
  }
  return num_elements;
}
// Validates the overall SparsityParameters of `tensor` (traversal order,
// block map, expanded dimension sizes) and returns the number of stored
// elements, or absl::nullopt if the parameters are malformed.
absl::optional<uint64_t> VerifyAndCountSparseElements(const Tensor& tensor) {
  const auto* sparsity = tensor.sparsity();
  if (sparsity->traversal_order() == nullptr ||
      sparsity->dim_metadata() == nullptr) {
    return absl::nullopt;
  }
  const int total_dims = sparsity->traversal_order()->size();
  const int original_rank = tensor.shape()->size();
  const int sparsity_dim_metadata_size = sparsity->dim_metadata()->size();
  // One metadata entry per traversal level, and at least one level per
  // original dimension (extra levels come from blocking).
  if (total_dims < original_rank || sparsity_dim_metadata_size != total_dims) {
    return absl::nullopt;
  }
  const int block_rank = total_dims - original_rank;
  if (block_rank > 0) {
    // Blocked layouts require a block_map with one entry per block dim.
    if (sparsity->block_map() == nullptr) {
      return absl::nullopt;
    }
    const int sparse_rank = sparsity->block_map()->size();
    if (sparse_rank != block_rank) {
      return absl::nullopt;
    }
  }
  // The first `original_rank` traversal entries must be a permutation of
  // 0..original_rank-1, and the remainder of original_rank..total_dims-1.
  std::vector<int> traversal_order(total_dims);
  for (int i = 0; i < total_dims; i++) {
    traversal_order[i] = sparsity->traversal_order()->Get(i);
  }
  std::sort(traversal_order.begin(), traversal_order.begin() + original_rank);
  for (int i = 0; i < original_rank; i++) {
    if (traversal_order[i] != i) {
      return absl::nullopt;
    }
  }
  std::sort(traversal_order.begin() + original_rank, traversal_order.end());
  for (int i = original_rank; i < total_dims; i++) {
    if (traversal_order[i] != i) {
      return absl::nullopt;
    }
  }
  // Build the expanded per-level sizes: original dims first, then each block
  // dimension takes its dense size while shrinking its mapped original dim.
  std::vector<int> expanded_dim_sizes;
  expanded_dim_sizes.resize(total_dims);
  for (int i = 0; i < original_rank; i++) {
    expanded_dim_sizes[i] = tensor.shape()->Get(i);
  }
  for (int i = 0; i < block_rank; i++) {
    int original_block_dim =
        sparsity->traversal_order()->Get(i + original_rank);
    if (original_block_dim < 0 || original_block_dim >= total_dims) {
      return absl::nullopt;
    }
    int block_dim_size =
        sparsity->dim_metadata()->Get(i + original_rank)->dense_size();
    // Non-positive block sizes are invalid (also guards the division below).
    if (block_dim_size <= 0) {
      return absl::nullopt;
    }
    expanded_dim_sizes[original_block_dim] = block_dim_size;
    int mapped_block_dim = sparsity->block_map()->Get(i);
    if (mapped_block_dim < 0 || mapped_block_dim >= total_dims) {
      return absl::nullopt;
    }
    expanded_dim_sizes[mapped_block_dim] /= block_dim_size;
  }
  return VerifyAndCountElements(*sparsity, expanded_dim_sizes);
}
// Validates that a non-string tensor's backing buffer holds exactly the
// number of bytes implied by the tensor's shape (or sparsity metadata) and
// element type. Reports a descriptive error and returns false on mismatch.
bool VerifyNumericTensorBuffer(const Tensor& tensor, const Buffer& buffer,
                               ErrorReporter* error_reporter) {
  uint64_t bytes_required = 1;
  // Tensors without a shape are accepted without any size check.
  if (!tensor.shape()) {
    return true;
  }
  if (tensor.sparsity() != nullptr) {
    // Sparse tensor: the element count comes from the sparsity metadata.
    const auto num_elements = VerifyAndCountSparseElements(tensor);
    if (!num_elements.has_value()) {
      ReportError(error_reporter, "Tensor %s has invalid sparsity parameters",
                  NameOrEmptyString(tensor.name()));
      return false;
    }
    bytes_required = num_elements.value();
    if (bytes_required > UINT_MAX) {
      ReportError(error_reporter, "Tensor %s dimension overflow",
                  NameOrEmptyString(tensor.name()));
      return false;
    }
  } else {
    // Dense tensor: element count is the product of the dimensions, with an
    // overflow check after every multiplication.
    for (int dim : *tensor.shape()) {
      bytes_required *= dim;
      if (bytes_required > UINT_MAX) {
        ReportError(error_reporter, "Tensor %s dimension overflow",
                    NameOrEmptyString(tensor.name()));
        return false;
      }
    }
  }
  // Scale the element count by the per-element byte width.
  switch (tensor.type()) {
    case TensorType_FLOAT32:
      bytes_required *= sizeof(float);
      break;
    case TensorType_FLOAT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_BFLOAT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_FLOAT64:
      bytes_required *= sizeof(double);
      break;
    case TensorType_INT32:
      bytes_required *= sizeof(int32_t);
      break;
    case TensorType_UINT32:
      bytes_required *= sizeof(uint32_t);
      break;
    case TensorType_INT4:
      // NOTE(review): INT4 is sized at one byte per element here — confirm
      // this matches the expected (unpacked) buffer layout.
      bytes_required *= sizeof(int8_t);
      break;
    case TensorType_UINT8:
      bytes_required *= sizeof(uint8_t);
      break;
    case TensorType_INT8:
      bytes_required *= sizeof(int8_t);
      break;
    case TensorType_INT64:
      bytes_required *= sizeof(int64_t);
      break;
    case TensorType_UINT64:
      bytes_required *= sizeof(uint64_t);
      break;
    case TensorType_BOOL:
      bytes_required *= sizeof(bool);
      break;
    case TensorType_INT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_UINT16:
      bytes_required *= sizeof(uint16_t);
      break;
    case TensorType_COMPLEX64:
      bytes_required *= sizeof(std::complex<float>);
      break;
    case TensorType_COMPLEX128:
      bytes_required *= sizeof(std::complex<double>);
      break;
    default:
      ReportError(error_reporter, "Tensor %s invalid type: %d",
                  NameOrEmptyString(tensor.name()), tensor.type());
      return false;
  }
  if (bytes_required > UINT_MAX) {
    ReportError(error_reporter, "Tensor %s dimension overflow",
                NameOrEmptyString(tensor.name()));
    return false;
  }
  // The buffer must match exactly; both too-large and too-small fail.
  if (bytes_required != buffer.data()->size()) {
    ReportError(
        error_reporter,
        "Tensor %s requires %d bytes, but is allocated with %d bytes buffer",
        NameOrEmptyString(tensor.name()), bytes_required,
        buffer.data()->size());
    return false;
  }
  return true;
}
using flatbuffers::Offset;
using flatbuffers::Vector;
// Every operator in the subgraph must declare both an inputs and an outputs
// vector; a missing vector is reported and fails verification.
bool VerifyOperators(const Vector<Offset<Operator>>& operators,
                     ErrorReporter* error_reporter) {
  for (const auto* op : operators) {
    if (op->inputs() == nullptr) {
      ReportError(error_reporter, "Missing 'inputs' for operator.");
      return false;
    }
    if (op->outputs() == nullptr) {
      ReportError(error_reporter, "Missing 'outputs' for operator.");
      return false;
    }
  }
  return true;
}
// A tensor is constant when it points at a valid, in-range model buffer
// (index > 0) that actually carries data.
bool IsConstantTensor(const Tensor& tensor, const Model& model) {
  const auto buffer_index = tensor.buffer();
  if (buffer_index == 0 || !model.buffers()) {
    return false;
  }
  if (buffer_index >= model.buffers()->size()) {
    return false;
  }
  const auto* buffer = model.buffers()->Get(buffer_index);
  return buffer != nullptr && buffer->data() != nullptr;
}
// Checks dataflow consistency inside one subgraph: every (non-optional) op
// input must be a constant, a variable, a subgraph input, or an earlier
// op's output; op outputs may not overwrite constants, variables, subgraph
// inputs, or a previous op's output (which would indicate a cycle).
bool VerifySubGraphConsistency(const Model& model, const SubGraph& subgraph,
                               ErrorReporter* error_reporter) {
  absl::flat_hash_set<int> subgraph_input_tensors, constant_tensors,
      variable_tensors, output_tensors;
  // Classify every tensor index up front: constants (buffer-backed),
  // variables, and subgraph inputs; output_tensors fills in as ops execute.
  if (subgraph.tensors()) {
    for (int i = 0, end = subgraph.tensors()->size(); i < end; ++i) {
      const auto* tensor = subgraph.tensors()->Get(i);
      if (IsConstantTensor(*tensor, model)) {
        constant_tensors.insert(i);
      } else if (tensor->is_variable()) {
        variable_tensors.insert(i);
      }
    }
  }
  if (subgraph.inputs()) {
    for (const int tensor_idx : *subgraph.inputs()) {
      subgraph_input_tensors.insert(tensor_idx);
    }
  }
  if (subgraph.operators()) {
    // Walk ops in execution order so output_tensors models "produced so far".
    for (int op_idx = 0, end = subgraph.operators()->size(); op_idx < end;
         ++op_idx) {
      const auto* op = subgraph.operators()->Get(op_idx);
      if (!model.operator_codes() ||
          (op->opcode_index() >= model.operator_codes()->size())) {
        ReportError(error_reporter,
                    "Operator %d does not exist in model op codes",
                    op->opcode_index());
        return false;
      }
      const auto& opcode = model.operator_codes()->Get(op->opcode_index());
      auto builtin_code = GetBuiltinCode(opcode);
      // Each input must already have a producer of some kind.
      for (const int input_idx : *op->inputs()) {
        if (input_idx == kTfLiteOptionalTensor) continue;
        if (constant_tensors.find(input_idx) == constant_tensors.end() &&
            variable_tensors.find(input_idx) == variable_tensors.end() &&
            subgraph_input_tensors.find(input_idx) ==
                subgraph_input_tensors.end() &&
            output_tensors.find(input_idx) == output_tensors.end()) {
          ReportError(error_reporter,
                      "Input tensor %d to op %d (%s) is not produced",
                      input_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
          return false;
        }
      }
      // Each output must be a fresh, writable tensor.
      for (const int output_idx : *op->outputs()) {
        if (constant_tensors.find(output_idx) != constant_tensors.end()) {
          ReportError(
              error_reporter, "Output tensor %d to op %d (%s) is a constant",
              output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
          return false;
        } else if (variable_tensors.find(output_idx) !=
                   variable_tensors.end()) {
          ReportError(
              error_reporter, "Output tensor %d to op %d (%s) is a variable",
              output_idx, op_idx, EnumNameBuiltinOperator(builtin_code));
          return false;
        } else if (subgraph_input_tensors.find(output_idx) !=
                   subgraph_input_tensors.end()) {
          ReportError(error_reporter,
                      "Output tensor %d to op %d (%s) is a subgraph input",
                      output_idx, op_idx,
                      EnumNameBuiltinOperator(builtin_code));
          return false;
        } else if (output_tensors.find(output_idx) != output_tensors.end()) {
          ReportError(error_reporter,
                      "Output tensor %d to op %d (%s) is an output from "
                      "another op. There is a cycle in the graph",
                      output_idx, op_idx,
                      EnumNameBuiltinOperator(builtin_code));
          return false;
        }
        output_tensors.insert(output_idx);
      }
    }
  }
  return true;
}
// A model must have a subgraphs section, and each subgraph must have an
// operators section whose ops are well-formed and dataflow-consistent.
bool VerifySubGraphs(const Model& model, ErrorReporter* error_reporter) {
  if (!model.subgraphs()) {
    ReportError(error_reporter, "Missing 'subgraphs' section.");
    return false;
  }
  for (const auto* subgraph : *model.subgraphs()) {
    if (!subgraph->operators()) {
      ReportError(error_reporter, "Missing 'operators' section in subgraph.");
      return false;
    }
    // Per-op structural checks first, then whole-subgraph dataflow checks.
    if (!VerifyOperators(*subgraph->operators(), error_reporter) ||
        !VerifySubGraphConsistency(model, *subgraph, error_reporter)) {
      return false;
    }
  }
  return true;
}
// Checks that every tensor's buffer index is in range and that any backing
// buffer carrying data is exactly the size the tensor requires.
bool VerifyTensors(const Model& model, ErrorReporter* error_reporter) {
  if (!model.subgraphs()) {
    return true;
  }
  if (!model.buffers()) {
    ReportError(error_reporter, "Missing 'buffers' section.");
    return false;
  }
  for (const auto* subgraph : *model.subgraphs()) {
    if (!subgraph->tensors()) {
      continue;
    }
    for (const auto* tensor : *subgraph->tensors()) {
      // Buffer index 0 is the conventional "no data" sentinel.
      if (!tensor->buffer()) {
        continue;
      }
      if (tensor->buffer() >= model.buffers()->size()) {
        ReportError(error_reporter, "Tensor %s invalid buffer index: %d",
                    NameOrEmptyString(tensor->name()), tensor->buffer());
        return false;
      }
      auto* buffer = model.buffers()->Get(tensor->buffer());
      if (!buffer) {
        ReportError(error_reporter, "Tensor %s buffer %d not set",
                    NameOrEmptyString(tensor->name()), tensor->buffer());
        return false;
      }
      if (!buffer->data()) {
        // Buffer present but empty: nothing to size-check.
        continue;
      }
      const bool buffer_ok =
          tensor->type() == TensorType_STRING
              ? VerifyStringTensorBuffer(*tensor, *buffer, error_reporter)
              : VerifyNumericTensorBuffer(*tensor, *buffer, error_reporter);
      if (!buffer_ok) {
        return false;
      }
    }
  }
  return true;
}
// Checks that every operator code used by the model is resolvable. Custom
// ops referenced only from validation subgraphs may be unresolved; any op
// reachable from a regular subgraph must be found by `resolver`.
//
// NOTE(review): iterates *model.subgraphs() unconditionally — assumes the
// model already passed VerifySubGraphs (true for the public Verify entry
// points below); confirm before calling VerifyOps in isolation.
bool VerifyOps(const Model& model, const OpResolver& resolver,
               ErrorReporter* error_reporter) {
  if (!model.operator_codes()) {
    return true;
  }
  // Partition opcode indices by whether they are used from a validation
  // subgraph or a regular subgraph.
  absl::flat_hash_set<int> regular_code_indices;
  absl::flat_hash_set<int> validation_code_indices;
  for (const auto* subgraph : *model.subgraphs()) {
    if (!subgraph->operators()) {
      continue;
    }
    if (subgraph->name() && IsValidationSubgraph(subgraph->name()->c_str())) {
      for (const auto& op : *(subgraph->operators())) {
        validation_code_indices.insert(op->opcode_index());
      }
    } else {
      for (const auto* op : *(subgraph->operators())) {
        regular_code_indices.insert(op->opcode_index());
      }
    }
  }
  for (int i = 0; i < model.operator_codes()->size(); i++) {
    const auto* opcode = model.operator_codes()->Get(i);
    auto builtin_code = GetBuiltinCode(opcode);
    if (builtin_code < BuiltinOperator_MIN ||
        builtin_code > BuiltinOperator_MAX) {
      ReportError(error_reporter, "Operator id '%d' is out of range.",
                  builtin_code);
      return false;
    }
    if (builtin_code == BuiltinOperator_CUSTOM) {
      if (IsNullOrEmptyString(opcode->custom_code())) {
        ReportError(error_reporter,
                    "Invalid custom op name, cannot be null/empty.");
        return false;
      } else if (!resolver.FindOp(opcode->custom_code()->c_str(),
                                  opcode->version())) {
        // Unresolved custom ops are tolerated only when used exclusively by
        // validation subgraphs.
        if (regular_code_indices.contains(i) ||
            !validation_code_indices.contains(i)) {
          ReportError(error_reporter, "Unsupported custom op: %s, version: %d",
                      opcode->custom_code()->c_str(), opcode->version());
          return false;
        }
      }
    } else {
      if (!resolver.FindOp(builtin_code, opcode->version())) {
        ReportError(error_reporter, "Unsupported builtin op: %s, version: %d",
                    EnumNameBuiltinOperator(builtin_code), opcode->version());
        return false;
      }
    }
  }
  return true;
}
// Structural verification shared by both public Verify() entry points:
// non-null model, matching schema version, then subgraph and tensor checks.
bool VerifyModel(const Model* model, ErrorReporter* error_reporter) {
  // A null model means the flatbuffer failed structural verification.
  if (model == nullptr) {
    ReportError(error_reporter, "Invalid flatbuffer format");
    return false;
  }
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    ReportError(error_reporter, "Invalid model version %d", model->version());
    return false;
  }
  return VerifySubGraphs(*model, error_reporter) &&
         VerifyTensors(*model, error_reporter);
}
}
bool Verify(const void* buf, size_t len, ErrorReporter* error_reporter) {
const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
return VerifyModel(model, error_reporter);
}
// Verifies the flatbuffer structurally and additionally checks that every
// op the model uses can be resolved by `resolver`. Short-circuits so the
// op check only runs on a structurally valid (non-null) model.
bool Verify(const void* buf, size_t len, const OpResolver& resolver,
            ErrorReporter* error_reporter) {
  const Model* model = internal::VerifyFlatBufferAndGetModel(buf, len);
  return VerifyModel(model, error_reporter) &&
         VerifyOps(*model, resolver, error_reporter);
}
} | #include "tensorflow/lite/core/tools/verifier.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/buffer.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_conversion_utils.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "tensorflow/lite/error_reporter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/util.h"
#include "tensorflow/lite/version.h"
namespace tflite {
namespace {
// Path to a checked-in FlatBuffer model containing sparse tensors, used by
// the sparse-model verification tests.
static const char* kSparseTensorTestModel =
    "tensorflow/lite/testdata/sparse_tensor.bin";
}
// ErrorReporter that captures the most recent formatted message in a
// fixed-size buffer so tests can assert on verifier diagnostics.
class MockErrorReporter : public ErrorReporter {
 public:
  MockErrorReporter() : buffer_size_(0) {}
  // Formats the message into buffer_; only the last report is retained.
  int Report(const char* format, va_list args) override {
    buffer_size_ = vsnprintf(buffer_, kBufferSize, format, args);
    return buffer_size_;
  }
  int GetBufferSize() { return buffer_size_; }
  // Returns the last reported message as a string.
  string GetAsString() const { return string(buffer_, buffer_size_); }

 private:
  static constexpr int kBufferSize = 256;
  char buffer_[kBufferSize];
  int buffer_size_;
};
// Test helper that assembles a TFLite FlatBuffer model piece by piece
// (tensors, operators, buffers) and runs both Verify overloads against it.
class TfLiteFlatbufferModelBuilder {
 public:
  TfLiteFlatbufferModelBuilder() {
    // Buffer 0 is the conventional empty buffer shared by data-less tensors.
    buffers_.push_back(
        CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
  }
  // Registers the given builtin and custom ops with the resolver used by
  // VerifyWithOpResolver().
  TfLiteFlatbufferModelBuilder(const std::vector<BuiltinOperator>& builtin_ops,
                               const std::vector<std::string>& custom_ops) {
    buffers_.push_back(
        CreateBuffer(builder_, builder_.CreateVector(std::vector<uint8_t>{})));
    for (const auto& iter : builtin_ops) {
      resolver_.AddBuiltin(iter, &fake_op_);
    }
    for (const auto& iter : custom_ops) {
      resolver_.AddCustom(iter.data(), &fake_op_);
    }
  }
  // Adds a tensor; a non-empty `buffer` allocates a new model buffer,
  // otherwise the tensor references the shared empty buffer 0.
  void AddTensor(const std::vector<int>& shape, tflite::TensorType type,
                 const std::vector<uint8_t>& buffer, const char* name,
                 const bool is_variable = false) {
    int buffer_index = 0;
    if (!buffer.empty()) {
      buffer_index = buffers_.size();
      buffers_.push_back(CreateBuffer(builder_, builder_.CreateVector(buffer)));
    }
    if (shape.empty()) {
      // Shapeless tensor: pass a null shape vector to the flatbuffer.
      tensors_.push_back(CreateTensorDirect(builder_, nullptr, type,
                                            buffer_index, name,
                                            0, is_variable));
      return;
    }
    tensors_.push_back(CreateTensorDirect(builder_, &shape, type, buffer_index,
                                          name, 0,
                                          is_variable));
  }
  // Adds an operator, appending a new operator code for it each time.
  void AddOperator(const std::vector<int32_t>& inputs,
                   const std::vector<int32_t>& outputs,
                   tflite::BuiltinOperator builtin_op, const char* custom_op) {
    operator_codes_.push_back(
        CreateOperatorCodeDirect(builder_, builtin_op, custom_op));
    operators_.push_back(CreateOperator(
        builder_, operator_codes_.size() - 1, builder_.CreateVector(inputs),
        builder_.CreateVector(outputs), BuiltinOptions_NONE,
        0,
        0, tflite::CustomOptionsFormat_FLEXBUFFERS));
  }
  // Controls whether empty vectors are serialized as empty flatbuffer
  // vectors or omitted entirely (null), to exercise both verifier paths.
  enum BuilderMode {
    kBuilderModeEmptyVectorIsEmpty,
    kBuilderModeEmptyVectorIsNull,
    kBuilderModeDefault = kBuilderModeEmptyVectorIsEmpty,
  };
  // Finalizes a single-subgraph model with the accumulated pieces.
  void FinishModel(const std::vector<int32_t>& inputs,
                   const std::vector<int32_t>& outputs,
                   BuilderMode mode = kBuilderModeDefault) {
    auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>({CreateSubGraph(
        builder_, CreateVector(tensors_, mode), CreateVector(inputs, mode),
        CreateVector(outputs, mode), CreateVector(operators_, mode),
        builder_.CreateString("test_subgraph"))});
    auto result = CreateModel(
        builder_, TFLITE_SCHEMA_VERSION, CreateVector(operator_codes_, mode),
        CreateVector(subgraph, mode), builder_.CreateString("test_model"),
        CreateVector(buffers_, mode));
    tflite::FinishModelBuffer(builder_, result);
  }
  // Runs the structural-only Verify overload.
  bool Verify() {
    return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
                          &mock_reporter_);
  }
  // Runs the Verify overload that also checks op resolvability.
  bool VerifyWithOpResolver() {
    return tflite::Verify(builder_.GetBufferPointer(), builder_.GetSize(),
                          resolver_, &mock_reporter_);
  }
  // Last diagnostic emitted by either Verify call.
  string GetErrorString() { return mock_reporter_.GetAsString(); }

 private:
  // Serializes `v`, honoring the empty-vector policy in `mode`.
  template <typename T>
  flatbuffers::Offset<flatbuffers::Vector<T>> CreateVector(
      const std::vector<T>& v, BuilderMode mode) {
    if (mode == kBuilderModeEmptyVectorIsNull && v.empty()) {
      return 0;
    }
    return builder_.CreateVector(v);
  }
  flatbuffers::FlatBufferBuilder builder_;
  MutableOpResolver resolver_;
  TfLiteRegistration fake_op_{};
  MockErrorReporter mock_reporter_;
  std::vector<flatbuffers::Offset<Operator>> operators_;
  std::vector<flatbuffers::Offset<OperatorCode>> operator_codes_;
  std::vector<flatbuffers::Offset<Tensor>> tensors_;
  std::vector<flatbuffers::Offset<Buffer>> buffers_;
};
// A model with no subgraphs section must be rejected by both overloads.
TEST(VerifyModel, TestEmptyModel) {
  flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
                           0, 0,
                           0, 0);
  ::tflite::FinishModelBuffer(builder, model);
  MockErrorReporter mock_reporter;
  ASSERT_FALSE(
      Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
  ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
                      MutableOpResolver{}, &mock_reporter));
  EXPECT_THAT(mock_reporter.GetAsString(),
              ::testing::ContainsRegex("Missing 'subgraphs' section."));
}
// A shapeless tensor backed by the empty buffer is accepted.
TEST(VerifyModel, TestEmptyVector) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "test");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
  builder.AddTensor({}, TensorType_UINT8, {}, "empty_vector");
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  builder.FinishModel({0, 1}, {3});
  ASSERT_TRUE(builder.Verify());
  ASSERT_TRUE(builder.VerifyWithOpResolver());
}
// Happy path: a well-formed one-op model passes with no diagnostics.
TEST(VerifyModel, TestSimpleModel) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  builder.FinishModel({0, 1}, {2});
  ASSERT_TRUE(builder.Verify());
  ASSERT_TRUE(builder.VerifyWithOpResolver());
  EXPECT_EQ("", builder.GetErrorString());
}
// With a null tensors section, op inputs cannot be resolved to producers.
TEST(VerifyModel, TestNullTensors) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  builder.FinishModel(
      {}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_EQ(builder.GetErrorString(),
            "Input tensor 0 to op 0 (CUSTOM) is not produced");
}
// A subgraph with a null operators section is rejected.
TEST(VerifyModel, TestNullOperators) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.FinishModel(
      {0, 1}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(
      builder.GetErrorString(),
      ::testing::ContainsRegex("Missing 'operators' section in subgraph"));
}
// A null subgraph-inputs vector is allowed when every op input is a constant.
TEST(VerifyModel, TestNullInputs) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  builder.FinishModel(
      {}, {2}, TfLiteFlatbufferModelBuilder::kBuilderModeEmptyVectorIsNull);
  ASSERT_TRUE(builder.Verify());
  ASSERT_TRUE(builder.VerifyWithOpResolver());
  EXPECT_EQ("", builder.GetErrorString());
}
// Arbitrary bytes that are not a flatbuffer are rejected up front.
TEST(VerifyModel, TestCorruptedData) {
  std::string model = "123";
  MockErrorReporter mock_reporter;
  ASSERT_FALSE(
      Verify(model.data(), model.size(), MutableOpResolver{}, &mock_reporter));
  EXPECT_THAT(mock_reporter.GetAsString(),
              ::testing::ContainsRegex("Invalid flatbuffer format"));
}
// A schema version other than TFLITE_SCHEMA_VERSION is rejected.
TEST(VerifyModel, TestUnsupportedVersion) {
  flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, 1, 0,
                           0, 0, 0);
  ::tflite::FinishModelBuffer(builder, model);
  MockErrorReporter mock_reporter;
  ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
                      MutableOpResolver{}, &mock_reporter));
  EXPECT_THAT(mock_reporter.GetAsString(),
              ::testing::ContainsRegex("Invalid model version 1"));
}
// Flipping any single byte of a serialized model must make it fail
// verification (fuzz-style robustness check over every byte position).
TEST(VerifyModel, TestRandomModificationIsNotAllowed) {
  flatbuffers::FlatBufferBuilder builder;
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION,
                           0,
                           0, 0, 0);
  ::tflite::FinishModelBuffer(builder, model);
  std::string model_content(reinterpret_cast<char*>(builder.GetBufferPointer()),
                            builder.GetSize());
  for (size_t i = 0; i < model_content.size(); i++) {
    model_content[i] = (model_content[i] + 137) % 255;
    EXPECT_FALSE(Verify(model_content.data(), model_content.size(),
                        MutableOpResolver{}, DefaultErrorReporter()))
        << "Fail at position: " << i;
  }
}
// A 2x3 UINT8 tensor needs 6 bytes; a 4-byte buffer must be rejected.
TEST(VerifyModel, TestIntTensorShapeIsGreaterThanBuffer) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex("Tensor input requires 6 bytes, but is "
                                       "allocated with 4 bytes buffer"));
}
// Oversized buffers are rejected too: the size check is exact, not >=.
TEST(VerifyModel, TestIntTensorShapeIsSmallerThanBuffer) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor({2, 1}, TensorType_UINT8, {1, 2, 3, 4}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex("Tensor input requires 2 bytes, but is "
                                       "allocated with 4 bytes buffer"));
}
// A dimension product exceeding UINT_MAX is reported as overflow.
TEST(VerifyModel, TestIntTensorShapeOverflow) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor({1024, 2048, 4096}, TensorType_UINT8, {1, 2, 3, 4},
                    "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex("Tensor input dimension overflow"));
}
// A hand-built model whose tensor points at buffer index 2 while only one
// buffer exists; the missing operators section is what trips verification.
TEST(VerifyModel, TensorBufferIsNotValid) {
  flatbuffers::FlatBufferBuilder builder;
  std::vector<int> shape = {2, 3};
  auto tensors = builder.CreateVector(std::vector<flatbuffers::Offset<Tensor>>{
      CreateTensorDirect(builder, &shape, TensorType_INT32, 2,
                         "input", 0)});
  auto subgraph = std::vector<flatbuffers::Offset<SubGraph>>(
      {CreateSubGraph(builder, tensors, 0, 0,
                      0, builder.CreateString("Main"))});
  auto buffers = builder.CreateVector(std::vector<flatbuffers::Offset<Buffer>>{
      CreateBuffer(builder, builder.CreateVector(
                                std::vector<uint8_t>{1, 2, 3, 4, 5, 6})),
  });
  auto model = CreateModel(builder, TFLITE_SCHEMA_VERSION, 0,
                           builder.CreateVector(subgraph),
                           builder.CreateString("SmartReply"), buffers);
  ::tflite::FinishModelBuffer(builder, model);
  MockErrorReporter mock_reporter;
  ASSERT_FALSE(
      Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
  ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(),
                      MutableOpResolver{}, &mock_reporter));
  EXPECT_THAT(
      mock_reporter.GetAsString(),
      ::testing::ContainsRegex("Missing 'operators' section in subgraph."));
}
// A string tensor buffer too small to even hold its header is rejected.
TEST(VerifyModel, StringTensorIsEmpty) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor({2}, TensorType_STRING, {0x00}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_EQ(builder.GetErrorString(), "String tensor input is invalid (empty)");
}
// A huge num_strings header makes the required header size exceed the
// buffer (the negative byte count comes from 32-bit overflow in the check).
TEST(VerifyModel, StringTensorHasInvalidNumString) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {0x00, 0x00, 0x00, 0x20, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'},
      "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(
      builder.GetErrorString(),
      ::testing::ContainsRegex(
          "String tensor input buffer requires at least -2147483640 bytes, "
          "but is allocated with 18 bytes"));
}
// The first string offset must equal the header size (16 for two strings).
TEST(VerifyModel, StringTensorOffsetTooSmall) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 12, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B'}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "String tensor input buffer initial offset must be: 16"));
}
// An intermediate offset beyond the buffer end is flagged by index.
TEST(VerifyModel, StringTensorOffsetOutOfRange) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 22, 0, 0, 0, 'A', 'B'}, "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "String tensor input buffer is invalid: index 2"));
}
// The final offset must equal the total buffer size (19 here, not 18).
TEST(VerifyModel, StringTensorIsLargerThanRequired) {
  TfLiteFlatbufferModelBuilder builder;
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 18, 0, 0, 0, 'A', 'B', 'C'},
      "input");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "String tensor input buffer last offset must be 19"));
}
// Both a registered builtin and a registered custom op resolve cleanly.
TEST(VerifyModel, AllOpsAreSupported) {
  TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
  builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output2");
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  builder.AddOperator({0, 1}, {3}, BuiltinOperator_CUSTOM, "CustomOp");
  builder.FinishModel({}, {});
  ASSERT_TRUE(builder.Verify());
  ASSERT_TRUE(builder.VerifyWithOpResolver());
  EXPECT_EQ("", builder.GetErrorString());
}
// Structural verification passes, but the resolver overload rejects an
// unregistered builtin (ADD registered as SUB only).
TEST(VerifyModel, UseUnsupportedBuiltinOps) {
  TfLiteFlatbufferModelBuilder builder({BuiltinOperator_SUB}, {"CustomOp"});
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  builder.FinishModel({}, {});
  ASSERT_TRUE(builder.Verify());
  EXPECT_EQ("", builder.GetErrorString());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(
      builder.GetErrorString(),
      ::testing::ContainsRegex("Unsupported builtin op: ADD, version: 1"));
}
// An unregistered custom op name fails only the resolver overload.
TEST(VerifyModel, UseUnsupportedCustomOps) {
  TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "Not supported");
  builder.FinishModel({}, {});
  ASSERT_TRUE(builder.Verify());
  EXPECT_EQ("", builder.GetErrorString());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "Unsupported custom op: Not supported, version: 1"));
}
// An empty custom op name is invalid regardless of registration.
TEST(VerifyModel, UseUnnamedCustomOps) {
  TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"NewOp"});
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output");
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "");
  builder.FinishModel({}, {});
  ASSERT_TRUE(builder.Verify());
  EXPECT_EQ("", builder.GetErrorString());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_THAT(builder.GetErrorString(),
              ::testing::ContainsRegex(
                  "Invalid custom op name, cannot be null/empty."));
}
// Tensor 1 has no data, is not a subgraph input, and nothing produces it,
// so using it as an op input must fail.
TEST(VerifyModel, UnpopulatedInputToOp) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({1, 2}, {3}, BuiltinOperator_CUSTOM, "test");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
  builder.AddTensor({2, 3}, TensorType_UINT8, {}, "invalid_input");
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
  builder.FinishModel({0, 2}, {3});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_EQ("Input tensor 1 to op 0 (CUSTOM) is not produced",
            builder.GetErrorString());
}
// Two ops writing the same output tensor is reported as a graph cycle.
TEST(VerifyModel, MultipleOpsOutputToSameTensor) {
  TfLiteFlatbufferModelBuilder builder({BuiltinOperator_ADD}, {"CustomOp"});
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input1");
  builder.AddTensor({2, 2}, TensorType_UINT8, {1, 2, 3, 4}, "input2");
  builder.AddTensor({2, 2}, TensorType_UINT8, {}, "output1");
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_ADD, nullptr);
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "CustomOp");
  builder.FinishModel({}, {});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_EQ(
      "Output tensor 2 to op 1 (CUSTOM) is an output from another op. "
      "There is a cycle in the graph",
      builder.GetErrorString());
}
// An op may not write into a buffer-backed (constant) tensor.
TEST(VerifyModel, OutputIsAConstantTensor) {
  TfLiteFlatbufferModelBuilder builder({}, {"test"});
  builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
  builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
  builder.AddTensor(
      {2}, TensorType_STRING,
      {2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
      "data");
  builder.AddTensor({2, 3}, TensorType_INT32, {1, 2, 3, 4, 5, 6}, "output");
  builder.FinishModel({0, 1}, {2});
  ASSERT_FALSE(builder.Verify());
  ASSERT_FALSE(builder.VerifyWithOpResolver());
  EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a constant",
            builder.GetErrorString());
}
TEST(VerifyModel, OutputIsSubgraphInput) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1, 2}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a subgraph input",
builder.GetErrorString());
}
TEST(VerifyModel, OutputIsAVariable) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({0, 1}, {2}, BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output", true);
builder.FinishModel({0, 1}, {2});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_EQ("Output tensor 2 to op 0 (CUSTOM) is a variable",
builder.GetErrorString());
}
TEST(VerifyModel, OpWithOptionalTensor) {
TfLiteFlatbufferModelBuilder builder({}, {"test"});
builder.AddOperator({kTfLiteOptionalTensor, 0, 1}, {2},
BuiltinOperator_CUSTOM, "test");
builder.AddTensor({2, 3}, TensorType_UINT8, {1, 2, 3, 4, 5, 6}, "input");
builder.AddTensor(
{2}, TensorType_STRING,
{2, 0, 0, 0, 16, 0, 0, 0, 17, 0, 0, 0, 19, 0, 0, 0, 'A', 'B', 'C'},
"data");
builder.AddTensor({2, 3}, TensorType_INT32, {}, "output");
builder.FinishModel({0, 1}, {2});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
EXPECT_EQ("", builder.GetErrorString());
}
TEST(VerifyModel, TypedTensorShapeMismatchWithTensorBufferSize) {
TfLiteFlatbufferModelBuilder builder;
for (int tensor_type = TensorType_MIN; tensor_type <= TensorType_MAX;
++tensor_type) {
if (tensor_type == TensorType_STRING) continue;
builder.AddTensor({2, 3}, static_cast<TensorType>(tensor_type),
{1, 2, 3, 4}, "input");
builder.FinishModel({}, {});
ASSERT_FALSE(builder.Verify());
ASSERT_FALSE(builder.VerifyWithOpResolver());
EXPECT_THAT(
builder.GetErrorString(),
::testing::ContainsRegex("Tensor input requires .* bytes, but is "
"allocated with 4 bytes buffer"));
}
}
TEST(VerifyModel, TypedTensorShapeMatchesTensorBufferSize) {
TfLiteFlatbufferModelBuilder builder;
for (int tensor_type = TensorType_MIN; tensor_type <= TensorType_MAX;
++tensor_type) {
if (tensor_type == TensorType_STRING ||
tensor_type == TensorType_RESOURCE || tensor_type == TensorType_VARIANT)
continue;
TfLiteType lite_type = kTfLiteNoType;
ASSERT_EQ(ConvertTensorType(static_cast<TensorType>(tensor_type),
&lite_type, nullptr),
kTfLiteOk);
size_t size_bytes = 0;
ASSERT_EQ(GetSizeOfType(nullptr, lite_type, &size_bytes),
kTfLiteOk);
std::vector<uint8_t> buffer(size_bytes);
builder.AddTensor({1}, static_cast<TensorType>(tensor_type), buffer,
"input");
builder.FinishModel({}, {});
ASSERT_TRUE(builder.Verify());
ASSERT_TRUE(builder.VerifyWithOpResolver());
}
}
TEST(VerifyModel, SimpleValidSparseTensor) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_TRUE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
}
TEST(VerifyModel, InvalidSparseTensorMissingBlockMap) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->block_map = {};
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, InvalidSparseTensorIndexOutOfBound) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->dim_metadata[1]->array_indices.AsUint8Vector()->values[1] =
5;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, InvalidSparseTensorInvalidBuffer) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
scoped_model->buffers[1]->data = {0, 1, 2, 3, 4, 5, 6, 7};
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex(
"requires 12 bytes, but is allocated with 8 bytes buffer"));
}
TEST(VerifyModel, InvalidSparseTensorInvalidTraversalOrder) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->traversal_order[0] = 10;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_FALSE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_FALSE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
EXPECT_THAT(mock_reporter.GetAsString(),
::testing::ContainsRegex("invalid sparsity parameters"));
}
TEST(VerifyModel, ValidSparseTensorBCSC) {
const auto model = FlatBufferModel::BuildFromFile(
tensorflow::GetDataDependencyFilepath(kSparseTensorTestModel).c_str());
ASSERT_TRUE(model);
std::unique_ptr<ModelT> scoped_model;
scoped_model.reset(model->GetModel()->UnPack());
auto* tensor = scoped_model->subgraphs[0]->tensors[0].get();
tensor->sparsity->traversal_order = {1, 0, 3, 2};
tensor->sparsity->block_map = {0, 1};
tensor->sparsity->dim_metadata[0]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[0]->dense_size = 2;
tensor->sparsity->dim_metadata[1]->format = DimensionType_SPARSE_CSR;
tensor->sparsity->dim_metadata[1]->array_segments.AsUint8Vector()->values = {
0, 1, 3};
tensor->sparsity->dim_metadata[1]->array_indices.AsUint8Vector()->values = {
0, 0, 1};
tensor->sparsity->dim_metadata[2]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[2]->dense_size = 2;
tensor->sparsity->dim_metadata[3]->format = DimensionType_DENSE;
tensor->sparsity->dim_metadata[3]->dense_size = 2;
flatbuffers::FlatBufferBuilder builder;
auto model_ = Model::Pack(builder, scoped_model.get());
::tflite::FinishModelBuffer(builder, model_);
MockErrorReporter mock_reporter;
MutableOpResolver resolver;
TfLiteRegistration fake_op{};
resolver.AddCustom("FakeOp", &fake_op);
ASSERT_TRUE(
Verify(builder.GetBufferPointer(), builder.GetSize(), &mock_reporter));
ASSERT_TRUE(Verify(builder.GetBufferPointer(), builder.GetSize(), resolver,
&mock_reporter));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/core/tools/verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
36714099-ba8a-4f52-bf2e-b3079fd2dd83 | cpp | tensorflow/tensorflow | strip_unused_nodes | tensorflow/tools/graph_transforms/strip_unused_nodes.cc | tensorflow/tools/graph_transforms/strip_unused_nodes_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
namespace {
// Determines the dtype for the Placeholder synthesized for `node_name`.
// Resolution order: DT_FLOAT default, then an optional single "type"
// parameter applying to all inputs, then per-node overrides given as
// parallel "name"/"type_for_name" lists (the last matching entry wins).
// Returns InvalidArgument for malformed parameters or unparseable types.
Status TypeForPlaceholder(const TransformFuncContext& context,
                          const string& node_name, DataType* result) {
  // Global default when nothing else is specified.
  *result = DT_FLOAT;
  // Optional blanket override: exactly one "type" value is allowed.
  if (context.params.count("type")) {
    const auto& type_params = context.params.at("type");
    if (type_params.size() != 1) {
      return errors::InvalidArgument(
          "You must pass no more than one default 'type' to "
          "strip_unused_nodes");
    }
    if (!DataTypeFromString(type_params[0], result)) {
      return errors::InvalidArgument("Couldn't understand type argument '",
                                     type_params[0], "'");
    }
  }
  // Per-node overrides: "name" and "type_for_name" must appear together and
  // have matching lengths.
  if (context.params.count("name") || context.params.count("type_for_name")) {
    if (!context.params.count("name") ||
        !context.params.count("type_for_name") ||
        (context.params.at("type_for_name").size() !=
         context.params.at("name").size())) {
      return errors::InvalidArgument(
          "You must pass a 'type_for_name' arg for every 'name', e.g. "
          "strip_unused_nodes(name=foo, type_for_name=float, name=bar, "
          "type_for_name=quint8");
    }
    const auto& names = context.params.at("name");
    const auto& types_for_names = context.params.at("type_for_name");
    for (size_t i = 0; i < names.size(); ++i) {
      if (names[i] != node_name) continue;
      if (!DataTypeFromString(types_for_names[i], result)) {
        return errors::InvalidArgument("Couldn't understand type argument '",
                                       types_for_names[i], "'");
      }
    }
  }
  return OkStatus();
}
// Determines the shape for the Placeholder synthesized for `node_name`.
// Resolution order mirrors TypeForPlaceholder: scalar default {}, then an
// optional single "shape" parameter applying to all inputs, then per-node
// overrides given as parallel "name"/"shape_for_name" lists (last match
// wins). Returns InvalidArgument on malformed parameters or shape strings.
Status ShapeForPlaceholder(const TransformFuncContext& context,
                           const string& node_name, TensorShape* result) {
  // Default: scalar shape.
  *result = {};
  // Optional blanket override: exactly one "shape" value is allowed.
  if (context.params.count("shape")) {
    if (context.params.at("shape").size() != 1) {
      return errors::InvalidArgument(
          "You must pass no more than one default 'shape' to "
          "strip_unused_nodes");
    }
    const string& shape_string = context.params.at("shape")[0];
    TF_RETURN_IF_ERROR(TensorShapeFromString(shape_string, result));
  }
  // Per-node overrides: "name" and "shape_for_name" must appear together
  // with equal lengths.
  if (context.params.count("name") || context.params.count("shape_for_name")) {
    if (!context.params.count("name") ||
        !context.params.count("shape_for_name") ||
        (context.params.at("shape_for_name").size() !=
         context.params.at("name").size())) {
      return errors::InvalidArgument(
          "You must pass a 'shape_for_name' arg for every 'name', e.g. "
          "strip_unused_nodes(name=foo, shape_for_name=\"2,2,1\", name=bar, "
          "shape_for_name=\"1\"");
    }
    const int name_count = context.params.at("name").size();
    for (int i = 0; i < name_count; ++i) {
      if (context.params.at("name")[i] == node_name) {
        const string& shape_string = context.params.at("shape_for_name")[i];
        TF_RETURN_IF_ERROR(TensorShapeFromString(shape_string, result));
      }
    }
  }
  return OkStatus();
}
}
// Removes every node not reachable (via data inputs) from the requested
// outputs, stopping the backward walk at the requested inputs, and replaces
// each requested input node with a Placeholder whose dtype/shape come from
// the transform parameters (see TypeForPlaceholder / ShapeForPlaceholder).
// Returns InvalidArgument if a referenced node is missing from the graph.
Status StripUnusedNodes(const GraphDef& input_graph_def,
                        const TransformFuncContext& context,
                        GraphDef* output_graph_def) {
  // Nodes that must survive the strip, and the subset that are graph inputs
  // (inputs terminate the backward reachability walk below).
  std::set<string> required_nodes;
  std::set<string> input_nodes;
  for (const string& input : context.input_names) {
    required_nodes.insert(NodeNameFromInput(input));
    input_nodes.insert(NodeNameFromInput(input));
  }
  for (const string& output : context.output_names) {
    required_nodes.insert(output);
  }
  std::map<string, const NodeDef*> node_lookup;
  MapNamesToNodes(input_graph_def, &node_lookup);
  // Breadth-first backward walk: `current_inputs` is the frontier, seeded
  // with the output nodes.
  std::vector<string> current_inputs;
  current_inputs.reserve(context.output_names.size());
  for (const string& output_name : context.output_names) {
    current_inputs.push_back(NodeNameFromInput(output_name));
  }
  while (!current_inputs.empty()) {
    std::set<string> next_inputs;
    for (const string& current_input : current_inputs) {
      required_nodes.insert(current_input);
      // Don't walk past a requested input: everything behind it is stripped.
      if (input_nodes.count(current_input)) {
        continue;
      }
      if (!node_lookup.count(current_input)) {
        return errors::InvalidArgument("Input node ", current_input,
                                       " not found in graph");
      }
      const NodeDef* current_node = node_lookup[current_input];
      for (const string& input_name : current_node->input()) {
        string input_node_name = NodeNameFromInput(input_name);
        // Only enqueue nodes not already marked required (avoids revisits).
        if (!required_nodes.count(input_node_name)) {
          next_inputs.insert(input_node_name);
        }
      }
    }
    current_inputs =
        std::vector<string>(next_inputs.begin(), next_inputs.end());
  }
  // Keep only the reachable nodes.
  GraphDef filtered_graph_def;
  FilterGraphDef(input_graph_def,
                 [&](const NodeDef& node) {
                   return required_nodes.count(node.name()) > 0;
                 },
                 &filtered_graph_def);
  output_graph_def->Clear();
  for (const NodeDef& node : filtered_graph_def.node()) {
    if (input_nodes.count(node.name())) {
      // Requested inputs become Placeholders; nodes that already are
      // Placeholders are kept verbatim (their attrs are already correct).
      NodeDef placeholder_node;
      if (node.op() == "Placeholder") {
        placeholder_node = node;
      } else {
        placeholder_node.set_op("Placeholder");
        placeholder_node.set_name(node.name());
        DataType type;
        TF_RETURN_IF_ERROR(TypeForPlaceholder(context, node.name(), &type));
        TensorShape shape;
        TF_RETURN_IF_ERROR(ShapeForPlaceholder(context, node.name(), &shape));
        SetNodeAttr("dtype", type, &placeholder_node);
        SetNodeAttr("shape", shape, &placeholder_node);
      }
      *(output_graph_def->mutable_node()->Add()) = placeholder_node;
    } else {
      *(output_graph_def->mutable_node()->Add()) = node;
    }
  }
  return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("strip_unused_nodes", StripUnusedNodes);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status StripUnusedNodes(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class StripUnusedNodesTest : public ::testing::Test {
protected:
void TestSimpleAdd() {
GraphDef graph_def;
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("a_node");
add_node->add_input("b_node");
NodeDef* a_node = graph_def.add_node();
a_node->set_name("a_node");
a_node->set_op("Const");
NodeDef* b_node = graph_def.add_node();
b_node->set_name("b_node");
b_node->set_op("Const");
NodeDef* c_node = graph_def.add_node();
c_node->set_name("c_node");
c_node->set_op("Const");
GraphDef result;
TF_ASSERT_OK(StripUnusedNodes(graph_def, {{}, {"add_node"}}, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ(1, node_lookup.count("a_node"));
EXPECT_EQ(1, node_lookup.count("b_node"));
EXPECT_EQ(0, node_lookup.count("c_node"));
}
void TestCommonAncestor() {
GraphDef graph_def;
NodeDef* add_node1 = graph_def.add_node();
add_node1->set_name("add_node1");
add_node1->set_op("Add");
add_node1->add_input("add_node2");
add_node1->add_input("add_node3");
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node1");
add_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* dangling_input = graph_def.add_node();
dangling_input->set_name("dangling_input");
dangling_input->set_op("Const");
NodeDef* add_node4 = graph_def.add_node();
add_node4->set_name("add_node4");
add_node4->set_op("Add");
add_node4->add_input("add_node2");
add_node4->add_input("add_node3");
GraphDef result;
TF_ASSERT_OK(StripUnusedNodes(
graph_def, {{"dangling_input"}, {"add_node1"}}, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node1"));
EXPECT_EQ(1, node_lookup.count("add_node2"));
EXPECT_EQ(1, node_lookup.count("add_node3"));
EXPECT_EQ(0, node_lookup.count("add_node4"));
EXPECT_EQ(1, node_lookup.count("const_node1"));
EXPECT_EQ(1, node_lookup.count("const_node2"));
EXPECT_EQ(1, node_lookup.count("const_node3"));
EXPECT_EQ(0, node_lookup.count("const_node4"));
EXPECT_EQ(1, node_lookup.count("dangling_input"));
}
void TestSimplePlaceholder() {
GraphDef graph_def;
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("mul_node");
add_node->add_input("a_node");
NodeDef* mul_node = graph_def.add_node();
mul_node->set_name("mul_node");
mul_node->set_op("Mul");
mul_node->add_input("b_node");
mul_node->add_input("c_node");
NodeDef* a_node = graph_def.add_node();
a_node->set_name("a_node");
a_node->set_op("Const");
NodeDef* b_node = graph_def.add_node();
b_node->set_name("b_node");
b_node->set_op("Const");
NodeDef* c_node = graph_def.add_node();
c_node->set_name("c_node");
c_node->set_op("Const");
GraphDef result;
TF_ASSERT_OK(
StripUnusedNodes(graph_def, {{"mul_node"}, {"add_node"}}, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ(1, node_lookup.count("mul_node"));
EXPECT_EQ("Placeholder", node_lookup["mul_node"]->op());
EXPECT_EQ(DT_FLOAT, node_lookup["mul_node"]->attr().at("dtype").type());
EXPECT_EQ(TensorShape({}),
TensorShape(node_lookup["mul_node"]->attr().at("shape").shape()));
EXPECT_EQ(1, node_lookup.count("a_node"));
EXPECT_EQ(0, node_lookup.count("b_node"));
EXPECT_EQ(0, node_lookup.count("c_node"));
}
void TestPlaceholderDefaultArgs() {
GraphDef graph_def;
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("mul_node");
add_node->add_input("a_node");
NodeDef* mul_node = graph_def.add_node();
mul_node->set_name("mul_node");
mul_node->set_op("Mul");
mul_node->add_input("b_node");
mul_node->add_input("c_node");
NodeDef* a_node = graph_def.add_node();
a_node->set_name("a_node");
a_node->set_op("Const");
NodeDef* b_node = graph_def.add_node();
b_node->set_name("b_node");
b_node->set_op("Const");
NodeDef* c_node = graph_def.add_node();
c_node->set_name("c_node");
c_node->set_op("Const");
GraphDef result;
TF_ASSERT_OK(StripUnusedNodes(graph_def,
{{"mul_node"},
{"add_node"},
{{"type", {"int32"}}, {"shape", {"1,2,3"}}}},
&result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ(1, node_lookup.count("mul_node"));
EXPECT_EQ("Placeholder", node_lookup["mul_node"]->op());
EXPECT_EQ(DT_INT32, node_lookup["mul_node"]->attr().at("dtype").type());
EXPECT_EQ(TensorShape({1, 2, 3}),
TensorShape(node_lookup["mul_node"]->attr().at("shape").shape()));
EXPECT_EQ(1, node_lookup.count("a_node"));
EXPECT_EQ(0, node_lookup.count("b_node"));
EXPECT_EQ(0, node_lookup.count("c_node"));
}
void TestPlaceholderNamedArgs() {
GraphDef graph_def;
NodeDef* add_node = graph_def.add_node();
add_node->set_name("add_node");
add_node->set_op("Add");
add_node->add_input("mul_node");
add_node->add_input("a_node");
NodeDef* mul_node = graph_def.add_node();
mul_node->set_name("mul_node");
mul_node->set_op("Mul");
mul_node->add_input("b_node");
mul_node->add_input("c_node");
NodeDef* a_node = graph_def.add_node();
a_node->set_name("a_node");
a_node->set_op("Const");
NodeDef* b_node = graph_def.add_node();
b_node->set_name("b_node");
b_node->set_op("Const");
NodeDef* c_node = graph_def.add_node();
c_node->set_name("c_node");
c_node->set_op("Const");
GraphDef result;
TF_ASSERT_OK(StripUnusedNodes(graph_def,
{{"mul_node", "a_node"},
{"add_node"},
{{"name", {"a_node", "mul_node"}},
{"type_for_name", {"int64", "quint8"}},
{"shape_for_name", {"1,2", "1, 2, 3"}}}},
&result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("add_node"));
EXPECT_EQ(1, node_lookup.count("mul_node"));
EXPECT_EQ("Placeholder", node_lookup["mul_node"]->op());
EXPECT_EQ(DT_QUINT8, node_lookup["mul_node"]->attr().at("dtype").type());
EXPECT_EQ(TensorShape({1, 2, 3}),
TensorShape(node_lookup["mul_node"]->attr().at("shape").shape()));
EXPECT_EQ(1, node_lookup.count("a_node"));
EXPECT_EQ("Placeholder", node_lookup["a_node"]->op());
EXPECT_EQ(DT_INT64, node_lookup["a_node"]->attr().at("dtype").type());
EXPECT_EQ(TensorShape({1, 2}),
TensorShape(node_lookup["a_node"]->attr().at("shape").shape()));
EXPECT_EQ(0, node_lookup.count("b_node"));
EXPECT_EQ(0, node_lookup.count("c_node"));
}
};
// Each TEST_F dispatches to the corresponding fixture helper above, giving
// every scenario its own name in the gtest output.
TEST_F(StripUnusedNodesTest, TestSimpleAdd) { TestSimpleAdd(); }
TEST_F(StripUnusedNodesTest, TestCommonAncestor) { TestCommonAncestor(); }
TEST_F(StripUnusedNodesTest, TestSimplePlaceholder) { TestSimplePlaceholder(); }
TEST_F(StripUnusedNodesTest, TestPlaceholderDefaultArgs) {
  TestPlaceholderDefaultArgs();
}
TEST_F(StripUnusedNodesTest, TestPlaceholderNamedArgs) {
  TestPlaceholderNamedArgs();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/strip_unused_nodes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/strip_unused_nodes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fb09fbe9-6296-4eab-b4c4-62ab42ce86f5 | cpp | tensorflow/tensorflow | reduce_decomposer | third_party/xla/xla/service/reduce_decomposer.cc | third_party/xla/xla/service/reduce_decomposer_test.cc | #include "xla/service/reduce_decomposer.h"
#include <functional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/hlo_creation_utils.h"
namespace xla {
namespace {
// Rewrites variadic reductions so that every input operand has the same
// layout as the first input, inserting copy instructions where layouts
// differ and rebuilding the reduce over the copied operands. Reduces whose
// inputs already agree are left untouched.
class VariadicReductionLayoutEqualizer : public DfsHloRewriteVisitor {
 public:
  absl::Status HandleReduce(HloInstruction* hlo) override {
    auto reduce = Cast<HloReduceInstruction>(hlo);
    std::vector<HloInstruction*> new_inputs;
    bool changed = false;
    for (HloInstruction* input : reduce->inputs()) {
      // NOTE(review): `first_input` and the Shape values below are
      // loop-invariant copies; hoisting/refs would avoid rework, but the
      // logic is unchanged either way.
      auto first_input = reduce->inputs()[0];
      auto first_input_s = first_input->shape();
      auto input_s = input->shape();
      if (first_input_s.layout() != input_s.layout()) {
        // Re-layout this operand to match the first input's minor-to-major
        // order via an explicit copy.
        Shape new_input_s = ShapeUtil::MakeShapeWithDenseLayout(
            input_s.element_type(), input_s.dimensions(),
            first_input_s.layout().minor_to_major());
        auto copy = MakeCopyHlo(input, new_input_s);
        changed = true;
        new_inputs.push_back(copy);
      } else {
        new_inputs.push_back(input);
      }
    }
    // Only rebuild the reduce when at least one operand was copied.
    if (changed) {
      TF_ASSIGN_OR_RETURN(
          auto new_reduce,
          MakeReduceHlo(new_inputs, reduce->init_values(), reduce->dimensions(),
                        reduce->called_computations()[0]));
      TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, new_reduce))

;
    }
    return absl::OkStatus();
  }
};
// Canonicalizes reduces whose output shape/layout differs from the shape a
// plain reduction of the operand would produce: it emits a reduce producing
// the "expected" shape (operand shape with reduced dims deleted) followed by
// copies into the originally-requested shapes. Reduces accepted by the
// `custom_layout_allowed_` predicate (when set) are skipped.
class ReduceDecomposerVisitor : public DfsHloRewriteVisitor {
 public:
  explicit ReduceDecomposerVisitor(HloPredicate custom_layout_allowed)
      : custom_layout_allowed_(std::move(custom_layout_allowed)) {}
  absl::Status HandleReduce(HloInstruction* hlo) override {
    auto reduce = Cast<HloReduceInstruction>(hlo);
    auto shape = reduce->shape();
    // Backends may opt out per-instruction via the predicate.
    if (custom_layout_allowed_ && custom_layout_allowed_(reduce)) {
      return absl::OkStatus();
    }
    // Compute, per input, the shape a straightforward reduction would have.
    // All inputs are required to share a layout (the layout-equalizer pass
    // runs first and guarantees this).
    std::vector<Shape> expected_shapes(reduce->input_count());
    for (int i = 0; i < reduce->input_count(); i++) {
      expected_shapes[i] = ExpectedOutputShape(reduce, i);
      TF_RET_CHECK(reduce->inputs()[i]->shape().layout() ==
                   reduce->inputs()[0]->shape().layout());
    }
    // Flatten the (possibly tuple) result shape into a per-output list; all
    // outputs must share a layout as well.
    std::vector<Shape> output_shapes;
    if (shape.IsTuple()) {
      for (int i = 0; i < shape.tuple_shapes_size(); i++) {
        output_shapes.push_back(ShapeUtil::GetTupleElementShape(shape, i));
        TF_RET_CHECK(output_shapes[i].layout() == output_shapes[0].layout());
      }
    } else {
      output_shapes.push_back(shape);
    }
    TF_RET_CHECK(!output_shapes.empty());
    // Decompose only if the requested result differs from the expected one.
    if (ShapeUtil::MakeMaybeTupleShape(expected_shapes) !=
        ShapeUtil::MakeMaybeTupleShape(output_shapes)) {
      // r' = the canonical reduce with the expected output shape(s).
      TF_ASSIGN_OR_RETURN(auto r_prime,
                          MakeReduceHlo(reduce->inputs(), reduce->init_values(),
                                        reduce->dimensions(),
                                        reduce->called_computations()[0]));
      TF_RET_CHECK(r_prime->shape() ==
                   ShapeUtil::MakeMaybeTupleShape(expected_shapes));
      if (!shape.IsTuple()) {
        auto copy = MakeCopyHlo(r_prime, shape);
        TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, copy));
        return absl::OkStatus();
      }
      // Tuple case: copy each element into its requested shape and re-tuple.
      std::vector<HloInstruction*> copies;
      for (int i = 0; i < reduce->input_count(); i++) {
        TF_ASSIGN_OR_RETURN(auto from, GetOutput(r_prime, i));
        auto copy = MakeCopyHlo(from, output_shapes[i]);
        copies.push_back(copy);
      }
      auto out = MaybeMakeTuple(copies);
      TF_RETURN_IF_ERROR(ReplaceInstruction(reduce, out));
    }
    return absl::OkStatus();
  }
 private:
  // Returns output `idx` of `instr`: a GTE for tuple-shaped instructions,
  // the instruction itself otherwise (idx must then be 0).
  absl::StatusOr<HloInstruction*> GetOutput(HloInstruction* instr, int idx) {
    if (instr->shape().IsTuple()) {
      return MakeGetTupleElementHlo(instr, idx);
    } else {
      TF_RET_CHECK(idx == 0);
      return instr;
    }
  }
  // The shape a plain reduction of input `input_idx` would produce: the
  // operand's shape with the reduced dimensions deleted.
  Shape ExpectedOutputShape(HloReduceInstruction* reduce, int input_idx) {
    Shape reduce_shape = reduce->shape();
    auto output_shape = reduce_shape.IsTuple()
                            ? reduce_shape.tuple_shapes(input_idx)
                            : reduce_shape;
    auto* operand = reduce->inputs()[input_idx];
    auto operand_shape = operand->shape();
    return ShapeUtil::DeleteDimensions(reduce->dimensions(), operand_shape);
  }
  // Optional escape hatch: reduces matching this predicate are not rewritten.
  HloPredicate custom_layout_allowed_;
};
}
// Runs the two sub-passes in order: first equalize the layouts of variadic
// reduce inputs, then decompose reduces with non-canonical output layouts
// into reduce + copy. Both passes always run (no short-circuiting), since
// the second depends on the invariant established by the first. Returns
// true iff either pass changed the module.
absl::StatusOr<bool> ReduceDecomposer::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  VariadicReductionLayoutEqualizer equalizer;
  TF_ASSIGN_OR_RETURN(bool layouts_equalized,
                      equalizer.RunOnModule(module, execution_threads));
  ReduceDecomposerVisitor decomposer{custom_layout_allowed_};
  TF_ASSIGN_OR_RETURN(bool reduces_decomposed,
                      decomposer.RunOnModule(module, execution_threads));
  return layouts_equalized || reduces_decomposed;
}
} | #include "xla/service/reduce_decomposer.h"
#include <functional>
#include <memory>
#include <optional>
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
class ReduceDecomposerTest : public HloTestBase {};
TEST_F(ReduceDecomposerTest, ReducePerformsTransposition) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = add(a, b)
}
ENTRY c {
p = f32[5,3,4]{2,1,0} parameter(0)
z = f32[] constant(0)
ROOT r = f32[5,4]{0,1} reduce(p, z), dimensions={1}, to_apply=add
}
)";
RunAndFilecheckHloRewrite(
hlo,
ReduceDecomposer{[&](const HloInstruction*) {
return true;
}},
std::nullopt);
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},
R"(
)");
}
TEST_F(ReduceDecomposerTest, ReduceNaturalLayout) {
const char* hlo = R"(
HloModule module
add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT out = add(a, b)
}
ENTRY c {
p = f32[5,3,4]{2,1,0} parameter(0)
z = f32[] constant(0)
ROOT r = reduce(p, z), dimensions={1}, to_apply=add
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}
TEST_F(ReduceDecomposerTest, VariadicReductionWithTranspose) {
const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
running_max = f32[] parameter(0)
running_max_idx = u32[] parameter(1)
current_value = f32[] parameter(2)
current_value_idx = u32[] parameter(3)
current = (f32[], u32[]) tuple(running_max, running_max_idx)
potential = (f32[], u32[]) tuple(current_value, current_value_idx)
cmp_code = pred[] compare(current_value, running_max), direction=GT
new_max = f32[] select(cmp_code, current_value, running_max)
new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)
constant0 = f32[] constant(0)
constant1 = u32[] constant(0)
ROOT reduce0 = (
f32[2,3,4]{0,1,2},
u32[2,3,4]{0,1,2}
) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{},
R"(
)");
}
// Same variadic reduce, but the tuple elements already use the descending
// layout {2,1,0}; std::nullopt asserts the pass reports no change.
TEST_F(ReduceDecomposerTest, VariadicReductionDescendingLayout) {
  const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
  running_max = f32[] parameter(0)
  running_max_idx = u32[] parameter(1)
  current_value = f32[] parameter(2)
  current_value_idx = u32[] parameter(3)
  current = (f32[], u32[]) tuple(running_max, running_max_idx)
  potential = (f32[], u32[]) tuple(current_value, current_value_idx)
  cmp_code = pred[] compare(current_value, running_max), direction=GT
  new_max = f32[] select(cmp_code, current_value, running_max)
  new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
  ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
  arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
  idxs = u32[2,3,4,1024]{3,2,1,0} parameter(1)
  constant0 = f32[] constant(0)
  constant1 = u32[] constant(0)
  ROOT reduce0 = (
    f32[2,3,4]{2,1,0},
    u32[2,3,4]{2,1,0}
  ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, std::nullopt);
}
// Variadic reduce whose two operands disagree on layout ({3,2,1,0} vs
// {2,1,3,0}); the pass must canonicalize the inputs, verified via FileCheck.
TEST_F(ReduceDecomposerTest, VariadicReductionInputsDifferentLayout) {
  const char* hlo = R"(
HloModule ReduceWithLayoutChangeVariadicDifferent
argmax {
  running_max = f32[] parameter(0)
  running_max_idx = u32[] parameter(1)
  current_value = f32[] parameter(2)
  current_value_idx = u32[] parameter(3)
  current = (f32[], u32[]) tuple(running_max, running_max_idx)
  potential = (f32[], u32[]) tuple(current_value, current_value_idx)
  cmp_code = pred[] compare(current_value, running_max), direction=GT
  new_max = f32[] select(cmp_code, current_value, running_max)
  new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx)
  ROOT out = (f32[], u32[]) tuple(new_max, new_idx)
}
ENTRY main {
  arg0 = f32[2,3,4,1024]{3,2,1,0} parameter(0)
  idxs = u32[2,3,4,1024]{2,1,3,0} parameter(1)
  constant0 = f32[] constant(0)
  constant1 = u32[] constant(0)
  ROOT reduce0 = (
    f32[2,3,4]{2,1,0},
    u32[2,3,4]{2,1,0}
  ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax
}
)";
  RunAndFilecheckHloRewrite(hlo, ReduceDecomposer{}, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7015e367-82f3-4d91-a11d-8439c8d9ae65 | cpp | tensorflow/tensorflow | op_level_cost_estimator | tensorflow/core/grappler/costs/op_level_cost_estimator.cc | tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc | #include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/costs/cost_estimator.h"
#include "tensorflow/core/grappler/costs/op_context.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
#include "tensorflow/core/util/overflow.h"
#include "tensorflow/core/util/padding.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace grappler {
// A multiply-accumulate is counted as two floating-point operations.
constexpr int kOpsPerMac = 2;
// TensorFlow op type strings recognized by the estimator's dispatch table.
constexpr char kGuaranteeConst[] = "GuaranteeConst";
constexpr char kAddN[] = "AddN";
constexpr char kBitCast[] = "BitCast";
constexpr char kConcatV2[] = "ConcatV2";
constexpr char kConv2d[] = "Conv2D";
constexpr char kConv2dBackpropFilter[] = "Conv2DBackpropFilter";
constexpr char kConv2dBackpropInput[] = "Conv2DBackpropInput";
constexpr char kFusedConv2dBiasActivation[] = "FusedConv2DBiasActivation";
constexpr char kDataFormatVecPermute[] = "DataFormatVecPermute";
constexpr char kDepthToSpace[] = "DepthToSpace";
constexpr char kDepthwiseConv2dNative[] = "DepthwiseConv2dNative";
constexpr char kDepthwiseConv2dNativeBackpropFilter[] =
    "DepthwiseConv2dNativeBackpropFilter";
constexpr char kDepthwiseConv2dNativeBackpropInput[] =
    "DepthwiseConv2dNativeBackpropInput";
constexpr char kMatMul[] = "MatMul";
constexpr char kXlaEinsum[] = "XlaEinsum";
constexpr char kEinsum[] = "Einsum";
constexpr char kExpandDims[] = "ExpandDims";
constexpr char kFill[] = "Fill";
constexpr char kSparseMatMul[] = "SparseMatMul";
constexpr char kSparseTensorDenseMatMul[] = "SparseTensorDenseMatMul";
constexpr char kPlaceholder[] = "Placeholder";
constexpr char kIdentity[] = "Identity";
constexpr char kIdentityN[] = "IdentityN";
constexpr char kRefIdentity[] = "RefIdentity";
constexpr char kNoOp[] = "NoOp";
constexpr char kReshape[] = "Reshape";
constexpr char kSplit[] = "Split";
constexpr char kSqueeze[] = "Squeeze";
constexpr char kRecv[] = "_Recv";
constexpr char kSend[] = "_Send";
constexpr char kBatchMatMul[] = "BatchMatMul";
constexpr char kBatchMatMulV2[] = "BatchMatMulV2";
constexpr char kOneHot[] = "OneHot";
constexpr char kPack[] = "Pack";
constexpr char kRank[] = "Rank";
constexpr char kRange[] = "Range";
constexpr char kShape[] = "Shape";
constexpr char kShapeN[] = "ShapeN";
constexpr char kSize[] = "Size";
constexpr char kStopGradient[] = "StopGradient";
constexpr char kPreventGradient[] = "PreventGradient";
constexpr char kGather[] = "Gather";
constexpr char kGatherNd[] = "GatherNd";
constexpr char kGatherV2[] = "GatherV2";
constexpr char kScatterAdd[] = "ScatterAdd";
constexpr char kScatterDiv[] = "ScatterDiv";
constexpr char kScatterMax[] = "ScatterMax";
constexpr char kScatterMin[] = "ScatterMin";
constexpr char kScatterMul[] = "ScatterMul";
constexpr char kScatterSub[] = "ScatterSub";
constexpr char kScatterUpdate[] = "ScatterUpdate";
constexpr char kSlice[] = "Slice";
constexpr char kStridedSlice[] = "StridedSlice";
constexpr char kSpaceToDepth[] = "SpaceToDepth";
constexpr char kTranspose[] = "Transpose";
constexpr char kTile[] = "Tile";
constexpr char kMaxPool[] = "MaxPool";
constexpr char kMaxPoolGrad[] = "MaxPoolGrad";
constexpr char kAvgPool[] = "AvgPool";
constexpr char kAvgPoolGrad[] = "AvgPoolGrad";
constexpr char kFusedBatchNorm[] = "FusedBatchNorm";
constexpr char kFusedBatchNormGrad[] = "FusedBatchNormGrad";
constexpr char kQuantizedMatMul[] = "QuantizedMatMul";
constexpr char kQuantizedMatMulV2[] = "QuantizedMatMulV2";
constexpr char kUnpack[] = "Unpack";
constexpr char kSoftmax[] = "Softmax";
constexpr char kResizeBilinear[] = "ResizeBilinear";
constexpr char kCropAndResize[] = "CropAndResize";
// Control-flow ops (treated as identity for cost purposes; see ctor).
constexpr char kSwitch[] = "Switch";
constexpr char kMerge[] = "Merge";
constexpr char kEnter[] = "Enter";
constexpr char kExit[] = "Exit";
constexpr char kNextIteration[] = "NextIteration";
// Ops whose outputs are persistent (constants and variables); see
// persistent_ops_ in the constructor.
constexpr char kConst[] = "Const";
constexpr char kVariable[] = "Variable";
constexpr char kVariableV2[] = "VariableV2";
constexpr char kAutoReloadVariable[] = "AutoReloadVariable";
constexpr char kVarHandleOp[] = "VarHandleOp";
constexpr char kVarHandlesOp[] = "_VarHandlesOp";
constexpr char kReadVariableOp[] = "ReadVariableOp";
constexpr char kReadVariablesOp[] = "_ReadVariablesOp";
constexpr char kAssignVariableOp[] = "AssignVariableOp";
constexpr char kAssignAddVariableOp[] = "AssignAddVariableOp";
constexpr char kAssignSubVariableOp[] = "AssignSubVariableOp";
// Lower bounds charged to any op, however trivial.
static const Costs::Duration kMinComputeTime(1);
static const int64_t kMinComputeOp = 1;
namespace {
// Returns the op's "data_format" attribute, defaulting to "NHWC" when absent.
std::string GetDataFormat(const OpInfo& op_info) {
  const auto& attrs = op_info.attr();
  auto it = attrs.find("data_format");
  if (it == attrs.end()) return "NHWC";
  return it->second.s();
}
// Returns the op's "filter_format" attribute, defaulting to "HWIO" when
// absent.
std::string GetFilterFormat(const OpInfo& op_info) {
  const auto& attrs = op_info.attr();
  auto it = attrs.find("filter_format");
  if (it == attrs.end()) return "HWIO";
  return it->second.s();
}
// Maps the op's "padding" attribute to the Padding enum; anything other than
// an explicit "VALID" (including a missing attribute) is treated as SAME.
Padding GetPadding(const OpInfo& op_info) {
  const auto& attrs = op_info.attr();
  auto it = attrs.find("padding");
  if (it != attrs.end() && it->second.s() == "VALID") {
    return Padding::VALID;
  }
  return Padding::SAME;
}
// Returns true iff the op carries an "is_training" attribute set to true.
bool IsTraining(const OpInfo& op_info) {
  const auto& attrs = op_info.attr();
  auto it = attrs.find("is_training");
  return it != attrs.end() && it->second.b();
}
// Returns the op's 4-element "strides" attribute, or all-ones when the
// attribute is missing or (after DCHECK-ing) not length 4.
std::vector<int64_t> GetStrides(const OpInfo& op_info) {
  auto it = op_info.attr().find("strides");
  if (it == op_info.attr().end()) return {1, 1, 1, 1};
  const auto& strides = it->second.list().i();
  DCHECK(strides.size() == 4)
      << "Attr strides is not a length-4 vector: " << op_info.DebugString();
  if (strides.size() != 4) return {1, 1, 1, 1};
  return {strides[0], strides[1], strides[2], strides[3]};
}
// Returns the op's 4-element "ksize" (pooling window) attribute, or all-ones
// when the attribute is missing or (after DCHECK-ing) not length 4.
std::vector<int64_t> GetKernelSize(const OpInfo& op_info) {
  auto it = op_info.attr().find("ksize");
  if (it == op_info.attr().end()) return {1, 1, 1, 1};
  const auto& ksize = it->second.list().i();
  DCHECK(ksize.size() == 4)
      << "Attr ksize is not a length-4 vector: " << op_info.DebugString();
  if (ksize.size() != 4) return {1, 1, 1, 1};
  return {ksize[0], ksize[1], ksize[2], ksize[3]};
}
// Computes the spatial output size of a convolution/pooling dimension:
// ceil((input - filter + 1) / stride) for VALID padding,
// ceil(input / stride) for SAME padding (both via integer arithmetic).
int64_t GetOutputSize(const int64_t input, const int64_t filter,
                      const int64_t stride, const Padding& padding) {
  const int64_t numerator = (padding == Padding::VALID)
                                ? input - filter + stride
                                : input + stride - 1;
  return numerator / stride;
}
// Returns the element count of the broadcast output of an elementwise op:
// input shapes are aligned on their trailing dimensions and each output
// dimension takes the maximum size, NumPy-broadcasting style.
int64_t CwiseOutputElementCount(const OpInfo& op_info) {
  int max_rank = 1;
  for (const OpInfo::TensorProperties& input_properties : op_info.inputs()) {
    max_rank = std::max(max_rank, input_properties.shape().dim_size());
  }
  // Start from a rank-`max_rank` shape whose dimensions default to size 0;
  // every real input dimension below overrides them via std::max.
  TensorShapeProto output_shape;
  output_shape.mutable_dim()->Reserve(max_rank);
  for (int i = 0; i < max_rank; ++i) {
    output_shape.add_dim();
  }
  for (const OpInfo::TensorProperties& input_properties : op_info.inputs()) {
    const TensorShapeProto& input_shape = input_properties.shape();
    // Walk dimensions right-to-left so shapes align on trailing axes.
    for (int i = input_shape.dim_size() - 1; i >= 0; --i) {
      int output_shape_dim_index =
          i + output_shape.dim_size() - input_shape.dim_size();
      output_shape.mutable_dim(output_shape_dim_index)
          ->set_size(std::max(output_shape.dim(output_shape_dim_index).size(),
                              input_shape.dim(i).size()));
    }
  }
  int64_t count = 1;
  for (int i = 0; i < output_shape.dim_size(); i++) {
    count *= output_shape.dim(i).size();
  }
  return count;
}
// Returns true iff any subscript character occurs more than once in
// `dim_str`. The previous implementation re-scanned the remainder of the
// string for every character (O(n^2)) and compared a string_view::find result
// against std::string::npos; this version does a single O(n) pass over a
// 256-entry seen table, with an identical boolean result.
bool CheckRepeatedDimensions(const absl::string_view dim_str) {
  bool seen[256] = {};
  for (const char c : dim_str) {
    const unsigned char idx = static_cast<unsigned char>(c);
    if (seen[idx]) return true;
    seen[idx] = true;
  }
  return false;
}
bool IsEinsumCorrectlyFormed(const OpContext& einsum_context) {
const auto& op_info = einsum_context.op_info;
auto it = op_info.attr().find("equation");
if (it == op_info.attr().end()) return false;
const absl::string_view equation = it->second.s();
std::vector<std::string> equation_split = absl::StrSplit(equation, "->");
if (equation_split.empty()) {
LOG(WARNING) << "Einsum with malformed equation";
return false;
}
std::vector<absl::string_view> input_split =
absl::StrSplit(equation_split[0], ',');
if (op_info.inputs_size() != 2 || equation_split.size() != 2) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
return false;
}
const auto& a_input = op_info.inputs(0);
const auto& b_input = op_info.inputs(1);
absl::string_view rhs_str = equation_split[1];
absl::string_view a_input_str = input_split[0];
absl::string_view b_input_str = input_split[1];
if (absl::StrContains(a_input_str, "...") ||
absl::StrContains(b_input_str, "...")) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", ellipsis not supported";
return false;
}
constexpr int kMatrixRank = 2;
bool a_input_shape_unknown = false;
bool b_input_shape_unknown = false;
std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
a_input.shape(), std::max(kMatrixRank, a_input.shape().dim_size()),
&a_input_shape_unknown);
std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
b_input.shape(), std::max(kMatrixRank, b_input.shape().dim_size()),
&b_input_shape_unknown);
if (a_input_str.size() != a_input_shape.size() ||
b_input_str.size() != b_input_shape.size()) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", equation subscripts don't match tensor rank.";
return false;
}
if (CheckRepeatedDimensions(a_input_str) ||
CheckRepeatedDimensions(b_input_str) ||
CheckRepeatedDimensions(rhs_str)) {
VLOG(1) << "Missing accurate estimator for op: " << op_info.op()
<< ", Subscripts where axis appears more than once for a single "
"input are not yet supported";
return false;
}
return true;
}
}
// Returns a rank-`rank` shape derived from `original_shape`, substituting 1
// for missing or unknown (negative-sized) dimensions. *found_unknown_shapes
// is OR-ed with true when the original rank is unknown, differs from `rank`,
// or any dimension size is unknown; it is never reset to false.
std::vector<int64_t> MaybeGetMinimumShape(
    const TensorShapeProto& original_shape, int rank,
    bool* found_unknown_shapes) {
  std::vector<int64_t> minimal_shape(rank, 1L);
  if (original_shape.dim_size() == 0) {
    // No dimensions recorded: either a scalar or an unknown-rank shape.
    *found_unknown_shapes |= original_shape.unknown_rank();
    return minimal_shape;
  }
  *found_unknown_shapes |= original_shape.dim_size() != rank;
  for (int i = 0; i < std::min(rank, original_shape.dim_size()); ++i) {
    if (original_shape.dim(i).size() < 0) {
      // Negative size marks an unknown dimension; keep the minimal 1.
      *found_unknown_shapes = true;
    } else {
      minimal_shape[i] = original_shape.dim(i).size();
    }
  }
  *found_unknown_shapes |= original_shape.unknown_rank();
  return minimal_shape;
}
// Builds the three dispatch structures used by PredictNodeCosts():
//  - device_cost_impl_: op name -> dedicated estimator member function,
//  - persistent_ops_:   ops whose outputs are persistent (constants/vars),
//  - elementwise_ops_:  op name -> per-element cost, mostly derived from
//    Eigen functor traits via the EIGEN_COST macro below.
OpLevelCostEstimator::OpLevelCostEstimator() {
  typedef absl::Status (OpLevelCostEstimator::*CostImpl)(
      const OpContext& op_context, NodeCosts*) const;
  // Adapts a member-function pointer into a std::function bound to `this`.
  auto wrap = [this](CostImpl impl)
      -> std::function<absl::Status(const OpContext&, NodeCosts*)> {
    return [this, impl](const OpContext& op_context, NodeCosts* node_costs) {
      return (this->*impl)(op_context, node_costs);
    };
  };
  // Ops with dedicated cost predictors.
  device_cost_impl_.emplace(kConv2d,
                            wrap(&OpLevelCostEstimator::PredictConv2D));
  device_cost_impl_.emplace(
      kConv2dBackpropFilter,
      wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter));
  device_cost_impl_.emplace(
      kConv2dBackpropInput,
      wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput));
  device_cost_impl_.emplace(
      kFusedConv2dBiasActivation,
      wrap(&OpLevelCostEstimator::PredictFusedConv2DBiasActivation));
  device_cost_impl_.emplace(kDepthwiseConv2dNative,
                            wrap(&OpLevelCostEstimator::PredictConv2D));
  device_cost_impl_.emplace(
      kDepthwiseConv2dNativeBackpropFilter,
      wrap(&OpLevelCostEstimator::PredictConv2DBackpropFilter));
  device_cost_impl_.emplace(
      kDepthwiseConv2dNativeBackpropInput,
      wrap(&OpLevelCostEstimator::PredictConv2DBackpropInput));
  device_cost_impl_.emplace(kMatMul,
                            wrap(&OpLevelCostEstimator::PredictMatMul));
  device_cost_impl_.emplace(kSparseMatMul,
                            wrap(&OpLevelCostEstimator::PredictMatMul));
  device_cost_impl_.emplace(
      kSparseTensorDenseMatMul,
      wrap(&OpLevelCostEstimator::PredictSparseTensorDenseMatMul));
  device_cost_impl_.emplace(kBatchMatMul,
                            wrap(&OpLevelCostEstimator::PredictBatchMatMul));
  device_cost_impl_.emplace(kBatchMatMulV2,
                            wrap(&OpLevelCostEstimator::PredictBatchMatMul));
  device_cost_impl_.emplace(kQuantizedMatMul,
                            wrap(&OpLevelCostEstimator::PredictMatMul));
  device_cost_impl_.emplace(kQuantizedMatMulV2,
                            wrap(&OpLevelCostEstimator::PredictMatMul));
  device_cost_impl_.emplace(kXlaEinsum,
                            wrap(&OpLevelCostEstimator::PredictEinsum));
  device_cost_impl_.emplace(kEinsum,
                            wrap(&OpLevelCostEstimator::PredictEinsum));
  device_cost_impl_.emplace(kNoOp, wrap(&OpLevelCostEstimator::PredictNoOp));
  device_cost_impl_.emplace(kGuaranteeConst,
                            wrap(&OpLevelCostEstimator::PredictNoOp));
  device_cost_impl_.emplace(kGather,
                            wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
  device_cost_impl_.emplace(kGatherNd,
                            wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
  device_cost_impl_.emplace(kGatherV2,
                            wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
  device_cost_impl_.emplace(kScatterAdd,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kScatterDiv,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kScatterMax,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kScatterMin,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kScatterMul,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kScatterSub,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kScatterUpdate,
                            wrap(&OpLevelCostEstimator::PredictScatter));
  device_cost_impl_.emplace(kSlice,
                            wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
  device_cost_impl_.emplace(kStridedSlice,
                            wrap(&OpLevelCostEstimator::PredictGatherOrSlice));
  // Ops treated as identity: no compute, no data movement charged.
  device_cost_impl_.emplace(kPlaceholder,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kIdentity,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kIdentityN,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kRefIdentity,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kStopGradient,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kPreventGradient,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kReshape,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kRecv,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kSend,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kSwitch,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kMerge,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kEnter,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kExit,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kNextIteration,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  device_cost_impl_.emplace(kBitCast,
                            wrap(&OpLevelCostEstimator::PredictIdentity));
  // Ops dominated by memory traffic rather than compute.
  device_cost_impl_.emplace(kConcatV2,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kDataFormatVecPermute,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kDepthToSpace,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kExpandDims,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kFill,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kOneHot,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kPack,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kRange,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kSpaceToDepth,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kSplit,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kSqueeze,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kTranspose,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kTile,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  device_cost_impl_.emplace(kUnpack,
                            wrap(&OpLevelCostEstimator::PredictPureMemoryOp));
  // Shape-metadata ops: output depends only on shapes, minimal cost.
  device_cost_impl_.emplace(kRank,
                            wrap(&OpLevelCostEstimator::PredictMetadata));
  device_cost_impl_.emplace(kShape,
                            wrap(&OpLevelCostEstimator::PredictMetadata));
  device_cost_impl_.emplace(kShapeN,
                            wrap(&OpLevelCostEstimator::PredictMetadata));
  device_cost_impl_.emplace(kSize,
                            wrap(&OpLevelCostEstimator::PredictMetadata));
  device_cost_impl_.emplace(kMaxPool,
                            wrap(&OpLevelCostEstimator::PredictMaxPool));
  device_cost_impl_.emplace(kMaxPoolGrad,
                            wrap(&OpLevelCostEstimator::PredictMaxPoolGrad));
  device_cost_impl_.emplace(kAvgPool,
                            wrap(&OpLevelCostEstimator::PredictAvgPool));
  device_cost_impl_.emplace(kAvgPoolGrad,
                            wrap(&OpLevelCostEstimator::PredictAvgPoolGrad));
  device_cost_impl_.emplace(kFusedBatchNorm,
                            wrap(&OpLevelCostEstimator::PredictFusedBatchNorm));
  device_cost_impl_.emplace(
      kFusedBatchNormGrad,
      wrap(&OpLevelCostEstimator::PredictFusedBatchNormGrad));
  device_cost_impl_.emplace(kSoftmax,
                            wrap(&OpLevelCostEstimator::PredictSoftmax));
  device_cost_impl_.emplace(kResizeBilinear,
                            wrap(&OpLevelCostEstimator::PredictResizeBilinear));
  device_cost_impl_.emplace(kCropAndResize,
                            wrap(&OpLevelCostEstimator::PredictCropAndResize));
  device_cost_impl_.emplace(
      kAssignVariableOp, wrap(&OpLevelCostEstimator::PredictAssignVariableOps));
  device_cost_impl_.emplace(
      kAssignAddVariableOp,
      wrap(&OpLevelCostEstimator::PredictAssignVariableOps));
  device_cost_impl_.emplace(
      kAssignSubVariableOp,
      wrap(&OpLevelCostEstimator::PredictAssignVariableOps));
  device_cost_impl_.emplace(kAddN, wrap(&OpLevelCostEstimator::PredictNaryOp));
  // Ops whose outputs persist across the graph's lifetime.
  persistent_ops_ = {
      kConst,       kVariable,       kVariableV2,   kAutoReloadVariable,
      kVarHandleOp, kReadVariableOp, kVarHandlesOp, kReadVariablesOp};
// Per-element compute cost of an Eigen functor, from its functor traits.
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
  // QuantizeV2 is modeled as scale + clamp (max/min) + round per element.
  const int quantize_v2_cost =
      EIGEN_COST(scalar_product_op<float>) + EIGEN_COST(scalar_max_op<float>) +
      EIGEN_COST(scalar_min_op<float>) + EIGEN_COST(scalar_round_op<float>);
  const int quantize_and_dequantize_v2_cost =
      quantize_v2_cost + EIGEN_COST(scalar_product_op<float>);
  // Unary elementwise ops.
  elementwise_ops_.emplace("Acos", EIGEN_COST(scalar_acos_op<float>));
  elementwise_ops_.emplace("All", EIGEN_COST(scalar_boolean_and_op<bool>));
  elementwise_ops_.emplace("ArgMax", EIGEN_COST(scalar_max_op<float>));
  elementwise_ops_.emplace("Asin", EIGEN_COST(scalar_asin_op<float>));
  elementwise_ops_.emplace("Atan", EIGEN_COST(scalar_atan_op<float>));
  elementwise_ops_.emplace("Atan2", EIGEN_COST(scalar_quotient_op<float>) +
                                        EIGEN_COST(scalar_atan_op<float>));
  elementwise_ops_.emplace(
      "Cast", Eigen::internal::functor_traits<
                  Eigen::internal::scalar_cast_op<float, int16>>::Cost);
  elementwise_ops_.emplace("Ceil", EIGEN_COST(scalar_ceil_op<float>));
  elementwise_ops_.emplace("Cos", EIGEN_COST(scalar_cos_op<float>));
  elementwise_ops_.emplace("Dequantize", EIGEN_COST(scalar_product_op<float>));
  elementwise_ops_.emplace("Erf", 1);
  elementwise_ops_.emplace("Erfc", 1);
  elementwise_ops_.emplace("Exp", EIGEN_COST(scalar_exp_op<float>));
  elementwise_ops_.emplace("Expm1", EIGEN_COST(scalar_expm1_op<float>));
  elementwise_ops_.emplace("Floor", EIGEN_COST(scalar_floor_op<float>));
  elementwise_ops_.emplace("Inv", EIGEN_COST(scalar_inverse_op<float>));
  elementwise_ops_.emplace("InvGrad", 1);
  elementwise_ops_.emplace("Lgamma", 1);
  elementwise_ops_.emplace("Log", EIGEN_COST(scalar_log_op<float>));
  elementwise_ops_.emplace("Log1p", EIGEN_COST(scalar_log1p_op<float>));
  elementwise_ops_.emplace("Max", EIGEN_COST(scalar_max_op<float>));
  elementwise_ops_.emplace("Min", EIGEN_COST(scalar_min_op<float>));
  elementwise_ops_.emplace("Neg", EIGEN_COST(scalar_opposite_op<float>));
  elementwise_ops_.emplace("Prod", EIGEN_COST(scalar_product_op<float>));
  elementwise_ops_.emplace("QuantizeAndDequantizeV2",
                           quantize_and_dequantize_v2_cost);
  elementwise_ops_.emplace("QuantizeAndDequantizeV4",
                           quantize_and_dequantize_v2_cost);
  elementwise_ops_.emplace("QuantizedSigmoid",
                           EIGEN_COST(scalar_logistic_op<float>));
  elementwise_ops_.emplace("QuantizeV2", quantize_v2_cost);
  elementwise_ops_.emplace("Reciprocal", EIGEN_COST(scalar_inverse_op<float>));
  elementwise_ops_.emplace("Relu", EIGEN_COST(scalar_max_op<float>));
  elementwise_ops_.emplace("Relu6", EIGEN_COST(scalar_max_op<float>));
  elementwise_ops_.emplace("Rint", 1);
  elementwise_ops_.emplace("Round", EIGEN_COST(scalar_round_op<float>));
  elementwise_ops_.emplace("Rsqrt", EIGEN_COST(scalar_rsqrt_op<float>));
  elementwise_ops_.emplace("Sigmoid", EIGEN_COST(scalar_logistic_op<float>));
  elementwise_ops_.emplace("Sign", EIGEN_COST(scalar_sign_op<float>));
  elementwise_ops_.emplace("Sin", EIGEN_COST(scalar_sin_op<float>));
  elementwise_ops_.emplace("Sqrt", EIGEN_COST(scalar_sqrt_op<float>));
  elementwise_ops_.emplace("Square", EIGEN_COST(scalar_square_op<float>));
  elementwise_ops_.emplace("Sum", EIGEN_COST(scalar_sum_op<float>));
  elementwise_ops_.emplace("Tan", EIGEN_COST(scalar_tan_op<float>));
  elementwise_ops_.emplace("Tanh", EIGEN_COST(scalar_tanh_op<float>));
  elementwise_ops_.emplace("TopKV2", EIGEN_COST(scalar_max_op<float>));
  // Binary elementwise ops.
  elementwise_ops_.emplace("Add", EIGEN_COST(scalar_sum_op<float>));
  elementwise_ops_.emplace("AddV2", EIGEN_COST(scalar_sum_op<float>));
  elementwise_ops_.emplace("ApproximateEqual", 1);
  elementwise_ops_.emplace("BiasAdd", EIGEN_COST(scalar_sum_op<float>));
  elementwise_ops_.emplace("QuantizedBiasAdd",
                           EIGEN_COST(scalar_sum_op<float>));
  elementwise_ops_.emplace("Div", EIGEN_COST(scalar_quotient_op<float>));
  elementwise_ops_.emplace("Equal", 1);
  elementwise_ops_.emplace("FloorDiv", EIGEN_COST(scalar_quotient_op<float>));
  elementwise_ops_.emplace("FloorMod", EIGEN_COST(scalar_mod_op<float>));
  elementwise_ops_.emplace("Greater", 1);
  elementwise_ops_.emplace("GreaterEqual", 1);
  elementwise_ops_.emplace("Less", 1);
  elementwise_ops_.emplace("LessEqual", 1);
  elementwise_ops_.emplace("LogicalAnd",
                           EIGEN_COST(scalar_boolean_and_op<bool>));
  elementwise_ops_.emplace("LogicalNot", 1);
  elementwise_ops_.emplace("LogicalOr", EIGEN_COST(scalar_boolean_or_op<bool>));
  elementwise_ops_.emplace("Maximum", EIGEN_COST(scalar_max_op<float>));
  elementwise_ops_.emplace("Minimum", EIGEN_COST(scalar_min_op<float>));
  elementwise_ops_.emplace("Mod", EIGEN_COST(scalar_mod_op<float>));
  elementwise_ops_.emplace("Mul", EIGEN_COST(scalar_product_op<float>));
  elementwise_ops_.emplace("NotEqual", 1);
  elementwise_ops_.emplace("QuantizedAdd", EIGEN_COST(scalar_sum_op<float>));
  elementwise_ops_.emplace("QuantizedMul",
                           EIGEN_COST(scalar_product_op<float>));
  elementwise_ops_.emplace("RealDiv", EIGEN_COST(scalar_quotient_op<float>));
  elementwise_ops_.emplace("ReluGrad", EIGEN_COST(scalar_max_op<float>));
  elementwise_ops_.emplace("Select", EIGEN_COST(scalar_boolean_or_op<bool>));
  elementwise_ops_.emplace("SelectV2", EIGEN_COST(scalar_boolean_or_op<bool>));
  elementwise_ops_.emplace("SquaredDifference",
                           EIGEN_COST(scalar_square_op<float>) +
                               EIGEN_COST(scalar_difference_op<float>));
  elementwise_ops_.emplace("Sub", EIGEN_COST(scalar_difference_op<float>));
  elementwise_ops_.emplace("TruncateDiv",
                           EIGEN_COST(scalar_quotient_op<float>));
  elementwise_ops_.emplace("TruncateMod", EIGEN_COST(scalar_mod_op<float>));
  elementwise_ops_.emplace("Where", 1);
#undef EIGEN_COST
  // By default compute and memory times are summed, not overlapped.
  compute_memory_overlap_ = false;
}
// Public entry point: predicts Costs for one op. Delegates to
// PredictNodeCosts() and converts the resulting NodeCosts into Costs; if
// estimation fails, returns ZeroCosts flagged as inaccurate.
Costs OpLevelCostEstimator::PredictCosts(const OpContext& op_context) const {
  Costs costs;
  NodeCosts node_costs;
  if (PredictNodeCosts(op_context, &node_costs).ok()) {
    if (node_costs.has_costs) {
      // The estimator produced a fully-formed Costs struct directly.
      return node_costs.costs;
    }
    if (node_costs.minimum_cost_op) {
      // Trivial op: charge the minimum compute time and no memory time.
      costs.compute_time = kMinComputeTime;
      costs.execution_time = kMinComputeTime;
      costs.memory_time = 0;
      costs.intermediate_memory_time = 0;
      costs.intermediate_memory_read_time = 0;
      costs.intermediate_memory_write_time = 0;
    } else {
      // Convert op and byte counts into times using device characteristics.
      costs = PredictOpCountBasedCost(
          node_costs.num_compute_ops, node_costs.num_total_read_bytes(),
          node_costs.num_total_write_bytes(), op_context.op_info);
    }
    VLOG(1) << "Operation " << op_context.op_info.op() << " takes "
            << costs.execution_time.count() << " ns.";
    // Propagate memory usage and accuracy metadata from NodeCosts.
    costs.max_memory = node_costs.max_memory;
    costs.persistent_memory = node_costs.persistent_memory;
    costs.temporary_memory = node_costs.temporary_memory;
    costs.inaccurate = node_costs.inaccurate;
    costs.num_ops_with_unknown_shapes =
        node_costs.num_nodes_with_unknown_shapes;
    costs.num_ops_total = node_costs.num_nodes;
    return costs;
  }
  // Estimation failed; report zero costs marked inaccurate.
  LOG(WARNING) << "Error in PredictCost() for the op: "
               << op_context.op_info.ShortDebugString();
  costs = Costs::ZeroCosts(true);
  costs.num_ops_with_unknown_shapes = node_costs.num_nodes_with_unknown_shapes;
  return costs;
}
// Routes the op to its estimator: a registered per-op implementation, the
// persistent-variable predictor, or the elementwise predictor; anything else
// falls back to the unknown-op heuristic.
absl::Status OpLevelCostEstimator::PredictNodeCosts(
    const OpContext& op_context, NodeCosts* node_costs) const {
  const auto& op_info = op_context.op_info;
  auto it = device_cost_impl_.find(op_info.op());
  if (it != device_cost_impl_.end()) {
    std::function<absl::Status(const OpContext&, NodeCosts*)> estimator =
        it->second;
    return estimator(op_context, node_costs);
  }
  if (persistent_ops_.find(op_info.op()) != persistent_ops_.end()) {
    // Constants and variables: persistent outputs.
    return PredictVariable(op_context, node_costs);
  }
  if (elementwise_ops_.find(op_info.op()) != elementwise_ops_.end()) {
    return PredictCwiseOp(op_context, node_costs);
  }
  VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
  node_costs->num_nodes_with_unknown_op_type = 1;
  return PredictCostOfAnUnknownOp(op_context, node_costs);
}
// Derives rough compute throughput (gigaops) and memory bandwidth (GB/s) for
// `device`, falling back to hard-coded defaults when DeviceProperties are
// incomplete or the device type is unknown.
DeviceInfo OpLevelCostEstimator::GetDeviceInfo(
    const DeviceProperties& device) const {
  double gflops = -1;
  double gb_per_sec = -1;
  if (device.type() == "CPU") {
    // frequency() is scaled by 1e-3 to reach giga-ops — presumably it is
    // reported in MHz; TODO confirm against DeviceProperties producers.
    gflops = device.num_cores() * device.frequency() * 1e-3;
    if (gb_per_sec < 0) {
      if (device.bandwidth() > 0) {
        // bandwidth() is divided by 1e6 to reach GB/s — presumably KB/s.
        gb_per_sec = device.bandwidth() / 1e6;
      } else {
        gb_per_sec = 32;  // Default CPU memory bandwidth, in GB/s.
      }
    }
  } else if (device.type() == "GPU") {
    const auto& device_env = device.environment();
    auto it = device_env.find("architecture");
    if (it != device_env.end()) {
      const std::string architecture = device_env.at("architecture");
      int cores_per_multiprocessor;
      // Lexicographic comparison on the architecture string (presumably the
      // CUDA compute-capability major version) picks CUDA cores per SM.
      if (architecture < "3") {
        cores_per_multiprocessor = 32;
      } else if (architecture < "4") {
        cores_per_multiprocessor = 192;
      } else if (architecture < "6") {
        cores_per_multiprocessor = 128;
      } else {
        cores_per_multiprocessor = 64;
      }
      // num_cores() here counts multiprocessors; kOpsPerMac credits a
      // multiply-add pair per core per cycle.
      gflops = device.num_cores() * device.frequency() * 1e-3 *
               cores_per_multiprocessor * kOpsPerMac;
      if (device.bandwidth() > 0) {
        gb_per_sec = device.bandwidth() / 1e6;
      } else {
        gb_per_sec = 100;  // Default GPU memory bandwidth, in GB/s.
      }
    } else {
      // Architecture unknown: conservative defaults.
      gflops = 100;
      gb_per_sec = 12;
    }
  } else {
    // Unknown device type: assume PCIe-limited transfer (~12 GB/s).
    LOG_EVERY_N(WARNING, 1000) << "Unknown device type: " << device.type()
                               << ", assuming PCIe between CPU and GPU.";
    gflops = 1;
    gb_per_sec = 12;
  }
  VLOG(1) << "Device: " << device.type() << " gflops: " << gflops
          << " gb_per_sec: " << gb_per_sec;
  return DeviceInfo(gflops, gb_per_sec);
}
// Estimates an elementwise (cwise) op as per-element cost (from
// elementwise_ops_) times the broadcast element count, plus the default
// memory-bound cost for its inputs/outputs.
absl::Status OpLevelCostEstimator::PredictCwiseOp(const OpContext& op_context,
                                                  NodeCosts* node_costs) const {
  const auto& op_info = op_context.op_info;
  bool found_unknown_shapes = false;
  // Element count: the largest of (largest input, first output, broadcast
  // output of multi-input ops).
  int64_t op_count = CalculateLargestInputCount(op_info, &found_unknown_shapes);
  if (op_info.outputs_size() > 0) {
    op_count = std::max(
        op_count,
        CalculateTensorElementCount(op_info.outputs(0), &found_unknown_shapes));
  }
  if (op_info.inputs_size() >= 2) {
    // Binary ops may broadcast; use the broadcast output element count.
    op_count = std::max(op_count, CwiseOutputElementCount(op_info));
  }
  int op_cost = 1;
  auto it = elementwise_ops_.find(op_info.op());
  if (it != elementwise_ops_.end()) {
    op_cost = it->second;
  } else {
    return errors::InvalidArgument("Not a cwise op: ", op_info.op());
  }
  return PredictDefaultNodeCosts(op_count * op_cost, op_context,
                                 &found_unknown_shapes, node_costs);
}
// Fallback estimator for ops without a registered cost function: marks the
// estimate inaccurate and charges only the default memory-bound cost (zero
// compute ops).
absl::Status OpLevelCostEstimator::PredictCostOfAnUnknownOp(
    const OpContext& op_context, NodeCosts* node_costs) const {
  node_costs->inaccurate = true;
  bool unknown_shapes = false;
  return PredictDefaultNodeCosts(0, op_context, &unknown_shapes, node_costs);
}
// Convenience overload: derives input/output byte counts from `op_info`,
// delegates to the four-argument overload, and records shape-accuracy and
// peak-memory metadata on the result.
Costs OpLevelCostEstimator::PredictOpCountBasedCost(
    double operations, const OpInfo& op_info) const {
  bool found_unknown_shapes = false;
  const double in_bytes = CalculateInputSize(op_info, &found_unknown_shapes);
  const double out_bytes = CalculateOutputSize(op_info, &found_unknown_shapes);
  Costs result =
      PredictOpCountBasedCost(operations, in_bytes, out_bytes, op_info);
  result.inaccurate = found_unknown_shapes;
  result.num_ops_with_unknown_shapes = found_unknown_shapes;
  result.max_memory = out_bytes;
  return result;
}
// Converts an operation count and I/O byte counts into time estimates using
// the device's throughput figures: compute time = ops / gigaops, memory time
// = bytes / bandwidth, plus separate intermediate read/write memory times.
// Whether compute and memory overlap is controlled by
// compute_memory_overlap_.
Costs OpLevelCostEstimator::PredictOpCountBasedCost(
    double operations, double input_io_bytes, double output_io_bytes,
    const OpInfo& op_info) const {
  double total_io_bytes = input_io_bytes + output_io_bytes;
  const DeviceInfo device_info = GetDeviceInfo(op_info.device());
  if (device_info.gigaops <= 0 || device_info.gb_per_sec <= 0 ||
      device_info.intermediate_read_gb_per_sec <= 0 ||
      device_info.intermediate_write_gb_per_sec <= 0) {
    // Nonpositive throughputs would produce nonsensical times; log and
    // continue (the arithmetic below still runs).
    VLOG(1) << "BAD DEVICE. Op:" << op_info.op()
            << " device type:" << op_info.device().type()
            << " device model:" << op_info.device().model();
  }
  Costs::NanoSeconds compute_cost(std::ceil(operations / device_info.gigaops));
  VLOG(1) << "Op:" << op_info.op() << " GOps:" << operations / 1e9
          << " Compute Time (ns):" << compute_cost.count();
  Costs::NanoSeconds memory_cost(
      std::ceil(total_io_bytes / device_info.gb_per_sec));
  VLOG(1) << "Op:" << op_info.op() << " Size (KB):" << (total_io_bytes) / 1e3
          << " Memory Time (ns):" << memory_cost.count();
  // Intermediate memory traffic (reads and writes) is timed separately.
  double intermediate_read_time =
      (input_io_bytes > 0)
          ? std::ceil(input_io_bytes / device_info.intermediate_read_gb_per_sec)
          : 0;
  double intermediate_write_time =
      (output_io_bytes > 0)
          ? std::ceil(output_io_bytes /
                      device_info.intermediate_write_gb_per_sec)
          : 0;
  // With overlap, reads and writes proceed in parallel; otherwise they add.
  Costs::NanoSeconds intermediate_memory_cost =
      compute_memory_overlap_
          ? std::max(intermediate_read_time, intermediate_write_time)
          : (intermediate_read_time + intermediate_write_time);
  VLOG(1) << "Op:" << op_info.op() << " Size (KB):" << (total_io_bytes) / 1e3
          << " Intermediate Memory Time (ns):"
          << intermediate_memory_cost.count();
  Costs costs = Costs::ZeroCosts();
  costs.compute_time = compute_cost;
  costs.memory_time = memory_cost;
  costs.intermediate_memory_time = intermediate_memory_cost;
  costs.intermediate_memory_read_time =
      Costs::NanoSeconds(intermediate_read_time);
  costs.intermediate_memory_write_time =
      Costs::NanoSeconds(intermediate_write_time);
  // Sets execution_time as either sum or max of the component times.
  CombineCostsAndUpdateExecutionTime(compute_memory_overlap_, &costs);
  return costs;
}
int64_t OpLevelCostEstimator::CountConv2DOperations(
    const OpInfo& op_info, bool* found_unknown_shapes) {
  // Overload that discards the extracted convolution dimensions.
  return CountConv2DOperations(op_info, /*conv_info=*/nullptr,
                               found_unknown_shapes);
}
// Extracts convolution dimensions (batch, input/kernel/output spatial sizes,
// channel depths, strides, padding) from the image and filter shapes,
// honoring the op's data_format and filter_format attributes. Unknown or
// partial shapes are replaced with minimum shapes and flagged via
// *found_unknown_shapes.
OpLevelCostEstimator::ConvolutionDimensions
OpLevelCostEstimator::ConvolutionDimensionsFromInputs(
    const TensorShapeProto& original_image_shape,
    const TensorShapeProto& original_filter_shape, const OpInfo& op_info,
    bool* found_unknown_shapes) {
  VLOG(2) << "op features: " << op_info.DebugString();
  VLOG(2) << "Original image shape: " << original_image_shape.DebugString();
  VLOG(2) << "Original filter shape: " << original_filter_shape.DebugString();
  // Map the data format to dimension indices. minor_channel_index stays -1
  // except for the vectorized NCHW_VECT_C layout, which has a 5th packed
  // channel dimension.
  int x_index, y_index, major_channel_index, minor_channel_index = -1;
  const std::string& data_format = GetDataFormat(op_info);
  if (data_format == "NCHW") {
    major_channel_index = 1;
    y_index = 2;
    x_index = 3;
  } else if (data_format == "NCHW_VECT_C") {
    minor_channel_index = 1;
    y_index = 2;
    x_index = 3;
    major_channel_index = 4;
  } else {
    // Defaults to NHWC.
    y_index = 1;
    x_index = 2;
    major_channel_index = 3;
  }
  // Same mapping for the filter layout; in_minor_channel_index stays -1
  // except for OIHW_VECT_I.
  const std::string& filter_format = GetFilterFormat(op_info);
  int filter_x_index, filter_y_index, in_major_channel_index, out_channel_index,
      in_minor_channel_index = -1;
  if (filter_format == "HWIO") {
    filter_y_index = 0;
    filter_x_index = 1;
    in_major_channel_index = 2;
    out_channel_index = 3;
  } else if (filter_format == "OIHW_VECT_I") {
    out_channel_index = 0;
    in_minor_channel_index = 1;
    filter_y_index = 2;
    filter_x_index = 3;
    in_major_channel_index = 4;
  } else {
    // Defaults to OIHW.
    out_channel_index = 0;
    in_major_channel_index = 1;
    filter_y_index = 2;
    filter_x_index = 3;
  }
  // Vectorized layouts are rank 5, standard layouts rank 4.
  std::vector<int64_t> image_shape = MaybeGetMinimumShape(
      original_image_shape, minor_channel_index >= 0 ? 5 : 4,
      found_unknown_shapes);
  std::vector<int64_t> filter_shape = MaybeGetMinimumShape(
      original_filter_shape, in_minor_channel_index >= 0 ? 5 : 4,
      found_unknown_shapes);
  VLOG(2) << "Image shape: " << absl::StrJoin(image_shape, ", ");
  VLOG(2) << "Filter shape: " << absl::StrJoin(filter_shape, ", ");
  int64_t batch = image_shape[0];
  int64_t ix = image_shape[x_index];
  int64_t iy = image_shape[y_index];
  // For vectorized layouts the effective depth is major * minor channels.
  int64_t iz = minor_channel_index >= 0 ? image_shape[minor_channel_index] *
                                              image_shape[major_channel_index]
                                        : image_shape[major_channel_index];
  int64_t kx = filter_shape[filter_x_index];
  int64_t ky = filter_shape[filter_y_index];
  int64_t kz = in_minor_channel_index >= 0
                   ? filter_shape[in_major_channel_index] *
                         filter_shape[in_minor_channel_index]
                   : filter_shape[in_major_channel_index];
  std::vector<int64_t> strides = GetStrides(op_info);
  const auto padding = GetPadding(op_info);
  int64_t sx = strides[x_index];
  int64_t sy = strides[y_index];
  int64_t ox = GetOutputSize(ix, kx, sx, padding);
  int64_t oy = GetOutputSize(iy, ky, sy, padding);
  int64_t oz = filter_shape[out_channel_index];
  if (iz != 1 && kz != 1) {
    // Input depth must be a multiple of filter depth (grouped convolution);
    // a mismatch indicates unknown/inconsistent shapes.
    DCHECK_EQ(iz % kz, 0) << "Input channel " << iz
                          << " is not a multiple of filter channel " << kz
                          << ".";
    if (iz % kz) {
      *found_unknown_shapes = true;
    }
  } else {
    // One of the depths is the placeholder 1 (unknown shape); take the other.
    iz = kz = std::max<int64_t>(iz, kz);
  }
  OpLevelCostEstimator::ConvolutionDimensions conv_dims = {
      batch, ix, iy, iz, kx, ky, kz, oz, ox, oy, sx, sy, padding};
  VLOG(1) << "Batch Size:" << batch;
  VLOG(1) << "Image Dims:" << ix << "," << iy;
  VLOG(1) << "Input Depth:" << iz;
  VLOG(1) << "Kernel Dims:" << kx << "," << ky;
  VLOG(1) << "Kernel Depth:" << kz;
  VLOG(1) << "Output Dims:" << ox << "," << oy;
  VLOG(1) << "Output Depth:" << oz;
  VLOG(1) << "Strides:" << sx << "," << sy;
  VLOG(1) << "Padding:" << (padding == Padding::VALID ? "VALID" : "SAME");
  return conv_dims;
}
int64_t OpLevelCostEstimator::CountConv2DOperations(
const OpInfo& op_info, ConvolutionDimensions* conv_info,
bool* found_unknown_shapes) {
DCHECK(op_info.op() == kConv2d || op_info.op() == kDepthwiseConv2dNative)
<< "Invalid Operation: not Conv2D nor DepthwiseConv2dNative";
if (op_info.inputs_size() < 2) {
*found_unknown_shapes = true;
return 0;
}
ConvolutionDimensions conv_dims = ConvolutionDimensionsFromInputs(
op_info.inputs(0).shape(), op_info.inputs(1).shape(), op_info,
found_unknown_shapes);
int64_t ops = conv_dims.batch;
ops *= conv_dims.ox * conv_dims.oy;
ops *= conv_dims.kx * conv_dims.ky;
if (op_info.op() == kConv2d) {
ops *= conv_dims.kz * conv_dims.oz;
} else {
conv_dims.oz *= conv_dims.iz;
ops *= conv_dims.oz;
}
ops *= kOpsPerMac;
if (conv_info != nullptr) {
*conv_info = conv_dims;
}
return ops;
}
int64_t OpLevelCostEstimator::CountMatMulOperations(
    const OpInfo& op_info, bool* found_unknown_shapes) {
  // Overload that discards the extracted MatMul dimensions.
  return CountMatMulOperations(op_info, /*mat_mul=*/nullptr,
                               found_unknown_shapes);
}
int64_t OpLevelCostEstimator::CountMatMulOperations(
const OpInfo& op_info, MatMulDimensions* mat_mul,
bool* found_unknown_shapes) {
bool transpose_a = false;
if (auto it = op_info.attr().find("transpose_a");
it != op_info.attr().end()) {
if (it->second.b()) transpose_a = true;
}
bool transpose_b = false;
if (auto it = op_info.attr().find("transpose_b");
it != op_info.attr().end()) {
if (it->second.b()) transpose_b = true;
}
return CountMatMulOperations(op_info, transpose_a, transpose_b, mat_mul,
found_unknown_shapes);
}
// Core MatMul FLOP count: 2 * M * N * K after applying the transpose flags to
// the two operand shapes. Optionally reports M/N/K via `mat_mul`. Returns 0
// when inputs are missing or the contraction dims are incompatible.
int64_t OpLevelCostEstimator::CountMatMulOperations(
    const OpInfo& op_info, bool transpose_a, bool transpose_b,
    MatMulDimensions* mat_mul, bool* found_unknown_shapes) {
  // NOTE(review): accumulated in double and implicitly truncated to int64_t
  // on return; very large products may lose precision.
  double ops = 0;
  if (op_info.inputs_size() < 2) {
    LOG(ERROR) << "Need 2 inputs but got " << op_info.inputs_size();
    *found_unknown_shapes = true;
    return 0;
  }
  auto& a_matrix = op_info.inputs(0);
  auto& b_matrix = op_info.inputs(1);
  VLOG(1) << "transpose_a:" << transpose_a;
  VLOG(1) << "transpose_b:" << transpose_b;
  // Unknown shapes are replaced with rank-2 minimum shapes (all-1 dims).
  std::vector<int64_t> a_matrix_shape =
      MaybeGetMinimumShape(a_matrix.shape(), 2, found_unknown_shapes);
  std::vector<int64_t> b_matrix_shape =
      MaybeGetMinimumShape(b_matrix.shape(), 2, found_unknown_shapes);
  // Only k_dim_b is initialized here; the others are assigned in both
  // branches below.
  double m_dim, n_dim, k_dim, k_dim_b = 0;
  if (transpose_a) {
    m_dim = a_matrix_shape[1];
    k_dim = a_matrix_shape[0];
  } else {
    m_dim = a_matrix_shape[0];
    k_dim = a_matrix_shape[1];
  }
  if (transpose_b) {
    k_dim_b = b_matrix_shape[1];
    n_dim = b_matrix_shape[0];
  } else {
    k_dim_b = b_matrix_shape[0];
    n_dim = b_matrix_shape[1];
  }
  VLOG(1) << "M, N, K: " << m_dim << "," << n_dim << "," << k_dim;
  // A contraction dim of 1 may be a placeholder for an unknown shape, so it
  // is treated as compatible with anything; otherwise the dims must match.
  if (k_dim_b != 1 && k_dim != 1 && k_dim_b != k_dim) {
    LOG(ERROR) << "Incompatible Matrix dimensions";
    return ops;
  } else {
    k_dim = std::max(k_dim, k_dim_b);
  }
  // One multiply and one add per output element per contraction step.
  ops = m_dim * n_dim * k_dim * 2;
  VLOG(1) << "Operations for Matmul: " << ops;
  if (mat_mul != nullptr) {
    mat_mul->m = m_dim;
    mat_mul->n = n_dim;
    mat_mul->k = k_dim;
  }
  return ops;
}
// Rewrites a two-operand Einsum into an equivalent BatchMatMul OpContext so
// the BatchMatMul cost model can be reused. Labels shared by both inputs and
// the output become batch dims; labels only in input A (and the output) fold
// into M; labels in both inputs but not the output fold into K; labels only
// in input B (and the output) fold into N. Returns false when the einsum is
// malformed or uses a pattern this mapping cannot express.
bool OpLevelCostEstimator::GenerateBatchMatmulContextFromEinsum(
    const OpContext& einsum_context, OpContext* batch_matmul_context,
    bool* found_unknown_shapes) const {
  if (batch_matmul_context == nullptr) {
    VLOG(1) << "Output context should not be a nullptr.";
    return false;
  }
  // IsEinsumCorrectlyFormed guarantees the equation/inputs accesses below.
  if (!IsEinsumCorrectlyFormed(einsum_context)) return false;
  const auto& op_info = einsum_context.op_info;
  // Split "ab,bc->ac" into input labels and output (rhs) labels.
  std::vector<std::string> equation_split =
      absl::StrSplit(op_info.attr().find("equation")->second.s(), "->");
  std::vector<absl::string_view> input_split =
      absl::StrSplit(equation_split[0], ',');
  const auto& a_input = op_info.inputs(0);
  const auto& b_input = op_info.inputs(1);
  absl::string_view rhs_str = equation_split[1];
  absl::string_view a_input_str = input_split[0];
  absl::string_view b_input_str = input_split[1];
  constexpr int kMatrixRank = 2;
  bool a_input_shape_unknown = false;
  bool b_input_shape_unknown = false;
  std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
      a_input.shape(), std::max(kMatrixRank, a_input.shape().dim_size()),
      &a_input_shape_unknown);
  std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
      b_input.shape(), std::max(kMatrixRank, b_input.shape().dim_size()),
      &b_input_shape_unknown);
  *found_unknown_shapes = a_input_shape_unknown || b_input_shape_unknown ||
                          (a_input.shape().dim_size() < kMatrixRank) ||
                          (b_input.shape().dim_size() < kMatrixRank);
  // Build a BatchMatMul OpInfo that reuses the einsum's device/attrs but
  // replaces op, inputs, and transpose flags.
  OpInfo batch_matmul_op_info = op_info;
  batch_matmul_op_info.mutable_inputs()->Clear();
  batch_matmul_op_info.set_op("BatchMatMul");
  AttrValue transpose_attribute;
  transpose_attribute.set_b(false);
  (*batch_matmul_op_info.mutable_attr())["transpose_a"] = transpose_attribute;
  (*batch_matmul_op_info.mutable_attr())["transpose_b"] = transpose_attribute;
  OpInfo::TensorProperties* a_matrix = batch_matmul_op_info.add_inputs();
  TensorShapeProto* a_matrix_shape = a_matrix->mutable_shape();
  a_matrix->set_dtype(a_input.dtype());
  OpInfo::TensorProperties* b_matrix = batch_matmul_op_info.add_inputs();
  b_matrix->set_dtype(b_input.dtype());
  TensorShapeProto* b_matrix_shape = b_matrix->mutable_shape();
  // m/n/k accumulate the products of the folded label sizes; they start at 1.
  TensorShapeProto_Dim m_dim;
  TensorShapeProto_Dim n_dim;
  TensorShapeProto_Dim k_dim;
  m_dim.set_size(1);
  n_dim.set_size(1);
  k_dim.set_size(1);
  // Classify each label of input A: batch dim (also in B), M dim (A and
  // output only), or K dim (A and B, not output).
  for (int i_idx = 0, a_input_str_size = a_input_str.size();
       i_idx < a_input_str_size; ++i_idx) {
    if (!absl::StrContains(b_input_str, a_input_str[i_idx])) {
      if (!absl::StrContains(rhs_str, a_input_str[i_idx])) {
        // Label appears only in A — e.g. a reduction over A alone, which
        // BatchMatMul cannot express.
        VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
        return false;
      }
      m_dim.set_size(m_dim.size() * a_input_shape[i_idx]);
      continue;
    } else if (!absl::StrContains(rhs_str, a_input_str[i_idx])) {
      k_dim.set_size(k_dim.size() * a_input_shape[i_idx]);
      continue;
    }
    // Shared batch dim: appears in A, B, and the output.
    a_matrix_shape->add_dim()->set_size(a_input_shape[i_idx]);
    b_matrix_shape->add_dim()->set_size(a_input_shape[i_idx]);
  }
  // Labels unique to input B (and the output) fold into N; shared labels
  // were already handled above.
  for (int i_idx = 0, b_input_str_size = b_input_str.size();
       i_idx < b_input_str_size; ++i_idx) {
    if (!absl::StrContains(a_input_str, b_input_str[i_idx])) {
      if (!absl::StrContains(rhs_str, b_input_str[i_idx])) {
        VLOG(1) << "Missing accurate estimator for op: " << op_info.op();
        return false;
      }
      n_dim.set_size(n_dim.size() * b_input_shape[i_idx]);
    }
  }
  // Final shapes: A = [batch..., m, k], B = [batch..., k, n].
  *(a_matrix_shape->add_dim()) = m_dim;
  *(a_matrix_shape->add_dim()) = k_dim;
  *(b_matrix_shape->add_dim()) = k_dim;
  *(b_matrix_shape->add_dim()) = n_dim;
  *batch_matmul_context = einsum_context;
  batch_matmul_context->op_info = batch_matmul_op_info;
  return true;
}
int64_t OpLevelCostEstimator::CountBatchMatMulOperations(
    const OpInfo& op_info, bool* found_unknown_shapes) {
  // Overload that discards the extracted BatchMatMul dimensions.
  return CountBatchMatMulOperations(op_info, /*batch_mat_mul=*/nullptr,
                                    found_unknown_shapes);
}
// Counts FLOPs for BatchMatMul/BatchMatMulV2: the per-matmul count (via
// CountMatMulOperations on the trailing 2-D slices) times the broadcasted
// product of the leading batch dimensions. Optionally reports the batch and
// matmul dimensions via `batch_mat_mul`.
int64_t OpLevelCostEstimator::CountBatchMatMulOperations(
    const OpInfo& op_info, BatchMatMulDimensions* batch_mat_mul,
    bool* found_unknown_shapes) {
  if (op_info.op() != kBatchMatMul && op_info.op() != kBatchMatMulV2) {
    LOG(ERROR) << "Invalid Operation: " << op_info.op();
    *found_unknown_shapes = true;
    return 0;
  }
  if (op_info.inputs_size() != 2) {
    LOG(ERROR) << "Expected 2 inputs but got " << op_info.inputs_size();
    *found_unknown_shapes = true;
    return 0;
  }
  double ops = 0;
  const auto& a_input = op_info.inputs(0);
  const auto& b_input = op_info.inputs(1);
  // Pad shapes to at least rank 2 so the trailing dims form a matrix.
  const int matrix_rank = 2;
  bool a_input_shape_unknown = false;
  bool b_input_shape_unknown = false;
  std::vector<int64_t> a_input_shape = MaybeGetMinimumShape(
      a_input.shape(), std::max(matrix_rank, a_input.shape().dim_size()),
      &a_input_shape_unknown);
  std::vector<int64_t> b_input_shape = MaybeGetMinimumShape(
      b_input.shape(), std::max(matrix_rank, b_input.shape().dim_size()),
      &b_input_shape_unknown);
  *found_unknown_shapes = a_input_shape_unknown || b_input_shape_unknown ||
                          (a_input.shape().dim_size() < matrix_rank) ||
                          (b_input.shape().dim_size() < matrix_rank);
  // Walk the batch dims of the higher-rank input, pairing each with the
  // corresponding dim of the lower-rank input (or 1 where it has none), and
  // take the max as the broadcasted extent.
  std::vector<int64_t>* bigger_rank_shape = &a_input_shape;
  std::vector<int64_t>* smaller_rank_shape = &b_input_shape;
  if (b_input_shape.size() > a_input_shape.size()) {
    bigger_rank_shape = &b_input_shape;
    smaller_rank_shape = &a_input_shape;
  }
  int num_matmuls = 1;
  for (int b_i = 0,
           s_i = smaller_rank_shape->size() - bigger_rank_shape->size();
       b_i < bigger_rank_shape->size() - matrix_rank; ++b_i, ++s_i) {
    int b_dim = (*bigger_rank_shape)[b_i];
    int s_dim = 1;
    if (s_i >= 0) {
      s_dim = (*smaller_rank_shape)[s_i];
    }
    if (batch_mat_mul != nullptr) {
      batch_mat_mul->batch_dims.push_back(s_dim);
    }
    num_matmuls *= std::max(b_dim, s_dim);
  }
  // Build a synthetic MatMul OpInfo from the trailing two dims of each input.
  OpInfo matmul_op_info;
  matmul_op_info.set_op("MatMul");
  bool transpose_a = false;
  bool transpose_b = false;
  // BatchMatMul uses adj_x/adj_y; fall back to transpose_a/transpose_b.
  if (auto it = op_info.attr().find("adj_x"); it != op_info.attr().end()) {
    transpose_a = it->second.b();
  } else if (auto it = op_info.attr().find("transpose_a");
             it != op_info.attr().end()) {
    transpose_a = it->second.b();
  }
  if (auto it = op_info.attr().find("adj_y"); it != op_info.attr().end()) {
    transpose_b = it->second.b();
  } else if (auto it = op_info.attr().find("transpose_b");
             it != op_info.attr().end()) {
    transpose_b = it->second.b();
  }
  OpInfo::TensorProperties* a_matrix = matmul_op_info.add_inputs();
  a_matrix->set_dtype(a_input.dtype());
  TensorShapeProto* a_matrix_shape = a_matrix->mutable_shape();
  for (int i = std::max<int>(0, a_input_shape.size() - matrix_rank);
       i < a_input_shape.size(); ++i) {
    a_matrix_shape->add_dim()->set_size(a_input_shape[i]);
  }
  OpInfo::TensorProperties* b_matrix = matmul_op_info.add_inputs();
  b_matrix->set_dtype(b_input.dtype());
  TensorShapeProto* b_matrix_shape = b_matrix->mutable_shape();
  for (int i = std::max<int>(0, b_input_shape.size() - matrix_rank);
       i < b_input_shape.size(); ++i) {
    b_matrix_shape->add_dim()->set_size(b_input_shape[i]);
  }
  if (batch_mat_mul != nullptr) {
    batch_mat_mul->matmul_dims.m = (transpose_a)
                                       ? a_matrix_shape->dim(1).size()
                                       : a_matrix_shape->dim(0).size();
    batch_mat_mul->matmul_dims.k = (transpose_a)
                                       ? a_matrix_shape->dim(0).size()
                                       : a_matrix_shape->dim(1).size();
    batch_mat_mul->matmul_dims.n = (transpose_b)
                                       ? b_matrix_shape->dim(0).size()
                                       : b_matrix_shape->dim(1).size();
  }
  ops += num_matmuls * CountMatMulOperations(matmul_op_info, transpose_a,
                                             transpose_b, nullptr,
                                             found_unknown_shapes);
  return ops;
}
// Interprets a 1-D integer TensorProto (e.g. a shape tensor passed as a
// const input) as a TensorShapeProto: each element becomes one dimension
// size. Returns false — leaving *tensor_shape_proto cleared — when the proto
// cannot be parsed, is not 1-D, or has a non-integer dtype.
bool GetTensorShapeProtoFromTensorProto(const TensorProto& tensor_proto,
                                        TensorShapeProto* tensor_shape_proto) {
  tensor_shape_proto->Clear();
  // Round-trip through Tensor to normalize the value encoding (FromProto
  // accepts both field and tensor_content forms).
  Tensor tensor(tensor_proto.dtype());
  if (!tensor.FromProto(tensor_proto)) {
    LOG(WARNING) << "GetTensorShapeProtoFromTensorProto() -- "
                 << "failed to parse TensorProto: "
                 << tensor_proto.DebugString();
    return false;
  }
  if (tensor.dims() != 1) {
    LOG(WARNING) << "GetTensorShapeProtoFromTensorProto() -- "
                 << "tensor is not 1D: " << tensor.dims();
    return false;
  }
  // AsProtoField guarantees values land in the typed *_val fields the macro
  // below reads.
  TensorProto temp_tensor;
  tensor.AsProtoField(&temp_tensor);
// Copies every element of temp_tensor's `type`_val repeated field into a
// dimension of the output shape.
#define TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(type)        \
  do {                                                   \
    for (const auto& value : temp_tensor.type##_val()) { \
      tensor_shape_proto->add_dim()->set_size(value);    \
    }                                                    \
  } while (0)
  // int_val holds all of DT_INT32/INT16/INT8/UINT8.
  if (tensor.dtype() == DT_INT32 || tensor.dtype() == DT_INT16 ||
      tensor.dtype() == DT_INT8 || tensor.dtype() == DT_UINT8) {
    TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(int);
  } else if (tensor.dtype() == DT_INT64) {
    TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(int64);
  } else if (tensor.dtype() == DT_UINT32) {
    TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(uint32);
  } else if (tensor.dtype() == DT_UINT64) {
    TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO(uint64);
  } else {
    LOG(WARNING) << "GetTensorShapeProtoFromTensorProto() -- "
                 << "Unsupported dtype: " << tensor.dtype();
    return false;
  }
#undef TENSOR_VALUES_TO_TENSOR_SHAPE_PROTO
  return true;
}
// Counts ops for Conv2DBackpropInput / DepthwiseConv2dNativeBackpropInput.
// The input (gradient target) shape is recovered, in priority order, from:
// the const value of input 0 (a shape tensor), the single output's shape, or
// an all-ones 4-D fallback (flagged as unknown).
int64_t OpLevelCostEstimator::CountConv2DBackpropInputOperations(
    const OpInfo& op_info, ConvolutionDimensions* returned_conv_dims,
    bool* found_unknown_shapes) {
  int64_t ops = 0;
  DCHECK(op_info.op() == kConv2dBackpropInput ||
         op_info.op() == kDepthwiseConv2dNativeBackpropInput)
      << "Invalid Operation: not kConv2dBackpropInput nor"
         "kDepthwiseConv2dNativeBackpropInput";
  if (op_info.inputs_size() < 2) {
    *found_unknown_shapes = true;
    return ops;
  }
  TensorShapeProto input_shape;
  bool shape_found = false;
  if (op_info.inputs(0).has_value()) {
    // Input 0 is the const input-sizes tensor; decode it into a shape.
    const TensorProto& value = op_info.inputs(0).value();
    shape_found = GetTensorShapeProtoFromTensorProto(value, &input_shape);
  }
  if (!shape_found && op_info.outputs_size() == 1) {
    // The op's output has the same shape as the original conv input.
    input_shape = op_info.outputs(0).shape();
    shape_found = true;
  }
  if (!shape_found) {
    // Last resort: minimum 4-D shape, flagged as unknown.
    input_shape.Clear();
    for (int i = 0; i < 4; ++i) {
      input_shape.add_dim()->set_size(1);
    }
    *found_unknown_shapes = true;
  }
  ConvolutionDimensions conv_dims = ConvolutionDimensionsFromInputs(
      input_shape, op_info.inputs(1).shape(), op_info, found_unknown_shapes);
  // Same MAC count as the forward convolution.
  ops = conv_dims.batch;
  ops *= conv_dims.ox * conv_dims.oy;
  ops *= conv_dims.kx * conv_dims.ky;
  if (op_info.op() == kConv2dBackpropInput) {
    ops *= conv_dims.kz * conv_dims.oz;
  } else {
    // Depthwise: each input channel contributes oz outputs.
    conv_dims.oz *= conv_dims.iz;
    ops *= conv_dims.oz;
  }
  ops *= kOpsPerMac;
  VLOG(1) << "Operations for" << op_info.op() << " " << ops;
  if (returned_conv_dims != nullptr) {
    *returned_conv_dims = conv_dims;
  }
  return ops;
}
// Counts ops for Conv2DBackpropFilter / DepthwiseConv2dNativeBackpropFilter.
// The filter shape is recovered, in priority order, from: the const value of
// input 1 (a shape tensor), the single output's shape, or an all-ones 4-D
// fallback (flagged as unknown).
int64_t OpLevelCostEstimator::CountConv2DBackpropFilterOperations(
    const OpInfo& op_info, ConvolutionDimensions* returned_conv_dims,
    bool* found_unknown_shapes) {
  int64_t ops = 0;
  DCHECK(op_info.op() == kConv2dBackpropFilter ||
         op_info.op() == kDepthwiseConv2dNativeBackpropFilter)
      << "Invalid Operation: not kConv2dBackpropFilter nor"
         "kDepthwiseConv2dNativeBackpropFilter";
  TensorShapeProto filter_shape;
  bool shape_found = false;
  if (op_info.inputs_size() >= 2 && op_info.inputs(1).has_value()) {
    // Input 1 is the const filter-sizes tensor; decode it into a shape.
    const TensorProto& value = op_info.inputs(1).value();
    shape_found = GetTensorShapeProtoFromTensorProto(value, &filter_shape);
  }
  if (!shape_found && op_info.outputs_size() == 1) {
    // The op's output has the same shape as the filter.
    filter_shape = op_info.outputs(0).shape();
    shape_found = true;
  }
  if (!shape_found) {
    // Last resort: minimum 4-D shape, flagged as unknown.
    filter_shape.Clear();
    for (int i = 0; i < 4; ++i) {
      filter_shape.add_dim()->set_size(1);
    }
    *found_unknown_shapes = true;
  }
  if (op_info.inputs_size() < 1) {
    *found_unknown_shapes = true;
    return ops;
  }
  ConvolutionDimensions conv_dims = ConvolutionDimensionsFromInputs(
      op_info.inputs(0).shape(), filter_shape, op_info, found_unknown_shapes);
  // Same MAC count as the forward convolution.
  ops = conv_dims.batch;
  ops *= conv_dims.ox * conv_dims.oy;
  ops *= conv_dims.kx * conv_dims.ky;
  if (op_info.op() == kConv2dBackpropFilter) {
    ops *= conv_dims.kz * conv_dims.oz;
  } else {
    // Depthwise: each input channel contributes oz outputs.
    conv_dims.oz *= conv_dims.iz;
    ops *= conv_dims.oz;
  }
  ops *= kOpsPerMac;
  VLOG(1) << "Operations for" << op_info.op() << " " << ops;
  if (returned_conv_dims != nullptr) {
    *returned_conv_dims = conv_dims;
  }
  return ops;
}
int64_t OpLevelCostEstimator::CalculateTensorElementCount(
    const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
  // Product of the (minimum) dimension sizes; -1 if it overflows int64.
  VLOG(2) << " with " << DataTypeString(tensor.dtype()) << " tensor of shape "
          << tensor.shape().DebugString();
  // Treat scalars as rank 1 so the product below covers them.
  const int rank = std::max(1, tensor.shape().dim_size());
  std::vector<int64_t> dims =
      MaybeGetMinimumShape(tensor.shape(), rank, found_unknown_shapes);
  int64_t element_count = 1;
  for (int64_t dim : dims) {
    const int64_t next_count = MultiplyWithoutOverflow(element_count, dim);
    if (next_count < 0) {
      VLOG(1) << "Overflow encountered when computing element count of a "
                 "tensor, multiplying "
              << element_count << " with " << dim;
      return -1;
    }
    element_count = next_count;
  }
  return element_count;
}
int64_t OpLevelCostEstimator::CalculateTensorSize(
    const OpInfo::TensorProperties& tensor, bool* found_unknown_shapes) {
  // Bytes = element count * element width; -1 if the product overflows.
  const int64_t num_elements =
      CalculateTensorElementCount(tensor, found_unknown_shapes);
  const int bytes_per_element = DataTypeSize(BaseType(tensor.dtype()));
  VLOG(2) << "Count: " << num_elements
          << " DataTypeSize: " << bytes_per_element;
  const int64_t total_bytes =
      MultiplyWithoutOverflow(num_elements, bytes_per_element);
  if (total_bytes < 0) {
    VLOG(1) << "Overflow encountered when computing tensor size, multiplying "
            << num_elements << " with " << bytes_per_element;
    return -1;
  }
  return total_bytes;
}
int64_t OpLevelCostEstimator::CalculateInputSize(const OpInfo& op_info,
                                                 bool* found_unknown_shapes) {
  // Sums the byte sizes of all input tensors.
  int64_t total_input_size = 0;
  for (const auto& input : op_info.inputs()) {
    const int64_t input_size =
        CalculateTensorSize(input, found_unknown_shapes);
    total_input_size += input_size;
    VLOG(1) << "Input Size: " << input_size
            << " Total Input Size:" << total_input_size;
  }
  return total_input_size;
}
std::vector<int64_t> OpLevelCostEstimator::CalculateInputTensorSize(
    const OpInfo& op_info, bool* found_unknown_shapes) {
  // Per-input byte sizes, in input order.
  std::vector<int64_t> sizes;
  sizes.reserve(op_info.inputs().size());
  for (const auto& input : op_info.inputs()) {
    sizes.push_back(CalculateTensorSize(input, found_unknown_shapes));
  }
  return sizes;
}
int64_t OpLevelCostEstimator::CalculateLargestInputCount(
    const OpInfo& op_info, bool* found_unknown_shapes) {
  // Maximum element count over all inputs (0 when there are none).
  int64_t largest_input_count = 0;
  for (const auto& input : op_info.inputs()) {
    const int64_t input_count =
        CalculateTensorElementCount(input, found_unknown_shapes);
    largest_input_count = std::max(largest_input_count, input_count);
    VLOG(1) << "Input Count: " << input_count
            << " Largest Input Count:" << largest_input_count;
  }
  return largest_input_count;
}
int64_t OpLevelCostEstimator::CalculateOutputSize(const OpInfo& op_info,
                                                  bool* found_unknown_shapes) {
  // Sums the byte sizes of all output tensors; -1 on int64 overflow.
  int64_t total_output_size = 0;
  for (const auto& output : op_info.outputs()) {
    // Start from the element width and multiply in each (minimum) dimension.
    int64_t output_size = DataTypeSize(BaseType(output.dtype()));
    const auto& original_output_shape = output.shape();
    const int num_dims = std::max(1, original_output_shape.dim_size());
    const std::vector<int64_t> output_shape = MaybeGetMinimumShape(
        original_output_shape, num_dims, found_unknown_shapes);
    for (int64_t dim : output_shape) {
      const int64_t new_output_size =
          MultiplyWithoutOverflow(output_size, dim);
      if (new_output_size < 0) {
        VLOG(1) << "Overflow encountered when estimating cost, multiplying "
                << output_size << " with " << dim;
        return -1;
      }
      output_size = new_output_size;
    }
    total_output_size += output_size;
    VLOG(1) << "Output Size: " << output_size
            << " Total Output Size:" << total_output_size;
  }
  return total_output_size;
}
// Returns the byte size of each output tensor, in output order. A tensor
// whose size overflows int64 is reported as -1, matching the sentinel used
// by CalculateTensorSize() and CalculateOutputSize().
std::vector<int64_t> OpLevelCostEstimator::CalculateOutputTensorSize(
    const OpInfo& op_info, bool* found_unknown_shapes) {
  std::vector<int64_t> output_tensor_size;
  output_tensor_size.reserve(op_info.outputs().size());
  for (const auto& output : op_info.outputs()) {
    DataType dt = output.dtype();
    const auto& original_output_shape = output.shape();
    // Start from the element width and multiply in each (minimum) dimension.
    int64_t output_size = DataTypeSize(BaseType(dt));
    int num_dims = std::max(1, original_output_shape.dim_size());
    auto output_shape = MaybeGetMinimumShape(original_output_shape, num_dims,
                                             found_unknown_shapes);
    for (int64_t dim : output_shape) {
      int64_t new_output_size = MultiplyWithoutOverflow(output_size, dim);
      if (new_output_size < 0) {
        VLOG(1) << "Overflow encountered when estimating cost, multiplying "
                << output_size << " with " << dim;
        // Bug fix: previously the overflowed (negative) partial product was
        // kept and multiplied further; record the -1 sentinel instead, as
        // the sibling size calculators do.
        output_size = -1;
        break;
      }
      output_size = new_output_size;
    }
    output_tensor_size.push_back(output_size);
  }
  return output_tensor_size;
}
absl::Status OpLevelCostEstimator::PredictDefaultNodeCosts(
    const int64_t num_compute_ops, const OpContext& op_context,
    bool* found_unknown_shapes, NodeCosts* node_costs) {
  // Generic cost filler: the given compute-op count plus per-tensor
  // input/output byte accounting derived from the op's shapes.
  const auto& op_info = op_context.op_info;
  node_costs->num_compute_ops = num_compute_ops;
  node_costs->num_output_bytes_accessed =
      CalculateOutputTensorSize(op_info, found_unknown_shapes);
  node_costs->num_input_bytes_accessed =
      CalculateInputTensorSize(op_info, found_unknown_shapes);
  node_costs->max_memory = node_costs->num_total_output_bytes();
  if (*found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
bool HasZeroDim(const OpInfo& op_info) {
  // True when any input tensor has a dimension of size zero.
  for (const auto& input : op_info.inputs()) {
    const auto& shape = input.shape();
    for (int d = 0; d < shape.dim_size(); ++d) {
      if (shape.dim(d).size() == 0) {
        VLOG(1) << "Convolution config has zero dim "
                << op_info.ShortDebugString();
        return true;
      }
    }
  }
  return false;
}
absl::Status OpLevelCostEstimator::PredictConv2D(const OpContext& op_context,
                                                 NodeCosts* node_costs) const {
  // Conv2D: MAC count from the convolution dimensions plus default memory
  // accounting. Zero-sized dims are rejected up front.
  const auto& op_info = op_context.op_info;
  if (HasZeroDim(op_info)) {
    node_costs->num_nodes_with_unknown_shapes = 1;
    return errors::InvalidArgument("Conv2D op includes zero dimension: ",
                                   op_info.ShortDebugString());
  }
  bool found_unknown_shapes = false;
  const int64_t compute_ops =
      CountConv2DOperations(op_info, &found_unknown_shapes);
  return PredictDefaultNodeCosts(compute_ops, op_context, &found_unknown_shapes,
                                 node_costs);
}
absl::Status OpLevelCostEstimator::PredictConv2DBackpropInput(
    const OpContext& op_context, NodeCosts* node_costs) const {
  // Input-gradient conv: same MAC count as the forward pass, plus default
  // memory accounting. Zero-sized dims are rejected up front.
  const auto& op_info = op_context.op_info;
  if (HasZeroDim(op_info)) {
    node_costs->num_nodes_with_unknown_shapes = 1;
    return errors::InvalidArgument(
        "Conv2DBackpropInput op includes zero dimension",
        op_info.ShortDebugString());
  }
  bool found_unknown_shapes = false;
  const int64_t compute_ops = CountConv2DBackpropInputOperations(
      op_info, /*returned_conv_dims=*/nullptr, &found_unknown_shapes);
  return PredictDefaultNodeCosts(compute_ops, op_context, &found_unknown_shapes,
                                 node_costs);
}
absl::Status OpLevelCostEstimator::PredictConv2DBackpropFilter(
    const OpContext& op_context, NodeCosts* node_costs) const {
  // Filter-gradient conv: same MAC count as the forward pass, plus default
  // memory accounting. Zero-sized dims are rejected up front.
  const auto& op_info = op_context.op_info;
  if (HasZeroDim(op_info)) {
    node_costs->num_nodes_with_unknown_shapes = 1;
    return errors::InvalidArgument(
        "Conv2DBackpropFilter op includes zero dimension",
        op_info.ShortDebugString());
  }
  bool found_unknown_shapes = false;
  const int64_t compute_ops = CountConv2DBackpropFilterOperations(
      op_info, /*returned_conv_dims=*/nullptr, &found_unknown_shapes);
  return PredictDefaultNodeCosts(compute_ops, op_context, &found_unknown_shapes,
                                 node_costs);
}
// Estimates FusedConv2DBiasActivation by decomposing it into component ops
// (Conv2D, Mul, BiasAdd, Relu, and optionally the side-input Mul/Add) and
// summing their costs via PredictFusedOp.
// NOTE(review): inputs 0..5 (conv_input, filter, bias, side_input,
// conv_input_scale, side_input_scale) are accessed without an inputs_size()
// check — callers are expected to pass a fully-formed op.
absl::Status OpLevelCostEstimator::PredictFusedConv2DBiasActivation(
    const OpContext& op_context, NodeCosts* node_costs) const {
  std::string data_format = GetDataFormat(op_context.op_info);
  if (data_format != "NCHW" && data_format != "NHWC" &&
      data_format != "NCHW_VECT_C") {
    return errors::InvalidArgument(
        "Unsupported data format (", data_format,
        ") for op: ", op_context.op_info.ShortDebugString());
  }
  std::string filter_format = GetFilterFormat(op_context.op_info);
  if (filter_format != "HWIO" && filter_format != "OIHW" &&
      filter_format != "OIHW_VECT_I") {
    return errors::InvalidArgument(
        "Unsupported filter format (", filter_format,
        ") for op: ", op_context.op_info.ShortDebugString());
  }
  auto& conv_input = op_context.op_info.inputs(0);
  auto& filter = op_context.op_info.inputs(1);
  auto& side_input = op_context.op_info.inputs(3);
  auto& conv_input_scale = op_context.op_info.inputs(4);
  auto& side_input_scale = op_context.op_info.inputs(5);
  bool found_unknown_shapes = false;
  auto dims = ConvolutionDimensionsFromInputs(
      conv_input.shape(), filter.shape(), op_context.op_info,
      &found_unknown_shapes);
  // Synthesize the conv output tensor in the op's data format; used as the
  // input/output of every fused component below.
  OpInfo::TensorProperties output;
  if (data_format == "NCHW" || data_format == "NCHW_VECT_C") {
    output = DescribeTensor(DT_FLOAT, {dims.batch, dims.oz, dims.oy, dims.ox});
  } else if (data_format == "NHWC") {
    output = DescribeTensor(DT_FLOAT, {dims.batch, dims.oy, dims.ox, dims.oz});
  }
  // Component ops charged for the fusion. The BiasAdd child uses {output,
  // output} as its inputs — the full output tensor stands in for the bias
  // operand here.
  std::vector<OpContext> component_ops = {
      FusedChildContext(op_context, "Conv2D", output, {conv_input, filter}),
      FusedChildContext(op_context, "Mul", output, {output, conv_input_scale}),
      FusedChildContext(
          op_context, "BiasAdd", output,
          {output, output}),
      FusedChildContext(op_context, "Relu", output, {output})};
  // A non-empty side input adds a scale (Mul) and an accumulate (Add); the
  // Add child likewise uses {output, output} as a stand-in.
  if (side_input.shape().dim_size() > 0) {
    component_ops.push_back(FusedChildContext(op_context, "Mul", side_input,
                                              {side_input, side_input_scale}));
    component_ops.push_back(FusedChildContext(
        op_context, "Add", output,
        {output, output}));
  }
  // Replace the op's declared outputs with the synthesized conv output.
  auto op_context_with_output = op_context;
  op_context_with_output.op_info.mutable_outputs()->Clear();
  *op_context_with_output.op_info.mutable_outputs()->Add() = output;
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return PredictFusedOp(op_context_with_output, component_ops, node_costs);
}
absl::Status OpLevelCostEstimator::PredictMatMul(const OpContext& op_context,
                                                 NodeCosts* node_costs) const {
  // MatMul: 2*M*N*K FLOPs plus default memory accounting.
  bool found_unknown_shapes = false;
  const int64_t compute_ops =
      CountMatMulOperations(op_context.op_info, &found_unknown_shapes);
  return PredictDefaultNodeCosts(compute_ops, op_context, &found_unknown_shapes,
                                 node_costs);
}
absl::Status OpLevelCostEstimator::PredictEinsum(const OpContext& op_context,
                                                 NodeCosts* node_costs) const {
  // Einsum is costed by rewriting it as a BatchMatMul when possible;
  // otherwise it falls back to the unknown-op estimate.
  const auto& op_info = op_context.op_info;
  if (op_info.attr().find("equation") == op_info.attr().end()) {
    return errors::InvalidArgument("Einsum op doesn't have equation attr: ",
                                   op_info.ShortDebugString());
  }
  bool found_unknown_shapes = false;
  OpContext batch_matmul_op_context;
  const bool converted = GenerateBatchMatmulContextFromEinsum(
      op_context, &batch_matmul_op_context, &found_unknown_shapes);
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  if (!converted) {
    return PredictCostOfAnUnknownOp(op_context, node_costs);
  }
  return PredictNodeCosts(batch_matmul_op_context, node_costs);
}
absl::Status OpLevelCostEstimator::PredictSparseTensorDenseMatMul(
    const OpContext& op_context, NodeCosts* node_costs) const {
  // Inputs: (0) a_indices, (1) a_values, (2) a_shape, (3) b (dense).
  // Each nonzero of A multiplies a full row of B, so MACs = nnz(A) * cols(B).
  const auto& op_info = op_context.op_info;
  bool found_unknown_shapes = false;
  const int64_t num_nonzeros_in_a =
      CalculateTensorElementCount(op_info.inputs(1), &found_unknown_shapes);
  const auto& b_matrix = op_info.inputs(3);
  auto b_matrix_shape =
      MaybeGetMinimumShape(b_matrix.shape(), 2, &found_unknown_shapes);
  const int64_t n_dim = b_matrix_shape[1];
  const int64_t op_count = kOpsPerMac * num_nonzeros_in_a * n_dim;
  // Charge each input separately; only the rows of B actually touched by
  // nonzeros are counted as read.
  const int64_t a_indices_input_size =
      CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  const int64_t a_values_input_size =
      CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
  const int64_t a_shape_input_size =
      CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
  const int64_t b_input_size =
      num_nonzeros_in_a * n_dim * DataTypeSize(BaseType(b_matrix.dtype()));
  const int64_t output_size =
      CalculateOutputSize(op_info, &found_unknown_shapes);
  node_costs->num_compute_ops = op_count;
  node_costs->num_input_bytes_accessed = {a_indices_input_size,
                                          a_values_input_size,
                                          a_shape_input_size, b_input_size};
  node_costs->num_output_bytes_accessed = {output_size};
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictNoOp(const OpContext& op_context,
                                               NodeCosts* node_costs) const {
  // NoOp performs no work; node_costs keeps its zero defaults.
  VLOG(1) << "Op:" << op_context.op_info.op() << " Execution Time 0 (ns)";
  return absl::OkStatus();
}
absl::Status OpLevelCostEstimator::PredictPureMemoryOp(
    const OpContext& op_context, NodeCosts* node_costs) const {
  // Pure-memory op: zero compute ops; only the default I/O accounting
  // applies, and the node is flagged as memory-bound.
  node_costs->num_nodes_with_pure_memory_op = 1;
  bool found_unknown_shapes = false;
  return PredictDefaultNodeCosts(0, op_context, &found_unknown_shapes,
                                 node_costs);
}
absl::Status OpLevelCostEstimator::PredictIdentity(
    const OpContext& op_context, NodeCosts* node_costs) const {
  // Identity-like ops are charged the minimum compute cost and no memory
  // traffic; max_memory still reflects the pass-through output size.
  const auto& op_info = op_context.op_info;
  VLOG(1) << "Op:" << op_info.op() << " Minimum cost for Identity";
  node_costs->minimum_cost_op = true;
  node_costs->num_compute_ops = kMinComputeOp;
  node_costs->num_input_bytes_accessed = {0};
  node_costs->num_output_bytes_accessed = {0};
  bool unknown_shapes = false;
  node_costs->max_memory = CalculateOutputSize(op_info, &unknown_shapes);
  if (unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// Variables hold persistent state: minimal compute, no I/O bytes, and the
// variable's footprint is recorded as persistent memory.
absl::Status OpLevelCostEstimator::PredictVariable(
    const OpContext& op_context, NodeCosts* node_costs) const {
  VLOG(1) << "Op:" << op_context.op_info.op() << " Minimum cost for Variable";
  node_costs->minimum_cost_op = true;
  node_costs->num_compute_ops = kMinComputeOp;
  node_costs->num_input_bytes_accessed = {0};
  node_costs->num_output_bytes_accessed = {0};
  bool unknown_shapes = false;
  node_costs->persistent_memory =
      CalculateOutputSize(op_context.op_info, &unknown_shapes);
  if (unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// BatchMatMul: compute cost is the batched MAC count; memory cost comes
// from the default node-cost model.
absl::Status OpLevelCostEstimator::PredictBatchMatMul(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool unknown_shapes = false;
  const int64_t ops =
      CountBatchMatMulOperations(op_context.op_info, &unknown_shapes);
  return PredictDefaultNodeCosts(ops, op_context, &unknown_shapes, node_costs);
}
// Metadata ops (Shape/Size/Rank-style): negligible work; charge the minimum
// compute cost, zero I/O bytes, and record the output size as max_memory.
absl::Status OpLevelCostEstimator::PredictMetadata(
    const OpContext& op_context, NodeCosts* node_costs) const {
  node_costs->minimum_cost_op = true;
  node_costs->num_compute_ops = kMinComputeOp;
  node_costs->num_input_bytes_accessed = {0};
  node_costs->num_output_bytes_accessed = {0};
  bool unknown_shapes = false;
  node_costs->max_memory =
      CalculateOutputSize(op_context.op_info, &unknown_shapes);
  if (unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// Gather/Slice/StridedSlice: one "op" per output element. The portion of
// input 0 actually read is assumed equal in size to the output; auxiliary
// tensors (indices/begin/end/strides) are accounted per-input.
absl::Status OpLevelCostEstimator::PredictGatherOrSlice(
    const OpContext& op_context, NodeCosts* node_costs) const {
  const auto& op_info = op_context.op_info;
  // Slice takes (input, begin, size); Gather takes (params, indices).
  const int inputs_needed = op_info.op() == "Slice" ? 3 : 2;
  if (op_info.outputs_size() == 0 || op_info.inputs_size() < inputs_needed) {
    return errors::InvalidArgument(
        op_info.op(),
        " Op doesn't have valid input / output: ", op_info.ShortDebugString());
  }
  bool unknown_shapes = false;
  const int64_t op_count =
      CalculateTensorElementCount(op_info.outputs(0), &unknown_shapes);
  node_costs->num_compute_ops = op_count;
  const int64_t output_size = CalculateOutputSize(op_info, &unknown_shapes);
  node_costs->num_output_bytes_accessed = {output_size};
  node_costs->num_input_bytes_accessed.reserve(op_info.inputs().size());
  // Only the gathered/sliced part of input 0 is read — byte-for-byte the
  // same size as the output.
  int64_t input_size = output_size;
  node_costs->num_input_bytes_accessed.push_back(input_size);
  int begin_input_index = 1;
  int end_input_index;
  if (op_info.op() == "Slice") {
    // begin and size tensors.
    end_input_index = 3;
  } else if (op_info.op() == "StridedSlice") {
    // begin, end, and strides tensors.
    end_input_index = 4;
  } else {
    // Gather: indices tensor only.
    end_input_index = 2;
  }
  for (int i = begin_input_index; i < end_input_index; ++i) {
    // NOTE(review): this stores the element *count* of the auxiliary tensors
    // in a bytes-accessed field — presumably acceptable since these tensors
    // are tiny, but worth confirming it is intentional.
    node_costs->num_input_bytes_accessed.push_back(
        CalculateTensorElementCount(op_info.inputs(i), &unknown_shapes));
  }
  if (unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// Scatter-family ops: cost scales with the number of update indices times
// the size of one ref-tensor slice per index.
absl::Status OpLevelCostEstimator::PredictScatter(const OpContext& op_context,
                                                  NodeCosts* node_costs) const {
  const auto& op_info = op_context.op_info;
  bool found_unknown_shapes = false;
  // inputs: 0 = ref, 1 = indices, 2 = updates.
  const int64_t num_indices =
      CalculateTensorElementCount(op_info.inputs(1), &found_unknown_shapes);
  int64_t num_elems_in_ref_per_index = 1;
  std::vector<int64_t> ref_tensor_shape = MaybeGetMinimumShape(
      op_info.inputs(0).shape(), op_info.inputs(0).shape().dim_size(),
      &found_unknown_shapes);
  // Skip dim 0 (the indexed dimension); the remaining dims form one slice.
  for (int i = 1; i < ref_tensor_shape.size(); ++i) {
    num_elems_in_ref_per_index *= ref_tensor_shape[i];
  }
  const int64_t op_count = num_indices * num_elems_in_ref_per_index;
  node_costs->num_compute_ops = op_count;
  // Only the touched slices of ref are read, not the whole tensor.
  int64_t ref_input_size =
      op_count * DataTypeSize(BaseType(op_info.inputs(0).dtype()));
  int64_t indices_input_size =
      CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
  int64_t updates_input_size =
      CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
  node_costs->num_input_bytes_accessed = {ref_input_size, indices_input_size,
                                          updates_input_size};
  // Likewise only the written slices count as output bytes.
  int64_t output_size =
      op_count * DataTypeSize(BaseType(op_info.outputs(0).dtype()));
  node_costs->num_output_bytes_accessed = {output_size};
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// Fused op: memory cost comes from the outer op's own I/O (via
// PredictDefaultNodeCosts with 0 compute ops), while compute ops are summed
// over the independently-costed fused components.
absl::Status OpLevelCostEstimator::PredictFusedOp(
    const OpContext& op_context,
    const std::vector<OpContext>& fused_op_contexts,
    NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  absl::Status s =
      PredictDefaultNodeCosts(0, op_context, &found_unknown_shapes, node_costs);
  for (auto& fused_op : fused_op_contexts) {
    NodeCosts fused_node_costs;
    s.Update(PredictNodeCosts(fused_op, &fused_node_costs));
    node_costs->num_compute_ops += fused_node_costs.num_compute_ops;
    node_costs->inaccurate |= fused_node_costs.inaccurate;
    // |= (not +=): these counters act as "at least one such node" flags.
    node_costs->num_nodes_with_unknown_shapes |=
        fused_node_costs.num_nodes_with_unknown_shapes;
    node_costs->num_nodes_with_unknown_op_type |=
        fused_node_costs.num_nodes_with_unknown_op_type;
    node_costs->num_nodes_with_pure_memory_op |=
        fused_node_costs.num_nodes_with_pure_memory_op;
  }
  // NOTE(review): any error accumulated in `s` is discarded here; confirm
  // that unconditionally returning OkStatus is intended.
  return absl::OkStatus();
}
// Clones the parent context, then overwrites the op name and the I/O lists
// so the child describes a single component of a fused op.
OpContext OpLevelCostEstimator::FusedChildContext(
    const OpContext& parent, const std::string& op_name,
    const OpInfo::TensorProperties& output,
    const std::vector<OpInfo::TensorProperties>& inputs) {
  OpContext child;
  child.name = op_name;
  child.device_name = parent.device_name;
  child.op_info = parent.op_info;
  child.op_info.set_op(op_name);
  // Replace the inherited inputs/outputs with the child's own.
  child.op_info.clear_inputs();
  for (const auto& in : inputs) {
    *child.op_info.add_inputs() = in;
  }
  child.op_info.clear_outputs();
  *child.op_info.add_outputs() = output;
  return child;
}
// Builds a TensorProperties with the given dtype and a fully-known shape
// listing `dims` in order.
OpInfo::TensorProperties OpLevelCostEstimator::DescribeTensor(
    DataType type, const std::vector<int64_t>& dims) {
  OpInfo::TensorProperties props;
  props.set_dtype(type);
  auto* shape = props.mutable_shape();
  for (int64_t d : dims) {
    shape->add_dim()->set_size(d);
  }
  return props;
}
// Derives pooling-style convolution dimensions (batch, spatial sizes,
// kernel, strides, padding and output sizes) from the image shape and the
// op's attributes. Fails with InvalidArgument on a zero stride.
absl::StatusOr<OpLevelCostEstimator::ConvolutionDimensions>
OpLevelCostEstimator::OpDimensionsFromInputs(
    const TensorShapeProto& original_image_shape, const OpInfo& op_info,
    bool* found_unknown_shapes) {
  VLOG(2) << "op features: " << op_info.DebugString();
  VLOG(2) << "Original image shape: " << original_image_shape.DebugString();
  *found_unknown_shapes = false;
  // Unknown dims are replaced with minimal sizes so the arithmetic below
  // stays valid; the flag records the inaccuracy.
  auto image_shape =
      MaybeGetMinimumShape(original_image_shape, 4, found_unknown_shapes);
  VLOG(2) << "Image shape: " << absl::StrJoin(image_shape, ", ");
  int x_index, y_index, channel_index;
  const std::string& data_format = GetDataFormat(op_info);
  if (data_format == "NCHW") {
    channel_index = 1;
    y_index = 2;
    x_index = 3;
  } else {
    // NHWC layout (the default).
    y_index = 1;
    x_index = 2;
    channel_index = 3;
  }
  int64_t batch = image_shape[0];
  int64_t ix = image_shape[x_index];
  int64_t iy = image_shape[y_index];
  int64_t iz = image_shape[channel_index];
  std::vector<int64_t> ksize = GetKernelSize(op_info);
  int64_t kx = ksize[x_index];
  int64_t ky = ksize[y_index];
  // Pooling acts per-channel, so the kernel depth equals the input depth.
  int64_t kz = iz;
  std::vector<int64_t> strides = GetStrides(op_info);
  int64_t sx = strides[x_index];
  int64_t sy = strides[y_index];
  if (sx == 0 || sy == 0) {
    return errors::InvalidArgument(
        "Stride must be > 0 for Height and Width, but got (", sy, ", ", sx,
        ")");
  }
  const auto padding = GetPadding(op_info);
  int64_t ox = GetOutputSize(ix, kx, sx, padding);
  int64_t oy = GetOutputSize(iy, ky, sy, padding);
  // Channel count is unchanged by pooling.
  int64_t oz = iz;
  OpLevelCostEstimator::ConvolutionDimensions conv_dims = {
      batch, ix, iy, iz, kx, ky, kz, oz, ox, oy, sx, sy, padding};
  return conv_dims;
}
// MaxPool: kx*ky - 1 comparisons per output element (one copy for a 1x1
// kernel).
absl::Status OpLevelCostEstimator::PredictMaxPool(const OpContext& op_context,
                                                  NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
                      OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
                                             &found_unknown_shapes));
  // 1x1 kernel: one copy per output; otherwise kx*ky - 1 max comparisons.
  int per_output_ops = dims.kx * dims.ky == 1 ? 1 : dims.kx * dims.ky - 1;
  int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * per_output_ops;
  node_costs->num_compute_ops = ops;
  int64_t input_size = 0;
  if (dims.ky >= dims.sy) {
    // Windows overlap (or exactly tile) vertically: every input byte is read.
    input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  } else {
    // Vertical stride skips rows: only ky rows per output row are touched.
    const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
    input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
  }
  node_costs->num_input_bytes_accessed = {input_size};
  const int64_t output_size =
      CalculateOutputSize(op_info, &found_unknown_shapes);
  node_costs->num_output_bytes_accessed = {output_size};
  node_costs->max_memory = output_size;
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// MaxPoolGrad: routes each output gradient back to its window's argmax.
// Inputs: 0 = x (original input), 1 = y (pooled output), 2 = y_grad.
absl::Status OpLevelCostEstimator::PredictMaxPoolGrad(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  if (op_info.inputs_size() < 3) {
    return errors::InvalidArgument("MaxPoolGrad op has invalid inputs: ",
                                   op_info.ShortDebugString());
  }
  TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
                      OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
                                             &found_unknown_shapes));
  int64_t ops = 0;
  if (dims.kx == 1 && dims.ky == 1) {
    // 1x1 kernel: gradient passes straight through, one op per input elem.
    ops = dims.batch * dims.ix * dims.iy * dims.iz;
  } else if (dims.kx <= dims.sx && dims.ky <= dims.sy) {
    // Non-overlapping windows: init all inputs plus kx*ky - 1 comparisons
    // per output to locate the max.
    ops = dims.batch * dims.iz *
          (dims.ox * dims.oy * (dims.kx * dims.ky - 1) + dims.ix * dims.iy);
  } else {
    // Overlapping windows: input positions may be updated more than once.
    ops = dims.batch * dims.iz *
          (dims.ox * dims.oy * (dims.kx * dims.ky - 1) + dims.ix * dims.iy * 2);
  }
  node_costs->num_compute_ops = ops;
  const int64_t input0_size =
      CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  const int64_t input2_size =
      CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
  // inputs(1) (the pooled output y) is not read by this estimate.
  node_costs->num_input_bytes_accessed = {input0_size, 0, input2_size};
  // The gradient w.r.t. x has the same size as x itself.
  const int64_t output_size =
      CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  node_costs->num_output_bytes_accessed = {output_size};
  node_costs->max_memory = output_size;
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// AssignVariableOp and its arithmetic variants (e.g. AssignAdd/AssignSub):
// a plain assign is pure data movement (0 compute ops); the variants do one
// elementwise op per assigned element.
absl::Status OpLevelCostEstimator::PredictAssignVariableOps(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  // inputs: 0 = resource handle, 1 = value being assigned.
  if (op_info.inputs_size() != 2) {
    return errors::InvalidArgument("AssignVariable op has invalid input: ",
                                   op_info.ShortDebugString());
  }
  const int64_t ops = op_info.op() == kAssignVariableOp
                          ? 0
                          : CalculateTensorElementCount(op_info.inputs(1),
                                                        &found_unknown_shapes);
  node_costs->num_compute_ops = ops;
  const int64_t input_size = CalculateInputSize(op_info, &found_unknown_shapes);
  node_costs->num_input_bytes_accessed = {input_size};
  // The write into the variable is not charged as op output bytes.
  node_costs->num_output_bytes_accessed = {0};
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// AvgPool: kx*ky accumulation ops per output element.
absl::Status OpLevelCostEstimator::PredictAvgPool(const OpContext& op_context,
                                                  NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
                      OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
                                             &found_unknown_shapes));
  int64_t ops = dims.batch * dims.ox * dims.oy * dims.oz * dims.kx * dims.ky;
  node_costs->num_compute_ops = ops;
  int64_t input_size;
  if (dims.ky >= dims.sy) {
    // Windows overlap (or exactly tile) vertically: every input byte is read.
    input_size = CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  } else {
    // Vertical stride skips rows: only ky rows per output row are touched.
    const auto data_size = DataTypeSize(BaseType(op_info.inputs(0).dtype()));
    input_size = data_size * dims.batch * dims.ix * dims.ky * dims.oy * dims.iz;
  }
  node_costs->num_input_bytes_accessed = {input_size};
  const int64_t output_size =
      CalculateOutputSize(op_info, &found_unknown_shapes);
  node_costs->num_output_bytes_accessed = {output_size};
  node_costs->max_memory = output_size;
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// AvgPoolGrad: spreads each output gradient uniformly over its window.
// The input (x) shape is recovered from inputs(0) when it is a constant
// shape tensor, else from the output shape, else defaults to [1,1,1,1].
absl::Status OpLevelCostEstimator::PredictAvgPoolGrad(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  bool shape_found = false;
  TensorShapeProto x_shape;
  if (op_info.inputs_size() >= 1 && op_info.inputs(0).has_value()) {
    const TensorProto& value = op_info.inputs(0).value();
    shape_found = GetTensorShapeProtoFromTensorProto(value, &x_shape);
  }
  if (!shape_found && op_info.outputs_size() > 0) {
    // The op's output (x_grad) has the same shape as x.
    x_shape = op_info.outputs(0).shape();
    shape_found = true;
  }
  if (!shape_found) {
    // Shape unknown: assume the minimal 4-D shape and mark inaccurate.
    x_shape.Clear();
    for (int i = 0; i < 4; ++i) {
      x_shape.add_dim()->set_size(1);
    }
    found_unknown_shapes = true;
  }
  TF_ASSIGN_OR_RETURN(
      ConvolutionDimensions dims,
      OpDimensionsFromInputs(x_shape, op_info, &found_unknown_shapes));
  int64_t ops = 0;
  if (dims.kx <= dims.sx && dims.ky <= dims.sy) {
    // Non-overlapping windows: one init per input + one scaled add per
    // output.
    ops = dims.batch * dims.iz * (dims.ix * dims.iy + dims.ox * dims.oy);
  } else {
    // Overlapping windows: each output contributes to kx*ky input positions.
    ops = dims.batch * dims.iz *
          (dims.ix * dims.iy + dims.ox * dims.oy * (dims.kx * dims.ky + 1));
  }
  auto s = PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
                                   node_costs);
  node_costs->max_memory = node_costs->num_total_output_bytes();
  return s;
}
// FusedBatchNorm: training computes batch statistics (~4 ops per element
// per channel plus a per-channel rsqrt); inference applies precomputed
// scale/offset (2 ops per element).
absl::Status OpLevelCostEstimator::PredictFusedBatchNorm(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
                      OpDimensionsFromInputs(op_info.inputs(0).shape(), op_info,
                                             &found_unknown_shapes));
  const bool is_training = IsTraining(op_info);
  int64_t ops = 0;
  const auto rsqrt_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_rsqrt_op<float>>::Cost;
  if (is_training) {
    ops = dims.iz * (dims.batch * dims.ix * dims.iy * 4 + 6 + rsqrt_cost);
  } else {
    ops = dims.batch * dims.ix * dims.iy * dims.iz * 2;
  }
  node_costs->num_compute_ops = ops;
  const int64_t size_nhwc =
      CalculateTensorSize(op_info.inputs(0), &found_unknown_shapes);
  const int64_t size_c =
      CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
  if (is_training) {
    // Training reads x plus two per-channel tensors, writes one x-sized and
    // four per-channel outputs, and re-reads x for the normalize pass.
    node_costs->num_input_bytes_accessed = {size_nhwc, size_c, size_c};
    node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c, size_c,
                                             size_c};
    node_costs->internal_read_bytes = size_nhwc;
  } else {
    // Inference reads x plus four per-channel tensors and writes only y.
    node_costs->num_input_bytes_accessed = {size_nhwc, size_c, size_c, size_c,
                                            size_c};
    node_costs->num_output_bytes_accessed = {size_nhwc};
  }
  node_costs->max_memory = node_costs->num_total_output_bytes();
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// FusedBatchNormGrad: ~11 ops per element per channel plus a per-channel
// rsqrt. Dimensions are taken from inputs(1) (x); inputs(2) is per-channel.
absl::Status OpLevelCostEstimator::PredictFusedBatchNormGrad(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto& op_info = op_context.op_info;
  TF_ASSIGN_OR_RETURN(ConvolutionDimensions dims,
                      OpDimensionsFromInputs(op_info.inputs(1).shape(), op_info,
                                             &found_unknown_shapes));
  int64_t ops = 0;
  const auto rsqrt_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_rsqrt_op<float>>::Cost;
  ops = dims.iz * (dims.batch * dims.ix * dims.iy * 11 + 5 + rsqrt_cost);
  node_costs->num_compute_ops = ops;
  const int64_t size_nhwc =
      CalculateTensorSize(op_info.inputs(1), &found_unknown_shapes);
  const int64_t size_c =
      CalculateTensorSize(op_info.inputs(2), &found_unknown_shapes);
  // Two x-sized and two per-channel inputs read; one x-sized and two
  // per-channel outputs written; plus an internal re-read of x-sized data.
  node_costs->num_input_bytes_accessed = {size_nhwc, size_nhwc, size_c, size_c};
  node_costs->num_output_bytes_accessed = {size_nhwc, size_c, size_c};
  node_costs->internal_read_bytes = size_nhwc;
  node_costs->max_memory = node_costs->num_total_output_bytes();
  if (found_unknown_shapes) {
    node_costs->inaccurate = true;
    node_costs->num_nodes_with_unknown_shapes = 1;
  }
  return absl::OkStatus();
}
// N-ary elementwise ops (e.g. AddN): (#inputs - 1) accumulations over the
// broadcasted output element count.
absl::Status OpLevelCostEstimator::PredictNaryOp(const OpContext& op_context,
                                                 NodeCosts* node_costs) const {
  const auto& op_info = op_context.op_info;
  bool found_unknown_shapes = false;
  // Start from the largest input, then take the max with the output element
  // count and the broadcast ("cwise") count to cover broadcasting cases.
  int64_t op_count = CalculateLargestInputCount(op_info, &found_unknown_shapes);
  if (op_info.outputs_size() > 0) {
    op_count = std::max(
        op_count,
        CalculateTensorElementCount(op_info.outputs(0), &found_unknown_shapes));
  }
  if (op_info.inputs_size() >= 2) {
    op_count = std::max(op_count, CwiseOutputElementCount(op_info));
  }
  // Summing n inputs takes n - 1 additions per element.
  op_count *= op_info.inputs_size() - 1;
  const auto sum_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_sum_op<float>>::Cost;
  return PredictDefaultNodeCosts(op_count * sum_cost, op_context,
                                 &found_unknown_shapes, node_costs);
}
// Op count for Softmax over inputs(0): per element, one exp + one running
// sum + one multiply by the reciprocal; per row (dim 0), one reciprocal of
// the row sum.
int64_t OpLevelCostEstimator::GetSoftmaxComputeOps(
    const OpContext& op_context) const {
  // Unknown-shape flag is local only; callers of this helper do not need it.
  bool found_unknown_shapes = false;
  const int64_t logits_size = CalculateTensorElementCount(
      op_context.op_info.inputs(0), &found_unknown_shapes);
  TensorShapeProto logits_shape = op_context.op_info.inputs(0).shape();
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
  int64_t ops =
      (EIGEN_COST(scalar_exp_op<float>) + EIGEN_COST(scalar_sum_op<float>) +
       EIGEN_COST(scalar_product_op<float>)) *
          logits_size +
      EIGEN_COST(scalar_inverse_op<float>) * logits_shape.dim(0).size();
#undef EIGEN_COST
  return ops;
}
// Softmax: validate the logits shape (the op-count formula needs dim 0),
// then delegate compute cost to GetSoftmaxComputeOps.
absl::Status OpLevelCostEstimator::PredictSoftmax(const OpContext& op_context,
                                                  NodeCosts* node_costs) const {
  const TensorShapeProto& logits_shape = op_context.op_info.inputs(0).shape();
  if (logits_shape.unknown_rank() || logits_shape.dim_size() == 0) {
    return errors::InvalidArgument("Softmax op has invalid input: ",
                                   op_context.op_info.ShortDebugString());
  }
  const int64_t ops = GetSoftmaxComputeOps(op_context);
  bool unknown_shapes = false;
  return PredictDefaultNodeCosts(ops, op_context, &unknown_shapes, node_costs);
}
// ResizeBilinear: interpolation weights are computed once per output row
// and column; each output element then blends four neighbors.
absl::Status OpLevelCostEstimator::PredictResizeBilinear(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  if (op_context.op_info.outputs().empty() ||
      op_context.op_info.inputs().empty()) {
    return errors::InvalidArgument(
        "ResizeBilinear op has invalid input / output ",
        op_context.op_info.ShortDebugString());
  }
  const int64_t output_elements = CalculateTensorElementCount(
      op_context.op_info.outputs(0), &found_unknown_shapes);
  const auto half_pixel_centers =
      op_context.op_info.attr().find("half_pixel_centers");
  bool use_half_pixel_centers = false;
  if (half_pixel_centers == op_context.op_info.attr().end()) {
    // The attr changes the weight formula; without it, fall back to the
    // unknown-op estimate.
    LOG(WARNING) << "half_pixel_centers attr not set for ResizeBilinear.";
    return PredictCostOfAnUnknownOp(op_context, node_costs);
  } else {
    use_half_pixel_centers = half_pixel_centers->second.b();
  }
  int64_t ops = 0;
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
  const auto sub_cost_float = EIGEN_COST(scalar_difference_op<float>);
  const auto sub_cost_int = EIGEN_COST(scalar_difference_op<int64_t>);
  const auto add_cost = EIGEN_COST(scalar_sum_op<float>);
  const auto mul_cost = EIGEN_COST(scalar_product_op<float>);
  const auto floor_cost = EIGEN_COST(scalar_floor_op<float>);
  const auto max_cost = EIGEN_COST(scalar_max_op<int64_t>);
  const auto min_cost = EIGEN_COST(scalar_min_op<int64_t>);
  const auto cast_to_int_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_cast_op<float, int64_t>>::Cost;
  const auto cast_to_float_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_cast_op<int64_t, float>>::Cost;
  const auto ceil_cost = EIGEN_COST(scalar_ceil_op<float>);
#undef EIGEN_COST
  const std::vector<int64_t> output_shape = MaybeGetMinimumShape(
      op_context.op_info.outputs(0).shape(), 4, &found_unknown_shapes);
  const int64_t output_height = output_shape[1];
  const int64_t output_width = output_shape[2];
  // Per-row / per-column weight computation (not per element).
  int64_t interp_weight_cost = floor_cost + max_cost + min_cost +
                               sub_cost_float + sub_cost_int + ceil_cost +
                               cast_to_int_cost * 2;
  if (use_half_pixel_centers) {
    interp_weight_cost +=
        add_cost + mul_cost + sub_cost_float + cast_to_float_cost;
  } else {
    interp_weight_cost += cast_to_float_cost + mul_cost;
  }
  ops += interp_weight_cost * (output_height + output_width);
  // Bilinear blend of four neighbors for each output element.
  ops += (add_cost * 3 + sub_cost_float * 3 + mul_cost * 3) * output_elements;
  return PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
                                 node_costs);
}
// CropAndResize: per-box coordinate transforms, per-row interpolation
// setup, and per-element bilinear blending or nearest-neighbor rounding.
absl::Status OpLevelCostEstimator::PredictCropAndResize(
    const OpContext& op_context, NodeCosts* node_costs) const {
  bool found_unknown_shapes = false;
  const auto method = op_context.op_info.attr().find("method");
  std::optional<bool> use_bilinear_interp;
  if (method == op_context.op_info.attr().end() ||
      method->second.s() == "bilinear") {
    // A missing attr means bilinear (the op's default).
    use_bilinear_interp = true;
  } else if (method->second.s() == "nearest") {
    use_bilinear_interp = false;
  }
  if (!use_bilinear_interp.has_value() ||
      op_context.op_info.outputs().empty()) {
    LOG(WARNING) << "method attr in CropAndResize invalid; expected bilinear "
                    "or nearest.";
    return PredictCostOfAnUnknownOp(op_context, node_costs);
  }
  // inputs(1) is the boxes tensor; dim 0 is the number of boxes.
  const int64_t num_boxes = op_context.op_info.inputs(1).shape().dim(0).size();
  const std::vector<int64_t> crop_shape = MaybeGetMinimumShape(
      op_context.op_info.outputs(0).shape(), 4, &found_unknown_shapes);
  const int64_t crop_height = crop_shape[1];
  const int64_t crop_width = crop_shape[2];
  const int64_t output_elements = CalculateTensorElementCount(
      op_context.op_info.outputs(0), &found_unknown_shapes);
#define EIGEN_COST(X) Eigen::internal::functor_traits<Eigen::internal::X>::Cost
  const auto sub_cost = EIGEN_COST(scalar_difference_op<float>);
  const auto add_cost = EIGEN_COST(scalar_sum_op<float>);
  const auto mul_cost = EIGEN_COST(scalar_product_op<float>);
  auto div_cost = EIGEN_COST(scalar_div_cost<float>);
  const auto floor_cost = EIGEN_COST(scalar_floor_op<float>);
  const auto ceil_cost = EIGEN_COST(scalar_ceil_op<float>);
  auto round_cost = EIGEN_COST(scalar_round_op<float>);
  const auto cast_to_float_cost = Eigen::internal::functor_traits<
      Eigen::internal::scalar_cast_op<int64_t, float>>::Cost;
#undef EIGEN_COST
  // Guard the three size products against int64 overflow before using them.
  int64_t crop_area = MultiplyWithoutOverflow(crop_height, crop_width);
  if (crop_area < 0)
    return errors::InvalidArgument("Cannot estimate cost, multiplying ",
                                   crop_height, " with ", crop_width,
                                   " would overflow");
  int64_t crop_volume = MultiplyWithoutOverflow(crop_area, num_boxes);
  if (crop_volume < 0)
    return errors::InvalidArgument("Cannot estimate cost, multiplying ",
                                   crop_area, " with ", num_boxes,
                                   " would overflow");
  int64_t crop_depth = MultiplyWithoutOverflow(crop_height, num_boxes);
  if (crop_depth < 0)
    return errors::InvalidArgument("Cannot estimate cost, multiplying ",
                                   crop_height, " with ", num_boxes,
                                   " would overflow");
  // Per-box coordinate scaling.
  int64_t ops = (sub_cost * 6 + mul_cost * 2 + div_cost * 2) * num_boxes;
  // Per-row and per-element source-coordinate computation.
  ops += (mul_cost * 2 + sub_cost + add_cost) * crop_depth;
  ops += (mul_cost * 2 + sub_cost + add_cost) * crop_volume;
  if (*use_bilinear_interp) {
    ops += (floor_cost + ceil_cost + sub_cost) * crop_depth;
    ops += (floor_cost + ceil_cost + sub_cost) * crop_volume;
    // Four-neighbor blend per output element.
    ops +=
        (cast_to_float_cost * 4 + add_cost * 3 + sub_cost * 3 + mul_cost * 3) *
        output_elements;
  } else {
    // Nearest neighbor: round both source coordinates, then copy.
    ops += round_cost * 2 * crop_volume;
    ops += cast_to_float_cost * output_elements;
  }
  return PredictDefaultNodeCosts(ops, op_context, &found_unknown_shapes,
                                 node_costs);
}
}
} | #include "tensorflow/core/grappler/costs/op_level_cost_estimator.h"
#include <unordered_set>
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/device_properties.pb.h"
namespace tensorflow {
namespace grappler {
using ::testing::ElementsAreArray;
namespace {
// Test double exposing knobs the production estimator keeps fixed: an
// injectable DeviceInfo and a toggle for compute/memory overlap.
class TestOpLevelCostEstimator : public OpLevelCostEstimator {
 public:
  TestOpLevelCostEstimator() {
    compute_memory_overlap_ = true;
    device_info_ = DeviceInfo();
  }
  ~TestOpLevelCostEstimator() override {}
  // Replaces the DeviceInfo returned for any device.
  void SetDeviceInfo(const DeviceInfo& device_info) {
    device_info_ = device_info;
  }
  void SetComputeMemoryOverlap(bool value) { compute_memory_overlap_ = value; }
 protected:
  // Ignores `device` and returns the injected info.
  DeviceInfo GetDeviceInfo(const DeviceProperties& device) const override {
    return device_info_;
  }
  DeviceInfo device_info_;
};
void ExpectZeroCost(const Costs& cost) {
EXPECT_TRUE(cost.inaccurate);
EXPECT_EQ(cost.compute_time, Costs::Duration::zero());
EXPECT_EQ(cost.execution_time, Costs::Duration::zero());
EXPECT_EQ(cost.memory_time, Costs::Duration::zero());
}
// Appends a rows x columns DT_FLOAT input tensor to op_info.
void DescribeMatrix(int rows, int columns, OpInfo* op_info) {
  auto* input = op_info->add_inputs();
  input->set_dtype(DT_FLOAT);
  auto* shape = input->mutable_shape();
  shape->add_dim()->set_size(rows);
  shape->add_dim()->set_size(columns);
}
// Configures op_info's device as the canonical 10-core test CPU.
void SetCpuDevice(OpInfo* op_info) {
  auto* device = op_info->mutable_device();
  device->set_type("CPU");
  device->set_num_cores(10);
  device->set_frequency(1000);
  device->set_bandwidth(10000000);
}
// Returns an OpContext for MatMul of an m x l matrix with a k x n matrix.
OpContext DescribeMatMul(int m, int n, int l, int k) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op("MatMul");
  DescribeMatrix(m, l, &ctx.op_info);
  DescribeMatrix(k, n, &ctx.op_info);
  return ctx;
}
// Appends one input of the given dtype whose shape lists `dims` in order.
void DescribeArbitraryRankInput(const std::vector<int>& dims, DataType dtype,
                                OpInfo* op_info) {
  auto* input = op_info->add_inputs();
  input->set_dtype(dtype);
  auto* shape = input->mutable_shape();
  for (int dim : dims) {
    shape->add_dim()->set_size(dim);
  }
}
// Appends one output of the given dtype whose shape lists `dims` in order.
void DescribeArbitraryRankOutput(const std::vector<int>& dims, DataType dtype,
                                 OpInfo* op_info) {
  auto* output = op_info->add_outputs();
  output->set_dtype(dtype);
  auto* shape = output->mutable_shape();
  for (int dim : dims) {
    shape->add_dim()->set_size(dim);
  }
}
// Builds a SparseTensorDenseMatMul context; inputs follow the op's
// signature order: a_indices, a_values, a_shape, b.
OpContext DescribeSparseTensorDenseMatMul(const int nnz_a,
                                          const std::vector<int>& dims_b,
                                          const std::vector<int>& dims_out) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op("SparseTensorDenseMatMul");
  DescribeArbitraryRankInput({nnz_a, 2}, DT_INT64, &ctx.op_info);
  DescribeArbitraryRankInput({nnz_a}, DT_FLOAT, &ctx.op_info);
  DescribeArbitraryRankInput({2}, DT_INT64, &ctx.op_info);
  DescribeArbitraryRankInput(dims_b, DT_FLOAT, &ctx.op_info);
  DescribeArbitraryRankOutput(dims_out, DT_FLOAT, &ctx.op_info);
  return ctx;
}
// Builds an XlaEinsum context with the given equation attr; an empty dims
// vector means the corresponding operand is omitted.
OpContext DescribeXlaEinsum(const std::vector<int>& dims_a,
                            const std::vector<int>& dims_b,
                            const string& equation) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op("XlaEinsum");
  AttrValue eq_attr;
  eq_attr.set_s(equation);
  (*ctx.op_info.mutable_attr())["equation"] = eq_attr;
  if (!dims_a.empty()) {
    DescribeArbitraryRankInput(dims_a, DT_FLOAT, &ctx.op_info);
  }
  if (!dims_b.empty()) {
    DescribeArbitraryRankInput(dims_b, DT_FLOAT, &ctx.op_info);
  }
  return ctx;
}
// Same as DescribeXlaEinsum, but with the op renamed to "Einsum".
OpContext DescribeEinsum(const std::vector<int>& dims_a,
                         const std::vector<int>& dims_b,
                         const string& equation) {
  OpContext ctx = DescribeXlaEinsum(dims_a, dims_b, equation);
  ctx.op_info.set_op("Einsum");
  return ctx;
}
// Intentionally leaves `tensor` untouched: a tensor with no dtype and no
// shape models a fully-unknown tensor in these tests.
void DescribeDummyTensor(OpInfo::TensorProperties* tensor) {
}
// Describes a one-dimensional DT_FLOAT tensor of length dim0.
void DescribeTensor1D(int dim0, OpInfo::TensorProperties* tensor) {
  tensor->set_dtype(DT_FLOAT);
  tensor->mutable_shape()->add_dim()->set_size(dim0);
}
void DescribeTensor4D(int dim0, int dim1, int dim2, int dim3,
OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
shape->add_dim()->set_size(dim1);
shape->add_dim()->set_size(dim2);
shape->add_dim()->set_size(dim3);
tensor->set_dtype(DT_FLOAT);
}
void DescribeTensor5D(int dim0, int dim1, int dim2, int dim3, int dim4,
OpInfo::TensorProperties* tensor) {
auto shape = tensor->mutable_shape();
shape->add_dim()->set_size(dim0);
shape->add_dim()->set_size(dim1);
shape->add_dim()->set_size(dim2);
shape->add_dim()->set_size(dim3);
shape->add_dim()->set_size(dim4);
tensor->set_dtype(DT_FLOAT);
}
// Returns an OpContext for a Conv2D with input [batch, ix, iy, iz1] and
// filter [kx, ky, iz2, oz].
OpContext DescribeConvolution(int batch, int ix, int iy, int iz1, int iz2,
                              int kx, int ky, int oz) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op("Conv2D");
  DescribeTensor4D(batch, ix, iy, iz1, ctx.op_info.add_inputs());
  DescribeTensor4D(kx, ky, iz2, oz, ctx.op_info.add_inputs());
  return ctx;
}
// Returns an OpContext for DepthwiseConv2dNative with input
// [batch, ix, iy, iz1] and filter [kx, ky, iz2, cm].
OpContext DescribeDepthwiseConv2dNative(int batch, int ix, int iy, int iz1,
                                        int iz2, int kx, int ky, int cm) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op("DepthwiseConv2dNative");
  DescribeTensor4D(batch, ix, iy, iz1, ctx.op_info.add_inputs());
  DescribeTensor4D(kx, ky, iz2, cm, ctx.op_info.add_inputs());
  return ctx;
}
// Builds a FusedConv2DBiasActivation context. Inputs in order: conv input,
// filter, bias, side input (left empty when !has_side_input), and two
// scalar scaling factors. NHWC/NCHW use rank-4 tensors; the vectorized
// NCHW_VECT_C / OIHW_VECT_I layouts use rank-5 tensors with an inner
// vector width of 4.
OpContext DescribeFusedConv2DBiasActivation(int batch, int ix, int iy, int iz1,
                                            int iz2, int kx, int ky, int ox,
                                            int oy, int oz, bool has_side_input,
                                            const string& data_format,
                                            const string& filter_format) {
  const int kVecWidth = 4;
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("FusedConv2DBiasActivation");
  auto* attr_data_format = op_context.op_info.mutable_attr();
  SetAttrValue(data_format, &(*attr_data_format)["data_format"]);
  auto* attr_filter_format = op_context.op_info.mutable_attr();
  SetAttrValue(filter_format, &(*attr_filter_format)["filter_format"]);
  if (data_format == "NHWC") {
    DescribeTensor4D(batch, ix, iy, iz1, op_context.op_info.add_inputs());
  } else if (data_format == "NCHW") {
    DescribeTensor4D(batch, iz1, ix, iy, op_context.op_info.add_inputs());
  } else {
    // Vectorized layout: channel dim split into iz1/4 groups of width 4.
    EXPECT_EQ(data_format, "NCHW_VECT_C");
    EXPECT_EQ(iz1 % kVecWidth, 0);
    DescribeTensor5D(batch, iz1 / kVecWidth, ix, iy, kVecWidth,
                     op_context.op_info.add_inputs());
  }
  if (filter_format == "HWIO") {
    DescribeTensor4D(kx, ky, iz2, oz, op_context.op_info.add_inputs());
  } else if (filter_format == "OIHW") {
    DescribeTensor4D(oz, iz2, kx, ky, op_context.op_info.add_inputs());
  } else {
    EXPECT_EQ(filter_format, "OIHW_VECT_I");
    EXPECT_EQ(iz2 % kVecWidth, 0);
    DescribeTensor5D(oz, iz2 / kVecWidth, kx, ky, kVecWidth,
                     op_context.op_info.add_inputs());
  }
  // Bias: one value per output channel.
  DescribeTensor1D(oz, op_context.op_info.add_inputs());
  // The side-input slot is always appended; it is only given a shape when
  // requested.
  auto side_input = op_context.op_info.add_inputs();
  if (has_side_input) {
    if (data_format == "NHWC") {
      DescribeTensor4D(batch, ox, oy, oz, side_input);
    } else if (data_format == "NCHW") {
      DescribeTensor4D(batch, oz, ox, oy, side_input);
    } else {
      EXPECT_EQ(data_format, "NCHW_VECT_C");
      EXPECT_EQ(oz % kVecWidth, 0);
      DescribeTensor5D(batch, oz / kVecWidth, ox, oy, kVecWidth, side_input);
    }
  }
  // conv_input_scale and side_input_scale scalars.
  DescribeTensor1D(1, op_context.op_info.add_inputs());
  DescribeTensor1D(1, op_context.op_info.add_inputs());
  return op_context;
}
// Elementwise unary op: one [size1, 1, 1, 1] input and a same-shaped
// output.
OpContext DescribeUnaryOp(const string& op, int size1) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op(op);
  DescribeTensor4D(size1, 1, 1, 1, ctx.op_info.add_inputs());
  DescribeTensor4D(size1, 1, 1, 1, ctx.op_info.add_outputs());
  return ctx;
}
// Broadcasting binary op:
// [size1,1,1,1] op [2*size1,size2,1,1] -> [2*size1,size2,1,1].
OpContext DescribeBinaryOp(const string& op, int size1, int size2) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op(op);
  DescribeTensor4D(size1, 1, 1, 1, ctx.op_info.add_inputs());
  DescribeTensor4D(2 * size1, size2, 1, 1, ctx.op_info.add_inputs());
  DescribeTensor4D(2 * size1, size2, 1, 1, ctx.op_info.add_outputs());
  return ctx;
}
// BiasAdd: [1,1,size2,size1] input plus a size1 bias vector, producing a
// [1,1,size2,size1] output.
OpContext DescribeBiasAdd(int size1, int size2) {
  OpContext ctx;
  SetCpuDevice(&ctx.op_info);
  ctx.op_info.set_op("BiasAdd");
  DescribeTensor4D(1, 1, size2, size1, ctx.op_info.add_inputs());
  DescribeTensor1D(size1, ctx.op_info.add_inputs());
  DescribeTensor4D(1, 1, size2, size1, ctx.op_info.add_outputs());
  return ctx;
}
// Computes the output size of one spatial dimension for a pooling/convolution
// given input size `x`, kernel size `k`, stride `s`, and `padding` ("SAME" or
// anything else, treated as VALID).
int GetOutputSize(const int x, const int k, const int s,
                  const string& padding) {
  // SAME: ceil(x / s).  VALID: ceil((x - k + 1) / s), written as (x-k+s)/s.
  const bool same = (padding == "SAME");
  return same ? (x + s - 1) / s : (x - k + s) / s;
}
// Computes the output shape of a 2-D pooling op over `input`, interpreting
// the N/H/W/C positions according to `data_format` ("NCHW" remaps; any other
// value uses NHWC positions) and sizing each spatial dim via GetOutputSize.
std::vector<int> GetPoolingOutputSize(const std::vector<int>& input,
                                      const std::vector<int>& ksize,
                                      const std::vector<int>& strides,
                                      const string& data_format,
                                      const string& padding) {
  // Map logical dimensions to tensor indices for the given layout.
  const bool is_nchw = (data_format == "NCHW");
  const int h_index = is_nchw ? 2 : 1;
  const int w_index = is_nchw ? 3 : 2;
  const int c_index = is_nchw ? 1 : 3;
  const int n = input[0];
  const int c = input[c_index];
  const int ho = GetOutputSize(input[h_index], ksize[h_index],
                               strides[h_index], padding);
  const int wo = GetOutputSize(input[w_index], ksize[w_index],
                               strides[w_index], padding);
  if (data_format == "NHWC") {
    return {n, ho, wo, c};
  }
  return {n, c, ho, wo};
}
void GetTensorProto(const DataType dtype, const std::vector<int64_t>& shape,
const std::vector<int64_t> values,
const bool tensor_content, TensorProto* tensor_proto) {
tensor_proto->Clear();
TensorProto temp_tensor_proto;
temp_tensor_proto.set_dtype(dtype);
for (const auto& x : shape) {
temp_tensor_proto.mutable_tensor_shape()->add_dim()->set_size(x);
}
for (const auto& x : values) {
if (dtype == DT_INT64) {
temp_tensor_proto.add_int64_val(x);
} else if (dtype == DT_INT32 || dtype == DT_INT16 || dtype == DT_INT8 ||
dtype == DT_UINT8) {
temp_tensor_proto.add_int_val(x);
} else if (dtype == DT_UINT32) {
temp_tensor_proto.add_uint32_val(x);
} else if (dtype == DT_UINT64) {
temp_tensor_proto.add_uint64_val(x);
} else {
CHECK(false) << "Unsupported dtype: " << dtype;
}
}
Tensor tensor(dtype);
CHECK(tensor.FromProto(temp_tensor_proto));
if (tensor_content) {
tensor.AsProtoTensorContent(tensor_proto);
} else {
tensor.AsProtoField(tensor_proto);
}
}
// Builds an OpContext for pooling op `op_name` ("AvgPool", "MaxPool",
// "AvgPoolGrad", or "MaxPoolGrad") with 4-D input shape `x`, kernel sizes
// `ksize`, strides `strides`, `data_format`, and `padding`. Also records the
// pooling attributes on the op.
OpContext DescribePoolingOp(const string& op_name, const std::vector<int>& x,
                            const std::vector<int>& ksize,
                            const std::vector<int>& strides,
                            const string& data_format, const string& padding) {
  OpContext op_context;
  auto& op_info = op_context.op_info;
  SetCpuDevice(&op_info);
  op_info.set_op(op_name);
  // Forward-pass output shape; also used as the gradient-input shape below.
  const std::vector<int> y =
      GetPoolingOutputSize(x, ksize, strides, data_format, padding);
  if (op_name == "AvgPool" || op_name == "MaxPool") {
    // Forward pass: input x -> output y.
    DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
    DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_outputs());
  } else if (op_name == "AvgPoolGrad") {
    // Inputs: original input shape as a constant 1-D int32 tensor, then the
    // incoming gradient with the forward-output shape.
    DescribeArbitraryRankInput({4}, DT_INT32, &op_info);
    auto* shape_value = op_info.mutable_inputs(0)->mutable_value();
    GetTensorProto(DT_INT32, {4}, {x[0], x[1], x[2], x[3]},
                   /*tensor_content=*/false, shape_value);
    DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
    DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_outputs());
  } else if (op_name == "MaxPoolGrad") {
    // Inputs: original input, original output, and grad w.r.t. the output.
    DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
    DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
    DescribeTensor4D(y[0], y[1], y[2], y[3], op_info.add_inputs());
    DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_outputs());
  }
  auto& attr = *op_info.mutable_attr();
  SetAttrValue(data_format, &attr["data_format"]);
  SetAttrValue(padding, &attr["padding"]);
  SetAttrValue(strides, &attr["strides"]);
  SetAttrValue(ksize, &attr["ksize"]);
  return op_context;
}
// Builds an OpContext for FusedBatchNorm, or FusedBatchNormGrad when
// `is_grad` is true, over a 4-D tensor of shape `x` in `data_format` layout.
OpContext DescribeFusedBatchNorm(const bool is_training, const bool is_grad,
                                 const std::vector<int>& x,
                                 const string& data_format) {
  // Reuse the pooling helper to describe the primary 4-D input/output with
  // trivial kernel/stride, then overwrite the op-specific parts.
  OpContext op_context = DescribePoolingOp("MaxPool", x, {1, 1, 1, 1},
                                           {1, 1, 1, 1}, data_format, "SAME");
  auto& op_info = op_context.op_info;
  if (is_grad) {
    op_info.set_op("FusedBatchNormGrad");
    // The gradient flavor takes an extra 4-D input (the incoming gradient).
    DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
  } else {
    op_info.set_op("FusedBatchNorm");
  }
  // 1-D float inputs of length x[3] (scale/offset/mean/variance or their
  // gradient counterparts); grad takes 3, forward takes 4.
  const int num_1d_inputs = is_grad ? 3 : 4;
  for (int i = 0; i < num_1d_inputs; i++) {
    auto* tensor = op_info.add_inputs();
    tensor->mutable_shape()->add_dim()->set_size(x[3]);
    tensor->set_dtype(DT_FLOAT);
  }
  // Four 1-D float outputs of length x[3] in addition to the 4-D one.
  for (int i = 0; i < 4; i++) {
    auto* tensor = op_info.add_outputs();
    tensor->mutable_shape()->add_dim()->set_size(x[3]);
    tensor->set_dtype(DT_FLOAT);
  }
  // Pooling attributes do not apply to batch norm.
  auto* attr = op_info.mutable_attr();
  attr->erase("ksize");
  attr->erase("strides");
  attr->erase("padding");
  SetAttrValue(is_training, &(*attr)["is_training"]);
  return op_context;
}
}
// Test fixture exposing the estimator's private counting/dimension helpers
// (accessible here via friendship with OpLevelCostEstimator).
class OpLevelCostEstimatorTest : public ::testing::Test {
 protected:
  using BatchMatMulDimensions = OpLevelCostEstimator::BatchMatMulDimensions;
  // Thin pass-through to the estimator under test.
  Costs PredictCosts(const OpContext& op_context) const {
    return estimator_.PredictCosts(op_context);
  }
  // Wrappers over the estimator's private op-counting helpers.
  int64_t CountMatMulOperations(const OpInfo& op_info,
                                bool* found_unknown_shapes) const {
    return estimator_.CountMatMulOperations(op_info, found_unknown_shapes);
  }
  int64_t CountBatchMatMulOperations(const OpInfo& op_info,
                                     bool* found_unknown_shapes) const {
    return estimator_.CountBatchMatMulOperations(op_info, found_unknown_shapes);
  }
  int64_t CountBatchMatMulOperations(const OpInfo& op_info,
                                     BatchMatMulDimensions* batch_mat_mul,
                                     bool* found_unknown_shapes) const {
    return estimator_.CountBatchMatMulOperations(op_info, batch_mat_mul,
                                                 found_unknown_shapes);
  }
  // Toggles the estimator's private compute/memory-overlap mode; tests that
  // set it must restore it afterwards.
  void SetComputeMemoryOverlap(bool value) {
    estimator_.compute_memory_overlap_ = value;
  }
  // Builds a MaxPool op in the given layout and checks that
  // OpDimensionsFromInputs recovers every dimension, the output sizes taken
  // from the constructed op, and the padding enum.
  void ValidateOpDimensionsFromInputs(const int n, const int h, const int w,
                                      const int c, const int kx, const int ky,
                                      const int sx, const int sy,
                                      const string& data_format,
                                      const string& padding) {
    OpContext op_context;
    int ho;
    int wo;
    if (data_format == "NHWC") {
      op_context = DescribePoolingOp("MaxPool", {n, h, w, c}, {1, kx, ky, 1},
                                     {1, sx, sy, 1}, "NHWC", padding);
      ho = op_context.op_info.outputs(0).shape().dim(1).size();
      wo = op_context.op_info.outputs(0).shape().dim(2).size();
    } else {
      op_context = DescribePoolingOp("MaxPool", {n, c, h, w}, {1, 1, kx, ky},
                                     {1, 1, sx, sy}, "NCHW", padding);
      ho = op_context.op_info.outputs(0).shape().dim(2).size();
      wo = op_context.op_info.outputs(0).shape().dim(3).size();
    }
    bool found_unknown_shapes;
    TF_ASSERT_OK_AND_ASSIGN(
        auto dims, OpLevelCostEstimator::OpDimensionsFromInputs(
                       op_context.op_info.inputs(0).shape(), op_context.op_info,
                       &found_unknown_shapes));
    Padding padding_enum;
    if (padding == "VALID") {
      padding_enum = Padding::VALID;
    } else {
      padding_enum = Padding::SAME;
    }
    EXPECT_EQ(n, dims.batch);
    EXPECT_EQ(h, dims.ix);
    EXPECT_EQ(w, dims.iy);
    EXPECT_EQ(c, dims.iz);
    EXPECT_EQ(kx, dims.kx);
    EXPECT_EQ(ky, dims.ky);
    EXPECT_EQ(sx, dims.sx);
    EXPECT_EQ(sy, dims.sy);
    EXPECT_EQ(ho, dims.ox);
    EXPECT_EQ(wo, dims.oy);
    EXPECT_EQ(c, dims.oz);
    EXPECT_EQ(padding_enum, dims.padding);
  }
  // Builds a MaxPool OpInfo directly (without DescribePoolingOp) and returns
  // the raw result of OpDimensionsFromInputs, so callers can assert on error
  // statuses too.
  // NOTE(review): the stride order here is {1, sy, sx, 1} / {1, 1, sy, sx},
  // i.e. sx/sy swapped relative to ValidateOpDimensionsFromInputs above —
  // looks intentional for these callers, but worth confirming.
  absl::StatusOr<OpLevelCostEstimator::ConvolutionDimensions>
  CallOpDimensionsFromInputs(const int n, const int h, const int w, const int c,
                             const int kx, const int ky, const int sx,
                             const int sy, const string& data_format,
                             const string& padding) {
    OpContext op_context;
    const std::vector<int> x = {n, h, w, c};
    const std::vector<int> ksize = {1, kx, ky, 1};
    std::vector<int> strides;
    if (data_format == "NHWC") {
      strides = {1, sy, sx, 1};
    } else {
      strides = {1, 1, sy, sx};
    }
    auto& op_info = op_context.op_info;
    SetCpuDevice(&op_info);
    op_info.set_op("MaxPool");
    DescribeTensor4D(x[0], x[1], x[2], x[3], op_info.add_inputs());
    auto* attr = op_info.mutable_attr();
    SetAttrValue(data_format, &(*attr)["data_format"]);
    SetAttrValue(padding, &(*attr)["padding"]);
    SetAttrValue(strides, &(*attr)["strides"]);
    SetAttrValue(ksize, &(*attr)["ksize"]);
    bool found_unknown_shapes;
    return OpLevelCostEstimator::OpDimensionsFromInputs(
        op_context.op_info.inputs(0).shape(), op_context.op_info,
        &found_unknown_shapes);
  }
  // The estimator under test; shared by all helpers above.
  OpLevelCostEstimator estimator_;
};
// Parameterized fixture for batch matmul; the parameter is the op name
// ("BatchMatMul" or "BatchMatMulV2" — see INSTANTIATE_TEST_SUITE_P below).
class OpLevelBatchMatMulCostEstimatorTest
    : public OpLevelCostEstimatorTest,
      public ::testing::WithParamInterface<const char*> {
 protected:
  // Builds an OpContext for the parameterized op with two float inputs of
  // arbitrary ranks dims_a and dims_b.
  OpContext DescribeBatchMatMul(const std::vector<int>& dims_a,
                                const std::vector<int>& dims_b) {
    OpContext op_context;
    SetCpuDevice(&op_context.op_info);
    op_context.op_info.set_op(GetParam());
    DescribeArbitraryRankInput(dims_a, DT_FLOAT, &op_context.op_info);
    DescribeArbitraryRankInput(dims_b, DT_FLOAT, &op_context.op_info);
    return op_context;
  }
  int64_t CountBatchMatMulOperations(const OpInfo& op_info,
                                     bool* found_unknown_shapes) const {
    return OpLevelCostEstimatorTest::CountBatchMatMulOperations(
        op_info, found_unknown_shapes);
  }
  // Returns the product of all recovered batch dims and the n/m/k matmul
  // dims; 0 if the op is not recognized as a batch matmul (n/m/k stay 0).
  int64_t CountBatchMatMulDimProduct(const OpInfo& op_info,
                                     bool* found_unknown_shapes) const {
    BatchMatMulDimensions batch_mat_mul;
    batch_mat_mul.matmul_dims.n = 0;
    batch_mat_mul.matmul_dims.m = 0;
    batch_mat_mul.matmul_dims.k = 0;
    OpLevelCostEstimatorTest::CountBatchMatMulOperations(
        op_info, &batch_mat_mul, found_unknown_shapes);
    int dimension_product = 1;
    for (auto dim : batch_mat_mul.batch_dims) dimension_product *= dim;
    dimension_product *= batch_mat_mul.matmul_dims.n;
    dimension_product *= batch_mat_mul.matmul_dims.m;
    dimension_product *= batch_mat_mul.matmul_dims.k;
    return dimension_product;
  }
};
// Persistent ops (constants/variables) should report zero memory time and a
// nominal 1-unit compute/execution time.
TEST_F(OpLevelCostEstimatorTest, TestPersistentOpCosts) {
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  std::unordered_set<string> persistent_ops = {
      "Const",       "Variable",       "VariableV2", "AutoReloadVariable",
      "VarHandleOp", "ReadVariableOp",
  };
  for (const auto& op : persistent_ops) {
    op_context.op_info.set_op(op);
    auto cost = estimator_.PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(0), cost.memory_time);
    EXPECT_EQ(Costs::Duration(1), cost.compute_time);
    EXPECT_EQ(Costs::Duration(1), cost.execution_time);
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
}
// Gather-family ops: memory time should be driven by the (small) output and
// index sizes, not by the very large input tensor.
TEST_F(OpLevelCostEstimatorTest, TestGatherCosts) {
  std::vector<std::string> gather_ops = {"Gather", "GatherNd", "GatherV2"};
  for (const auto& op : gather_ops) {
    OpContext op_context;
    SetCpuDevice(&op_context.op_info);
    op_context.op_info.set_op(op);
    DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
    DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
    DescribeArbitraryRankOutput({16, 10}, DT_FLOAT, &op_context.op_info);
    auto cost = estimator_.PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(130), cost.memory_time);
    EXPECT_EQ(Costs::Duration(16), cost.compute_time);
    EXPECT_EQ(Costs::Duration(146), cost.execution_time);
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
}
// A Gather with no output description should produce zero costs and be
// flagged inaccurate.
TEST_F(OpLevelCostEstimatorTest, TestGatherCostsWithoutOutput) {
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("Gather");
  DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
  DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
  auto cost = estimator_.PredictCosts(op_context);
  EXPECT_EQ(Costs::Duration(0), cost.memory_time);
  EXPECT_EQ(Costs::Duration(0), cost.compute_time);
  EXPECT_EQ(Costs::Duration(0), cost.execution_time);
  EXPECT_EQ(1, cost.num_ops_total);
  EXPECT_TRUE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Slice: cost should reflect the sliced output size, not the huge input.
TEST_F(OpLevelCostEstimatorTest, TestSliceCosts) {
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("Slice");
  DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
  DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
  DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
  DescribeArbitraryRankOutput({10, 10}, DT_FLOAT, &op_context.op_info);
  auto cost = estimator_.PredictCosts(op_context);
  EXPECT_EQ(Costs::Duration(81), cost.memory_time);
  EXPECT_EQ(Costs::Duration(10), cost.compute_time);
  EXPECT_EQ(Costs::Duration(91), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// StridedSlice: same expectations as Slice (output-size driven), with the
// extra strides input.
TEST_F(OpLevelCostEstimatorTest, TestStridedSliceCosts) {
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("StridedSlice");
  DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
  DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
  DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
  DescribeArbitraryRankInput({2}, DT_INT64, &op_context.op_info);
  DescribeArbitraryRankOutput({10, 10}, DT_FLOAT, &op_context.op_info);
  auto cost = estimator_.PredictCosts(op_context);
  EXPECT_EQ(Costs::Duration(81), cost.memory_time);
  EXPECT_EQ(Costs::Duration(10), cost.compute_time);
  EXPECT_EQ(Costs::Duration(91), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Scatter-family ops, with a full-shape update tensor and with a broadcast
// scalar update; costs should track the index/update sizes.
TEST_F(OpLevelCostEstimatorTest, TestScatterOps) {
  std::vector<string> scatter_ops = {"ScatterAdd",   "ScatterDiv", "ScatterMax",
                                     "ScatterMin",   "ScatterMul", "ScatterSub",
                                     "ScatterUpdate"};
  for (const auto& op : scatter_ops) {
    {
      // Update tensor matches the indexed rows: {16} indices, {16, 10} values.
      OpContext op_context;
      SetCpuDevice(&op_context.op_info);
      op_context.op_info.set_op(op);
      DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
      DescribeArbitraryRankInput({16}, DT_INT64, &op_context.op_info);
      DescribeArbitraryRankInput({16, 10}, DT_FLOAT, &op_context.op_info);
      DescribeArbitraryRankOutput({10000000, 10}, DT_FLOAT,
                                  &op_context.op_info);
      auto cost = estimator_.PredictCosts(op_context);
      EXPECT_EQ(Costs::Duration(205), cost.memory_time);
      EXPECT_EQ(Costs::Duration(16), cost.compute_time);
      EXPECT_EQ(Costs::Duration(221), cost.execution_time);
      EXPECT_EQ(cost.num_ops_total, 1);
      EXPECT_FALSE(cost.inaccurate);
      EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
      EXPECT_EQ(cost.temporary_memory, 0);
      EXPECT_EQ(cost.persistent_memory, 0);
    }
    {
      // Scalar (rank-0) update broadcast across the indexed rows.
      OpContext op_context;
      SetCpuDevice(&op_context.op_info);
      op_context.op_info.set_op(op);
      DescribeArbitraryRankInput({10000000, 10}, DT_FLOAT, &op_context.op_info);
      DescribeArbitraryRankInput({16}, DT_INT32, &op_context.op_info);
      DescribeArbitraryRankInput({}, DT_FLOAT, &op_context.op_info);
      DescribeArbitraryRankOutput({10000000, 10}, DT_FLOAT,
                                  &op_context.op_info);
      auto cost = estimator_.PredictCosts(op_context);
      EXPECT_EQ(Costs::Duration(135), cost.memory_time);
      EXPECT_EQ(Costs::Duration(16), cost.compute_time);
      EXPECT_EQ(Costs::Duration(151), cost.execution_time);
      EXPECT_EQ(1, cost.num_ops_total);
      EXPECT_FALSE(cost.inaccurate);
      EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    }
  }
}
// Golden cost values for BiasAdd on a 1000x10 problem.
TEST_F(OpLevelCostEstimatorTest, BiasAddExecutionTime) {
  auto cost = PredictCosts(DescribeBiasAdd(1000, 10));
  EXPECT_EQ(Costs::Duration(8400), cost.memory_time);
  EXPECT_EQ(Costs::Duration(1000), cost.compute_time);
  EXPECT_EQ(Costs::Duration(9400), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Golden cost values for a 16x19x19x48 Conv2D with a 5x5x48x256 filter.
TEST_F(OpLevelCostEstimatorTest, Conv2DExecutionTime) {
  auto cost = PredictCosts(DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  EXPECT_EQ(Costs::Duration(233780), cost.memory_time);
  EXPECT_EQ(Costs::Duration(354877440), cost.compute_time);
  EXPECT_EQ(Costs::Duration(355111220), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Zeroing any single dimension of an otherwise-valid conv config should
// yield zero costs and an inaccurate/unknown-shape flag for every conv op.
TEST_F(OpLevelCostEstimatorTest, InvalidConv2DConfig) {
  const std::vector<std::string> conv_ops = {
      "Conv2D",
      "Conv2DBackpropFilter",
      "Conv2DBackpropInput",
      "DepthwiseConv2dNative",
      "DepthwiseConv2dNativeBackpropFilter",
      "DepthwiseConv2dNativeBackpropInput",
  };
  const std::vector<int> valid_conv_config = {16, 19, 19, 48, 48, 5, 5, 256};
  for (const auto& op : conv_ops) {
    for (int i = 0; i < valid_conv_config.size(); ++i) {
      // Invalidate exactly one dimension at a time.
      std::vector<int> conv_config(valid_conv_config);
      conv_config[i] = 0;
      auto op_context = DescribeConvolution(
          conv_config[0], conv_config[1], conv_config[2], conv_config[3],
          conv_config[4], conv_config[5], conv_config[6], conv_config[7]);
      op_context.op_info.set_op(op);
      auto cost = PredictCosts(op_context);
      EXPECT_EQ(Costs::Duration(0), cost.memory_time);
      EXPECT_EQ(Costs::Duration(0), cost.compute_time);
      EXPECT_EQ(Costs::Duration(0), cost.execution_time);
      EXPECT_EQ(1, cost.num_ops_total);
      EXPECT_TRUE(cost.inaccurate);
      EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
    }
  }
}
// Golden cost values for DepthwiseConv2dNative with multiplier 3.
TEST_F(OpLevelCostEstimatorTest, DepthwiseConv2dNativeExecutionTime) {
  auto cost =
      PredictCosts(DescribeDepthwiseConv2dNative(16, 19, 19, 48, 48, 5, 5, 3));
  EXPECT_EQ(Costs::Duration(112340), cost.memory_time);
  EXPECT_EQ(Costs::Duration(4158720), cost.compute_time);
  EXPECT_EQ(Costs::Duration(4271060), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// An unknown op falls back to memory-only costing and is marked inaccurate.
TEST_F(OpLevelCostEstimatorTest, DummyExecutionTime) {
  auto cost = PredictCosts(DescribeBinaryOp("Dummy", 1000, 1));
  EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
  EXPECT_EQ(Costs::Duration(0), cost.compute_time);
  EXPECT_EQ(Costs::Duration(2000), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_TRUE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// With compute/memory overlap enabled, execution time is the max rather than
// the sum; for a zero-compute op the result is unchanged. Restores the flag.
TEST_F(OpLevelCostEstimatorTest, ExecutionTimeSumOrMax) {
  SetComputeMemoryOverlap(true);
  auto cost = PredictCosts(DescribeBinaryOp("Dummy", 1000, 1));
  EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
  EXPECT_EQ(Costs::Duration(0), cost.compute_time);
  EXPECT_EQ(Costs::Duration(2000), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_TRUE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
  SetComputeMemoryOverlap(false);  // Restore the default for other tests.
}
// FusedConv2DBiasActivation, NCHW data / HWIO filter, no side input: lower
// memory time than the side-input variants below.
TEST_F(OpLevelCostEstimatorTest,
       FusedConv2DBiasActivationNCHW_HWIO_NoSideInput) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, false,
      "NCHW", "HWIO"));
  EXPECT_EQ(Costs::Duration(825345), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355321037), cost.compute_time);
  EXPECT_EQ(Costs::Duration(356146382), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation, NCHW data / HWIO filter, with side input.
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_HWIO) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NCHW", "HWIO"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation, NCHW / OIHW: same golden costs as HWIO — layout
// should not change the estimate.
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_OIHW) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NCHW", "OIHW"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation, NHWC / HWIO: same golden costs as above.
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNHWC_HWIO) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NHWC", "HWIO"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation, NHWC / OIHW: same golden costs as above.
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNHWC_OIHW) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NHWC", "OIHW"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation, vectorized NCHW_VECT_C data / OIHW filter:
// golden costs match the non-vectorized layouts.
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_VECT_C_OIHW) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NCHW_VECT_C", "OIHW"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation, NCHW data / vectorized OIHW_VECT_I filter:
// golden costs match the non-vectorized layouts.
TEST_F(OpLevelCostEstimatorTest, FusedConv2DBiasActivationNCHW_OIHW_VECT_I) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NCHW", "OIHW_VECT_I"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// FusedConv2DBiasActivation with both data and filter vectorized: golden
// costs still match the non-vectorized layouts.
TEST_F(OpLevelCostEstimatorTest,
       FusedConv2DBiasActivationNCHW_VECT_C_OIHW_VECT_I) {
  auto cost = PredictCosts(DescribeFusedConv2DBiasActivation(
      16, 19, 19, 48, 48, 5, 5, 19, 19, 256, true,
      "NCHW_VECT_C", "OIHW_VECT_I"));
  EXPECT_EQ(Costs::Duration(1416808), cost.memory_time);
  EXPECT_EQ(Costs::Duration(355616768), cost.compute_time);
  EXPECT_EQ(Costs::Duration(357033576), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Golden cost values for an element-wise Mul without broadcasting.
TEST_F(OpLevelCostEstimatorTest, MulExecutionTime) {
  auto cost = PredictCosts(DescribeBinaryOp("Mul", 1000, 1));
  EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
  EXPECT_EQ(Costs::Duration(200), cost.compute_time);
  EXPECT_EQ(Costs::Duration(2200), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Golden cost values for Mul with broadcasting (second size > 1).
TEST_F(OpLevelCostEstimatorTest, MulBroadcastExecutionTime) {
  auto cost = PredictCosts(DescribeBinaryOp("Mul", 1000, 2));
  EXPECT_EQ(Costs::Duration(3600), cost.memory_time);
  EXPECT_EQ(Costs::Duration(400), cost.compute_time);
  EXPECT_EQ(Costs::Duration(4000), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Golden cost values for Mod, which is costed as a more expensive op.
TEST_F(OpLevelCostEstimatorTest, ModExecutionTime) {
  auto cost = PredictCosts(DescribeBinaryOp("Mod", 1000, 1));
  EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
  EXPECT_EQ(Costs::Duration(1600), cost.compute_time);
  EXPECT_EQ(Costs::Duration(3600), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Golden cost values for SquaredDifference with broadcasting.
TEST_F(OpLevelCostEstimatorTest, SquaredDifferenceExecutionTime) {
  auto cost = PredictCosts(DescribeBinaryOp("SquaredDifference", 1000, 2));
  EXPECT_EQ(cost.memory_time, Costs::Duration(3600));
  EXPECT_EQ(cost.compute_time, Costs::Duration(800));
  EXPECT_EQ(cost.execution_time, Costs::Duration(4400));
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Unary ops: compute time should scale with the per-op cost factor (second
// pair member) times tensor size, divided by the device's gigaops.
TEST_F(OpLevelCostEstimatorTest, UnaryOpExecutionTime) {
  std::vector<std::pair<std::string, int>> unary_ops = {
      {"All", 1},      {"ArgMax", 1}, {"Cast", 1},  {"Max", 1},
      {"Min", 1},      {"Prod", 1},   {"Relu", 1},  {"Relu6", 1},
      {"Softmax", 40}, {"Sum", 1},    {"TopKV2", 1}};
  const int kTensorSize = 1000;
  for (auto unary_op : unary_ops) {
    OpContext op_context = DescribeUnaryOp(unary_op.first, kTensorSize);
    const int kExpectedMemoryTime = 800;
    int expected_compute_time = std::ceil(
        unary_op.second * kTensorSize /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time))
        << unary_op.first;
    EXPECT_EQ(cost.execution_time,
              Costs::Duration(expected_compute_time + kExpectedMemoryTime));
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
}
// Binary ops: compute time should scale with the per-op cost factor times
// the broadcast output size (2 * size1 * size2) over device gigaops.
TEST_F(OpLevelCostEstimatorTest, BinaryOpExecutionTime) {
  std::vector<std::pair<std::string, int>> binary_ops = {
      {"Select", 1},
      {"SelectV2", 1},
      {"SquaredDifference", 2},
      {"Where", 1},
  };
  const int kTensorSize1 = 1000;
  const int kTensorSize2 = 2;
  for (auto binary_op : binary_ops) {
    OpContext op_context =
        DescribeBinaryOp(binary_op.first, kTensorSize1, kTensorSize2);
    const int kExpectedMemoryTime = 3600;
    int expected_compute_time = std::ceil(
        binary_op.second * kTensorSize1 * kTensorSize2 * 2 /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    auto cost = PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(kExpectedMemoryTime), cost.memory_time)
        << binary_op.first;
    EXPECT_EQ(Costs::Duration(expected_compute_time), cost.compute_time)
        << binary_op.first;
    EXPECT_EQ(Costs::Duration(expected_compute_time + kExpectedMemoryTime),
              cost.execution_time)
        << binary_op.first;
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
}
// Add with incompatible-rank operands that broadcast ({100} vs {1,10,1,1}).
TEST_F(OpLevelCostEstimatorTest, BroadcastAddExecutionTime) {
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("Add");
  DescribeTensor1D(100, op_context.op_info.add_inputs());
  DescribeTensor4D(1, 10, 1, 1, op_context.op_info.add_inputs());
  auto cost = PredictCosts(op_context);
  EXPECT_EQ(Costs::Duration(44), cost.memory_time);
  EXPECT_EQ(Costs::Duration(100), cost.compute_time);
  EXPECT_EQ(Costs::Duration(144), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// A -1 (unknown) dimension anywhere in MatMul/Conv inputs should flip the
// inaccurate and unknown-shape flags; fully known shapes should not.
TEST_F(OpLevelCostEstimatorTest, UnknownOrPartialShape) {
  {
    auto cost = PredictCosts(DescribeMatMul(2, 4, 7, 7));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(DescribeMatMul(-1, 4, 7, 7));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(DescribeMatMul(2, 4, -1, 7));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost =
        PredictCosts(DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost =
        PredictCosts(DescribeConvolution(16, -1, 19, 48, 48, 5, 5, 256));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
}
// Parameterized over BatchMatMul/BatchMatMulV2: checks inaccuracy flags for
// missing shapes, op-count equivalence with plain MatMul, batch scaling, and
// the dimension-product helper (including adj_x/adj_y transposition).
TEST_P(OpLevelBatchMatMulCostEstimatorTest, TestBatchMatMul) {
  {
    auto cost = PredictCosts(DescribeBatchMatMul({}, {}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(DescribeBatchMatMul({2, 4}, {}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(DescribeBatchMatMul({2, 4}, {4, 2}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(DescribeBatchMatMul({1, 2, 4}, {1, 4, 2}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(DescribeBatchMatMul({2, 4}, {1, 3, 4, 2}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
  }
  // A rank-2 batch matmul should count the same ops as the equivalent MatMul.
  bool matmul_inaccurate = false;
  bool batch_matmul_inaccurate = false;
  EXPECT_EQ(
      CountMatMulOperations(DescribeMatMul(2, 2, 4, 4).op_info,
                            &matmul_inaccurate),
      CountBatchMatMulOperations(DescribeBatchMatMul({2, 4}, {4, 2}).op_info,
                                 &batch_matmul_inaccurate));
  EXPECT_EQ(matmul_inaccurate, batch_matmul_inaccurate);
  // Batch dims multiply the op count; a -1 batch dim marks it inaccurate.
  EXPECT_EQ(10 * CountMatMulOperations(DescribeMatMul(2, 2, 4, 4).op_info,
                                       &matmul_inaccurate),
            CountBatchMatMulOperations(
                DescribeBatchMatMul({10, 2, 4}, {-1, 10, 4, 2}).op_info,
                &batch_matmul_inaccurate));
  EXPECT_NE(matmul_inaccurate, batch_matmul_inaccurate);
  EXPECT_EQ(20 * CountMatMulOperations(DescribeMatMul(2, 2, 4, 4).op_info,
                                       &matmul_inaccurate),
            CountBatchMatMulOperations(
                DescribeBatchMatMul({2, 10, 2, 4}, {-1, 10, 4, 2}).op_info,
                &batch_matmul_inaccurate));
  EXPECT_NE(matmul_inaccurate, batch_matmul_inaccurate);
  // Dimension product: batch dims times n*m*k.
  int prod = CountBatchMatMulDimProduct(
      DescribeBatchMatMul({2, 4}, {1, 3, 4, 2}).op_info,
      &batch_matmul_inaccurate);
  EXPECT_EQ(prod, 16);
  EXPECT_FALSE(batch_matmul_inaccurate);
  // A non-batch-matmul op name yields a zero product and is inaccurate.
  OpContext bad_batch = DescribeBatchMatMul({2, 4}, {4, 2});
  bad_batch.op_info.set_op("notBatchMatMul");
  prod =
      CountBatchMatMulDimProduct(bad_batch.op_info, &batch_matmul_inaccurate);
  EXPECT_EQ(prod, 0);
  EXPECT_TRUE(batch_matmul_inaccurate);
  // adj_x/adj_y transpose the operands before the product is computed.
  OpContext transpose_batch = DescribeBatchMatMul({2, 4, 3, 1}, {4, 2});
  auto attr = transpose_batch.op_info.mutable_attr();
  (*attr)["adj_x"].set_b(true);
  (*attr)["adj_y"].set_b(true);
  prod = CountBatchMatMulDimProduct(transpose_batch.op_info,
                                    &batch_matmul_inaccurate);
  EXPECT_EQ(prod, 12);
}
// Runs the parameterized suite once per batch-matmul op flavor.
INSTANTIATE_TEST_SUITE_P(TestBatchMatMul, OpLevelBatchMatMulCostEstimatorTest,
                         ::testing::Values("BatchMatMul", "BatchMatMulV2"));
// SparseTensorDenseMatMul: any -1 dimension marks the op inaccurate; for
// known shapes the golden costs depend on nnz, not the dense a-matrix rows
// (both known-shape cases below produce identical costs).
TEST_F(OpLevelCostEstimatorTest, SparseTensorDenseMatMul) {
  {
    auto cost =
        PredictCosts(DescribeSparseTensorDenseMatMul(-1, {1, 1}, {1, 1}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost =
        PredictCosts(DescribeSparseTensorDenseMatMul(1, {-1, 1}, {1, 1}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost =
        PredictCosts(DescribeSparseTensorDenseMatMul(1, {1, -1}, {1, -1}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost =
        PredictCosts(DescribeSparseTensorDenseMatMul(1, {1, 1}, {-1, 1}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
  }
  {
    auto cost = PredictCosts(
        DescribeSparseTensorDenseMatMul(10, {1000, 100}, {50, 100}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(Costs::Duration(200), cost.compute_time);
    EXPECT_EQ(Costs::Duration(2422), cost.memory_time);
  }
  {
    auto cost = PredictCosts(
        DescribeSparseTensorDenseMatMul(10, {100000, 100}, {50, 100}));
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(Costs::Duration(200), cost.compute_time);
    EXPECT_EQ(Costs::Duration(2422), cost.memory_time);
  }
}
void ExpectTensorShape(const std::vector<int64_t>& expected,
const TensorShapeProto& tensor_shape_proto) {
TensorShape tensor_shape_expected(expected);
TensorShape tensor_shape(tensor_shape_proto);
EXPECT_EQ(tensor_shape_expected, tensor_shape);
}
// Verifies GetTensorShapeProtoFromTensorProto: malformed tensor protos are
// rejected, while 1-D DT_INT32/DT_INT64 tensors (value or tensor_content
// encoding) are decoded into the expected shape.
TEST_F(OpLevelCostEstimatorTest, GetTensorShapeProtoFromTensorProto) {
  TensorProto tensor_proto;
  TensorShapeProto tensor_shape_proto;
  // Shape present but no tensor data: must fail.
  tensor_proto.mutable_tensor_shape()->add_dim()->set_size(255);
  EXPECT_FALSE(
      GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
  // Rank-2 shape proto (shape tensors must be rank 1): must fail.
  tensor_proto.Clear();
  tensor_proto.mutable_tensor_shape()->add_dim()->set_size(1);
  tensor_proto.mutable_tensor_shape()->add_dim()->set_size(2);
  EXPECT_FALSE(
      GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
  // Wrong dtype (DT_FLOAT is not a valid shape tensor type): must fail.
  GetTensorProto(DT_FLOAT, {}, {}, false, &tensor_proto);
  EXPECT_FALSE(
      GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
  // DT_INT32 values stored in the repeated value field.
  {
    std::vector<int64_t> shape_expected = {10, 20, 30, 40};
    GetTensorProto(DT_INT32, {4}, shape_expected,
                   false, &tensor_proto);
    EXPECT_TRUE(
        GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
    ExpectTensorShape(shape_expected, tensor_shape_proto);
  }
  // DT_INT64 values stored in the repeated value field.
  {
    std::vector<int64_t> shape_expected = {40, 20, 90, 40};
    GetTensorProto(DT_INT64, {4}, shape_expected,
                   false, &tensor_proto);
    EXPECT_TRUE(
        GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
    ExpectTensorShape(shape_expected, tensor_shape_proto);
  }
  // DT_INT32 values serialized into tensor_content.
  {
    std::vector<int64_t> shape_expected = {10, 20, 30, 40};
    GetTensorProto(DT_INT32, {4}, shape_expected,
                   true, &tensor_proto);
    EXPECT_TRUE(
        GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
    ExpectTensorShape(shape_expected, tensor_shape_proto);
  }
  // DT_INT64 values serialized into tensor_content.
  {
    std::vector<int64_t> shape_expected = {40, 20, 90, 40};
    GetTensorProto(DT_INT64, {4}, shape_expected,
                   true, &tensor_proto);
    EXPECT_TRUE(
        GetTensorShapeProtoFromTensorProto(tensor_proto, &tensor_shape_proto));
    ExpectTensorShape(shape_expected, tensor_shape_proto);
  }
}
// Exercises OpDimensionsFromInputs across several pooling configurations for
// every combination of padding scheme and data format.
TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputs) {
  const std::vector<string> paddings = {"VALID", "SAME"};
  const std::vector<string> formats = {"NHWC", "NCHW"};
  for (const auto& padding : paddings) {
    for (const auto& format : formats) {
      // Args: batch, height, width, channels, kx, ky, sx, sy, format, padding.
      ValidateOpDimensionsFromInputs(10, 20, 20, 100, 3, 3, 2, 2, format,
                                     padding);
      ValidateOpDimensionsFromInputs(10, 20, 20, 100, 1, 1, 3, 3, format,
                                     padding);
      ValidateOpDimensionsFromInputs(10, 200, 200, 100, 5, 5, 3, 3, format,
                                     padding);
      ValidateOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 2, 2, format,
                                     padding);
    }
  }
}
// Verifies that OpDimensionsFromInputs rejects zero strides with an
// INVALID_ARGUMENT status and the exact error message, for all
// padding/format combinations.
TEST_F(OpLevelCostEstimatorTest, OpDimensionsFromInputsError) {
  std::vector<string> paddings = {"VALID", "SAME"};
  std::vector<string> formats = {"NHWC", "NCHW"};
  for (const auto& p : paddings) {
    for (const auto& f : formats) {
      // Zero stride in the width dimension.
      ASSERT_THAT(
          CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 0, 2, f, p),
          testing::StatusIs(
              error::INVALID_ARGUMENT,
              "Stride must be > 0 for Height and Width, but got (2, 0)"));
      // Zero stride in the height dimension.
      ASSERT_THAT(
          CallOpDimensionsFromInputs(10, 14, 14, 3840, 3, 3, 2, 0, f, p),
          testing::StatusIs(
              error::INVALID_ARGUMENT,
              "Stride must be > 0 for Height and Width, but got (0, 2)"));
    }
  }
}
// Golden-value test for MaxPool cost prediction over three kernel/stride/
// padding configurations on NHWC inputs.
TEST_F(OpLevelCostEstimatorTest, PredictMaxPool) {
  // Helper: batch n, square input `in`, channels c, kernel k, stride s.
  auto predict_max_pool = [this](const int n, const int in, const int c,
                                 const int k, const int s,
                                 const string& padding) -> Costs {
    OpContext op_context = DescribePoolingOp(
        "MaxPool", {n, in, in, c}, {1, k, k, 1}, {1, s, s, 1}, "NHWC", padding);
    return estimator_.PredictCosts(op_context);
  };
  // 3x3 kernel, stride 2, SAME padding.
  {
    auto costs = predict_max_pool(10, 20, 384, 3, 2, "SAME");
    EXPECT_EQ(Costs::Duration(1075200), costs.execution_time);
    EXPECT_EQ(Costs::Duration(307200), costs.compute_time);
    EXPECT_EQ(Costs::Duration(768000), costs.memory_time);
    EXPECT_EQ(costs.num_ops_total, 1);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(costs.temporary_memory, 0);
    EXPECT_EQ(costs.persistent_memory, 0);
  }
  // 1x1 kernel, stride 2 (pure subsampling).
  {
    auto costs = predict_max_pool(10, 20, 384, 1, 2, "SAME");
    EXPECT_EQ(Costs::Duration(499200), costs.execution_time);
    EXPECT_EQ(Costs::Duration(38400), costs.compute_time);
    EXPECT_EQ(Costs::Duration(460800), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
  // 2x2 kernel, stride 3, VALID padding.
  {
    auto costs = predict_max_pool(10, 20, 384, 2, 3, "VALID");
    EXPECT_EQ(Costs::Duration(561792), costs.execution_time);
    EXPECT_EQ(Costs::Duration(56448), costs.compute_time);
    EXPECT_EQ(Costs::Duration(505344), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
}
// Golden-value test for MaxPoolGrad cost prediction with the same three
// configurations used in the forward MaxPool test.
TEST_F(OpLevelCostEstimatorTest, PredictMaxPoolGrad) {
  // Helper: batch n, square input `in`, channels c, kernel k, stride s.
  auto predict_max_pool_grad = [this](const int n, const int in, const int c,
                                      const int k, const int s,
                                      const string& padding) -> Costs {
    OpContext op_context =
        DescribePoolingOp("MaxPoolGrad", {n, in, in, c}, {1, k, k, 1},
                          {1, s, s, 1}, "NHWC", padding);
    return estimator_.PredictCosts(op_context);
  };
  // 3x3 kernel, stride 2, SAME padding.
  {
    auto costs = predict_max_pool_grad(10, 20, 384, 3, 2, "SAME");
    EXPECT_EQ(Costs::Duration(1996800), costs.execution_time);
    EXPECT_EQ(Costs::Duration(614400), costs.compute_time);
    EXPECT_EQ(Costs::Duration(1382400), costs.memory_time);
    EXPECT_EQ(costs.num_ops_total, 1);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(costs.temporary_memory, 0);
    EXPECT_EQ(costs.persistent_memory, 0);
  }
  // 1x1 kernel, stride 2.
  {
    auto costs = predict_max_pool_grad(10, 20, 384, 1, 2, "SAME");
    EXPECT_EQ(Costs::Duration(1536000), costs.execution_time);
    EXPECT_EQ(Costs::Duration(153600), costs.compute_time);
    EXPECT_EQ(Costs::Duration(1382400), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
  // 2x2 kernel, stride 3, VALID padding.
  {
    auto costs = predict_max_pool_grad(10, 20, 384, 2, 3, "VALID");
    EXPECT_EQ(Costs::Duration(1514112), costs.execution_time);
    EXPECT_EQ(Costs::Duration(210048), costs.compute_time);
    EXPECT_EQ(Costs::Duration(1304064), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
}
// Golden-value test for AvgPool cost prediction (same configurations as the
// MaxPool test; compute times differ because averaging costs more per cell).
TEST_F(OpLevelCostEstimatorTest, PredictAvgPool) {
  // Helper: batch n, square input `in`, channels c, kernel k, stride s.
  auto predict_avg_pool = [this](const int n, const int in, const int c,
                                 const int k, const int s,
                                 const string& padding) -> Costs {
    OpContext op_context = DescribePoolingOp(
        "AvgPool", {n, in, in, c}, {1, k, k, 1}, {1, s, s, 1}, "NHWC", padding);
    return estimator_.PredictCosts(op_context);
  };
  // 3x3 kernel, stride 2, SAME padding.
  {
    auto costs = predict_avg_pool(10, 20, 384, 3, 2, "SAME");
    EXPECT_EQ(Costs::Duration(1113600), costs.execution_time);
    EXPECT_EQ(Costs::Duration(345600), costs.compute_time);
    EXPECT_EQ(Costs::Duration(768000), costs.memory_time);
    EXPECT_EQ(costs.num_ops_total, 1);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(costs.temporary_memory, 0);
    EXPECT_EQ(costs.persistent_memory, 0);
  }
  // 1x1 kernel, stride 2.
  {
    auto costs = predict_avg_pool(10, 20, 384, 1, 2, "SAME");
    EXPECT_EQ(Costs::Duration(499200), costs.execution_time);
    EXPECT_EQ(Costs::Duration(38400), costs.compute_time);
    EXPECT_EQ(Costs::Duration(460800), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
  // 2x2 kernel, stride 3, VALID padding.
  {
    auto costs = predict_avg_pool(10, 20, 384, 2, 3, "VALID");
    EXPECT_EQ(Costs::Duration(580608), costs.execution_time);
    EXPECT_EQ(Costs::Duration(75264), costs.compute_time);
    EXPECT_EQ(Costs::Duration(505344), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
}
// Golden-value test for AvgPoolGrad cost prediction.
TEST_F(OpLevelCostEstimatorTest, PredictAvgPoolGrad) {
  // Helper: batch n, square input `in`, channels c, kernel k, stride s.
  auto predict_avg_pool_grad = [this](const int n, const int in, const int c,
                                      const int k, const int s,
                                      const string& padding) -> Costs {
    OpContext op_context =
        DescribePoolingOp("AvgPoolGrad", {n, in, in, c}, {1, k, k, 1},
                          {1, s, s, 1}, "NHWC", padding);
    return estimator_.PredictCosts(op_context);
  };
  // 3x3 kernel, stride 2, SAME padding.
  {
    auto costs = predict_avg_pool_grad(10, 20, 384, 3, 2, "SAME");
    EXPECT_EQ(Costs::Duration(1305602), costs.execution_time);
    EXPECT_EQ(Costs::Duration(537600), costs.compute_time);
    EXPECT_EQ(Costs::Duration(768002), costs.memory_time);
    EXPECT_EQ(costs.num_ops_total, 1);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(costs.temporary_memory, 0);
    EXPECT_EQ(costs.persistent_memory, 0);
  }
  // 1x1 kernel, stride 2.
  {
    auto costs = predict_avg_pool_grad(10, 20, 384, 1, 2, "SAME");
    EXPECT_EQ(Costs::Duration(960002), costs.execution_time);
    EXPECT_EQ(Costs::Duration(192000), costs.compute_time);
    EXPECT_EQ(Costs::Duration(768002), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
  // 2x2 kernel, stride 3, VALID padding.
  {
    auto costs = predict_avg_pool_grad(10, 20, 384, 2, 3, "VALID");
    EXPECT_EQ(Costs::Duration(862082), costs.execution_time);
    EXPECT_EQ(Costs::Duration(172416), costs.compute_time);
    EXPECT_EQ(Costs::Duration(689666), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
}
// Golden-value test for FusedBatchNorm cost prediction in both training and
// inference modes, at two channel counts.
TEST_F(OpLevelCostEstimatorTest, PredictFusedBatchNorm) {
  // Helper: batch n, square spatial size `in`, channels c.
  auto predict_fused_bn = [this](const int n, const int in, const int c,
                                 const bool is_training) -> Costs {
    OpContext op_context = DescribeFusedBatchNorm(
        is_training, false, {n, in, in, c}, "NHWC");
    return estimator_.PredictCosts(op_context);
  };
  // Training mode, 96 channels.
  {
    auto costs = predict_fused_bn(10, 20, 96, true);
    EXPECT_EQ(Costs::Duration(614737), costs.execution_time);
    EXPECT_EQ(Costs::Duration(153706), costs.compute_time);
    EXPECT_EQ(Costs::Duration(461031), costs.memory_time);
    EXPECT_EQ(costs.num_ops_total, 1);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(costs.temporary_memory, 0);
    EXPECT_EQ(costs.persistent_memory, 0);
  }
  // Training mode, 32 channels.
  {
    auto costs = predict_fused_bn(10, 20, 32, true);
    EXPECT_EQ(Costs::Duration(204913), costs.execution_time);
    EXPECT_EQ(Costs::Duration(51236), costs.compute_time);
    EXPECT_EQ(Costs::Duration(153677), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
  // Inference mode, 96 channels.
  {
    auto costs = predict_fused_bn(10, 20, 96, false);
    EXPECT_EQ(Costs::Duration(384154), costs.execution_time);
    EXPECT_EQ(Costs::Duration(76800), costs.compute_time);
    EXPECT_EQ(Costs::Duration(307354), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
  // Inference mode, 32 channels.
  {
    auto costs = predict_fused_bn(10, 20, 32, false);
    EXPECT_EQ(Costs::Duration(128052), costs.execution_time);
    EXPECT_EQ(Costs::Duration(25600), costs.compute_time);
    EXPECT_EQ(Costs::Duration(102452), costs.memory_time);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
}
// Golden-value test for FusedBatchNormGrad cost prediction at two shapes.
TEST_F(OpLevelCostEstimatorTest, PredictFusedBatchNormGrad) {
  // Helper: batch n, square spatial size `in`, channels c (always grad mode).
  auto predict_fused_bn_grad = [this](const int n, const int in,
                                      const int c) -> Costs {
    OpContext op_context = DescribeFusedBatchNorm(
        false, true, {n, in, in, c}, "NHWC");
    return estimator_.PredictCosts(op_context);
  };
  // 10x20x20x96 input.
  {
    auto costs = predict_fused_bn_grad(10, 20, 96);
    EXPECT_EQ(Costs::Duration(1037050), costs.execution_time);
    EXPECT_EQ(Costs::Duration(422496), costs.compute_time);
    EXPECT_EQ(Costs::Duration(614554), costs.memory_time);
    EXPECT_EQ(costs.num_ops_total, 1);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(costs.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(costs.temporary_memory, 0);
    EXPECT_EQ(costs.persistent_memory, 0);
  }
  // 128x7x7x384 input.
  {
    auto costs = predict_fused_bn_grad(128, 7, 384);
    EXPECT_EQ(Costs::Duration(6503809), costs.execution_time);
    EXPECT_EQ(Costs::Duration(2649677), costs.compute_time);
    EXPECT_EQ(Costs::Duration(3854132), costs.memory_time);
    EXPECT_EQ(1, costs.num_ops_total);
    EXPECT_FALSE(costs.inaccurate);
    EXPECT_EQ(0, costs.num_ops_with_unknown_shapes);
  }
}
// Verifies MaybeGetMinimumShape: unknown ranks/dims are padded or clamped to
// 1 (setting the unknown flag), known dims pass through, and rank mismatches
// are flagged as unknown.
TEST_F(OpLevelCostEstimatorTest, MaybeGetMinimumShapeTest) {
  // Fully unknown rank: all requested dims default to 1, flagged unknown.
  {
    TensorShapeProto x;
    x.set_unknown_rank(true);
    bool unknown_shapes = false;
    std::vector<int64_t> y = MaybeGetMinimumShape(x, 4, &unknown_shapes);
    EXPECT_TRUE(unknown_shapes);
    EXPECT_THAT(y, ElementsAreArray({1, 1, 1, 1}));
  }
  // Known rank 0, requesting rank 1: filled with 1s, not flagged.
  {
    TensorShapeProto x;
    x.set_unknown_rank(false);
    bool unknown_shapes = false;
    std::vector<int64_t> y = MaybeGetMinimumShape(x, 1, &unknown_shapes);
    EXPECT_FALSE(unknown_shapes);
    EXPECT_THAT(y, ElementsAreArray({1}));
  }
  // Known rank 0, requesting rank 2: same behavior at rank 2.
  {
    TensorShapeProto x;
    x.set_unknown_rank(false);
    bool unknown_shapes = false;
    std::vector<int64_t> y = MaybeGetMinimumShape(x, 2, &unknown_shapes);
    EXPECT_FALSE(unknown_shapes);
    EXPECT_THAT(y, ElementsAreArray({1, 1}));
  }
  // Exact rank match passes through; requesting a higher rank pads with 1s
  // and flags unknown.
  {
    TensorShapeProto x;
    x.set_unknown_rank(false);
    x.add_dim()->set_size(10);
    x.add_dim()->set_size(20);
    bool unknown_shapes = false;
    std::vector<int64_t> y = MaybeGetMinimumShape(x, 2, &unknown_shapes);
    EXPECT_FALSE(unknown_shapes);
    EXPECT_THAT(y, ElementsAreArray({10, 20}));
    unknown_shapes = false;
    std::vector<int64_t> z = MaybeGetMinimumShape(x, 4, &unknown_shapes);
    EXPECT_TRUE(unknown_shapes);
    EXPECT_THAT(z, ElementsAreArray({10, 20, 1, 1}));
  }
  // A single unknown (-1) dim becomes 1 and flags unknown.
  {
    TensorShapeProto x;
    x.set_unknown_rank(false);
    x.add_dim()->set_size(10);
    x.add_dim()->set_size(20);
    x.add_dim()->set_size(-1);
    x.add_dim()->set_size(20);
    bool unknown_shapes = false;
    std::vector<int64_t> y = MaybeGetMinimumShape(x, 4, &unknown_shapes);
    EXPECT_TRUE(unknown_shapes);
    EXPECT_THAT(y, ElementsAreArray({10, 20, 1, 20}));
  }
  // Requesting fewer dims than present truncates and flags unknown.
  {
    TensorShapeProto x;
    x.set_unknown_rank(false);
    x.add_dim()->set_size(10);
    x.add_dim()->set_size(20);
    x.add_dim()->set_size(30);
    x.add_dim()->set_size(20);
    bool unknown_shapes = false;
    std::vector<int64_t> y = MaybeGetMinimumShape(x, 2, &unknown_shapes);
    EXPECT_TRUE(unknown_shapes);
    EXPECT_THAT(y, ElementsAreArray({10, 20}));
  }
}
// Checks how the execution-time estimate depends on which resource is the
// bottleneck (compute, memory, or intermediate read/write bandwidth) and on
// whether compute/memory overlap is enabled.
TEST_F(OpLevelCostEstimatorTest, IntermediateRdWrBandwidth) {
  TestOpLevelCostEstimator estimator;
  // Compute-bound device: tiny gigaops, ample bandwidth.
  estimator.SetDeviceInfo(DeviceInfo(1,
                                     1));
  estimator.SetComputeMemoryOverlap(true);
  auto cost = estimator.PredictCosts(
      DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  // With overlap, execution time equals the dominant (compute) time.
  EXPECT_EQ(Costs::Duration(3548774400), cost.execution_time);
  EXPECT_EQ(cost.execution_time, cost.compute_time);
  estimator.SetComputeMemoryOverlap(false);
  cost = estimator.PredictCosts(
      DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  // Without overlap, execution time is the sum of all components.
  EXPECT_EQ(Costs::Duration(3551112192), cost.execution_time);
  EXPECT_EQ(cost.execution_time, cost.compute_time + cost.memory_time +
                                     cost.intermediate_memory_time);
  // Memory-bound device: huge gigaops, tiny memory bandwidth.
  estimator.SetDeviceInfo(DeviceInfo(99999,
                                     1));
  estimator.SetComputeMemoryOverlap(true);
  cost = estimator.PredictCosts(
      DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  EXPECT_EQ(Costs::Duration(2337792), cost.execution_time);
  EXPECT_EQ(cost.execution_time, cost.memory_time);
  estimator.SetComputeMemoryOverlap(false);
  cost = estimator.PredictCosts(
      DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  EXPECT_EQ(Costs::Duration(2373281), cost.execution_time);
  EXPECT_EQ(cost.execution_time, cost.compute_time + cost.memory_time +
                                     cost.intermediate_memory_time);
  // Intermediate-bandwidth-bound device: only the intermediate read/write
  // bandwidths are tiny.
  estimator.SetDeviceInfo(DeviceInfo(99999,
                                     9999,
                                     1,
                                     1));
  estimator.SetComputeMemoryOverlap(true);
  cost = estimator.PredictCosts(
      DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  EXPECT_EQ(Costs::Duration(2337792), cost.execution_time);
  EXPECT_EQ(cost.execution_time, cost.intermediate_memory_time);
  estimator.SetComputeMemoryOverlap(false);
  cost = estimator.PredictCosts(
      DescribeConvolution(16, 19, 19, 48, 48, 5, 5, 256));
  EXPECT_EQ(Costs::Duration(2373515), cost.execution_time);
  EXPECT_EQ(cost.execution_time, cost.compute_time + cost.memory_time +
                                     cost.intermediate_memory_time);
}
// Golden-value test for Einsum cost prediction. Each scope covers one
// equation class (plain matmul, batched, broadcast, reduction, fallback
// cases) and also asserts that Einsum and XlaEinsum are costed identically.
TEST_F(OpLevelCostEstimatorTest, Einsum) {
  // Plain matmul-like equation.
  {
    auto cost = PredictCosts(DescribeEinsum({100, 50}, {100, 50}, "ik,jk->ij"));
    EXPECT_EQ(Costs::Duration(104000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
              cost.compute_time);
    EXPECT_EQ(Costs::Duration(4000), cost.memory_time);
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
    EXPECT_EQ(PredictCosts(DescribeEinsum({100, 50}, {100, 50}, "ik,jk->ij"))
                  .execution_time,
              PredictCosts(DescribeXlaEinsum({100, 50}, {100, 50}, "ik,jk->ij"))
                  .execution_time);
  }
  // Batched matmul (batch dim B appears in both operands).
  {
    auto cost = PredictCosts(
        DescribeEinsum({25, 100, 50}, {100, 50, 25}, "Bik,jkB->Bij"));
    EXPECT_EQ(Costs::Duration(25 * 104000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
              cost.compute_time);
    EXPECT_EQ(Costs::Duration(25 * 4000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(PredictCosts(
                  DescribeEinsum({25, 100, 50}, {100, 50, 25}, "Bik,jkB->Bij"))
                  .execution_time,
              PredictCosts(DescribeXlaEinsum({25, 100, 50}, {100, 50, 25},
                                             "Bik,jkB->Bij"))
                  .execution_time);
  }
  // Two batch dims (B and N).
  {
    auto cost = PredictCosts(DescribeEinsum(
        {25, 16, 100, 50}, {16, 100, 50, 25}, "BNik,NjkB->BNij"));
    EXPECT_EQ(Costs::Duration(16 * 25 * 104000), cost.execution_time);
    EXPECT_EQ(
        Costs::Duration(16 * 25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
        cost.compute_time);
    EXPECT_EQ(Costs::Duration(16 * 25 * 4000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({25, 16, 100, 50}, {16, 100, 50, 25},
                                    "BNik,NjkB->BNij"))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({25, 16, 100, 50}, {16, 100, 50, 25},
                                       "BNik,NjkB->BNij"))
            .execution_time);
  }
  // Batch dim only on the first operand.
  {
    auto cost =
        PredictCosts(DescribeEinsum({25, 100, 50}, {100, 50}, "Aik,jk->Aij"));
    EXPECT_EQ(Costs::Duration(2552000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
              cost.compute_time);
    EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({25, 100, 50}, {100, 50}, "Aik,jk->Aij"))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({25, 100, 50}, {100, 50}, "Aik,jk->Aij"))
            .execution_time);
  }
  // Batch dim only on the second operand.
  {
    auto cost =
        PredictCosts(DescribeEinsum({100, 50}, {25, 100, 50}, "ik,Bjk->ijB"));
    EXPECT_EQ(Costs::Duration(2552000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(25 * 100 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
              cost.compute_time);
    EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({100, 50}, {25, 100, 50}, "ik,Bjk->ijB"))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({100, 50}, {25, 100, 50}, "ik,Bjk->ijB"))
            .execution_time);
  }
  // Two contracted dims (k and l).
  {
    auto cost = PredictCosts(
        DescribeEinsum({100, 50, 25}, {100, 50, 25}, "ikl,jkl->ij"));
    EXPECT_EQ(Costs::Duration(2600000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(100 * 50 * 25 * 100 * 2 / (1000 * 10 * 1e-3)),
              cost.compute_time);
    EXPECT_EQ(Costs::Duration(100000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(PredictCosts(
                  DescribeEinsum({100, 50, 25}, {100, 50, 25}, "ikl,jkl->ij"))
                  .execution_time,
              PredictCosts(DescribeXlaEinsum({100, 50, 25}, {100, 50, 25},
                                             "ikl,jkl->ij"))
                  .execution_time);
  }
  // Unary equation (transpose): not matmul-convertible, falls back to a
  // memory-only, inaccurate estimate.
  {
    auto cost = PredictCosts(DescribeEinsum({100, 50}, {}, "ij->ji"));
    EXPECT_EQ(Costs::Duration(2000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(0), cost.compute_time);
    EXPECT_EQ(Costs::Duration(2000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({100, 50}, {}, "ij->ji")).execution_time,
        PredictCosts(DescribeXlaEinsum({100, 50}, {}, "ij->ji"))
            .execution_time);
  }
  // Input ranks that do not match the equation: fallback, inaccurate.
  {
    auto cost =
        PredictCosts(DescribeEinsum({100, 50, 25}, {50, 100}, "ik,kl->il"));
    EXPECT_EQ(Costs::Duration(52000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(0), cost.compute_time);
    EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({100, 50, 25}, {50, 100}, "ik,kl->il"))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({100, 50, 25}, {50, 100}, "ik,kl->il"))
            .execution_time);
    cost = PredictCosts(DescribeEinsum({100, 50}, {50, 100, 25}, "ik,kl->il"));
    EXPECT_EQ(Costs::Duration(52000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(0), cost.compute_time);
    EXPECT_EQ(Costs::Duration(52000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({100, 50}, {50, 100, 25}, "ik,kl->il"))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({100, 50}, {50, 100, 25}, "ik,kl->il"))
            .execution_time);
  }
  // Ellipsis in the equation: fallback, inaccurate.
  {
    auto cost = PredictCosts(DescribeEinsum(
        {100, 50, 25, 16}, {50, 100, 32, 12}, "ik...,kl...->il..."));
    EXPECT_EQ(Costs::Duration(1568000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(0), cost.compute_time);
    EXPECT_EQ(Costs::Duration(1568000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({100, 50, 25, 16}, {50, 100, 32, 12},
                                    "ik...,kl...->il..."))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({100, 50, 25, 16}, {50, 100, 32, 12},
                                       "ik...,kl...->il..."))
            .execution_time);
  }
  // Repeated index in one operand (diagonal): fallback, inaccurate.
  {
    auto cost =
        PredictCosts(DescribeEinsum({100, 100, 50}, {50, 100}, "iik,kl->il"));
    EXPECT_EQ(Costs::Duration(202000), cost.execution_time);
    EXPECT_EQ(Costs::Duration(0), cost.compute_time);
    EXPECT_EQ(Costs::Duration(202000), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(0, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(
        PredictCosts(DescribeEinsum({100, 100, 50}, {50, 100}, "iik,kl->il"))
            .execution_time,
        PredictCosts(DescribeXlaEinsum({100, 100, 50}, {50, 100}, "iik,kl->il"))
            .execution_time);
  }
  // Unknown (-1) dimension: estimate proceeds with minimum shape but is
  // flagged inaccurate with one unknown-shape op.
  {
    auto cost = PredictCosts(DescribeEinsum({-1, 50}, {100, 50}, "ik,jk->ij"));
    EXPECT_EQ(Costs::Duration(3020), cost.execution_time);
    EXPECT_EQ(Costs::Duration(1 * 50 * 100 * 2 / (1000 * 10 * 1e-3)),
              cost.compute_time);
    EXPECT_EQ(Costs::Duration(2020), cost.memory_time);
    EXPECT_EQ(1, cost.num_ops_total);
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(1, cost.num_ops_with_unknown_shapes);
    EXPECT_EQ(PredictCosts(DescribeEinsum({-1, 50}, {100, 50}, "ik,jk->ij"))
                  .execution_time,
              PredictCosts(DescribeXlaEinsum({-1, 50}, {100, 50}, "ik,jk->ij"))
                  .execution_time);
  }
}
// Golden-value test for resource-variable ops on a unit-speed device
// (1 gigaop/s, 1 GB/s), so durations map directly to op/byte counts.
TEST_F(OpLevelCostEstimatorTest, PredictResourceVariableOps) {
  TestOpLevelCostEstimator estimator;
  estimator.SetDeviceInfo(DeviceInfo(1, 1));
  // AssignVariableOp: pure memory traffic, no compute.
  {
    OpContext op_context;
    op_context.op_info.set_op("AssignVariableOp");
    DescribeDummyTensor(op_context.op_info.add_inputs());
    DescribeTensor1D(100, op_context.op_info.add_inputs());
    auto cost = estimator.PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(400), cost.memory_time);
    EXPECT_EQ(Costs::Duration(0), cost.compute_time);
    EXPECT_EQ(Costs::Duration(400), cost.execution_time);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
  // AssignSubVariableOp: adds one compute op per element; execution time is
  // still dominated by the memory time.
  {
    OpContext op_context;
    op_context.op_info.set_op("AssignSubVariableOp");
    DescribeDummyTensor(op_context.op_info.add_inputs());
    DescribeTensor1D(100, op_context.op_info.add_inputs());
    auto cost = estimator.PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(400), cost.memory_time);
    EXPECT_EQ(Costs::Duration(100), cost.compute_time);
    EXPECT_EQ(Costs::Duration(400), cost.execution_time);
    EXPECT_FALSE(cost.inaccurate);
  }
}
// Golden-value test for AddN with three identical 1x10x10x10 inputs on the
// default CPU device.
TEST_F(OpLevelCostEstimatorTest, AddNExecutionTime) {
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("AddN");
  // Three identical 4-D inputs.
  for (int i = 0; i < 3; ++i) {
    DescribeTensor4D(1, 10, 10, 10, op_context.op_info.add_inputs());
  }
  const auto cost = PredictCosts(op_context);
  EXPECT_EQ(Costs::Duration(1200), cost.memory_time);
  EXPECT_EQ(Costs::Duration(200), cost.compute_time);
  EXPECT_EQ(Costs::Duration(1400), cost.execution_time);
  EXPECT_EQ(cost.num_ops_total, 1);
  EXPECT_FALSE(cost.inaccurate);
  EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  EXPECT_EQ(cost.temporary_memory, 0);
  EXPECT_EQ(cost.persistent_memory, 0);
}
// Identity-like ops should be costed as near-free: zero memory time and a
// minimal one-unit compute time, regardless of tensor size.
TEST_F(OpLevelCostEstimatorTest, IdentityOpExecutionTime) {
  const std::vector<std::string> identity_ops = {
      "_Recv",         "_Send",       "BitCast",         "Identity",
      "Enter",         "Exit",        "IdentityN",       "Merge",
      "NextIteration", "Placeholder", "PreventGradient", "RefIdentity",
      "Reshape",       "StopGradient", "Switch"};
  const int kTensorSize = 1000;
  // Expected golden values are identical for every op in the list.
  const int kExpectedMemoryTime = 0;
  const int kExpectedComputeTime = 1;
  for (const auto& op_name : identity_ops) {
    OpContext op_context = DescribeUnaryOp(op_name, kTensorSize);
    const auto cost = PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(kExpectedMemoryTime), cost.memory_time);
    EXPECT_EQ(Costs::Duration(kExpectedComputeTime), cost.compute_time);
    EXPECT_EQ(Costs::Duration(kExpectedComputeTime + kExpectedMemoryTime),
              cost.execution_time);
    // Output buffer: kTensorSize float elements.
    EXPECT_EQ(cost.max_memory, kTensorSize * 4);
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
}
// Data-movement ops should be costed as memory-only: nonzero memory time and
// zero compute time, for every op in the list.
TEST_F(OpLevelCostEstimatorTest, PureMemoryOpExecutionTime) {
  std::vector<std::string> reshape_ops = {
      "ConcatV2",     "DataFormatVecPermute",
      "DepthToSpace", "ExpandDims",
      "Fill",         "OneHot",
      "Pack",         "Range",
      "SpaceToDepth", "Split",
      "Squeeze",      "Transpose",
      "Tile",         "Unpack"};
  const int kTensorSize = 1000;
  for (auto reshape_op : reshape_ops) {
    OpContext op_context = DescribeUnaryOp(reshape_op, kTensorSize);
    // 1000 float elements read + written at CPU bandwidth.
    const int kExpectedMemoryTime = 800;
    const int kExpectedComputeTime = 0;
    auto cost = PredictCosts(op_context);
    EXPECT_EQ(Costs::Duration(kExpectedMemoryTime), cost.memory_time);
    EXPECT_EQ(Costs::Duration(kExpectedComputeTime), cost.compute_time);
    EXPECT_EQ(Costs::Duration(kExpectedComputeTime + kExpectedMemoryTime),
              cost.execution_time);
    EXPECT_EQ(cost.max_memory, kTensorSize * 4);
    EXPECT_EQ(cost.num_ops_total, 1);
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
  }
}
// Cost prediction for ResizeBilinear: degenerate shapes yield zero cost,
// missing outputs yield an inaccurate memory-only estimate, and valid shapes
// are checked against an expected cost computed from the op-count model
// (interp-weight cost per output row/col plus lerp cost per output element).
TEST_F(OpLevelCostEstimatorTest, ResizeBilinearExecutionTime) {
  const int kImageDim = 255;
  const int kChannelSize = 10;
  const int kComputeLerpCost = 9;
  // Degenerate cases: no outputs, then an all-zero output shape.
  {
    OpContext op_context;
    SetCpuDevice(&op_context.op_info);
    op_context.op_info.set_op("ResizeBilinear");
    DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
                     op_context.op_info.add_inputs());
    auto cost = PredictCosts(op_context);
    ExpectZeroCost(cost);
    op_context.op_info.clear_inputs();
    DescribeTensor4D(0, 0, 0, 0, op_context.op_info.add_outputs());
    cost = PredictCosts(op_context);
    ExpectZeroCost(cost);
  }
  // Zero-sized output with a real input: memory-only estimate; without the
  // half_pixel_centers attr the result is flagged inaccurate, with it set
  // (to false) the estimate becomes accurate.
  {
    OpContext op_context;
    SetCpuDevice(&op_context.op_info);
    op_context.op_info.set_op("ResizeBilinear");
    DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
                     op_context.op_info.add_inputs());
    const int kExpectedMemoryTime = kImageDim * kImageDim * 4;
    DescribeTensor4D(0, 0, 0, 0, op_context.op_info.add_outputs());
    auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(0));
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.execution_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_TRUE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
    EXPECT_EQ(cost.temporary_memory, 0);
    EXPECT_EQ(cost.persistent_memory, 0);
    AttrValue half_pixel_centers;
    half_pixel_centers.set_b(false);
    (*op_context.op_info.mutable_attr())["half_pixel_centers"] =
        half_pixel_centers;
    cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(0));
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.execution_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  }
  // Shared context for the valid-shape cases below: 255x255 -> 100x100.
  const int kOutputImageDim = 100;
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("ResizeBilinear");
  DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
                   op_context.op_info.add_inputs());
  DescribeTensor4D(1, kOutputImageDim, kOutputImageDim, kChannelSize,
                   op_context.op_info.add_outputs());
  const int kExpectedMemoryTime =
      (kImageDim * kImageDim + kOutputImageDim * kOutputImageDim) * 4;
  // half_pixel_centers = false: interp-weight cost 10 per output row/col.
  {
    AttrValue half_pixel_centers;
    half_pixel_centers.set_b(false);
    (*op_context.op_info.mutable_attr())["half_pixel_centers"] =
        half_pixel_centers;
    const int kInterpWeightCost = 10;
    const int num_ops =
        kInterpWeightCost * (kOutputImageDim * 2) +
        kComputeLerpCost * (kOutputImageDim * kOutputImageDim * kChannelSize);
    const int expected_compute_time = std::ceil(
        num_ops /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    const auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.execution_time,
              Costs::Duration(kExpectedMemoryTime + expected_compute_time));
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  }
  // half_pixel_centers = true: interp-weight cost rises to 12.
  {
    AttrValue half_pixel_centers;
    half_pixel_centers.set_b(true);
    (*op_context.op_info.mutable_attr())["half_pixel_centers"] =
        half_pixel_centers;
    const int kInterpWeightCost = 12;
    const int num_ops =
        kInterpWeightCost * (kOutputImageDim * 2) +
        kComputeLerpCost * (kOutputImageDim * kOutputImageDim * kChannelSize);
    const int expected_compute_time = std::ceil(
        num_ops /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    const auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.execution_time,
              Costs::Duration(kExpectedMemoryTime + expected_compute_time));
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  }
  // Very large output (40000x40000): op counts exceed int32 range, so the
  // expected values are computed in int64 to check for overflow bugs.
  {
    op_context.op_info.clear_outputs();
    constexpr int64_t kLargeOutputImageDim = 40000;
    DescribeTensor4D(1, kLargeOutputImageDim, kLargeOutputImageDim,
                     kChannelSize, op_context.op_info.add_outputs());
    const int64_t kInterpWeightCost = 12;
    AttrValue half_pixel_centers;
    half_pixel_centers.set_b(true);
    (*op_context.op_info.mutable_attr())["half_pixel_centers"] =
        half_pixel_centers;
    const int64_t num_ops =
        kInterpWeightCost * (kLargeOutputImageDim * 2) +
        kComputeLerpCost *
            (kLargeOutputImageDim * kLargeOutputImageDim * kChannelSize);
    const int64_t expected_compute_time = std::ceil(
        num_ops /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    const int64_t expected_memory_time =
        (kImageDim * kImageDim + kLargeOutputImageDim * kLargeOutputImageDim) *
        4;
    const auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
    EXPECT_EQ(cost.memory_time, Costs::Duration(expected_memory_time));
    EXPECT_EQ(cost.execution_time,
              Costs::Duration(expected_memory_time + expected_compute_time));
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  }
}
// Cost prediction for CropAndResize with both interpolation methods; the
// expected compute time is rebuilt from the per-box/per-pixel op-count model
// and compared against the estimator's output.
TEST_F(OpLevelCostEstimatorTest, CropAndResizeExecutionTime) {
  const int kImageDim = 255;
  const int kChannelSize = 10;
  const int kOutputImageDim = 100;
  const int kNumBoxes = 10;
  const int kOutputElements =
      kNumBoxes * kOutputImageDim * kOutputImageDim * kChannelSize;
  OpContext op_context;
  SetCpuDevice(&op_context.op_info);
  op_context.op_info.set_op("CropAndResize");
  // Inputs: the image, plus a [kNumBoxes, 4] int64 box tensor.
  DescribeTensor4D(1, kImageDim, kImageDim, kChannelSize,
                   op_context.op_info.add_inputs());
  DescribeArbitraryRankInput({kNumBoxes, 4}, DT_INT64, &op_context.op_info);
  DescribeTensor4D(kNumBoxes, kOutputImageDim, kOutputImageDim, kChannelSize,
                   op_context.op_info.add_outputs());
  // Bytes moved: image floats + box coordinates + output floats.
  const int kExpectedMemoryTime =
      (kImageDim * kImageDim * 4 +
       kNumBoxes * 4 * 8 / 10 +
       kNumBoxes * kOutputImageDim * kOutputImageDim * 4);
  // method = "bilinear".
  {
    AttrValue method;
    method.set_s("bilinear");
    (*op_context.op_info.mutable_attr())["method"] = method;
    int num_ops = 28 * kNumBoxes + 4 * kNumBoxes * kOutputImageDim +
                  4 * kNumBoxes * kOutputImageDim * kOutputImageDim +
                  3 * kNumBoxes * kOutputImageDim +
                  3 * kNumBoxes * kOutputImageDim * kOutputImageDim +
                  13 * kOutputElements;
    const int expected_compute_time = std::ceil(
        num_ops /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    const auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.execution_time,
              Costs::Duration(kExpectedMemoryTime + expected_compute_time));
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  }
  // method = "nearest" (cheaper per-pixel model, same memory time).
  {
    AttrValue method;
    method.set_s("nearest");
    (*op_context.op_info.mutable_attr())["method"] = method;
    int num_ops = 28 * kNumBoxes + 4 * kNumBoxes * kOutputImageDim +
                  4 * kNumBoxes * kOutputImageDim * kOutputImageDim +
                  2 * kNumBoxes * kOutputImageDim * kOutputImageDim +
                  kOutputElements;
    const int expected_compute_time = std::ceil(
        num_ops /
        estimator_.GetDeviceInfo(op_context.op_info.device()).gigaops);
    const auto cost = PredictCosts(op_context);
    EXPECT_EQ(cost.compute_time, Costs::Duration(expected_compute_time));
    EXPECT_EQ(cost.memory_time, Costs::Duration(kExpectedMemoryTime));
    EXPECT_EQ(cost.execution_time,
              Costs::Duration(kExpectedMemoryTime + expected_compute_time));
    EXPECT_FALSE(cost.inaccurate);
    EXPECT_EQ(cost.num_ops_with_unknown_shapes, 0);
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/op_level_cost_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/op_level_cost_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
09110132-03f3-4e02-8c26-76dc3137b184 | cpp | tensorflow/tensorflow | shuffle_and_repeat_fusion | tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc | tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc | #include "tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kShuffleDataset[] = "ShuffleDataset";
constexpr char kShuffleDatasetV2[] = "ShuffleDatasetV2";
constexpr char kShuffleDatasetV3[] = "ShuffleDatasetV3";
constexpr char kRepeatDataset[] = "RepeatDataset";
constexpr char kShuffleAndRepeatDataset[] = "ShuffleAndRepeatDataset";
constexpr char kShuffleAndRepeatDatasetV2[] = "ShuffleAndRepeatDatasetV2";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
// Builds a `ShuffleAndRepeatDataset` node that replaces the chain
// `ShuffleDataset` -> `RepeatDataset`.  The fused node takes the shuffle op's
// four inputs (input_dataset, buffer_size, seed, seed2) followed by the repeat
// count, and inherits the shuffle node's shape/type and reshuffle attrs.
Status FuseShuffleV1AndRepeat(const NodeDef& shuffle_node,
                              const NodeDef& repeat_node,
                              MutableGraphView* graph, GraphDef* output,
                              NodeDef* fused_node) {
  fused_node->set_op(kShuffleAndRepeatDataset);
  graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output,
                                      fused_node);
  // Forward all four V1 shuffle inputs in order, then append the repeat count.
  for (int input_index = 0; input_index < 4; ++input_index) {
    fused_node->add_input(shuffle_node.input(input_index));
  }
  fused_node->add_input(repeat_node.input(1));
  graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
  graph_utils::CopyAttribute(kReshuffleEachIteration, shuffle_node, fused_node);
  graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
  return absl::OkStatus();
}
// Builds a `ShuffleAndRepeatDatasetV2` node that replaces the chain
// `ShuffleDatasetV2` -> `RepeatDataset`.  V2 shuffle carries its seeds in a
// seed-generator resource (shuffle input 2), so the fused node is given
// explicit zero seeds plus that resource handle.
Status FuseShuffleV2AndRepeat(const NodeDef& shuffle_node,
                              const NodeDef& repeat_node,
                              MutableGraphView* graph, GraphDef* output,
                              NodeDef* fused_node) {
  fused_node->set_op(kShuffleAndRepeatDatasetV2);
  graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDatasetV2, output,
                                      fused_node);
  // Placeholder seed/seed2 constant; real randomness comes from the
  // seed-generator resource forwarded below.
  NodeDef zero_node = *graph_utils::AddScalarConstNode<int64_t>(0, graph);
  fused_node->add_input(shuffle_node.input(0));
  fused_node->add_input(shuffle_node.input(1));
  fused_node->add_input(zero_node.name());
  fused_node->add_input(zero_node.name());
  fused_node->add_input(repeat_node.input(1));
  fused_node->add_input(shuffle_node.input(2));
  graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
  // V2 shuffle has no reshuffle_each_iteration attr; the fused op defaults to
  // reshuffling, matching V2 shuffle semantics.
  (*fused_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
  graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
  return absl::OkStatus();
}
// Builds a `ShuffleAndRepeatDatasetV2` node that replaces the chain
// `ShuffleDatasetV3` -> `RepeatDataset`.  V3 shuffle carries explicit seeds
// (inputs 2, 3) *and* a seed-generator resource (input 4); all five inputs are
// forwarded with the repeat count spliced in before the resource handle.
Status FuseShuffleV3AndRepeat(const NodeDef& shuffle_node,
                              const NodeDef& repeat_node,
                              MutableGraphView* graph, GraphDef* output,
                              NodeDef* fused_node) {
  fused_node->set_op(kShuffleAndRepeatDatasetV2);
  // NOTE(review): the name prefix uses kShuffleAndRepeatDataset (V1) even
  // though the op is the V2 variant; only the op string is semantically
  // significant, but confirm whether the V2 constant was intended here.
  graph_utils::SetUniqueGraphNodeName(kShuffleAndRepeatDataset, output,
                                      fused_node);
  fused_node->add_input(shuffle_node.input(0));
  fused_node->add_input(shuffle_node.input(1));
  fused_node->add_input(shuffle_node.input(2));
  fused_node->add_input(shuffle_node.input(3));
  fused_node->add_input(repeat_node.input(1));
  fused_node->add_input(shuffle_node.input(4));
  graph_utils::CopyShapesAndTypesAttrs(shuffle_node, fused_node);
  graph_utils::CopyAttribute(kReshuffleEachIteration, shuffle_node, fused_node);
  graph_utils::MaybeSetFusedMetadata(shuffle_node, repeat_node, fused_node);
  return absl::OkStatus();
}
}
// Rewrites every Shuffle{,V2,V3} -> Repeat chain in `item.graph` into the
// corresponding fused ShuffleAndRepeat op, repoints consumers at the fused
// node, and deletes the original pair unless either node must be preserved.
Status ShuffleAndRepeatFusion::OptimizeAndCollectStats(
    Cluster* cluster, const GrapplerItem& item, GraphDef* output,
    OptimizationStats* stats) {
  *output = item.graph;
  MutableGraphView graph(output);
  absl::flat_hash_set<string> nodes_to_delete;
  // Iterate over the *original* graph while mutating `graph` (the copy), so
  // newly added fused nodes are never revisited.
  for (const NodeDef& repeat_node : item.graph.node()) {
    if (repeat_node.op() != kRepeatDataset) {
      continue;
    }
    // Input 0 of the repeat node; only fuse when it is one of the shuffle
    // variants handled below.
    const NodeDef& shuffle_node =
        *graph_utils::GetInputNode(repeat_node, graph);
    NodeDef fused_node;
    if (shuffle_node.op() == kShuffleDataset) {
      TF_RETURN_IF_ERROR(FuseShuffleV1AndRepeat(shuffle_node, repeat_node,
                                                &graph, output, &fused_node));
    } else if (shuffle_node.op() == kShuffleDatasetV2) {
      TF_RETURN_IF_ERROR(FuseShuffleV2AndRepeat(shuffle_node, repeat_node,
                                                &graph, output, &fused_node));
    } else if (shuffle_node.op() == kShuffleDatasetV3) {
      TF_RETURN_IF_ERROR(FuseShuffleV3AndRepeat(shuffle_node, repeat_node,
                                                &graph, output, &fused_node));
    } else {
      continue;
    }
    // Redirect consumers of both original nodes to the fused node.  The
    // repeat node's fanouts must be updated first: updating the shuffle
    // node's fanouts first would also rewire the repeat node itself.
    NodeDef& shuffle_and_repeat_node = *graph.AddNode(std::move(fused_node));
    TF_RETURN_IF_ERROR(graph.UpdateFanouts(repeat_node.name(),
                                           shuffle_and_repeat_node.name()));
    TF_RETURN_IF_ERROR(graph.UpdateFanouts(shuffle_node.name(),
                                           shuffle_and_repeat_node.name()));
    // Keep the originals alive if either is a fetch/preserved node.
    const auto nodes_to_preserve = item.NodesToPreserve();
    if (nodes_to_preserve.find(shuffle_node.name()) ==
            nodes_to_preserve.end() &&
        nodes_to_preserve.find(repeat_node.name()) == nodes_to_preserve.end()) {
      nodes_to_delete.insert(shuffle_node.name());
      nodes_to_delete.insert(repeat_node.name());
    }
    stats->num_changes++;
  }
  TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
  return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(ShuffleAndRepeatFusion,
"shuffle_and_repeat_fusion");
}
} | #include "tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kReshuffleEachIteration[] = "reshuffle_each_iteration";
// Builds range -> ShuffleDataset(V1) -> RepeatDataset and verifies the
// optimizer replaces the pair with one ShuffleAndRepeatDataset node carrying
// the shuffle inputs, the repeat count, and the shuffle node's attrs.
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV1AndRepeat) {
  GrapplerItem item;
  MutableGraphView graph(&item.graph);
  // output_shapes / output_types attrs required on every dataset node.
  std::vector<std::pair<string, AttrValue>> common_attrs(2);
  AttrValue shapes_attr;
  SetAttrValue(kOutputShapes, &shapes_attr);
  common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
  AttrValue types_attr;
  SetAttrValue(kOutputTypes, &types_attr);
  common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
  // range(0, 10, 1) source dataset.
  NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
  NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
  NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
  std::vector<string> range_inputs(3);
  range_inputs[0] = start_node->name();
  range_inputs[1] = stop_node->name();
  range_inputs[2] = step_node->name();
  NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
                                             common_attrs, &graph);
  NodeDef *buffer_size_node =
      graph_utils::AddScalarConstNode<int64_t>(128, &graph);
  NodeDef *seed_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  NodeDef *seed2_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  std::vector<string> shuffle_inputs(4);
  shuffle_inputs[0] = range_node->name();
  shuffle_inputs[1] = buffer_size_node->name();
  shuffle_inputs[2] = seed_node->name();
  shuffle_inputs[3] = seed2_node->name();
  NodeDef *shuffle_node = graph_utils::AddNode(
      "", "ShuffleDataset", shuffle_inputs, common_attrs, &graph);
  (*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
  // repeat(-1) == repeat forever.
  NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  std::vector<string> repeat_inputs(2);
  repeat_inputs[0] = shuffle_node->name();
  repeat_inputs[1] = count_node->name();
  NodeDef *repeat_node = graph_utils::AddNode(
      "", "RepeatDataset", repeat_inputs, common_attrs, &graph);
  ShuffleAndRepeatFusion optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  // Originals removed, fused node present with all five inputs in order.
  EXPECT_FALSE(
      graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
  EXPECT_FALSE(
      graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
  EXPECT_TRUE(
      graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDataset", output));
  NodeDef shuffle_and_repeat_node = output.node(
      graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDataset", output));
  EXPECT_EQ(shuffle_and_repeat_node.input_size(), 5);
  EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
  EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
  EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2));
  EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3));
  EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
  for (const auto &attr :
       {kOutputShapes, kOutputTypes, kReshuffleEachIteration}) {
    EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
                                   shuffle_node->attr().at(attr)));
  }
}
// Builds range -> ShuffleDatasetV2 -> RepeatDataset and verifies fusion into
// ShuffleAndRepeatDatasetV2: six inputs (dataset, buffer_size, zero seeds,
// count, seed generator) and reshuffle_each_iteration forced to true.
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV2AndRepeat) {
  GrapplerItem item;
  MutableGraphView graph(&item.graph);
  std::vector<std::pair<string, AttrValue>> common_attrs(2);
  AttrValue shapes_attr;
  SetAttrValue(kOutputShapes, &shapes_attr);
  common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
  AttrValue types_attr;
  SetAttrValue(kOutputTypes, &types_attr);
  common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
  NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
  NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
  NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
  std::vector<string> range_inputs(3);
  range_inputs[0] = start_node->name();
  range_inputs[1] = stop_node->name();
  range_inputs[2] = step_node->name();
  NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
                                             common_attrs, &graph);
  NodeDef *buffer_size_node =
      graph_utils::AddScalarConstNode<int64_t>(128, &graph);
  // V2 shuffle takes a seed-generator resource handle instead of seeds.
  NodeDef *seed_generator_node =
      graph_utils::AddScalarConstNode<StringPiece>("dummy_resource", &graph);
  std::vector<string> shuffle_inputs(3);
  shuffle_inputs[0] = range_node->name();
  shuffle_inputs[1] = buffer_size_node->name();
  shuffle_inputs[2] = seed_generator_node->name();
  NodeDef *shuffle_node = graph_utils::AddNode(
      "", "ShuffleDatasetV2", shuffle_inputs, common_attrs, &graph);
  NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  std::vector<string> repeat_inputs(2);
  repeat_inputs[0] = shuffle_node->name();
  repeat_inputs[1] = count_node->name();
  NodeDef *repeat_node = graph_utils::AddNode(
      "", "RepeatDataset", repeat_inputs, common_attrs, &graph);
  ShuffleAndRepeatFusion optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  EXPECT_FALSE(
      graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
  EXPECT_FALSE(
      graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
  EXPECT_TRUE(
      graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output));
  NodeDef shuffle_and_repeat_node = output.node(
      graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output));
  EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6);
  EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
  EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
  // Inputs 2 and 3 are the synthesized zero seeds; not checked by name here.
  EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
  EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(2));
  for (const auto &attr : {kOutputShapes, kOutputTypes}) {
    EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
                                   shuffle_node->attr().at(attr)));
  }
  EXPECT_TRUE(shuffle_and_repeat_node.attr().at(kReshuffleEachIteration).b());
}
// Builds range -> ShuffleDatasetV3 -> RepeatDataset and verifies fusion into
// ShuffleAndRepeatDatasetV2: the four V3 shuffle inputs, the repeat count,
// then the seed-generator handle, with the shuffle node's attrs copied over.
TEST(ShuffleAndRepeatFusionTest, FuseShuffleV3AndRepeat) {
  GrapplerItem item;
  MutableGraphView graph(&item.graph);
  std::vector<std::pair<string, AttrValue>> common_attrs(2);
  AttrValue shapes_attr;
  SetAttrValue(kOutputShapes, &shapes_attr);
  common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
  AttrValue types_attr;
  SetAttrValue(kOutputTypes, &types_attr);
  common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
  NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
  NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
  NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
  std::vector<string> range_inputs(3);
  range_inputs[0] = start_node->name();
  range_inputs[1] = stop_node->name();
  range_inputs[2] = step_node->name();
  NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
                                             common_attrs, &graph);
  NodeDef *buffer_size_node =
      graph_utils::AddScalarConstNode<int64_t>(128, &graph);
  // V3 shuffle has both explicit seeds and a seed-generator resource.
  NodeDef *seed_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  NodeDef *seed2_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  NodeDef *seed_generator_node =
      graph_utils::AddScalarConstNode<StringPiece>("dummy_resource", &graph);
  std::vector<string> shuffle_inputs(5);
  shuffle_inputs[0] = range_node->name();
  shuffle_inputs[1] = buffer_size_node->name();
  shuffle_inputs[2] = seed_node->name();
  shuffle_inputs[3] = seed2_node->name();
  shuffle_inputs[4] = seed_generator_node->name();
  NodeDef *shuffle_node = graph_utils::AddNode(
      "", "ShuffleDatasetV3", shuffle_inputs, common_attrs, &graph);
  (*shuffle_node->mutable_attr())[kReshuffleEachIteration].set_b(true);
  NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  std::vector<string> repeat_inputs(2);
  repeat_inputs[0] = shuffle_node->name();
  repeat_inputs[1] = count_node->name();
  NodeDef *repeat_node = graph_utils::AddNode(
      "", "RepeatDataset", repeat_inputs, common_attrs, &graph);
  ShuffleAndRepeatFusion optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  EXPECT_FALSE(
      graph_utils::ContainsGraphNodeWithName(shuffle_node->name(), output));
  EXPECT_FALSE(
      graph_utils::ContainsGraphNodeWithName(repeat_node->name(), output));
  EXPECT_TRUE(
      graph_utils::ContainsNodeWithOp("ShuffleAndRepeatDatasetV2", output));
  NodeDef shuffle_and_repeat_node = output.node(
      graph_utils::FindGraphNodeWithOp("ShuffleAndRepeatDatasetV2", output));
  EXPECT_EQ(shuffle_and_repeat_node.input_size(), 6);
  EXPECT_EQ(shuffle_and_repeat_node.input(0), shuffle_node->input(0));
  EXPECT_EQ(shuffle_and_repeat_node.input(1), shuffle_node->input(1));
  EXPECT_EQ(shuffle_and_repeat_node.input(2), shuffle_node->input(2));
  EXPECT_EQ(shuffle_and_repeat_node.input(3), shuffle_node->input(3));
  EXPECT_EQ(shuffle_and_repeat_node.input(4), repeat_node->input(1));
  EXPECT_EQ(shuffle_and_repeat_node.input(5), shuffle_node->input(4));
  for (const auto &attr :
       {kOutputShapes, kOutputTypes, kReshuffleEachIteration}) {
    EXPECT_TRUE(AreAttrValuesEqual(shuffle_and_repeat_node.attr().at(attr),
                                   shuffle_node->attr().at(attr)));
  }
}
// A RepeatDataset whose input is not a shuffle variant must be left alone:
// the optimized graph is identical to the input graph.
TEST(ShuffleAndRepeatFusionTest, NoChange) {
  GrapplerItem item;
  MutableGraphView graph(&item.graph);
  std::vector<std::pair<string, AttrValue>> common_attrs(2);
  AttrValue shapes_attr;
  SetAttrValue(kOutputShapes, &shapes_attr);
  common_attrs[0] = std::make_pair(kOutputShapes, shapes_attr);
  AttrValue types_attr;
  SetAttrValue(kOutputTypes, &types_attr);
  common_attrs[1] = std::make_pair(kOutputTypes, types_attr);
  NodeDef *start_node = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
  NodeDef *stop_node = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
  NodeDef *step_node = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
  std::vector<string> range_inputs(3);
  range_inputs[0] = start_node->name();
  range_inputs[1] = stop_node->name();
  range_inputs[2] = step_node->name();
  NodeDef *range_node = graph_utils::AddNode("", "RangeDataset", range_inputs,
                                             common_attrs, &graph);
  NodeDef *count_node = graph_utils::AddScalarConstNode<int64_t>(-1, &graph);
  std::vector<string> repeat_inputs(2);
  // Repeat consumes the range dataset directly -- no shuffle to fuse with.
  repeat_inputs[0] = range_node->name();
  repeat_inputs[1] = count_node->name();
  graph_utils::AddNode("", "RepeatDataset", repeat_inputs, common_attrs,
                       &graph);
  ShuffleAndRepeatFusion optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  EXPECT_TRUE(graph_utils::Compare(*graph.graph(), output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/shuffle_and_repeat_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
404138cb-ac73-4e58-893c-69a8a5dcda55 | cpp | tensorflow/tensorflow | dcn_utils | tensorflow/core/profiler/convert/dcn_utils.cc | tensorflow/core/profiler/convert/dcn_utils_test.cc | #include "tensorflow/core/profiler/convert/dcn_utils.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "xla/tsl/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::MicroToNano;
using tsl::profiler::StatType;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XStatVisitor;
// Builds a DcnMessage by scanning all stats attached to a MegaScale event.
// Fields whose stat is absent keep their defaults, which lets
// SetMessageValidity flag the message as having bad keys.
DcnMessage CreateDcnMessageFromStats(const XEventVisitor& event_visitor) {
  DcnMessage dcn_message;
  event_visitor.ForEachStat([&](const XStatVisitor& stat) {
    if (!stat.Type()) return;  // Ignore stats without a recognized StatType.
    switch (static_cast<StatType>(*stat.Type())) {
      case StatType::kDcnLabel: {
        // Collective name, e.g. "all-reduce.273_312".
        dcn_message.collective_name = stat.ToString();
        break;
      }
      // Source endpoint: slice id + per-slice device (TPU) id.
      case StatType::kDcnSourceSliceId: {
        dcn_message.slice_src = stat.IntValue();
        break;
      }
      case StatType::kDcnSourcePerSliceDeviceId: {
        dcn_message.tpu_src = stat.IntValue();
        break;
      }
      // Destination endpoint: slice id + per-slice device (TPU) id.
      case StatType::kDcnDestinationSliceId: {
        dcn_message.slice_dst = stat.IntValue();
        break;
      }
      case StatType::kDcnDestinationPerSliceDeviceId: {
        dcn_message.tpu_dst = stat.IntValue();
        break;
      }
      case StatType::kDcnChunk: {
        dcn_message.chunk_id = stat.IntValue();
        break;
      }
      case StatType::kDcnLoopIndex: {
        dcn_message.loop_index_id = stat.IntValue();
        break;
      }
      case StatType::kPayloadSizeBytes: {
        dcn_message.size_bytes = stat.IntValue();
        break;
      }
      case StatType::kDuration: {
        // The event timestamp marks the *end* of the transfer; derive the
        // start by subtracting the duration (reported in microseconds).
        dcn_message.duration_us = stat.IntOrUintValue();
        dcn_message.start_timestamp_ns =
            event_visitor.TimestampNs() - MicroToNano(dcn_message.duration_us);
        dcn_message.end_timestamp_ns = event_visitor.TimestampNs();
        break;
      }
      default:
        break;
    }
  });
  return dcn_message;
}
// Classifies `dcn_message` after its fields have been populated from stats:
// missing key fields -> BAD_KEY, zero duration -> CLOCK_SKEW,
// same source/destination slice -> LOOPBACK, otherwise VALID.
void SetMessageValidity(DcnMessage& dcn_message) {
  const bool has_required_fields =
      !dcn_message.collective_name.empty() && dcn_message.slice_src != -1 &&
      dcn_message.tpu_src != -1 && dcn_message.slice_dst != -1 &&
      dcn_message.tpu_dst != -1 && dcn_message.size_bytes != -1;
  if (!has_required_fields) {
    dcn_message.validity_info = DCN_MESSAGE_INVALID_BAD_KEY;
    return;
  }
  if (dcn_message.duration_us == 0) {
    dcn_message.validity_info = DCN_MESSAGE_INVALID_CLOCK_SKEW;
    return;
  }
  dcn_message.validity_info =
      (dcn_message.slice_src == dcn_message.slice_dst)
          ? DCN_MESSAGE_VALID_LOOPBACK
          : DCN_MESSAGE_VALID;
}
}
// Converts a MegaScale XEvent into a DcnMessage, populating all fields from
// the event's stats and stamping the message with its validity class.
DcnMessage GetDcnMessageFromXEvent(const XEventVisitor& event_visitor) {
  DcnMessage message = CreateDcnMessageFromStats(event_visitor);
  SetMessageValidity(message);
  return message;
}
// Returns true iff `event` is a DCN (MegaScale) event, identified by its
// name prefix.
bool IsDcnEvent(const tsl::profiler::XEventVisitor& event) {
  constexpr absl::string_view kDcnEventPrefix = "MegaScale:";
  return absl::StartsWith(event.Name(), kDcnEventPrefix);
}
}
} | #include "tensorflow/core/profiler/convert/dcn_utils.h"
#include <cstdint>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/xplane_builder.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
namespace tensorflow {
namespace profiler {
namespace {
using tsl::profiler::kMegaScaleDcnReceive;
using tsl::profiler::XEventBuilder;
using tsl::profiler::XEventVisitor;
using tsl::profiler::XLineBuilder;
using tsl::profiler::XPlaneBuilder;
using tsl::profiler::XPlaneVisitor;
// Test helper: fills `xplane` with a single event named `event_name` at
// `offset` ns, attaching the full set of DCN stats (label, endpoints, chunk,
// loop index, duration in us, payload size) that GetDcnMessageFromXEvent
// reads back.
void PopulateXPlane(XPlane &xplane, absl::string_view event_name, int offset,
                    absl::string_view label, int64_t source_slice_id,
                    int64_t source_per_slice_device_id,
                    int64_t destination_slice_id,
                    int64_t destination_per_slice_device_id, int64_t chunk,
                    int64_t loop_index, int64_t payload_size,
                    int64_t duration) {
  XPlaneBuilder xplane_builder(&xplane);
  XEventMetadata *event_metadata = xplane_builder.GetOrCreateEventMetadata(1);
  event_metadata->set_name(std::string(event_name));
  XLineBuilder xline_builder = xplane_builder.GetOrCreateLine(0);
  XEventBuilder event_builder = xline_builder.AddEvent(*event_metadata);
  event_builder.SetOffsetNs(offset);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("dcn_label"), label);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("dcn_source_slice_id"),
      source_slice_id);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("dcn_source_per_slice_device_id"),
      source_per_slice_device_id);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("dcn_destination_slice_id"),
      destination_slice_id);
  event_builder.AddStatValue(*xplane_builder.GetOrCreateStatMetadata(
                                 "dcn_destination_per_slice_device_id"),
                             destination_per_slice_device_id);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("dcn_chunk"), chunk);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("dcn_loop_index"), loop_index);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("duration_us"), duration);
  event_builder.AddStatValue(
      *xplane_builder.GetOrCreateStatMetadata("payload_size_bytes"),
      payload_size);
}
// An event named with the "MegaScale:" prefix is classified as a DCN event.
TEST(DcnUtilsTest, IsDcnEvent) {
  XPlane xplane;
  PopulateXPlane(xplane, kMegaScaleDcnReceive, 0, "test", 0, 0, 0, 0, 0, 0, 0,
                 0);
  XLine line = xplane.lines()[0];
  XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
  XEventVisitor visitor(&xplane_visitor, &line, &line.events()[0]);
  EXPECT_TRUE(IsDcnEvent(visitor));
}
// An event without the "MegaScale:" prefix is not classified as a DCN event.
TEST(DcnUtilsTest, IsNotDcnEvent) {
  XPlane xplane;
  PopulateXPlane(xplane, "test", 0, "test", 0, 0, 0, 0, 0, 0, 0, 0);
  XLine line = xplane.lines()[0];
  XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
  XEventVisitor visitor(&xplane_visitor, &line, &line.events()[0]);
  EXPECT_FALSE(IsDcnEvent(visitor));
}
// Round-trips a fully-populated cross-slice event into a DcnMessage and
// checks every field, including start/end timestamps derived from the 50 us
// duration, and the VALID classification.
TEST(DcnUtilsTest, GetDcnMessageFromXEvent) {
  XPlane xplane;
  PopulateXPlane(xplane, kMegaScaleDcnReceive, 100000, "all-reduce.273_312", 2,
                 3, 1, 3, 0, 24, 32768, 50);
  XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
  XEventVisitor visitor(&xplane_visitor, &xplane.lines()[0],
                        &xplane.lines()[0].events()[0]);
  EXPECT_THAT(GetDcnMessageFromXEvent(visitor),
              testing::FieldsAre(
                  "all-reduce.273_312",
                  2, 3, 1, 3,
                  50000, 100000, 50,
                  32768, 0, 24,
                  DCN_MESSAGE_VALID));
}
// Same source and destination slice must be classified as a loopback message
// (DCN_MESSAGE_VALID_LOOPBACK) while all other fields round-trip normally.
TEST(DcnUtilsTest, GetDcnMessageFromXEventLoopBack) {
  XPlane xplane;
  PopulateXPlane(xplane, kMegaScaleDcnReceive, 5000000, "all-gather.1234", 2, 3,
                 2, 1, 4, 40, 1000, 1000);
  XPlaneVisitor xplane_visitor = tsl::profiler::CreateTfXPlaneVisitor(&xplane);
  XEventVisitor visitor(&xplane_visitor, &xplane.lines()[0],
                        &xplane.lines()[0].events()[0]);
  EXPECT_THAT(GetDcnMessageFromXEvent(visitor),
              testing::FieldsAre(
                  "all-gather.1234",
                  2, 3, 2, 1,
                  4000000, 5000000, 1000,
                  1000, 4, 40,
                  DCN_MESSAGE_VALID_LOOPBACK));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/dcn_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5aa739c9-6249-4747-abc4-3b74415156d6 | cpp | google/tensorstore | bit_vec | tensorstore/util/bit_vec.h | tensorstore/util/bit_vec_test.cc | #ifndef TENSORSTORE_UTIL_BIT_VEC_H_
#define TENSORSTORE_UTIL_BIT_VEC_H_
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <type_traits>
#include "absl/base/attributes.h"
#include "tensorstore/util/bit_span.h"
#include "tensorstore/util/bit_vec_impl.h"
#include "tensorstore/util/small_bit_set.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
/// Bit vector with static or dynamic extent, stored packed into an array of
/// `Block` integers (storage strategy, including the small-size inline
/// optimization, is delegated to `internal_bitvec::BitVecStorage`).
///
/// \tparam Extent Static number of bits, or `dynamic_extent`.
template <ptrdiff_t Extent = dynamic_extent>
class BitVec {
  using Storage = internal_bitvec::BitVecStorage<Extent>;
 public:
  using Block = internal_bitvec::Block;
  using value_type = bool;
  using difference_type = ptrdiff_t;
  using size_type = ptrdiff_t;
  using reference = BitRef<Block>;
  using const_reference = BitRef<const Block>;
  using iterator = BitIterator<Block>;
  using const_iterator = BitIterator<const Block>;
  static constexpr ptrdiff_t static_extent = Extent;
  /// Number of `Block`s needed for `Extent` bits (dynamic if `Extent` is).
  static constexpr ptrdiff_t static_block_extent =
      Extent == dynamic_extent ? dynamic_extent
                               : BitVectorSizeInBlocks<Block>(Extent);
  using ExtentType = typename Storage::ExtentType;
  using BlockExtentType = typename Storage::BlockExtentType;
  /// Constructs an all-`false` vector (empty if `Extent` is dynamic).
  BitVec() : BitVec(ExtentType{}) {}
  /// Constructs from a braced list / array of `bool`, deducing the extent.
  template <ptrdiff_t OtherExtent,
            typename = std::enable_if_t<(OtherExtent == Extent ||
                                         Extent == dynamic_extent)> >
  BitVec(const bool (&arr)[OtherExtent])
      : storage_(std::integral_constant<ptrdiff_t, OtherExtent>{}) {
    std::copy(arr, arr + OtherExtent, begin());
  }
  /// Constructs a copy of the bits referenced by `other`.
  template <typename OtherBlock, ptrdiff_t OtherExtent,
            typename = std::enable_if_t<(OtherExtent == Extent ||
                                         Extent == dynamic_extent)> >
  explicit BitVec(BitSpan<OtherBlock, OtherExtent> other)
      : storage_(other.size()) {
    this->bit_span().DeepAssign(other);
  }
  /// Converting copy constructor from a compatible extent.
  template <ptrdiff_t OtherExtent,
            std::enable_if_t<(OtherExtent == Extent ||
                              Extent == dynamic_extent)>* = nullptr>
  BitVec(const BitVec<OtherExtent>& other) : storage_(other.size()) {
    this->bit_span().DeepAssign(other.bit_span());
  }
  /// Constructs a vector of `extent` bits all equal to `value`.
  explicit BitVec(ExtentType extent, bool value = false) : storage_(extent) {
    fill(value);
  }
  /// Resizes to `new_size` bits; newly added bits are set to `value`.
  void resize(ExtentType new_size, bool value = false) {
    storage_.resize(new_size, value);
  }
  /// Sets every bit to `value` (block-wise memset; trailing padding bits in
  /// the final block are set too, which other members must tolerate).
  void fill(bool value) {
    std::memset(
        storage_.data(),
        value ? ~static_cast<unsigned char>(0) : static_cast<unsigned char>(0),
        storage_.num_blocks() * sizeof(Block));
  }
  /// Returns the underlying block storage.
  tensorstore::span<const Block, static_block_extent> blocks() const {
    return {storage_.data(), storage_.num_blocks()};
  }
  tensorstore::span<Block, static_block_extent> blocks() {
    return {storage_.data(), storage_.num_blocks()};
  }
  /// Returns the number of bits.
  ExtentType size() const { return storage_.size(); }
  bool empty() const { return size() == 0; }
  /// Implicit conversion to a (const) view over the stored bits.
  template <ptrdiff_t OtherExtent,
            std::enable_if_t<(OtherExtent == Extent ||
                              OtherExtent == dynamic_extent)>* = nullptr>
  operator BitSpan<const Block, OtherExtent>() const {
    return {storage_.data(), 0, size()};
  }
  template <ptrdiff_t OtherExtent,
            std::enable_if_t<(OtherExtent == Extent ||
                              OtherExtent == dynamic_extent)>* = nullptr>
  operator BitSpan<Block, OtherExtent>() {
    return {storage_.data(), 0, size()};
  }
  BitSpan<const Block, Extent> bit_span() const { return *this; }
  BitSpan<Block, Extent> bit_span() { return *this; }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> begin() const {
    return {storage_.data(), 0};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> cbegin() const {
    return {storage_.data(), 0};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> end() const {
    return {storage_.data(), storage_.size()};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<const Block> cend() const {
    return {storage_.data(), storage_.size()};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<Block> begin() {
    return {storage_.data(), 0};
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitIterator<Block> end() {
    return {storage_.data(), storage_.size()};
  }
  /// Accesses bit `i`.  Precondition: `0 <= i < size()` (previously the
  /// assert allowed `i == size()`, which would dereference a past-the-end
  /// bit; tightened to `<`).
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitRef<const Block> operator[](
      ptrdiff_t i) const {
    return assert(i >= 0 && i < size()), *(begin() + i);
  }
  ABSL_ATTRIBUTE_ALWAYS_INLINE BitRef<Block> operator[](ptrdiff_t i) {
    return assert(i >= 0 && i < size()), *(begin() + i);
  }
  /// Compares two vectors bit-for-bit.
  friend bool operator==(const BitVec& a, const BitVec& b) {
    const ptrdiff_t size = a.size();
    if (size != b.size()) return false;
    constexpr ptrdiff_t kBitsPerBlock =
        static_cast<ptrdiff_t>(sizeof(Block) * 8);
    const ptrdiff_t full_blocks = size / kBitsPerBlock;
    const Block* a_data = a.storage_.data();
    const Block* b_data = b.storage_.data();
    if (!std::equal(a_data, a_data + full_blocks, b_data)) {
      return false;
    }
    const ptrdiff_t remaining_bits = size % kBitsPerBlock;
    // When `size` is an exact multiple of the block width there is no
    // partial final block; returning here avoids reading
    // `a_data[full_blocks]`, which would be one past the end of storage.
    if (remaining_bits == 0) return true;
    const Block final_mask =
        (static_cast<Block>(1) << remaining_bits) - 1;
    return (a_data[full_blocks] & final_mask) ==
           (b_data[full_blocks] & final_mask);
  }
  friend bool operator!=(const BitVec& a, const BitVec& b) { return !(a == b); }
 private:
  Storage storage_;
};
// Deduction guide: a bool array initializer yields a static-extent BitVec.
template <ptrdiff_t Extent>
BitVec(const bool (&arr)[Extent]) -> BitVec<Extent>;
// Deduction guide: copying a BitSpan preserves its (static or dynamic) extent.
template <typename Block, ptrdiff_t Extent>
BitVec(BitSpan<Block, Extent>) -> BitVec<Extent>;
}
#endif | #include "tensorstore/util/bit_vec.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <type_traits>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/bit_span.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::BitSpan;
using ::tensorstore::BitVec;
// Compile-time checks of BitVec's conversion/construction rules: construction
// from a BitSpan is explicit only, and extents must match exactly unless the
// destination extent is dynamic.
static_assert(!std::is_convertible_v<BitSpan<uint64_t, 3>, BitVec<>>);
static_assert(std::is_constructible_v<BitVec<3>, BitSpan<uint32_t, 3>>);
static_assert(std::is_constructible_v<BitVec<>, BitSpan<uint32_t, 3>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitVec<>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitSpan<uint32_t>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitSpan<uint32_t, 4>>);
static_assert(!std::is_constructible_v<BitVec<3>, BitVec<4>>);
// Default-constructed static-extent vector is all-false.
TEST(BitVecTest, StaticDefaultConstruct) {
  BitVec<9> v;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 0, 0, 0, 0, 0, 0));
}
// Static-extent vector constructed with value=true is all-true.
TEST(BitVecTest, StaticConstructTrue) {
  BitVec<9> v({}, true);
  EXPECT_THAT(v, ::testing::ElementsAre(1, 1, 1, 1, 1, 1, 1, 1, 1));
}
// Dynamic vector starts empty; resize zero-fills; fill(true) sets all bits.
// 65 bits forces a second storage block on 64-bit-block builds.
TEST(BitVecTest, DynamicDefaultConstruct) {
  BitVec<> v;
  EXPECT_EQ(0, v.size());
  EXPECT_TRUE(v.empty());
  v.resize(65);
  EXPECT_FALSE(v.empty());
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(65, false)));
  v.fill(true);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(65, true)));
}
// Sized dynamic construction defaults every bit to false.
TEST(BitVecTest, DynamicConstructFalse) {
  BitVec<> v(65);
  EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(65, false)));
}
// operator[] supports reading through a const reference and writing through
// the proxy BitRef, including clearing a previously set bit.
TEST(BitVecTest, Subscript) {
  BitVec<> v(9);
  const auto& v_ref = v;
  EXPECT_FALSE(v_ref[3]);
  v[3] = true;
  EXPECT_TRUE(v_ref[3]);
  v[5] = true;
  v[6] = true;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 1, 0, 1, 1, 0, 0));
  v[8] = true;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 1, 0, 1, 1, 0, 1));
  v[3] = false;
  EXPECT_THAT(v, ::testing::ElementsAre(0, 0, 0, 0, 0, 1, 1, 0, 1));
}
// Copy construction for a small (9-bit, single-block) vector.
TEST(BitVecTest, CopyConstructInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(a);
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
// Copy construction for a 129-bit (multi-block) vector.
TEST(BitVecTest, CopyConstructLarge) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(a);
  EXPECT_THAT(a, ::testing::ElementsAreArray(expected));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Move construction (small size).
TEST(BitVecTest, MoveConstructInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(std::move(a));
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
// Move construction (multi-block size).
TEST(BitVecTest, MoveConstructLarge) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(std::move(a));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Copy assignment (small size); source must be left unchanged.
TEST(BitVecTest, CopyAssignInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(9);
  b = a;
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
// Copy assignment where source and destination have the same block count.
TEST(BitVecTest, CopyAssignLargeSameNumBlocks) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(129);
  b = a;
  EXPECT_THAT(a, ::testing::ElementsAreArray(expected));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Copy assignment where the destination is smaller and must grow.
TEST(BitVecTest, CopyAssignLargeDifferentNumBlocks) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(65);
  b = a;
  EXPECT_THAT(a, ::testing::ElementsAreArray(expected));
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Move assignment (small size).
TEST(BitVecTest, MoveAssignInline) {
  BitVec<> a(9);
  a[0] = true;
  a[3] = true;
  a[5] = true;
  a[6] = true;
  BitVec<> b(9);
  b = std::move(a);
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 0, 1, 1, 0, 0));
}
// Move assignment (multi-block size).
TEST(BitVecTest, MoveAssignLarge) {
  BitVec<> a(129);
  std::vector<bool> expected(129);
  for (int i : {0, 3, 5, 6, 31, 33, 72, 128}) {
    expected[i] = true;
    a[i] = true;
  }
  BitVec<> b(129);
  b = std::move(a);
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Construction from a braced list of bit values.
TEST(BitVecTest, BracedListConstruct) {
  BitVec<> a({1, 0, 0, 1, 1});
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 1));
}
// CTAD: BitVec<5> is deduced both from a bool braced list and from a
// statically-sized BitSpan.
TEST(BitVecTest, DeduceBitVec) {
  auto a = BitVec({true, false, false, true, true});
  EXPECT_THAT(a, ::testing::ElementsAre(1, 0, 0, 1, 1));
  static_assert(std::is_same_v<decltype(a), BitVec<5>>);
  auto b = BitVec(a.bit_span());
  static_assert(std::is_same_v<decltype(b), BitVec<5>>);
  EXPECT_THAT(b, ::testing::ElementsAre(1, 0, 0, 1, 1));
}
// Construction from a BitSpan copies the referenced bits.
TEST(BitVecTest, BitSpanConstruct) {
  BitVec<> a(37);
  a[32] = 1;
  a[17] = 1;
  a[2] = 1;
  EXPECT_THAT(a, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
  BitVec<> b(a.bit_span());
  EXPECT_THAT(b, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
}
// A fixed-extent BitVec converts to a dynamic-extent BitVec.
TEST(BitVecTest, BitVecConvertConstruct) {
  BitVec<37> a;
  a[32] = 1;
  a[17] = 1;
  a[2] = 1;
  EXPECT_THAT(a, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
  BitVec<> b = a;
  EXPECT_THAT(b, ::testing::ElementsAre(0, 0, 1, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        0, 1, 0, 0, 0, 0, 0, 0,
                                        0, 0, 0, 0, 0, 0, 0, 0,
                                        1, 0, 0, 0, 0));
}
// Equality compares size and every bit; exercises sub-block sizes.
TEST(BitVecTest, ComparisonShort) {
  BitVec<> a(18);
  BitVec<> b(17);
  EXPECT_NE(a, b);
  b.resize(18);
  EXPECT_EQ(a, b);
  b[2] = true;
  EXPECT_NE(a, b);
  a[2] = true;
  EXPECT_EQ(a, b);
  a[17] = true;
  EXPECT_NE(a, b);
  b[17] = true;
  EXPECT_EQ(a, b);
}
// Equality for sizes spanning multiple blocks, including the partial final
// block.
TEST(BitVecTest, ComparisonLong) {
  BitVec<> a(150);
  BitVec<> b(151);
  EXPECT_NE(a, b);
  b.resize(150);
  EXPECT_EQ(a, b);
  b[2] = true;
  EXPECT_NE(a, b);
  a[2] = true;
  EXPECT_EQ(a, b);
  a[149] = true;
  EXPECT_NE(a, b);
  b[149] = true;
  EXPECT_EQ(a, b);
}
// begin()/end() on a const vector and cbegin()/cend() yield the bit values.
TEST(BitVecTest, ConstIterators) {
  BitVec<> a(7);
  a[1] = 1;
  a[4] = 1;
  {
    const auto& a_ref = a;
    std::vector<bool> b(a_ref.begin(), a_ref.end());
    EXPECT_THAT(b, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
  }
  {
    std::vector<bool> b(a.cbegin(), a.cend());
    EXPECT_THAT(b, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
  }
}
// Non-const iterators are readable...
TEST(BitVecTest, NonConstIterators) {
  BitVec<> a(7);
  a[1] = 1;
  a[4] = 1;
  std::vector<bool> b(a.begin(), a.end());
  EXPECT_THAT(b, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
}
// ...and writable (usable as an output range for std::copy).
TEST(BitVecTest, NonConstIteratorsMutate) {
  BitVec<> a(7);
  std::vector<bool> b{0, 1, 0, 0, 1, 0, 0};
  std::copy(b.begin(), b.end(), a.begin());
  EXPECT_THAT(a, ::testing::ElementsAre(0, 1, 0, 0, 1, 0, 0));
}
// blocks() exposes the packed 64-bit words (one block here).
TEST(BitVecTest, BlocksInline) {
  BitVec<> a(64);
  for (int i : {0, 5, 17, 62}) {
    a[i] = true;
  }
  EXPECT_THAT(a.blocks(), ::testing::ElementsAre(
                              (uint64_t(1) << 0) |
                              (uint64_t(1) << 5) |
                              (uint64_t(1) << 17) |
                              (uint64_t(1) << 62)));
}
// blocks() for 128 bits: bit i lands in word i/64 at position i%64.
TEST(BitVecTest, BlocksLarge) {
  BitVec<> a(128);
  for (int i : {0, 5, 17, 62, 90, 127}) {
    a[i] = true;
  }
  EXPECT_THAT(a.blocks(),
              ::testing::ElementsAre(
                  (uint64_t(1) << 0) |
                  (uint64_t(1) << 5) |
                  (uint64_t(1) << 17) |
                  (uint64_t(1) << 62),
                  (uint64_t(1) << (90 - 64)) |
                  (uint64_t(1) << (127 - 64))));
}
// resize() with the static extent (as integral_constant) is a no-op.
TEST(BitVecTest, ResizeStatic) {
  BitVec<65> b;
  std::vector<bool> expected(65);
  for (int i : {0, 3, 7, 29, 35, 64}) {
    expected[i] = true;
    b[i] = true;
  }
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
  b.resize(std::integral_constant<std::ptrdiff_t, 65>{});
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
}
// Helper: resizes a vector with the given bits set and checks the result
// against std::vector<bool>::resize with both fill values.
void TestResizeDynamic(std::ptrdiff_t orig_size, std::ptrdiff_t new_size,
                       std::vector<int> bits) {
  SCOPED_TRACE(tensorstore::StrCat("orig_size=", orig_size,
                                   ", new_size=", new_size,
                                   ", bits=", ::testing::PrintToString(bits)));
  BitVec<> b(orig_size);
  std::vector<bool> expected(orig_size);
  for (int i : bits) {
    expected[i] = true;
    b[i] = true;
  }
  std::vector<bool> expected_resize_false = expected;
  expected_resize_false.resize(new_size, false);
  std::vector<bool> expected_resize_true = expected;
  expected_resize_true.resize(new_size, true);
  EXPECT_THAT(b, ::testing::ElementsAreArray(expected));
  BitVec<> b_resize_false = b;
  b_resize_false.resize(new_size, false);
  BitVec<> b_resize_true = b;
  b_resize_true.resize(new_size, true);
  EXPECT_THAT(b_resize_false,
              ::testing::ElementsAreArray(expected_resize_false));
  EXPECT_THAT(b_resize_true, ::testing::ElementsAreArray(expected_resize_true));
}
// The cases below cover no-op resizes, shrink/expand within and across
// block-count boundaries, and transitions to/from empty.
TEST(BitVecTest, ResizeDynamicLargeNoOp) {
  TestResizeDynamic(65, 65, {0, 3, 7, 29, 35, 64});
}
TEST(BitVecTest, ResizeDynamicInlineNoOp) {
  TestResizeDynamic(62, 62, {0, 3, 7, 29, 35, 61});
}
TEST(BitVecTest, ResizeDynamicInlineShrink) {
  TestResizeDynamic(62, 30, {0, 3, 7, 29, 35, 61});
}
TEST(BitVecTest, ResizeDynamicInlineExpand) {
  TestResizeDynamic(36, 41, {0, 3, 7, 29, 35});
}
TEST(BitVecTest, ResizeDynamicShrinkSameNumBlocks) {
  TestResizeDynamic(150, 132, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicExpandSameNumBlocks) {
  TestResizeDynamic(150, 160, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicShrinkDifferentNumBlocks) {
  TestResizeDynamic(150, 128, {0, 3, 7, 29, 35, 64, 127, 131, 149});
  TestResizeDynamic(150, 126, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicExpandDifferentNumBlocks) {
  TestResizeDynamic(150, 250, {0, 3, 7, 29, 35, 64, 127, 131, 149});
}
TEST(BitVecTest, ResizeDynamicExpandFromEmpty) {
  TestResizeDynamic(0, 15, {});
  TestResizeDynamic(0, 65, {});
  TestResizeDynamic(0, 150, {});
  TestResizeDynamic(0, 0, {});
}
TEST(BitVecTest, ResizeDynamicShrinkToEmpty) {
  TestResizeDynamic(13, 0, {1, 2, 12});
  TestResizeDynamic(129, 0, {1, 2, 12, 65, 73, 128});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_vec.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/bit_vec_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
49de6d11-ef16-4650-b8b9-f2614c7904de | cpp | abseil/abseil-cpp | zipf_distribution | absl/random/zipf_distribution.h | absl/random/zipf_distribution_test.cc | #ifndef ABSL_RANDOM_ZIPF_DISTRIBUTION_H_
#define ABSL_RANDOM_ZIPF_DISTRIBUTION_H_
#include <cassert>
#include <cmath>
#include <istream>
#include <limits>
#include <ostream>
#include <type_traits>
#include "absl/random/internal/iostream_state_saver.h"
#include "absl/random/internal/traits.h"
#include "absl/random/uniform_real_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// absl::zipf_distribution produces random integers in the closed interval
// [0, k], distributed according to the unnormalized discrete probability
// function p(x) = (v + x) ^ -q (see the h()/pow_negative_q() helpers below).
template <typename IntType = int>
class zipf_distribution {
 public:
  using result_type = IntType;
  class param_type {
   public:
    using distribution_type = zipf_distribution;
    // Preconditions (asserted in the definition): k > 0, q > 1, v > 0.
    explicit param_type(result_type k = (std::numeric_limits<IntType>::max)(),
                        double q = 2.0, double v = 1.0);
    result_type k() const { return k_; }
    double q() const { return q_; }
    double v() const { return v_; }
    friend bool operator==(const param_type& a, const param_type& b) {
      return a.k_ == b.k_ && a.q_ == b.q_ && a.v_ == b.v_;
    }
    friend bool operator!=(const param_type& a, const param_type& b) {
      return !(a == b);
    }
   private:
    friend class zipf_distribution;
    // Helpers for the rejection-inversion sampler; defined out of line.
    inline double h(double x) const;
    inline double hinv(double x) const;
    inline double compute_s() const;
    inline double pow_negative_q(double x) const;
    IntType k_;
    double q_;
    double v_;
    // Constants derived from (k, q, v), precomputed by the constructor and
    // read by operator().
    double one_minus_q_;
    double s_;
    double one_minus_q_inv_;
    double hxm_;
    double hx0_minus_hxm_;
    static_assert(random_internal::IsIntegral<IntType>::value,
                  "Class-template absl::zipf_distribution<> must be "
                  "parameterized using an integral type.");
  };
  zipf_distribution()
      : zipf_distribution((std::numeric_limits<IntType>::max)()) {}
  explicit zipf_distribution(result_type k, double q = 2.0, double v = 1.0)
      : param_(k, q, v) {}
  explicit zipf_distribution(const param_type& p) : param_(p) {}
  // The distribution is stateless, so reset() is a no-op.
  void reset() {}
  template <typename URBG>
  result_type operator()(URBG& g) {
    return (*this)(g, param_);
  }
  template <typename URBG>
  result_type operator()(URBG& g,
                         const param_type& p);
  result_type k() const { return param_.k(); }
  double q() const { return param_.q(); }
  double v() const { return param_.v(); }
  param_type param() const { return param_; }
  void param(const param_type& p) { param_ = p; }
  // Parenthesized to defeat possible min/max function-like macros.
  result_type(min)() const { return 0; }
  result_type(max)() const { return k(); }
  friend bool operator==(const zipf_distribution& a,
                         const zipf_distribution& b) {
    return a.param_ == b.param_;
  }
  friend bool operator!=(const zipf_distribution& a,
                         const zipf_distribution& b) {
    return a.param_ != b.param_;
  }
 private:
  param_type param_;
};
// Precomputes the constants used by the rejection-inversion sampler
// (Hörmann & Derflinger, "Rejection-inversion to generate variates from
// monotone discrete distributions").
template <typename IntType>
zipf_distribution<IntType>::param_type::param_type(
    typename zipf_distribution<IntType>::result_type k, double q, double v)
    : k_(k), q_(q), v_(v), one_minus_q_(1 - q) {
  assert(q > 1);
  assert(v > 0);
  assert(k > 0);
  one_minus_q_inv_ = 1 / one_minus_q_;
  // Clamp k so the double-domain math stays in range; 18446744073709549568.0
  // is the largest double below 2^64.
  constexpr double kMax = 18446744073709549568.0;
  double kd = static_cast<double>(k);
  if (kd > kMax) {
    kd = kMax;
  }
  hxm_ = h(kd + 0.5);
  // For the default parameters (q == 2, v == 1) the constants have closed
  // forms: h(0.5) == -1/1.5 and s == 0.46153846153846123 (~= 6/13).
  const bool use_precomputed = (v == 1.0 && q == 2.0);
  const double h0x5 = use_precomputed ? (-1.0 / 1.5)
                                      : h(0.5);
  const double elogv_q = (v_ == 1.0) ? 1 : pow_negative_q(v_);
  hx0_minus_hxm_ = (h0x5 - elogv_q) - hxm_;
  s_ = use_precomputed ? 0.46153846153846123 : compute_s();
}
// Computes h(x) = pow(x + v, 1 - q) / (1 - q), with a fast path for the
// common case q == 2 (i.e. 1 - q == -1).
template <typename IntType>
double zipf_distribution<IntType>::param_type::h(double x) const {
  const double shifted = x + v_;
  if (one_minus_q_ == -1.0) {
    return -1.0 / shifted;
  }
  return std::exp(one_minus_q_ * std::log(shifted)) * one_minus_q_inv_;
}
// Inverse of h(): maps a value of h back to x (hinv(h(x)) == x), with the
// same q == 2 fast path.
template <typename IntType>
double zipf_distribution<IntType>::param_type::hinv(double x) const {
  return -v_ + ((one_minus_q_ == -1.0)
                    ? (-1.0 / x)
                    : std::exp(one_minus_q_inv_ * std::log(one_minus_q_ * x)));
}
// Computes the squeeze threshold s = 1 - hinv(h(1.5) - (v + 1)^-q) used by
// the fast-accept test in operator().
template <typename IntType>
double zipf_distribution<IntType>::param_type::compute_s() const {
  return 1.0 - hinv(h(1.5) - pow_negative_q(v_ + 1.0));
}
// Computes x^-q, with a fast path for the common exponent q == 2.
template <typename IntType>
double zipf_distribution<IntType>::param_type::pow_negative_q(double x) const {
  if (q_ == 2.0) {
    return 1.0 / (x * x);
  }
  return std::exp(-q_ * std::log(x));
}
// Rejection-inversion sampling: draw u uniformly over [hxm, hx0), invert h
// to get a real-valued candidate x, round to the nearest integer k, and
// accept either via the squeeze condition (k - x <= s) or the exact test
// (u >= h(k + 0.5) - (v + k)^-q).
template <typename IntType>
template <typename URBG>
typename zipf_distribution<IntType>::result_type
zipf_distribution<IntType>::operator()(
    URBG& g, const param_type& p) {
  absl::uniform_real_distribution<double> uniform_double;
  double k;
  for (;;) {
    const double v = uniform_double(g);
    const double u = p.hxm_ + v * p.hx0_minus_hxm_;
    const double x = p.hinv(u);
    k = rint(x);
    // Candidate out of range; retry.
    if (k > static_cast<double>(p.k())) continue;
    // Squeeze: accept without evaluating the expensive exact test.
    if (k - x <= p.s_) break;
    const double h = p.h(k + 0.5);
    const double r = p.pow_negative_q(p.v_ + k);
    if (u >= h - r) break;
  }
  IntType ki = static_cast<IntType>(k);
  assert(ki <= p.k_);
  return ki;
}
template <typename CharT, typename Traits, typename IntType>
std::basic_ostream<CharT, Traits>& operator<<(
std::basic_ostream<CharT, Traits>& os,
const zipf_distribution<IntType>& x) {
using stream_type =
typename random_internal::stream_format_type<IntType>::type;
auto saver = random_internal::make_ostream_state_saver(os);
os.precision(random_internal::stream_precision_helper<double>::kPrecision);
os << static_cast<stream_type>(x.k()) << os.fill() << x.q() << os.fill()
<< x.v();
return os;
}
template <typename CharT, typename Traits, typename IntType>
std::basic_istream<CharT, Traits>& operator>>(
std::basic_istream<CharT, Traits>& is,
zipf_distribution<IntType>& x) {
using result_type = typename zipf_distribution<IntType>::result_type;
using param_type = typename zipf_distribution<IntType>::param_type;
using stream_type =
typename random_internal::stream_format_type<IntType>::type;
stream_type k;
double q;
double v;
auto saver = random_internal::make_istream_state_saver(is);
is >> k >> q >> v;
if (!is.fail()) {
x.param(param_type(static_cast<result_type>(k), q, v));
}
return is;
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/zipf_distribution.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/log.h"
#include "absl/random/internal/chi_square.h"
#include "absl/random/internal/pcg_engine.h"
#include "absl/random/internal/sequence_urbg.h"
#include "absl/random/random.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/strip.h"
namespace {
using ::absl::random_internal::kChiSquared;
using ::testing::ElementsAre;
// Typed test instantiated for every supported integral result type.
template <typename IntType>
class ZipfDistributionTypedTest : public ::testing::Test {};
using IntTypes = ::testing::Types<int, int8_t, int16_t, int32_t, int64_t,
                                  uint8_t, uint16_t, uint32_t, uint64_t>;
TYPED_TEST_SUITE(ZipfDistributionTypedTest, IntTypes);
// Round-trips parameters through operator<< / operator>> and verifies that
// samples drawn afterwards stay within [min(), max()].
TYPED_TEST(ZipfDistributionTypedTest, SerializeTest) {
  using param_type = typename absl::zipf_distribution<TypeParam>::param_type;
  constexpr int kCount = 1000;
  absl::InsecureBitGen gen;
  for (const auto& param : {
           param_type(),
           param_type(32),
           param_type(100, 3, 2),
           param_type(std::numeric_limits<TypeParam>::max(), 4, 3),
           param_type(std::numeric_limits<TypeParam>::max() / 2),
       }) {
    const auto k = param.k();
    const auto q = param.q();
    const auto v = param.v();
    absl::zipf_distribution<TypeParam> before(k, q, v);
    EXPECT_EQ(before.k(), param.k());
    EXPECT_EQ(before.q(), param.q());
    EXPECT_EQ(before.v(), param.v());
    {
      absl::zipf_distribution<TypeParam> via_param(param);
      EXPECT_EQ(via_param, before);
    }
    std::stringstream ss;
    ss << before;
    // Start from deliberately different parameters, then restore via >>.
    absl::zipf_distribution<TypeParam> after(4, 5.5, 4.4);
    EXPECT_NE(before.k(), after.k());
    EXPECT_NE(before.q(), after.q());
    EXPECT_NE(before.v(), after.v());
    EXPECT_NE(before.param(), after.param());
    EXPECT_NE(before, after);
    ss >> after;
    EXPECT_EQ(before.k(), after.k());
    EXPECT_EQ(before.q(), after.q());
    EXPECT_EQ(before.v(), after.v());
    EXPECT_EQ(before.param(), after.param());
    EXPECT_EQ(before, after);
    auto sample_min = after.max();
    auto sample_max = after.min();
    for (int i = 0; i < kCount; i++) {
      auto sample = after(gen);
      EXPECT_GE(sample, after.min());
      EXPECT_LE(sample, after.max());
      if (sample > sample_max) sample_max = sample;
      if (sample < sample_min) sample_min = sample;
    }
    LOG(INFO) << "Range: " << sample_min << ", " << sample_max;
  }
}
// Reference model of the (truncated) zipf distribution, used to validate
// samples from absl::zipf_distribution. The unnormalized weight of value i
// is hnq_[i] = (v + i)^-q; Init() truncates the series once the terms become
// negligible, so PMF/CDF treat indices past the computed range as tail mass.
class ZipfModel {
 public:
  ZipfModel(size_t k, double q, double v) : k_(k), q_(q), v_(v) {}

  // Mean of the modeled distribution; valid only after Init().
  double mean() const { return mean_; }

  // P(X == i); zero outside the computed range.
  double PMF(size_t i) { return i >= hnq_.size() ? 0.0 : hnq_[i] / sum_hnq_; }

  // P(X <= i): normalized sum of the first i + 1 weights. Indices at or past
  // the end of the computed range have CDF 1.
  double CDF(size_t i) {
    if (i >= hnq_.size()) {
      return 1.0;
    }
    auto it = std::begin(hnq_);
    double h = 0.0;
    // BUG FIX: the loop bound used to be `end = it`, so the body never
    // executed and CDF(i) was always 0 for every in-range i (breaking
    // InverseCDF and the bucket construction in the chi-squared test).
    // Sum the first i + 1 weights instead.
    for (const auto end = it + i + 1; it != end; it++) {
      h += *it;
    }
    return h / sum_hnq_;
  }

  // Binary search for a bracketing pair {min, max} around probability p,
  // maintaining CDF(max) > p >= CDF(min) (with max initially past-the-end).
  std::pair<size_t, size_t> InverseCDF(double p) {
    size_t min = 0;
    size_t max = hnq_.size();
    while (max > min + 1) {
      size_t target = (max + min) >> 1;
      double x = CDF(target);
      if (x > p) {
        max = target;
      } else {
        min = target;
      }
    }
    return {min, max};
  }

  // Populates hnq_, sum_hnq_, and mean_. Idempotent.
  void Init() {
    if (!hnq_.empty()) {
      return;
    }
    hnq_.clear();
    hnq_.reserve(std::min(k_, size_t{1000}));
    sum_hnq_ = 0;
    double qm1 = q_ - 1.0;
    double sum_hnq_m1 = 0;
    for (size_t i = 0; i < k_; i++) {
      const double x = v_ + i;
      // Fast paths for the common exponents q == 2 and q == 3.
      const double hnqm1 =
          (q_ == 2.0) ? (1.0 / x)
                      : (q_ == 3.0) ? (1.0 / (x * x)) : std::pow(x, -qm1);
      sum_hnq_m1 += hnqm1;
      const double hnq =
          (q_ == 2.0) ? (1.0 / (x * x))
                      : (q_ == 3.0) ? (1.0 / (x * x * x)) : std::pow(x, -q_);
      sum_hnq_ += hnq;
      hnq_.push_back(hnq);
      // Truncate once the terms are negligible; the remaining tail mass is
      // ignored by the model.
      if (i > 1000 && hnq <= 1e-10) {
        break;
      }
    }
    assert(sum_hnq_ > 0);
    mean_ = sum_hnq_m1 / sum_hnq_;
  }

 private:
  const size_t k_;
  const double q_;
  const double v_;

  double mean_;
  std::vector<double> hnq_;
  double sum_hnq_;
};
using zipf_u64 = absl::zipf_distribution<uint64_t>;
// Parameterized fixture: combines the ZipfModel reference with a
// fixed-seed PCG engine so the chi-squared test is deterministic.
class ZipfTest : public testing::TestWithParam<zipf_u64::param_type>,
                 public ZipfModel {
 public:
  ZipfTest() : ZipfModel(GetParam().k(), GetParam().q(), GetParam().v()) {}
  absl::random_internal::pcg64_2018_engine rng_{0x2B7E151628AED2A6};
};
// Draws samples and compares empirical bucket counts against the reference
// model using Pearson's chi-squared statistic.
TEST_P(ZipfTest, ChiSquaredTest) {
  const auto& param = GetParam();
  Init();
  size_t trials = 10000;
  // Bucket boundaries come from the model's inverse CDF at 1% increments,
  // merging increments that map to the same boundary value.
  std::vector<size_t> points;
  std::vector<double> expected;
  {
    double last_cdf = 0.0;
    double min_p = 1.0;
    for (double p = 0.01; p < 1.0; p += 0.01) {
      auto x = InverseCDF(p);
      if (points.empty() || points.back() < x.second) {
        // NOTE: the inner `p` intentionally shadows the loop variable; it is
        // the exact CDF at the chosen bucket boundary.
        const double p = CDF(x.second);
        points.push_back(x.second);
        double q = p - last_cdf;
        expected.push_back(q);
        last_cdf = p;
        if (q < min_p) {
          min_p = q;
        }
      }
    }
    // Append (or fold into the last bucket) the distribution tail.
    if (last_cdf < 0.999) {
      points.push_back(std::numeric_limits<size_t>::max());
      double q = 1.0 - last_cdf;
      expected.push_back(q);
      if (q < min_p) {
        min_p = q;
      }
    } else {
      points.back() = std::numeric_limits<size_t>::max();
      expected.back() += (1.0 - last_cdf);
    }
    // Scale trials so the smallest bucket expects at least ~8 hits.
    trials = static_cast<size_t>(8.0 / min_p);
  }
  ASSERT_GT(points.size(), 0);
  std::vector<int64_t> buckets(points.size(), 0);
  double avg = 0;
  {
    zipf_u64 dis(param);
    for (size_t i = 0; i < trials; i++) {
      uint64_t x = dis(rng_);
      ASSERT_LE(x, dis.max());
      ASSERT_GE(x, dis.min());
      avg += static_cast<double>(x);
      auto it = std::upper_bound(std::begin(points), std::end(points),
                                 static_cast<size_t>(x));
      buckets[std::distance(std::begin(points), it)]++;
    }
    avg = avg / static_cast<double>(trials);
  }
  // Convert expected probabilities into expected counts.
  for (auto& e : expected) {
    e *= trials;
  }
  const int dof = static_cast<int>(expected.size()) - 1;
  const double threshold = absl::random_internal::ChiSquareValue(dof, 0.9999);
  const double chi_square = absl::random_internal::ChiSquare(
      std::begin(buckets), std::end(buckets), std::begin(expected),
      std::end(expected));
  const double p_actual =
      absl::random_internal::ChiSquarePValue(chi_square, dof);
  if (chi_square > threshold) {
    // Dump per-bucket details before failing, to aid debugging.
    LOG(INFO) << "values";
    for (size_t i = 0; i < expected.size(); i++) {
      LOG(INFO) << points[i] << ": " << buckets[i] << " vs. E=" << expected[i];
    }
    LOG(INFO) << "trials " << trials;
    LOG(INFO) << "mean " << avg << " vs. expected " << mean();
    LOG(INFO) << kChiSquared << "(data, " << dof << ") = " << chi_square << " ("
              << p_actual << ")";
    LOG(INFO) << kChiSquared << " @ 0.9995 = " << threshold;
    FAIL() << kChiSquared << " value of " << chi_square
           << " is above the threshold.";
  }
}
// Parameter grid: default, small/large k, and a spread of q and v values.
std::vector<zipf_u64::param_type> GenParams() {
  using param = zipf_u64::param_type;
  const auto k = param().k();
  const auto q = param().q();
  const auto v = param().v();
  const uint64_t k2 = 1 << 10;
  return std::vector<zipf_u64::param_type>{
      param(k, q, v),
      param(4, q, v), param(1 << 4, q, v), param(k2, q, v),
      param(k2, q, 0.5), param(k2, q, 1.5), param(k2, q, 2.5), param(k2, q, 10),
      param(k2, 1.5, v), param(k2, 3, v), param(k2, 5, v), param(k2, 10, v),
      param(k2, 1.5, 0.5), param(k2, 3, 1.5), param(k, 10, 10)};
}
// Builds a gtest-safe name like "k_1024__q_3__v_1_5" for each parameter set.
std::string ParamName(
    const ::testing::TestParamInfo<zipf_u64::param_type>& info) {
  const auto& p = info.param;
  std::string name = absl::StrCat("k_", p.k(), "__q_", absl::SixDigits(p.q()),
                                  "__v_", absl::SixDigits(p.v()));
  return absl::StrReplaceAll(name, {{"+", "_"}, {"-", "_"}, {".", "_"}});
}
INSTANTIATE_TEST_SUITE_P(All, ZipfTest, ::testing::ValuesIn(GenParams()),
                         ParamName);
// Golden-value test: with a fixed engine-output sequence the sampler must
// keep producing exactly these values across releases.
TEST(ZipfDistributionTest, StabilityTest) {
  absl::random_internal::sequence_urbg urbg(
      {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
       0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
       0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
       0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull});
  std::vector<int> output(10);
  {
    absl::zipf_distribution<int32_t> dist;
    std::generate(std::begin(output), std::end(output),
                  [&] { return dist(urbg); });
    EXPECT_THAT(output, ElementsAre(10031, 0, 0, 3, 6, 0, 7, 47, 0, 0));
  }
  urbg.reset();
  {
    absl::zipf_distribution<int32_t> dist(std::numeric_limits<int32_t>::max(),
                                          3.3);
    std::generate(std::begin(output), std::end(output),
                  [&] { return dist(urbg); });
    EXPECT_THAT(output, ElementsAre(44, 0, 0, 0, 0, 1, 0, 1, 3, 0));
  }
}
// Pins the internal inversion math: each raw 64-bit engine output maps to a
// specific sample value.
TEST(ZipfDistributionTest, AlgorithmBounds) {
  absl::zipf_distribution<int32_t> dist;
  const std::pair<uint64_t, int32_t> kInputs[] = {
      {0xffffffffffffffff, 0x0}, {0x7fffffffffffffff, 0x0},
      {0x3ffffffffffffffb, 0x1}, {0x1ffffffffffffffd, 0x4},
      {0xffffffffffffffe, 0x9}, {0x7ffffffffffffff, 0x12},
      {0x3ffffffffffffff, 0x25}, {0x1ffffffffffffff, 0x4c},
      {0xffffffffffffff, 0x99}, {0x7fffffffffffff, 0x132},
      {0x3fffffffffffff, 0x265}, {0x1fffffffffffff, 0x4cc},
      {0xfffffffffffff, 0x999}, {0x7ffffffffffff, 0x1332},
      {0x3ffffffffffff, 0x2665}, {0x1ffffffffffff, 0x4ccc},
      {0xffffffffffff, 0x9998}, {0x7fffffffffff, 0x1332f},
      {0x3fffffffffff, 0x2665a}, {0x1fffffffffff, 0x4cc9e},
      {0xfffffffffff, 0x998e0}, {0x7ffffffffff, 0x133051},
      {0x3ffffffffff, 0x265ae4}, {0x1ffffffffff, 0x4c9ed3},
      {0xffffffffff, 0x98e223}, {0x7fffffffff, 0x13058c4},
      {0x3fffffffff, 0x25b178e}, {0x1fffffffff, 0x4a062b2},
      {0xfffffffff, 0x8ee23b8}, {0x7ffffffff, 0x10b21642},
      {0x3ffffffff, 0x1d89d89d}, {0x1ffffffff, 0x2fffffff},
      {0xffffffff, 0x45d1745d}, {0x7fffffff, 0x5a5a5a5a},
      {0x3fffffff, 0x69ee5846}, {0x1fffffff, 0x73ecade3},
      {0xfffffff, 0x79a9d260}, {0x7ffffff, 0x7cc0532b},
      {0x3ffffff, 0x7e5ad146}, {0x1ffffff, 0x7f2c0bec},
      {0xffffff, 0x7f95adef}, {0x7fffff, 0x7fcac0da},
      {0x3fffff, 0x7fe55ae2}, {0x1fffff, 0x7ff2ac0e},
      {0xfffff, 0x7ff955ae}, {0x7ffff, 0x7ffcaac1},
      {0x3ffff, 0x7ffe555b}, {0x1ffff, 0x7fff2aac},
      {0xffff, 0x7fff9556}, {0x7fff, 0x7fffcaab},
      {0x3fff, 0x7fffe555}, {0x1fff, 0x7ffff2ab},
      {0xfff, 0x7ffff955}, {0x7ff, 0x7ffffcab},
      {0x3ff, 0x7ffffe55}, {0x1ff, 0x7fffff2b},
      {0xff, 0x7fffff95}, {0x7f, 0x7fffffcb},
      {0x3f, 0x7fffffe5}, {0x1f, 0x7ffffff3},
      {0xf, 0x7ffffff9}, {0x7, 0x7ffffffd},
      {0x3, 0x7ffffffe}, {0x1, 0x7fffffff},
  };
  for (const auto& instance : kInputs) {
    absl::random_internal::sequence_urbg urbg({instance.first});
    EXPECT_EQ(instance.second, dist(urbg));
  }
}
16f9733f-766f-40e0-9dc0-7359dc774407 | cpp | google/arolla | operator_metadata | arolla/qexpr/operator_metadata.cc | arolla/qexpr/operator_metadata_test.cc | #include "arolla/qexpr/operator_metadata.h"
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "arolla/qtype/qtype.h"
namespace arolla {
// Registers metadata covering a whole operator family (all input-type
// combinations). A name can carry either family-level or per-signature
// metadata, never both, so both maps are checked for duplicates.
absl::Status QExprOperatorMetadataRegistry::AddOperatorFamilyMetadata(
    QExprOperatorFamilyMetadata metadata) {
  absl::WriterMutexLock lock(&mutex_);
  if (family_metadatas_.contains(metadata.name) ||
      operator_metadatas_.contains(metadata.name)) {
    return absl::Status(
        absl::StatusCode::kAlreadyExists,
        absl::StrFormat("trying to register individual operator or operator "
                        "family metadata twice under the same name %s",
                        metadata.name));
  }
  family_metadatas_.emplace(metadata.name, std::move(metadata));
  return absl::OkStatus();
}
// Registers metadata for a single operator signature (name + input qtypes).
// Fails if family metadata exists under the name or the exact signature was
// already registered.
absl::Status QExprOperatorMetadataRegistry::AddOperatorMetadata(
    QExprOperatorMetadata metadata) {
  absl::WriterMutexLock lock(&mutex_);
  if (family_metadatas_.contains(metadata.name)) {
    return absl::Status(
        absl::StatusCode::kAlreadyExists,
        absl::StrFormat("trying to register individual operator or operator "
                        "family metadata twice under the same name %s",
                        metadata.name));
  }
  // Inserts an empty per-signature map if the name is new; otherwise the
  // existing map is reused.
  auto [iter, inserted] =
      operator_metadatas_.emplace(metadata.name, TypeToMetadata{});
  if (iter->second.contains(metadata.input_qtypes)) {
    return absl::Status(
        absl::StatusCode::kAlreadyExists,
        absl::StrFormat("trying to register operator metadata twice "
                        "for operator %s with input types %s",
                        metadata.name,
                        FormatTypeVector(metadata.input_qtypes)));
  }
  iter->second.emplace(metadata.input_qtypes, std::move(metadata));
  return absl::OkStatus();
}
// Looks up metadata for an operator name and input types. Family-level
// metadata (if present) matches any input types; otherwise an exact
// per-signature entry is required.
absl::StatusOr<QExprOperatorMetadata>
QExprOperatorMetadataRegistry::LookupOperatorMetadata(
    absl::string_view op_name, absl::Span<const QTypePtr> input_qtypes) const {
  absl::ReaderMutexLock lock(&mutex_);
  std::vector<QTypePtr> input_qtypes_vector(input_qtypes.begin(),
                                            input_qtypes.end());
  if (auto m = family_metadatas_.find(op_name); m != family_metadatas_.end()) {
    // Synthesize per-signature metadata from the family-level entry.
    return QExprOperatorMetadata{
        .name = std::string(m->second.name),
        .input_qtypes = std::move(input_qtypes_vector),
        .build_details = m->second.family_build_details};
  }
  if (auto oms = operator_metadatas_.find(op_name);
      oms != operator_metadatas_.end()) {
    if (auto m = oms->second.find(input_qtypes_vector);
        m != oms->second.end()) {
      return m->second;
    }
  }
  return absl::Status(
      absl::StatusCode::kNotFound,
      absl::StrFormat(
          "no metadata is available for operator %s with input types %s",
          op_name, FormatTypeVector(input_qtypes)));
}
// Process-wide singleton; intentionally never destroyed.
QExprOperatorMetadataRegistry& QExprOperatorMetadataRegistry::GetInstance() {
  static absl::NoDestructor<QExprOperatorMetadataRegistry> instance;
  return *instance;
}
// Returns a map from "name(types...)" (or "name(...)" for families) to the
// set of build targets providing that operator.
absl::flat_hash_map<std::string, std::set<std::string>>
QExprOperatorMetadataRegistry::OperatorBuildDependencies() const {
  absl::flat_hash_map<std::string, std::set<std::string>> result;
  absl::ReaderMutexLock lock(&mutex_);
  for (const auto& [_, metadata] : family_metadatas_) {
    result[absl::StrCat(metadata.name, "(...)")].insert(
        metadata.family_build_details.build_target);
  }
  for (const auto& [name, type_to_meta] : operator_metadatas_) {
    for (const auto& [types, metadata] : type_to_meta) {
      std::string name_with_types =
          absl::StrCat(name, ::arolla::FormatTypeVector(types));
      result[name_with_types].insert(metadata.build_details.build_target);
    }
  }
  return result;
}
// Registration helper for use in static initializers; aborts the process on
// duplicate registration. The int return value exists only so the call can
// initialize a global; 57 is arbitrary.
int RegisterOperatorFamilyMetadataOrDie(QExprOperatorFamilyMetadata metadata) {
  auto status =
      QExprOperatorMetadataRegistry::GetInstance().AddOperatorFamilyMetadata(
          std::move(metadata));
  if (!status.ok()) {
    LOG(FATAL) << status;
  }
  return 57;
}
// Same as above, for per-signature operator metadata.
int RegisterOperatorMetadataOrDie(QExprOperatorMetadata metadata) {
  auto status =
      QExprOperatorMetadataRegistry::GetInstance().AddOperatorMetadata(
          std::move(metadata));
  if (!status.ok()) {
    LOG(FATAL) << status;
  }
  return 57;
}
} | #include "arolla/qexpr/operator_metadata.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/operator_name.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::Field;
using ::testing::MatchesRegex;
// Per-signature metadata: registration, duplicate detection (both against
// the same signature and against a same-named family), and lookup.
TEST(OperatorMetadataTest, OperatorMetadata) {
  auto i32 = GetQType<int32_t>();
  auto f32 = GetQType<float>();
  QExprOperatorMetadata add_ints_meta;
  add_ints_meta.name = AROLLA_OPERATOR_NAME("test.add");
  add_ints_meta.input_qtypes = {i32, i32};
  add_ints_meta.build_details.op_class = "Add<int>";
  QExprOperatorMetadata add_floats_meta;
  add_floats_meta.name = AROLLA_OPERATOR_NAME("test.add");
  add_floats_meta.input_qtypes = {f32, f32};
  add_floats_meta.build_details.op_class = "Add<float>";
  QExprOperatorMetadataRegistry registry;
  ASSERT_OK(registry.AddOperatorMetadata(add_ints_meta));
  ASSERT_OK(registry.AddOperatorMetadata(add_floats_meta));
  EXPECT_THAT(
      registry.AddOperatorMetadata(add_ints_meta),
      StatusIs(absl::StatusCode::kAlreadyExists,
               MatchesRegex("trying to register operator metadata twice for "
                            "operator test.add with input types .*")));
  EXPECT_THAT(
      registry.AddOperatorFamilyMetadata(QExprOperatorFamilyMetadata{
          .name = add_ints_meta.name, .family_build_details = {}}),
      StatusIs(absl::StatusCode::kAlreadyExists,
               Eq("trying to register individual operator or operator family "
                  "metadata twice under the same name test.add")));
  EXPECT_THAT(registry.LookupOperatorMetadata(add_ints_meta.name, {i32, i32}),
              IsOkAndHolds(Field(&QExprOperatorMetadata::build_details,
                                 Field(&BuildDetails::op_class, "Add<int>"))));
}
// Family-level metadata: registration blocks later per-signature and
// duplicate family registrations, and lookup matches any input types.
TEST(OperatorMetadataTest, OperatorFamilyMetadata) {
  auto i32 = GetQType<int32_t>();
  ::arolla::BuildDetails family_build_details;
  family_build_details.op_family_class = "AddFamily";
  QExprOperatorFamilyMetadata add_meta{
      .name = "test.add", .family_build_details = family_build_details};
  QExprOperatorMetadataRegistry registry;
  ASSERT_OK(registry.AddOperatorFamilyMetadata(add_meta));
  EXPECT_THAT(
      registry.AddOperatorMetadata(QExprOperatorMetadata{
          .name = "test.add", .input_qtypes = {i32, i32}}),
      StatusIs(absl::StatusCode::kAlreadyExists,
               Eq("trying to register individual operator or operator family "
                  "metadata twice under the same name test.add")));
  EXPECT_THAT(
      registry.AddOperatorFamilyMetadata(add_meta),
      StatusIs(absl::StatusCode::kAlreadyExists,
               Eq("trying to register individual operator or operator family "
                  "metadata twice under the same name test.add")));
  EXPECT_THAT(
      registry.LookupOperatorMetadata(add_meta.name, {i32, i32}),
      IsOkAndHolds(Field(&QExprOperatorMetadata::build_details,
                         Field(&BuildDetails::op_family_class, "AddFamily"))));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operator_metadata.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operator_metadata_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
6ec690be-11c7-4341-96de-6b3f76ee661a | cpp | google/arolla | derived_qtype | arolla/qtype/derived_qtype.cc | arolla/qtype/derived_qtype_test.cc | #include "arolla/qtype/derived_qtype.h"
#include <cstddef>
#include <utility>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
// Constructs a derived qtype that reuses the base qtype's physical
// representation (type_info, layout, fields) while carrying its own name,
// value_qtype and specialization key.
BasicDerivedQType::BasicDerivedQType(ConstructorArgs args)
    : QType(QType::ConstructorArgs{
          .name = std::move(args.name),
          .type_info = args.base_qtype->type_info(),
          .type_layout = args.base_qtype->type_layout(),
          .type_fields = args.base_qtype->type_fields(),
          .value_qtype = args.value_qtype,
          .qtype_specialization_key = std::move(args.qtype_specialization_key),
      }),
      base_qtype_(args.base_qtype) {
  // Crash early (in all build modes) if the derived-qtype invariants do not
  // hold; see VerifyDerivedQType for the checked conditions.
  CHECK_OK(VerifyDerivedQType(this));
}
// Renders the value as "<NAME>{<base repr>}", delegating the inner part to
// the base qtype's representation.
ReprToken BasicDerivedQType::UnsafeReprToken(const void* source) const {
  const ReprToken base_repr = base_qtype_->UnsafeReprToken(source);
  return ReprToken{absl::StrCat(name(), "{", base_repr.str, "}")};
}
// Copies the value by delegating to the base qtype, which shares this
// qtype's memory layout.
void BasicDerivedQType::UnsafeCopy(const void* source,
                                   void* destination) const {
  base_qtype_->UnsafeCopy(source, destination);
}
// Hashes the value by delegating to the base qtype; a derived value
// therefore fingerprints identically to its base-typed counterpart.
void BasicDerivedQType::UnsafeCombineToFingerprintHasher(
    const void* source, FingerprintHasher* hasher) const {
  base_qtype_->UnsafeCombineToFingerprintHasher(source, hasher);
}
// Returns the base qtype when `qtype` is derived; otherwise returns `qtype`
// unchanged (including the nullptr case, where dynamic_cast yields nullptr).
const QType* DecayDerivedQType(const QType* qtype) {
  const auto* as_derived = dynamic_cast<const DerivedQTypeInterface*>(qtype);
  if (as_derived == nullptr) {
    return qtype;
  }
  return as_derived->GetBaseQType();
}
// Validates the invariants required of a derived qtype:
//   * `qtype` implements DerivedQTypeInterface;
//   * its base qtype is non-null and is not itself derived (single-level
//     derivation only);
//   * type_info, layout, fields and value_qtype are all compatible with the
//     base qtype (fields / value_qtype are compared after decaying derived
//     qtypes on both sides).
// Returns InvalidArgumentError when `qtype` is not derived, a
// FailedPreconditionError naming the first violated invariant otherwise,
// and OkStatus if all checks pass.
absl::Status VerifyDerivedQType(QTypePtr qtype) {
  const auto* derived_qtype_interface =
      dynamic_cast<const DerivedQTypeInterface*>(qtype);
  if (derived_qtype_interface == nullptr) {
    return absl::InvalidArgumentError(
        absl::StrFormat("%s is not a derived qtype", qtype->name()));
  }
  const auto* base_qtype = derived_qtype_interface->GetBaseQType();
  if (base_qtype == nullptr) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "invalid derived_qtype=%s: base_qtype=nullptr", qtype->name()));
  }
  // Derivation must be single-level: the base cannot itself be derived.
  if (dynamic_cast<const DerivedQTypeInterface*>(base_qtype) != nullptr) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "base_qtype=%s cannot be a derived qtype", base_qtype->name()));
  }
  const bool type_info_ok = (qtype->type_info() == base_qtype->type_info());
  if (!type_info_ok) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "invalid derived_qtype=%s: base_qtype=%s: incompatible type_info",
        qtype->name(), base_qtype->name()));
  }
  const bool type_layout_ok =
      (qtype->type_layout().AllocSize() ==
           base_qtype->type_layout().AllocSize() &&
       qtype->type_layout().AllocAlignment().value ==
           base_qtype->type_layout().AllocAlignment().value);
  if (!type_layout_ok) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "invalid derived_qtype=%s: base_qtype=%s: incompatible type_layout",
        qtype->name(), base_qtype->name()));
  }
  // A derived qtype may either expose no fields at all, or mirror the base
  // qtype's fields exactly (same byte offsets, same decayed field types).
  bool type_fields_ok =
      (qtype->type_fields().empty() ||
       qtype->type_fields().size() == base_qtype->type_fields().size());
  for (size_t i = 0; type_fields_ok && i < qtype->type_fields().size() &&
                     i < base_qtype->type_fields().size();
       ++i) {
    const auto& derived_field = qtype->type_fields()[i];
    const auto& base_field = base_qtype->type_fields()[i];
    type_fields_ok = type_fields_ok &&
                     (derived_field.byte_offset() == base_field.byte_offset() &&
                      DecayDerivedQType(derived_field.GetType()) ==
                          DecayDerivedQType(base_field.GetType()));
  }
  // BUG FIX: this check previously re-tested `type_layout_ok`, which is
  // always true at this point (a false value returned above), so field
  // incompatibilities were computed but silently accepted.
  if (!type_fields_ok) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "invalid derived_qtype=%s: base_qtype=%s: incompatible type_fields",
        qtype->name(), base_qtype->name()));
  }
  const bool value_qtype_ok =
      (qtype->value_qtype() == nullptr ||
       base_qtype->value_qtype() == nullptr ||
       DecayDerivedQType(qtype->value_qtype()) ==
           DecayDerivedQType(base_qtype->value_qtype()));
  if (!value_qtype_ok) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "invalid derived_qtype=%s: base_qtype=%s: incompatible value_qtype",
        qtype->name(), base_qtype->name()));
  }
  return absl::OkStatus();
}
// Reinterprets `qvalue` under its decayed (base) qtype without copying the
// underlying storage.
TypedRef DecayDerivedQValue(TypedRef qvalue) {
  return TypedRef::UnsafeFromRawPointer(DecayDerivedQType(qvalue.GetType()),
                                        qvalue.GetRawPointer());
}
// TypedValue overload: decays the reference and wraps it back into an
// owning TypedValue.
TypedValue DecayDerivedQValue(const TypedValue& qvalue) {
  return TypedValue(DecayDerivedQValue(qvalue.AsRef()));
}
// Reinterprets `qvalue` as `derived_qtype`. The caller must guarantee that
// `qvalue`'s type equals the base qtype of `derived_qtype`; this is verified
// only in debug builds (DCHECK).
TypedRef UnsafeDowncastDerivedQValue(QTypePtr derived_qtype, TypedRef qvalue) {
  DCHECK_NE(derived_qtype, nullptr);
  auto* base_qtype = DecayDerivedQType(derived_qtype);
  DCHECK_EQ(qvalue.GetType(), base_qtype);
  return TypedRef::UnsafeFromRawPointer(derived_qtype, qvalue.GetRawPointer());
}
// TypedValue overload: downcasts the reference and wraps it back into an
// owning TypedValue.
TypedValue UnsafeDowncastDerivedQValue(QTypePtr derived_qtype,
                                       const TypedValue& qvalue) {
  return TypedValue(UnsafeDowncastDerivedQValue(derived_qtype, qvalue.AsRef()));
}
} | #include "arolla/qtype/derived_qtype.h"
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/no_destructor.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
// Test-only derived qtype: POINT is represented as a (float64, float64)
// tuple and reports float64 as its value_qtype.
struct PointQType final : BasicDerivedQType {
  PointQType()
      : BasicDerivedQType(ConstructorArgs{
            .name = "POINT",
            .base_qtype =
                MakeTupleQType({GetQType<double>(), GetQType<double>()}),
            .value_qtype = GetQType<double>(),
            .qtype_specialization_key = "::arolla::PointQType",
        }) {}
  static QTypePtr get() {
    static const absl::NoDestructor<PointQType> result;
    return result.get();
  }
};
// The derived qtype mirrors the base tuple's type_info/layout/fields while
// reporting its own name, value_qtype and specialization key.
TEST(BasicDerivedQTypeTest, QTypeProperties) {
  const auto point_qtype = PointQType::get();
  EXPECT_EQ(point_qtype->name(), "POINT");
  EXPECT_EQ(point_qtype->value_qtype(), GetQType<double>());
  EXPECT_EQ(point_qtype->qtype_specialization_key(), "::arolla::PointQType");
  const auto tuple_qtype =
      MakeTupleQType({GetQType<double>(), GetQType<double>()});
  EXPECT_EQ(point_qtype->type_info(), tuple_qtype->type_info());
  EXPECT_EQ(point_qtype->type_layout().AllocSize(),
            tuple_qtype->type_layout().AllocSize());
  EXPECT_EQ(point_qtype->type_layout().AllocAlignment().value,
            tuple_qtype->type_layout().AllocAlignment().value);
  EXPECT_EQ(point_qtype->type_fields().size(), 2);
}
// Repr is "<NAME>{<base repr>}".
TEST(BasicDerivedQTypeTest, DefaultRepr) {
  const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
  const auto point_qvalue =
      UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue.AsRef());
  EXPECT_THAT(point_qvalue.GenReprToken(),
              ReprTokenEq("POINT{(float64{1}, float64{2})}"));
}
// Hashing a value through the derived qtype matches hashing through the base.
TEST(BasicDerivedQTypeTest, UnsafeCombineToFingerprintHasher) {
  const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
  const auto* tuple_qtype = tuple_qvalue.GetType();
  const auto* point_qtype = PointQType::get();
  FingerprintHasher hasher1("seed");
  FingerprintHasher hasher2("seed");
  tuple_qtype->UnsafeCombineToFingerprintHasher(tuple_qvalue.GetRawPointer(),
                                                &hasher1);
  point_qtype->UnsafeCombineToFingerprintHasher(tuple_qvalue.GetRawPointer(),
                                                &hasher2);
  EXPECT_EQ(std::move(hasher1).Finish(), std::move(hasher2).Finish());
}
// Decay maps derived -> base, and is the identity on base qtypes / nullptr.
TEST(BasicDerivedQTypeTest, DecayDerivedQType) {
  const auto point_qtype = PointQType::get();
  const auto tuple_qtype =
      MakeTupleQType({GetQType<double>(), GetQType<double>()});
  EXPECT_NE(point_qtype, tuple_qtype);
  EXPECT_EQ(DecayDerivedQType(point_qtype), tuple_qtype);
  EXPECT_EQ(DecayDerivedQType(tuple_qtype), tuple_qtype);
  EXPECT_EQ(DecayDerivedQType(nullptr), nullptr);
}
// Downcasting a TypedRef changes the reported qtype (and thus the
// fingerprint of the wrapping TypedValue).
TEST(BasicDerivedQTypeTest, UnsafeDowncastDerivedQRef) {
  const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
  const auto point_qvalue = TypedValue(
      UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue.AsRef()));
  EXPECT_EQ(point_qvalue.GetType(), PointQType::get());
  EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
}
// Same as above for the TypedValue overload.
TEST(BasicDerivedQTypeTest, UnsafeDowncastDerivedQValue) {
  const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
  const auto point_qvalue =
      UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue);
  EXPECT_EQ(point_qvalue.GetType(), PointQType::get());
  EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
}
// Decaying a downcast TypedRef restores the base qtype's fingerprint.
TEST(BasicDerivedQTypeTest, DecayDerivedQRef) {
  const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
  const auto point_qvalue = TypedValue(
      UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue.AsRef()));
  EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
  EXPECT_EQ(
      TypedValue(DecayDerivedQValue(point_qvalue.AsRef())).GetFingerprint(),
      tuple_qvalue.GetFingerprint());
  EXPECT_EQ(
      TypedValue(DecayDerivedQValue(tuple_qvalue.AsRef())).GetFingerprint(),
      tuple_qvalue.GetFingerprint());
}
// Same as above for the TypedValue overload.
TEST(BasicDerivedQTypeTest, DecayDerivedQValue) {
  const auto tuple_qvalue = MakeTupleFromFields(1., 2.);
  const auto point_qvalue =
      UnsafeDowncastDerivedQValue(PointQType::get(), tuple_qvalue);
  EXPECT_NE(point_qvalue.GetFingerprint(), tuple_qvalue.GetFingerprint());
  EXPECT_EQ(DecayDerivedQValue(point_qvalue).GetFingerprint(),
            tuple_qvalue.GetFingerprint());
  EXPECT_EQ(TypedValue(DecayDerivedQValue(tuple_qvalue)).GetFingerprint(),
            tuple_qvalue.GetFingerprint());
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/derived_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/derived_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
50eb565e-1a53-429c-ba86-b7ff26d47687 | cpp | tensorflow/tensorflow | lift_as_function_call | tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc | tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc | #include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <queue>
#include <stack>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/quantization_unit_loc.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/platform/mutex.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant {
using ::stablehlo::quantization::Method;
using ::tsl::protobuf::TextFormat;
constexpr int64_t kDefaultVersion = 9;
constexpr StringRef kPlatformCpu = "CPU";
constexpr StringRef kStablehloModuleAttrsAttrName = "_stablehlo_module_attrs";
constexpr StringRef kUsesShapePolymorphismAttr = "jax.uses_shape_polymorphism";
// Returns true when `op` is nested inside a function created by lifting,
// i.e. the enclosing FuncOp carries the fused-function attribute.
bool IsInLiftedFunc(Operation* op) {
  if (op == nullptr) return false;
  auto enclosing_func = op->getParentOfType<func::FuncOp>();
  return enclosing_func->hasAttr(kFusedFunctionAttr);
}
// Returns true when the immediate parent op of `op` exists and is a
// StableHLO op (i.e. `op` lives inside a StableHLO region).
bool IsInStableHloOpRegion(Operation* op) {
  if (op == nullptr) return false;
  Operation* enclosing_op = op->getParentOp();
  if (enclosing_op == nullptr) return false;
  return stablehlo::IsStablehloOp(enclosing_op);
}
// Inserts `function` into `module`'s symbol table under `func_name`,
// appending "_<N>" (N = 1, 2, ...) until the symbol is unique. Returns the
// name the function ends up with.
StringAttr InsertToSymbolTable(Operation& module, Operation& function,
                               const StringRef func_name) {
  // Serializes concurrent insertions; the mutex is intentionally leaked so
  // it outlives any static-destruction ordering issues.
  static tensorflow::mutex* mtx = new tensorflow::mutex();
  tensorflow::mutex_lock lock(*mtx);
  SymbolTable symbol_table(&module);
  std::string unique_name = func_name.str();
  int32_t uniquing_counter = 0;
  while (symbol_table.lookup(unique_name) != nullptr) {
    ++uniquing_counter;
    unique_name = absl::StrCat(func_name.str(), "_", uniquing_counter);
  }
  function.setAttr("sym_name",
                   StringAttr::get(module.getContext(), unique_name));
  return symbol_table.insert(&function);
}
// Creates a TF::PartitionedCallOp invoking `func_name` with `args`, tags it
// with the FullyQuantizable quantization trait, and returns its results.
ValueRange CreateTFPartitionedCallOp(OpBuilder& builder,
                                     const Location location,
                                     const StringRef func_name,
                                     const TypeRange output_types,
                                     const ValueRange args) {
  TF::PartitionedCallOp call_op = builder.create<TF::PartitionedCallOp>(
      location, output_types, args,
      FlatSymbolRefAttr::get(builder.getStringAttr(func_name)),
      /*config=*/"", /*config_proto=*/"", /*executor_type=*/"");
  // Mark the call as fully quantizable via the quantization trait attribute.
  call_op->setAttr(
      kQuantTraitAttrName,
      builder.getStringAttr(StringRef(
          std::string(QuantTraitValues[QuantizationTrait::FullyQuantizable]))));
  return call_op.getOutput();
}
// Creates a TF::XlaCallModuleOp that calls the StableHLO entry function
// `func_name`, annotated with the entry-function symbol, the quantization
// trait, and shape-polymorphism module attributes. Returns its results.
ValueRange CreateTFXlaCallModuleOp(OpBuilder& builder, const Location location,
                                   const StringRef func_name,
                                   const TypeRange output_types,
                                   const ValueRange args) {
  MLIRContext* ctx = builder.getContext();
  // One ShapeAttr per result; all result types must be ShapedType.
  SmallVector<Attribute> shape_attrs;
  for (const Type result_type : output_types) {
    shape_attrs.push_back(
        tf_type::ShapeAttr::get(ctx, mlir::cast<ShapedType>(result_type)));
  }
  auto empty_array_attr = ArrayAttr::get(ctx, {});
  auto platforms = ArrayAttr::get(ctx, {StringAttr::get(ctx, kPlatformCpu)});
  auto call_op = builder.create<TF::XlaCallModuleOp>(
      location,
      /*output=*/output_types,
      /*args=*/args,
      /*version=*/kDefaultVersion, /*module=*/"",
      /*Sout=*/ArrayAttr::get(ctx, shape_attrs),
      /*dim_args_spec=*/empty_array_attr,
      platforms,
      /*function_list=*/empty_array_attr,
      /*has_token_input_output=*/false,
      /*disabled_checks=*/empty_array_attr);
  call_op->setAttr(TF::kStablehloEntryFunctionAttrName,
                   FlatSymbolRefAttr::get(builder.getStringAttr(func_name)));
  // Keep the original name around so it survives symbol-table uniquification.
  call_op->setAttr(kOriginalStablehloEntryFunctionAttrName,
                   builder.getStringAttr(func_name));
  call_op->setAttr(
      kQuantTraitAttrName,
      builder.getStringAttr(StringRef(
          std::string(QuantTraitValues[QuantizationTrait::FullyQuantizable]))))
;
  call_op->setAttr(kStablehloModuleAttrsAttrName,
                   builder.getDictionaryAttr(builder.getNamedAttr(
                       kUsesShapePolymorphismAttr, builder.getBoolAttr(true))));
  return call_op.getOutput();
}
// Creates a call op of the requested kind (XlaCallModuleOp or
// PartitionedCallOp) invoking `func_name`, and returns its results. Both
// enum cases return, so control never falls off the end of the switch.
ValueRange CreateFunctionCallOp(OpBuilder& builder, const Location location,
                                const FunctionCallOpType call_op_type,
                                const StringRef func_name,
                                const TypeRange output_types,
                                const ValueRange args) {
  switch (call_op_type) {
    case FunctionCallOpType::TFXlaCallModuleOp:
      return CreateTFXlaCallModuleOp(builder, location, func_name, output_types,
                                     args);
    case FunctionCallOpType::TFPartitionedCallOp:
      return CreateTFPartitionedCallOp(builder, location, func_name,
                                       output_types, args);
  }
}
// Collects the ops that (transitively) compute `results` from `arguments`.
// A breadth-first walk from the results follows operands backwards, stopping
// at values listed in `arguments` (or block arguments, which have no
// defining op); the visited defining ops are then returned deduplicated in
// reverse order of visitation, so that producers come before their
// consumers when the list is cloned front-to-back.
SmallVector<Operation*> FindOpsFromArgumentsToResults(
    const ArrayRef<Value> arguments, const ArrayRef<Value> results) {
  std::queue<Value> value_queue;
  for (Value result : results) {
    value_queue.push(result);
  }
  absl::flat_hash_set<mlir::detail::ValueImpl*> argument_set;
  for (Value argument : arguments) {
    argument_set.insert(argument.getImpl());
  }
  // Push ops in discovery order; popping the stack later reverses it.
  std::stack<Operation*> op_stack;
  while (!value_queue.empty()) {
    Value current_value = value_queue.front();
    value_queue.pop();
    Operation* defining_node = current_value.getDefiningOp();
    if (defining_node == nullptr) continue;
    op_stack.push(defining_node);
    for (Value arg : defining_node->getOperands()) {
      if (!argument_set.contains(arg.getImpl())) {
        value_queue.push(arg);
      }
    }
  }
  // Drain the stack, keeping only the first (deepest) occurrence of each op.
  SmallVector<Operation*> sorted_ops;
  absl::flat_hash_set<Operation*> unique_ops;
  while (!op_stack.empty()) {
    Operation* current_op = op_stack.top();
    op_stack.pop();
    if (unique_ops.contains(current_op)) continue;
    sorted_ops.push_back(current_op);
    unique_ops.insert(current_op);
  }
  return sorted_ops;
}
// Distributes `attributes` onto the ops (in `ops`) that own an attribute of
// the same name. For StableHLO owners the attribute is set directly; for TF
// owners an "attr_map" entry of the form "<idx>:<attr name>" is appended so
// a later pass can materialize the attribute. Attributes whose value is the
// null sentinel string are skipped. Returns failure (after emitting an
// error) if an attribute name matches none of the ops.
LogicalResult SetAttributeMap(MLIRContext& context,
                              const ArrayRef<NamedAttribute> attributes,
                              const ArrayRef<Operation*> ops) {
  // Maps every (name, value) attribute of every op to its owning op.
  llvm::SmallDenseMap<NamedAttribute, Operation*> attr_to_op_map;
  for (Operation* op : ops) {
    for (const NamedAttribute named_attr : op->getAttrs()) {
      attr_to_op_map.insert({named_attr, op});
    }
  }
  for (int idx : llvm::seq<int>(0, attributes.size())) {
    const NamedAttribute& attribute = attributes[idx];
    // A string value equal to the null sentinel means "no attribute here".
    if (const auto string_attr =
            mlir::dyn_cast_or_null<StringAttr>(attribute.getValue());
        string_attr != nullptr &&
        string_attr.getValue() == kNullAttributeValue) {
      continue;
    }
    if (std::find_if(
            attr_to_op_map.begin(), attr_to_op_map.end(), [&](auto attr_op) {
              return std::get<0>(attr_op).getName() == attribute.getName();
            }) == attr_to_op_map.end()) {
      emitError(UnknownLoc::get(&context),
                "Could not find attribute: " + attribute.getName().str());
      return failure();
    }
    // Pick an op whose attribute name matches (map iteration order decides
    // which one if several match).
    Operation* owner_op;
    for (const auto& [attr, val] : attr_to_op_map) {
      if (attr.getName() == attribute.getName()) owner_op = val;
    }
    if (stablehlo::IsStablehloOp(owner_op)) {
      owner_op->setAttr(StringRef(attribute.getName()), attribute.getValue());
    } else {
      // NOTE(review): this lookup keys on (name, value); if only the name
      // matched above, operator[] default-inserts a nullptr owner —
      // presumably callers always pass attributes copied verbatim from the
      // ops. TODO: confirm.
      owner_op = attr_to_op_map[attribute];
      std::string new_attr_map_str{};
      // Append to any existing attr_map, comma-separated.
      if (owner_op->hasAttr(kAttrMapAttribute)) {
        new_attr_map_str =
            owner_op->getAttrOfType<StringAttr>(kAttrMapAttribute).str();
        absl::StrAppend(&new_attr_map_str, ",");
      }
      const std::string identifier = std::to_string(idx);
      const StringAttr attribute_name = attribute.getName();
      absl::StrAppend(&new_attr_map_str, identifier, ":", attribute_name.str());
      owner_op->setAttr(kAttrMapAttribute,
                        StringAttr::get(&context, new_attr_map_str));
    }
  }
  return success();
}
// Outlines the computation between `arguments` and `results` into a new
// private function (named `func_name`, uniquified in the module symbol
// table) and replaces it at the use site with a call op of kind
// `call_op_type`. `attributes` are transferred onto the cloned ops via
// SetAttributeMap. Returns the call op's results, or an empty vector (after
// emitting an error) when `results` is empty.
SmallVector<Value, 4> LiftAsFunctionCall(
    OpBuilder& builder, const Location location,
    const FunctionCallOpType call_op_type, const StringRef func_name,
    const ArrayRef<Value> arguments, const ArrayRef<Value> results,
    const ArrayRef<NamedAttribute> attributes) {
  MLIRContext* context = builder.getContext();
  if (results.empty()) {
    emitError(UnknownLoc::get(context), "No result values specified");
    return {};
  }
  Operation* result_op = results[0].getDefiningOp();
  auto module = result_op->getParentOfType<ModuleOp>();
  // Create the wrapper function right after the function being rewritten;
  // the insertion guard restores the builder's position on scope exit.
  auto current_func = result_op->getParentOfType<func::FuncOp>();
  auto guard = OpBuilder::InsertionGuard(builder);
  builder.setInsertionPointAfter(current_func);
  TypeRange arg_types{ValueRange{arguments}};
  TypeRange result_types{ValueRange{results}};
  auto func_type = FunctionType::get(context, arg_types, result_types);
  SmallVector<Location> arg_locs;
  for (Value arg : arguments) {
    arg_locs.push_back(arg.getLoc());
  }
  auto wrap_func = builder.create<func::FuncOp>(location, func_name, func_type);
  wrap_func.setVisibility(SymbolTable::Visibility::Private);
  if (call_op_type == FunctionCallOpType::TFXlaCallModuleOp) {
    wrap_func->setAttr(TF::kFromXlaCallModuleAttrName, builder.getUnitAttr());
  }
  wrap_func->setAttr(kFusedFunctionAttr, builder.getUnitAttr())
;
  builder.createBlock(&wrap_func.getBody(), wrap_func.begin(), arg_types,
                      arg_locs);
  // Map the outer values to the new function's block arguments so cloning
  // rewires operands automatically.
  IRMapping mapping;
  for (int32_t i : llvm::seq<int32_t>(0, arguments.size())) {
    mapping.map(arguments[i], wrap_func.getArgument(i));
  }
  auto cloning_ops = FindOpsFromArgumentsToResults(arguments, results);
  // If any op being lifted carries a quantization-unit location, reuse it
  // for the call op (the last one found wins).
  Location call_op_loc = location;
  for (Operation* op : cloning_ops) {
    std::optional<QuantizationUnitLoc::QuantizationUnit> unit =
        FindQuantizationUnitFromLoc(op->getLoc());
    if (unit.has_value()) {
      call_op_loc = QuantizationUnitLoc(builder.getContext(), unit.value());
    }
  }
  if (failed(SetAttributeMap(*context, attributes, cloning_ops))) {
    current_func.emitError() << "Some attributes couldn't be found.";
  }
  for (Operation* op : cloning_ops) {
    builder.clone(*op, mapping);
  }
  SmallVector<Value> return_values;
  for (Value result : results) {
    return_values.push_back(mapping.lookupOrNull(result));
  }
  builder.create<func::ReturnOp>(location, return_values);
  // Register the function (possibly under a uniquified name) and emit the
  // call in place of the original computation.
  StringAttr new_func_name =
      InsertToSymbolTable(*module, *wrap_func, func_name);
  builder.setInsertionPointAfter(result_op);
  ValueRange new_results =
      CreateFunctionCallOp(builder, call_op_loc, call_op_type,
                           new_func_name.getValue(), result_types, arguments);
  return SmallVector<Value, 4>(new_results.begin(), new_results.end());
}
// Convenience overload for lifting without attributes to propagate:
// forwards an empty attribute list to the primary overload.
SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder,
                                         const Location location,
                                         const FunctionCallOpType call_op_type,
                                         const StringRef func_name,
                                         const ArrayRef<Value> arguments,
                                         const ArrayRef<Value> results) {
  return LiftAsFunctionCall(builder, location, call_op_type, func_name,
                            arguments, results,
                            /*attributes=*/{});
}
// Returns a copy of `arguments` with `append` added as the last element.
SmallVector<Value> AppendToVector(const ArrayRef<Value> arguments,
                                  Value append) {
  SmallVector<Value> extended(arguments.begin(), arguments.end());
  extended.push_back(append);
  return extended;
}
// Returns true if the einsum `equation` can be expressed as an XlaDotV2:
// exactly two operands ("lhs,rhs->out"), no ellipsis ('.'), batch dimension
// counts that agree (or one side has none), output labels appearing in
// increasing order per operand, all lhs output labels before all rhs output
// labels, and all non-batch output labels positioned after the batch
// dimensions.
bool IsEinsumSupportedByXlaDotV2(StringAttr equation_attr) {
  StringRef equation = equation_attr.getValue();
  // Must be of the form "<lhs>,<rhs>-><out>" with no ellipsis.
  if (!absl::StrContains(equation, "->") || !absl::StrContains(equation, ",") ||
      absl::StrContains(equation, ".")) {
    return false;
  }
  int idx_arrow = equation.find("->");
  StringRef calc_eq = equation.substr(0, idx_arrow);
  StringRef out_eq = equation.substr(idx_arrow + 2);
  int idx_comma = calc_eq.find(',');
  StringRef lhs_eq = calc_eq.substr(0, idx_comma);
  StringRef rhs_eq = calc_eq.substr(idx_comma + 1);
  // More than two operands is unsupported.
  if (absl::StrContains(rhs_eq, ",")) return false;
  int lhs_out_idx_start = out_eq.size();
  int lhs_out_idx_end = -1;
  int rhs_out_idx_start = out_eq.size();
  int rhs_out_idx_end = -1;
  int lhs_batch_dim_size = 0;
  int rhs_batch_dim_size = 0;
  // A label present in the output and in BOTH operands is a batch dim;
  // present in the output and only one operand, it is that operand's free
  // dim — track the min/max output positions of those free dims.
  for (const char c : lhs_eq) {
    if (absl::StrContains(out_eq, c) && absl::StrContains(rhs_eq, c)) {
      lhs_batch_dim_size++;
    } else if (absl::StrContains(out_eq, c)) {
      const int out_idx = out_eq.find(c);
      // Free labels must appear in the output in the same order as in lhs.
      if (out_idx < lhs_out_idx_end) {
        return false;
      }
      lhs_out_idx_start = std::min(lhs_out_idx_start, out_idx);
      lhs_out_idx_end = std::max(lhs_out_idx_end, out_idx);
    }
  }
  for (const char c : rhs_eq) {
    if (absl::StrContains(out_eq, c) && absl::StrContains(lhs_eq, c)) {
      rhs_batch_dim_size++;
    } else if (absl::StrContains(out_eq, c)) {
      int out_idx = out_eq.find(c);
      if (out_idx < rhs_out_idx_end) {
        return false;
      }
      if (out_idx < rhs_out_idx_start) rhs_out_idx_start = out_idx;
      if (out_idx > rhs_out_idx_end) rhs_out_idx_end = out_idx;
    }
  }
  // Batch dims must match unless one operand has none.
  if (lhs_batch_dim_size != rhs_batch_dim_size && lhs_batch_dim_size != 0 &&
      rhs_batch_dim_size != 0) {
    return false;
  }
  // lhs free dims must precede rhs free dims in the output, and all free
  // dims must come after the batch dims.
  if (lhs_out_idx_end > rhs_out_idx_start) return false;
  int batch_dim_size = std::max(rhs_batch_dim_size, lhs_batch_dim_size);
  return lhs_out_idx_start >= batch_dim_size &&
         rhs_out_idx_start >= batch_dim_size;
}
// Parses `op`'s `_quantization_method` string attribute as a textproto
// `Method`. Returns InvalidArgumentError when the attribute is absent and
// InternalError when the textproto fails to parse.
absl::StatusOr<Method> GetQuantizationMethod(absl::Nonnull<Operation*> op) {
  const auto quantization_method_attr =
      op->getAttrOfType<StringAttr>(kQuantizationMethodAttr);
  if (!quantization_method_attr) {
    return absl::InvalidArgumentError(absl::StrCat(
        "Attribute ", kQuantizationMethodAttr.str(), " is not found."));
  }
  Method quantization_method;
  const std::string method_txtpb = quantization_method_attr.getValue().str();
  if (!TextFormat::ParseFromString(method_txtpb, &quantization_method)) {
    return absl::InternalError(
        absl::StrCat("Failed to parse Method from textproto: ", method_txtpb));
  }
  return quantization_method;
}
// Like GetQuantizationMethod, but returns Method::default_instance() when
// the attribute is missing or malformed. A parse failure (kInternal)
// additionally emits an error on `op`.
Method GetQuantizationMethodOrDefault(absl::Nonnull<Operation*> op) {
  absl::StatusOr<Method> method = GetQuantizationMethod(op);
  if (method.status().code() == absl::StatusCode::kInternal) {
    op->emitError(absl::StrCat("Failed to get quantization method: ",
                               method.status().ToString()));
  }
  return method.ok() ? *method : Method::default_instance();
}
// Returns true when the op's quantization method (or the default, if the
// attribute is missing/malformed) is weight-only PTQ.
bool HasWeightOnlyPtqMethod(TF::XlaCallModuleOp xla_call_module_op) {
  Method method = GetQuantizationMethodOrDefault(xla_call_module_op);
  return method.has_weight_only_ptq();
}
// Returns true when `op` is an XlaCallModuleOp whose entry-function name
// contains a conv or dot op and whose parsed quantization method is
// weight-only PTQ. All other ops return false.
bool IsWeightOnlyQuantizableOp(const Operation& op) {
  if (auto call_op = dyn_cast<TF::XlaCallModuleOp>(op)) {
    StringRef entry_function_name = GetEntryFunctionName(call_op);
    absl::StatusOr<Method> quantization_method = GetQuantizationMethod(call_op);
    return ContainsConvOrDot(entry_function_name) && quantization_method.ok() &&
           quantization_method->has_weight_only_ptq();
  }
  return false;
}
// Collects every func::FuncOp in `module_op` and returns them ordered
// alphabetically by symbol name.
SmallVector<func::FuncOp> GetSortedFunctions(ModuleOp module_op) {
  SmallVector<func::FuncOp> sorted_funcs;
  for (func::FuncOp func : module_op.getOps<func::FuncOp>()) {
    sorted_funcs.push_back(func);
  }
  absl::c_sort(sorted_funcs, [](func::FuncOp lhs, func::FuncOp rhs) {
    return lhs.getName() < rhs.getName();
  });
  return sorted_funcs;
}
} | #include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant {
namespace {
using ::stablehlo::quantization::Method;
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::util::MessageDifferencer;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using LiftAsFunctionCallTest = QuantizationTestBase;
// Module whose function is already lifted (marked with
// tf_quant.composite_function).
constexpr absl::string_view kModuleLifted = R"mlir(
  module {
    func.func private @composite_dot_general_fn_1(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
      %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
      return %0 : tensor<1x3xf32>
    }
  }
)mlir";
// IsInLiftedFunc detects ops inside an already-lifted composite function.
TEST_F(LiftAsFunctionCallTest, LiftedFunctionSucceeds) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleLifted);
  ASSERT_TRUE(module_op);
  auto composite_dot_general_fn =
      module_op->lookupSymbol<func::FuncOp>("composite_dot_general_fn_1");
  ASSERT_THAT(composite_dot_general_fn, NotNull());
  auto dot_general_op = FindOperationOfType<mlir::stablehlo::DotGeneralOp>(
      composite_dot_general_fn);
  EXPECT_TRUE(IsInLiftedFunc(dot_general_op));
}
// Plain (un-lifted) StableHLO module used as lifting input below.
constexpr absl::string_view kModuleStableHlo = R"mlir(
  module {
    func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
      %0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
      return %0 : tensor<1x3xf32>
    }
  }
)mlir";
// Lifting with attributes produces an XlaCallModuleOp pointing at a new
// entry function that carries the propagated precision_config.
TEST_F(LiftAsFunctionCallTest, FunctionLiftedAsXlaCallModuleOp) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStableHlo);
  ASSERT_TRUE(module_op);
  func::FuncOp main_fn = FindMainFuncOp(*module_op);
  ASSERT_THAT(main_fn, NotNull());
  auto dot_general_op =
      FindOperationOfType<mlir::stablehlo::DotGeneralOp>(main_fn);
  const SmallVector<NamedAttribute>& attributes = {
      builder_.getNamedAttr(
          "precision_config",
          builder_.getArrayAttr(SmallVector<Attribute>(
              1, mlir::stablehlo::PrecisionAttr::get(
                     ctx_.get(), mlir::stablehlo::Precision::DEFAULT)))),
  };
  const SmallVector<Value> operands(dot_general_op->getOperands());
  const SmallVector<Value> results(dot_general_op->getResults());
  Operation* lifted_op =
      LiftAsFunctionCall(builder_, dot_general_op->getLoc(),
                         FunctionCallOpType::TFXlaCallModuleOp,
                         "composite_dot_general_fn", operands, results,
                         attributes)[0]
          .getDefiningOp();
  const auto entry_function_symbol_ref =
      lifted_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
  SymbolTable symbol_table(*module_op);
  auto entry_func = dyn_cast_or_null<func::FuncOp>(
      symbol_table.lookup(entry_function_symbol_ref.getValue()));
  auto lifted_dot_general_op =
      FindOperationOfType<mlir::stablehlo::DotGeneralOp>(entry_func);
  EXPECT_TRUE(isa<TF::XlaCallModuleOp>(lifted_op));
  EXPECT_EQ(
      mlir::cast<StringAttr>(lifted_op->getAttr("_original_entry_function")),
      "composite_dot_general_fn_1");
  EXPECT_EQ(
      mlir::cast<ArrayAttr>(lifted_dot_general_op->getAttr("precision_config")),
      builder_.getArrayAttr(SmallVector<Attribute>(
          1, mlir::stablehlo::PrecisionAttr::get(
                 ctx_.get(), mlir::stablehlo::Precision::DEFAULT))));
}
// The attribute-less overload lifts the op the same way.
TEST_F(LiftAsFunctionCallTest, FunctionNoAttrLiftedAsXlaCallModuleOp) {
  OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStableHlo);
  ASSERT_TRUE(module_op);
  func::FuncOp main_fn = FindMainFuncOp(*module_op);
  ASSERT_THAT(main_fn, NotNull());
  auto dot_general_op =
      FindOperationOfType<mlir::stablehlo::DotGeneralOp>(main_fn);
  const SmallVector<Value> operands(dot_general_op->getOperands());
  const SmallVector<Value> results(dot_general_op->getResults());
  Operation* lifted_op =
      LiftAsFunctionCall(builder_, dot_general_op->getLoc(),
                         FunctionCallOpType::TFXlaCallModuleOp,
                         "composite_dot_general_fn", operands, results)[0]
          .getDefiningOp();
  EXPECT_TRUE(isa<TF::XlaCallModuleOp>(lifted_op))
;
  EXPECT_EQ(
      mlir::cast<StringAttr>(lifted_op->getAttr("_original_entry_function")),
      "composite_dot_general_fn_1");
}
// Two-operand equations are supported; single-operand and ellipsis forms
// are not.
TEST_F(LiftAsFunctionCallTest, EinsumSupportedForXlaDotV2Succeeds) {
  StringAttr einsum_supported_by_xla_dot_v2_attr =
      builder_.getStringAttr("ijk,ikm->ijm");
  StringAttr einsum_one_operand = builder_.getStringAttr("ijk->ikj");
  StringAttr einsum_ellipsis = builder_.getStringAttr("...gse->...gs");
  EXPECT_TRUE(IsEinsumSupportedByXlaDotV2(einsum_supported_by_xla_dot_v2_attr));
  EXPECT_FALSE(IsEinsumSupportedByXlaDotV2(einsum_one_operand));
  EXPECT_FALSE(IsEinsumSupportedByXlaDotV2(einsum_ellipsis));
}
TEST_F(LiftAsFunctionCallTest, GetQuantizationMethodSucceeds) {
constexpr absl::string_view kXlaCallModuleOpWithQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _quantization_method = "no_quantization {}", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
ASSERT_THAT(method, IsOk());
EXPECT_TRUE(method->has_no_quantization());
}
// Verifies that GetQuantizationMethod returns an InvalidArgument error when
// the XlaCallModule op carries no `_quantization_method` attribute.
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodFailsWhenNoQuantizationMethodAttr) {
// Module whose XlaCallModule op deliberately lacks `_quantization_method`.
constexpr absl::string_view kXlaCallModuleOpWithNoQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithNoQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
// The absent attribute must surface as a kInvalidArgument status.
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
EXPECT_THAT(
method,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Attribute _quantization_method is not found")));
}
// Verifies that GetQuantizationMethod returns an Internal error when the
// `_quantization_method` attribute exists but is not a valid Method textproto.
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodFailsWhenMalformedQuantizationMethodAttr) {
// `_quantization_method = "invalid_field: 123"` is not parseable as a Method.
constexpr absl::string_view kXlaCallModuleOpWithNoQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _quantization_method = "invalid_field: 123", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithNoQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
// Parsing failure of the textproto must surface as a kInternal status.
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
EXPECT_THAT(method,
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to parse Method from textproto")));
}
// Module containing a `stablehlo.if` op with two region branches plus a
// `stablehlo.subtract` at function scope.  Used by the IsInStableHloOpRegion
// tests below to distinguish ops nested inside a region from ops that are not.
constexpr absl::string_view kFunctionWithRegion =
R"mlir(
func.func @main(%arg0: tensor<i1>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> {
%if = "stablehlo.if"(%arg0) ({
%0 = stablehlo.add %arg1, %arg1 : tensor<f32>
stablehlo.return %0 : tensor<f32>
}, {
%1 = stablehlo.add %arg2, %arg2 : tensor<f32>
stablehlo.return %1 : tensor<f32>
}) : (tensor<i1>) -> (tensor<f32>)
%subtract = stablehlo.subtract %if, %if : tensor<f32>
return %subtract : tensor<f32>
}
)mlir";
// An op nested inside a stablehlo op's region must be reported as in-region.
TEST_F(LiftAsFunctionCallTest, IsInRegionSucceedsWhenOpInsideRegion) {
  const OwningOpRef<ModuleOp> module = ParseModuleOpString(kFunctionWithRegion);
  ASSERT_TRUE(module);
  func::FuncOp main_func = FindMainFuncOp(*module);
  ASSERT_THAT(main_func, NotNull());
  auto if_operation = FindOperationOfType<mlir::stablehlo::IfOp>(main_func);
  // Scan the first branch of the `if` for the nested stablehlo.add.
  Block& branch_block = if_operation->getRegion(0).front();
  Operation* add_operation = nullptr;
  for (Operation& candidate : branch_block) {
    if (dyn_cast_or_null<::mlir::stablehlo::AddOp>(&candidate)) {
      add_operation = &candidate;
      break;
    }
  }
  EXPECT_TRUE(IsInStableHloOpRegion(add_operation));
}
// An op at function scope (outside any stablehlo region) is not in-region.
TEST_F(LiftAsFunctionCallTest, IsInRegionFailsWhenOpNotInsideRegion) {
  const OwningOpRef<ModuleOp> module = ParseModuleOpString(kFunctionWithRegion);
  ASSERT_TRUE(module);
  func::FuncOp main_func = FindMainFuncOp(*module);
  ASSERT_THAT(main_func, NotNull());
  // `stablehlo.subtract` lives directly in the function body.
  auto subtract_operation =
      FindOperationOfType<mlir::stablehlo::SubtractOp>(main_func);
  EXPECT_FALSE(IsInStableHloOpRegion(subtract_operation));
}
// Verifies that GetQuantizationMethodOrDefault parses a well-formed
// `_quantization_method` attribute into the corresponding Method proto.
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodOrDefaultReturnsCorrectMethod) {
// XlaCallModule op annotated with `no_quantization { }`.
constexpr absl::string_view kXlaCallModuleOpWithQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_dot_general_fn_1,
_quantization_method = "no_quantization { }",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
ASSERT_TRUE(module_op);
FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
// The attribute should parse into a Method with `no_quantization` set.
const Method method = GetQuantizationMethodOrDefault(*xla_call_module_op);
EXPECT_TRUE(method.has_no_quantization());
}
// Verifies that GetQuantizationMethodOrDefault falls back to the
// default-constructed Method when `_quantization_method` is absent.
TEST_F(
LiftAsFunctionCallTest,
GetQuantizationMethodOrDefaultReturnsDefaultWhenNoQuantizationMethodAttr) {
// XlaCallModule op with no `_quantization_method` attribute.
constexpr absl::string_view kXlaCallModuleOpWithoutQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_dot_general_fn_1,
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithoutQuantizationMethodAttr);
ASSERT_TRUE(module_op);
FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
// Missing attribute -> method equals Method::default_instance().
const Method method = GetQuantizationMethodOrDefault(*xla_call_module_op);
EXPECT_TRUE(MessageDifferencer::Equals(method, Method::default_instance()));
}
// Module with a `tf.XlaCallModule` wrapping a dot_general composite, annotated
// with `_quantization_method = "weight_only_ptq { }"`.  Shared by the
// HasWeightOnlyPtqMethod / IsWeightOnlyQuantizableOp tests below.
constexpr absl::string_view kModuleDotWeightOnlyPtq = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
// A call op annotated with `weight_only_ptq` should be recognized as such.
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodExists) {
  OwningOpRef<ModuleOp> module = ParseModuleOpString(kModuleDotWeightOnlyPtq);
  ASSERT_TRUE(module);
  func::FuncOp main_func = FindMainFuncOp(*module);
  ASSERT_THAT(main_func, NotNull());
  auto xla_call_ops = main_func.getOps<TF::XlaCallModuleOp>();
  TF::XlaCallModuleOp xla_call_op = *xla_call_ops.begin();
  EXPECT_TRUE(HasWeightOnlyPtqMethod(xla_call_op));
}
// A call op annotated with a different method (`no_quantization`) must not be
// reported as weight-only PTQ.
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodDifferentMethod) {
const absl::string_view kModuleDotNoQuantization = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "no_quantization { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotNoQuantization);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(HasWeightOnlyPtqMethod(call_op));
}
// A call op with no `_quantization_method` attribute at all must not be
// reported as weight-only PTQ.
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodNoMethod) {
const absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(HasWeightOnlyPtqMethod(call_op));
}
// A weight_only_ptq-annotated XlaCallModule wrapping a dot_general composite
// qualifies for weight-only quantization.
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpDot) {
  OwningOpRef<ModuleOp> module = ParseModuleOpString(kModuleDotWeightOnlyPtq);
  ASSERT_TRUE(module);
  func::FuncOp main_func = FindMainFuncOp(*module);
  ASSERT_THAT(main_func, NotNull());
  auto xla_call_ops = main_func.getOps<TF::XlaCallModuleOp>();
  TF::XlaCallModuleOp xla_call_op = *xla_call_ops.begin();
  EXPECT_TRUE(IsWeightOnlyQuantizableOp(*xla_call_op));
}
// Ops other than tf.XlaCallModule (here tf.PartitionedCall) are never
// weight-only quantizable, even when annotated with `weight_only_ptq`.
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpNotTfXlaCallModuleOp) {
const absl::string_view kModulePartitionedCallDot = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.PartitionedCall"(%arg0, %1, %0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_dot_general_fn_1, _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModulePartitionedCallDot);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::PartitionedCallOp>().begin();
EXPECT_FALSE(IsWeightOnlyQuantizableOp(*call_op));
}
// A weight_only_ptq-annotated XlaCallModule whose composite contains neither
// a convolution nor a dot_general is not weight-only quantizable.
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpNoConvNoDot) {
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(IsWeightOnlyQuantizableOp(*call_op));
}
// GetSortedFunctions must return the module's functions ordered by symbol
// name, regardless of their textual order in the module.
TEST_F(LiftAsFunctionCallTest, GetSortedFunctions) {
// Functions are deliberately declared out of order: conv_3, conv_1, conv_2.
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @conv_3_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
func.func @conv_1_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
func.func @conv_2_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
SmallVector<func::FuncOp> funcs = GetSortedFunctions(*module_op);
ASSERT_THAT(funcs, SizeIs(3));
// Expect lexicographic order by symbol name.
EXPECT_THAT(funcs[0].getSymName(), StrEq("conv_1_fn"));
EXPECT_THAT(funcs[1].getSymName(), StrEq("conv_2_fn"));
EXPECT_THAT(funcs[2].getSymName(), StrEq("conv_3_fn"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f97eb36-a7ec-4230-9a14-09f091bafb63 | cpp | tensorflow/tensorflow | lstm_eval | tensorflow/lite/kernels/lstm_eval.cc | tensorflow/lite/kernels/lstm_eval_test.cc | #include "tensorflow/lite/kernels/lstm_eval.h"
#include <math.h>
#include <string.h>
#include <algorithm>
#include <cstdint>
#include <memory>
#include <vector>
#include "ruy/matrix.h"
#include "ruy/mul_params.h"
#include "ruy/profiler/instrumentation.h"
#include "ruy/ruy.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace lstm_eval {
namespace {
void MatrixBatchVectorMultiplyAccumulate(
const float* matrix, const float* vector, const float* result,
float* output, int m_rows, int m_cols, int n_batch,
CpuBackendContext* cpu_backend_context) {
tflite::FullyConnectedParams float_fc_params;
float_fc_params.float_activation_min = std::numeric_limits<float>::lowest();
float_fc_params.float_activation_max = std::numeric_limits<float>::max();
float_fc_params.lhs_cacheable = true;
float_fc_params.rhs_cacheable = false;
tflite::RuntimeShape weight_shape({m_rows, m_cols});
tflite::RuntimeShape input_shape({n_batch, m_cols});
tflite::RuntimeShape output_shape({n_batch, m_rows});
if (n_batch == 1) {
tflite::optimized_ops::FullyConnected(
float_fc_params, input_shape, vector, weight_shape, matrix,
output_shape, result, output_shape, output, cpu_backend_context);
} else {
tflite::optimized_ops::FullyConnected(
float_fc_params, input_shape, vector, weight_shape, matrix,
output_shape, nullptr, output_shape, output, cpu_backend_context);
for (int i = 0; i < m_rows * n_batch; ++i) {
output[i] += result[i];
}
}
}
// Precomputes the per-row sums of every weight matrix used by the hybrid
// (int8-weight, float-activation) LSTM kernel.  These sums are used later to
// correct accumulations when inputs are quantized with a non-zero zero point.
// A sum is only computed for matrices that will actually be consumed:
//  - input-gate matrices are skipped under CIFG (`use_cifg`),
//  - aux-input matrices are skipped when there is no aux input,
//  - recurrent matrices are skipped when stored as diagonal vectors,
//  - projection sums are skipped when there is no projection.
// NOTE(review): the `row_sums` parameter is not read or written here —
// presumably the caller-owned backing buffer the other pointers alias into;
// confirm against the caller.
void ComputeRowSums(
    int32_t* input_to_input_row_sums, int32_t* input_to_forget_row_sums,
    int32_t* input_to_cell_row_sums, int32_t* input_to_output_row_sums,
    int32_t* aux_input_to_input_row_sums, int32_t* aux_input_to_forget_row_sums,
    int32_t* aux_input_to_cell_row_sums, int32_t* aux_input_to_output_row_sums,
    int32_t* recurrent_to_input_row_sums, int32_t* recurrent_to_forget_row_sums,
    int32_t* recurrent_to_cell_row_sums, int32_t* recurrent_to_output_row_sums,
    int32_t* projection_weights_row_sums, int32_t* row_sums, int n_cell,
    int n_input, int n_aux_input, int n_output,
    const int8_t* input_to_input_weights_ptr,
    const int8_t* input_to_forget_weights_ptr,
    const int8_t* input_to_cell_weights_ptr,
    const int8_t* input_to_output_weights_ptr,
    const int8_t* aux_input_to_input_weights_ptr,
    const int8_t* aux_input_to_forget_weights_ptr,
    const int8_t* aux_input_to_cell_weights_ptr,
    const int8_t* aux_input_to_output_weights_ptr,
    const int8_t* recurrent_to_input_weights_ptr,
    const int8_t* recurrent_to_forget_weights_ptr,
    const int8_t* recurrent_to_cell_weights_ptr,
    const int8_t* recurrent_to_output_weights_ptr,
    const int8_t* projection_weights_ptr, bool use_cifg,
    const float* aux_input_ptr, bool recurrent_to_input_is_diag = false,
    bool recurrent_to_forget_is_diag = false,
    bool recurrent_to_cell_is_diag = false,
    bool recurrent_to_output_is_diag = false) {
  // Input-to-gate matrices (n_cell x n_input).  CIFG has no input gate.
  if (!use_cifg) {
    tensor_utils::ReductionSumVector(input_to_input_weights_ptr,
                                     input_to_input_row_sums, n_cell, n_input);
  }
  tensor_utils::ReductionSumVector(input_to_forget_weights_ptr,
                                   input_to_forget_row_sums, n_cell, n_input);
  tensor_utils::ReductionSumVector(input_to_cell_weights_ptr,
                                   input_to_cell_row_sums, n_cell, n_input);
  tensor_utils::ReductionSumVector(input_to_output_weights_ptr,
                                   input_to_output_row_sums, n_cell, n_input);
  // Aux-input-to-gate matrices (n_cell x n_aux_input), only when an aux input
  // is present.
  if (aux_input_ptr) {
    if (!use_cifg) {
      tensor_utils::ReductionSumVector(aux_input_to_input_weights_ptr,
                                       aux_input_to_input_row_sums, n_cell,
                                       n_aux_input);
    }
    tensor_utils::ReductionSumVector(aux_input_to_forget_weights_ptr,
                                     aux_input_to_forget_row_sums, n_cell,
                                     n_aux_input);
    tensor_utils::ReductionSumVector(aux_input_to_cell_weights_ptr,
                                     aux_input_to_cell_row_sums, n_cell,
                                     n_aux_input);
    tensor_utils::ReductionSumVector(aux_input_to_output_weights_ptr,
                                     aux_input_to_output_row_sums, n_cell,
                                     n_aux_input);
  }
  // Recurrent-to-gate matrices (n_cell x n_output).  Diagonal variants are
  // applied element-wise, so no row sums are needed for them.
  if (!use_cifg) {
    if (!recurrent_to_input_is_diag) {
      tensor_utils::ReductionSumVector(recurrent_to_input_weights_ptr,
                                       recurrent_to_input_row_sums, n_cell,
                                       n_output);
    }
  }
  if (!recurrent_to_forget_is_diag) {
    tensor_utils::ReductionSumVector(recurrent_to_forget_weights_ptr,
                                     recurrent_to_forget_row_sums, n_cell,
                                     n_output);
  }
  if (!recurrent_to_cell_is_diag) {
    tensor_utils::ReductionSumVector(recurrent_to_cell_weights_ptr,
                                     recurrent_to_cell_row_sums, n_cell,
                                     n_output);
  }
  if (!recurrent_to_output_is_diag) {
    tensor_utils::ReductionSumVector(recurrent_to_output_weights_ptr,
                                     recurrent_to_output_row_sums, n_cell,
                                     n_output);
  }
  // Projection matrix (n_output x n_cell), when present.
  if (projection_weights_ptr != nullptr) {
    tensor_utils::ReductionSumVector(
        projection_weights_ptr, projection_weights_row_sums, n_output, n_cell);
  }
}
// Returns the quantization scale of `tensor`, or 1.0f when no tensor is given.
inline float GetTensorScale(const TfLiteTensor* tensor) {
  if (tensor == nullptr) {
    return 1.0f;
  }
  return tensor->params.scale;
}
// Computes one LSTM gate (float path):
//   gate = activation(W_i*x + W_aux*aux + W_r*h [+ peephole] [+ layer norm])
// The computation ping-pongs between two buffers (`gate` and `output`) via
// `accumulation_buffer` swaps so that each matmul can read its accumulator
// and write the sum into the other buffer; the final activation always
// writes the result into the caller's `gate` buffer.
inline void CalculateLstmGateFloat(
    const float* input, const float* input_to_gate_weights,
    const float* aux_input, const float* aux_input_to_gate_weights,
    const float* output_state, const float* recurrent_to_gate_weights,
    const float* cell_state, const float* cell_to_gate_weights,
    const float* layer_norm_coefficients, const float* gate_bias,
    const int n_batch, const int n_input, const int n_aux_input,
    const int n_output, const int n_cell,
    const TfLiteFusedActivation activation, float* gate,
    const bool is_input_all_zeros, const bool is_aux_input_all_zeros,
    float* output, bool recurrent_is_diag, CpuBackendContext* context) {
  // Optional features are signaled by non-null pointers.
  const bool use_peephole = (cell_to_gate_weights != nullptr);
  const bool use_layer_norm = (layer_norm_coefficients != nullptr);
  // With layer norm the bias is added after normalization, so start from 0;
  // otherwise seed the accumulator with the gate bias.
  if (use_layer_norm) {
    std::fill_n(gate, n_cell * n_batch, 0.0f);
  } else {
    tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate);
  }
  float* accumulation_buffer = gate;
  // Input contribution (skipped when the input is known to be all zeros).
  if (!is_input_all_zeros) {
    MatrixBatchVectorMultiplyAccumulate(input_to_gate_weights, input,
                                        accumulation_buffer, output, n_cell,
                                        n_input, n_batch, context);
    // Result now lives in `output`; swap so it becomes the next accumulator.
    std::swap(accumulation_buffer, output);
  }
  // Aux-input contribution (same ping-pong scheme).
  if (!is_aux_input_all_zeros) {
    MatrixBatchVectorMultiplyAccumulate(aux_input_to_gate_weights, aux_input,
                                        accumulation_buffer, output, n_cell,
                                        n_aux_input, n_batch, context);
    std::swap(accumulation_buffer, output);
  }
  // Recurrent contribution.  Either path leaves the running sum in `output`.
  if (recurrent_is_diag) {
    // Diagonal recurrent weights: element-wise product, accumulated in place,
    // then swapped into `output`.
    tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
        recurrent_to_gate_weights, n_cell, output_state, n_batch,
        accumulation_buffer);
    std::swap(accumulation_buffer, output);
  } else {
    MatrixBatchVectorMultiplyAccumulate(recurrent_to_gate_weights, output_state,
                                        accumulation_buffer, output, n_cell,
                                        n_output, n_batch, context);
  }
  // Optional peephole: accumulate cell-state contribution into `output`.
  if (use_peephole) {
    tensor_utils::VectorBatchVectorCwiseProductAccumulate(
        cell_to_gate_weights, n_cell, cell_state, n_batch, output);
  }
  // Optional layer normalization, followed by the deferred bias add.
  if (use_layer_norm) {
    tensor_utils::MeanStddevNormalization(output, output, n_cell, n_batch);
    tensor_utils::VectorBatchVectorCwiseProduct(layer_norm_coefficients, n_cell,
                                                output, n_batch, output);
    tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, output);
  }
  // Apply the gate activation, writing the final value into `gate`.
  tensor_utils::ApplyActivationToVector(output, n_batch * n_cell, activation,
                                        gate);
}
// Updates the LSTM cell state (float path):
//   cell_state = forget_gate .* cell_state + input_contrib .* cell_gate
// where input_contrib is `input_gate` normally, or (1 - forget_gate) under
// CIFG.  Optionally clips the result to [-clip, clip] when clip > 0.
void UpdateLstmCellFloat(int n_batch, int n_cell, float* cell_state,
                         const float* input_gate, float* forget_gate,
                         const float* cell_gate, bool use_cifg, float clip) {
  tensor_utils::VectorVectorCwiseProduct(forget_gate, cell_state,
                                         n_batch * n_cell, cell_state);
  if (use_cifg) {
    // CIFG couples the gates: input gate = 1 - forget gate.  `scratch`
    // deliberately aliases `forget_gate`, which is clobbered here — it is no
    // longer needed after the product above.
    float* scratch = forget_gate;
    tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, scratch);
    tensor_utils::VectorVectorCwiseProductAccumulate(
        cell_gate, scratch, n_batch * n_cell, cell_state);
  } else {
    tensor_utils::VectorVectorCwiseProductAccumulate(
        cell_gate, input_gate, n_batch * n_cell, cell_state);
  }
  if (clip > 0.0f) {
    tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
  }
}
// Computes the LSTM output state (float path):
//   output_state = projection(output_gate .* activation(cell_state))
// When no projection weights are provided, the gated activation is copied
// straight to `output_state`.  `scratch` must hold n_batch * n_cell floats and
// `projection_bias_scratch` n_batch * n_output floats.
void CalculateLstmOutputFloat(int n_batch, int n_cell, int n_output,
                              const float* cell_state, const float* output_gate,
                              TfLiteFusedActivation activation,
                              const float* projection_weights,
                              const float* projection_bias,
                              const float proj_clip, float* output_state,
                              float* scratch, float* projection_bias_scratch,
                              CpuBackendContext* context) {
  // scratch <- output_gate .* activation(cell_state).
  tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell,
                                        activation, scratch);
  tensor_utils::VectorVectorCwiseProduct(output_gate, scratch, n_batch * n_cell,
                                         scratch);
  if (projection_weights == nullptr) {
    // No projection layer: the gated activation is the new output state.
    std::copy_n(scratch, n_batch * n_output, output_state);
    return;
  }
  // Seed the projection accumulator with the bias, or zeros when absent.
  if (projection_bias != nullptr) {
    tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, n_batch,
                                          projection_bias_scratch);
  } else {
    std::fill_n(projection_bias_scratch, n_batch * n_output, 0.0f);
  }
  MatrixBatchVectorMultiplyAccumulate(projection_weights, scratch,
                                      projection_bias_scratch, output_state,
                                      n_output, n_cell, n_batch, context);
  if (proj_clip > 0.0f) {
    tensor_utils::CwiseClipping(output_state, n_batch * n_output, proj_clip);
  }
}
// Computes one LSTM gate for the hybrid kernel (int8 weights, float
// activations quantized on the fly).  Each contribution — input, aux input,
// recurrent — is accumulated into `gate` via a quantized matmul; sparse
// (ledger) weights take a dedicated path.  Row sums correct for asymmetric
// input quantization and are recomputed lazily via `compute_row_sums`.
// Contributions whose operand is known to be all zeros are skipped entirely.
void CalculateLstmGateHybrid(
    const int8_t* input, const float* input_sf, const int32_t* input_zp,
    const int8_t* input_to_gate_weights,
    const uint8_t* input_to_gate_weights_ledger,
    const float input_to_gate_weights_scale, int32_t* input_to_gate_row_sums,
    const int8_t* aux_input, const float* aux_input_sf,
    const int32_t* aux_input_zp, const int8_t* aux_input_to_gate_weights,
    const float aux_input_to_gate_weights_scale,
    int32_t* aux_input_to_gate_row_sums,
    const int8_t* output_state, const float* output_state_float,
    const float* output_state_sf, const int32_t* output_state_zp,
    const int8_t* recurrent_to_gate_weights,
    const float* recurrent_to_gate_diag,
    const uint8_t* recurrent_to_gate_weights_ledger,
    const float recurrent_to_gate_weights_scale,
    int32_t* recurrent_to_gate_row_sums,
    const float* cell_state, const int8_t* cell_to_gate_weights,
    const float cell_to_gate_weights_scale,
    const float* layer_norm_coefficients, const float* gate_bias,
    const int n_batch, const int n_input, const int n_aux_input,
    const int n_output, const int n_cell,
    const TfLiteFusedActivation activation,
    float* gate,
    const bool is_input_all_zeros, const bool is_aux_input_all_zeros,
    const bool is_output_state_all_zeros, bool* compute_row_sums,
    CpuBackendContext* context,
    float* scratch0,
    float* scratch1,
    int32_t* accum_scratch,
    bool recurrent_is_diag) {
  // Optional features are signaled by non-null pointers.
  const bool use_peephole = (cell_to_gate_weights != nullptr);
  const bool use_layer_norm = (layer_norm_coefficients != nullptr);
  // With layer norm the bias is added after normalization, so start from 0;
  // otherwise seed the accumulator with the gate bias.
  if (use_layer_norm) {
    std::fill_n(gate, n_cell * n_batch, 0.0f);
  } else {
    tensor_utils::VectorBatchVectorAssign(gate_bias, n_cell, n_batch, gate);
  }
  // Input contribution.
  if (!is_input_all_zeros) {
    if (input_to_gate_weights_ledger != nullptr) {
      // Sparse weights: per-batch effective scale = weight scale * input
      // scaling factor.
      std::vector<float> scales(n_batch);
      for (int i = 0; i < n_batch; i++) {
        scales[i] = input_to_gate_weights_scale * input_sf[i];
      }
      tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
          input_to_gate_weights, input_to_gate_weights_ledger, n_cell, n_input,
          input, scales.data(), n_batch, gate);
    } else {
      // Dense weights: row sums + zero points correct for asymmetric input
      // quantization.
      tensor_utils::MatrixBatchVectorMultiplyAccumulate(
          input_to_gate_weights, n_cell, n_input, input,
          input_to_gate_weights_scale, input_sf, n_batch, gate,
          nullptr, input_zp, accum_scratch,
          input_to_gate_row_sums, compute_row_sums, scratch0, context);
    }
  }
  // Aux-input contribution (dense only).
  if (!is_aux_input_all_zeros) {
    tensor_utils::MatrixBatchVectorMultiplyAccumulate(
        aux_input_to_gate_weights, n_cell, n_aux_input, aux_input,
        aux_input_to_gate_weights_scale, aux_input_sf, n_batch, gate,
        nullptr, aux_input_zp, accum_scratch,
        aux_input_to_gate_row_sums, compute_row_sums, scratch0, context);
  }
  // Recurrent contribution.
  if (!is_output_state_all_zeros) {
    if (recurrent_to_gate_weights_ledger != nullptr) {
      std::vector<float> scales(n_batch);
      for (int i = 0; i < n_batch; i++) {
        // NOTE(review): this scales with `input_sf`, not `output_state_sf`,
        // unlike the dense path below — verify this is intended.
        scales[i] = recurrent_to_gate_weights_scale * input_sf[i];
      }
      tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
          recurrent_to_gate_weights, recurrent_to_gate_weights_ledger, n_cell,
          n_output, output_state, scales.data(), n_batch, gate);
    } else {
      if (recurrent_is_diag) {
        // Diagonal recurrent weights operate on the float output state
        // directly — no quantization needed.
        tflite::tensor_utils::VectorBatchVectorCwiseProductAccumulate(
            recurrent_to_gate_diag, n_cell, output_state_float, n_batch, gate);
      } else {
        tensor_utils::MatrixBatchVectorMultiplyAccumulate(
            recurrent_to_gate_weights, n_cell, n_output, output_state,
            recurrent_to_gate_weights_scale, output_state_sf, n_batch, gate,
            nullptr, output_state_zp, accum_scratch,
            recurrent_to_gate_row_sums, compute_row_sums, scratch0, context);
      }
    }
  }
  // Optional peephole: dequantize the cell-to-gate weights into scratch1,
  // then accumulate the cell-state contribution.
  if (use_peephole) {
    float* recovered_cell_weights = scratch1;
    tensor_utils::VectorScalarMultiply(cell_to_gate_weights, n_cell,
                                       cell_to_gate_weights_scale,
                                       recovered_cell_weights);
    tensor_utils::VectorBatchVectorCwiseProductAccumulate(
        recovered_cell_weights, n_cell, cell_state, n_batch, gate);
  }
  // Optional layer normalization, followed by the deferred bias add.
  if (use_layer_norm) {
    tensor_utils::MeanStddevNormalization(gate, gate, n_cell, n_batch);
    tensor_utils::VectorBatchVectorCwiseProduct(layer_norm_coefficients, n_cell,
                                                gate, n_batch, gate);
    tensor_utils::VectorBatchVectorAdd(gate_bias, n_cell, n_batch, gate);
  }
  // Apply the gate activation in place.
  tensor_utils::ApplyActivationToVector(gate, n_cell * n_batch, activation,
                                        gate);
}
// Computes the LSTM output state for the hybrid kernel:
//   output_state = projection(output_gate .* activation(cell_state))
// The gated activation (float) is quantized on the fly before the int8
// projection matmul; sparse (ledger) projection weights take a dedicated
// path.  Without projection weights the gated activation is copied directly.
// Scratch buffers: scratch0 = float gated activation (n_batch * n_cell),
// scratch1 = its int8 quantization, scratch2 = scaling factors, scratch3 =
// zero points, scratch4 = int32 accumulator.
void CalculateLstmOutputHybrid(
    int n_batch, int n_cell, int n_output, const float* cell_state,
    const float* output_gate, TfLiteFusedActivation activation,
    const int8_t* projection_weights, const uint8_t* projection_weights_ledger,
    float projection_weights_scale, const float* projection_bias,
    const float proj_clip, float* output_state, bool asymmetric_quantize_inputs,
    int32_t* projection_weights_row_sums, bool* compute_row_sums,
    CpuBackendContext* context, float* scratch0, int8_t* scratch1,
    float* scratch2, int32_t* scratch3, int32_t* scratch4) {
  // scratch0 <- output_gate .* activation(cell_state).
  tensor_utils::ApplyActivationToVector(cell_state, n_batch * n_cell,
                                        activation, scratch0);
  tensor_utils::VectorVectorCwiseProduct(output_gate, scratch0,
                                         n_batch * n_cell, scratch0);
  const bool use_projection = (projection_weights != nullptr);
  const bool use_projection_bias = (projection_bias != nullptr);
  if (use_projection) {
    // Seed the output with the projection bias (or zeros when absent).
    if (use_projection_bias) {
      tensor_utils::VectorBatchVectorAssign(projection_bias, n_output, n_batch,
                                            output_state);
    } else {
      std::fill_n(output_state, n_batch * n_output, 0.0f);
    }
    // An all-zero activation contributes nothing; skip quantization + matmul.
    if (!tensor_utils::IsZeroVector(scratch0, n_batch * n_cell)) {
      // Quantize the float activation per batch before the int8 matmul.
      tensor_utils::BatchQuantizeFloats(scratch0, n_batch, n_cell, scratch1,
                                        scratch2, scratch3,
                                        asymmetric_quantize_inputs);
      if (projection_weights_ledger != nullptr) {
        // Sparse projection: per-batch effective scale.
        std::vector<float> scales(n_batch);
        for (int i = 0; i < n_batch; i++) {
          scales[i] = projection_weights_scale * scratch2[i];
        }
        tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate(
            projection_weights, projection_weights_ledger, n_output, n_cell,
            scratch1, scales.data(), n_batch, output_state);
      } else {
        // Dense projection with row-sum correction for asymmetric inputs.
        tensor_utils::MatrixBatchVectorMultiplyAccumulate(
            projection_weights, n_output, n_cell, scratch1,
            projection_weights_scale, scratch2, n_batch, output_state,
            nullptr, scratch3, scratch4,
            projection_weights_row_sums, compute_row_sums, scratch2, context);
      }
    }
    if (proj_clip > 0.0f) {
      tensor_utils::CwiseClipping(output_state, n_batch * n_output, proj_clip);
    }
  } else {
    // No projection layer: the gated activation is the new output state.
    std::copy_n(scratch0, n_batch * n_output, output_state);
  }
}
// Computes one LSTM gate for the fully-integer 8x8->16 kernel.  Biases are
// folded into the two quantized matmuls; optional peephole and layer-norm
// contributions are applied before the final int16 activation.  Only sigmoid
// and tanh activations are supported.
void CalculateLstmGateInteger8x8_16(
    const int8_t* input, const int8_t* input_to_gate_weights,
    const int32_t* input_to_gate_bias, const int32_t input_to_gate_scale_a,
    const int32_t input_to_gate_scale_b,
    const int8_t* output_state, const int8_t* recurrent_to_gate_weights,
    const int32_t* recurrent_to_gate_bias,
    const int32_t recurrent_to_gate_scale_a,
    const int32_t recurrent_to_gate_scale_b,
    const int16_t* cell_state, const int16_t* cell_to_gate_weights,
    const int32_t cell_to_gate_scale_a, const int32_t cell_to_gate_scale_b,
    const int16_t* layer_norm_coefficients, const int32_t* layer_norm_bias,
    const int32_t layer_norm_input_scale_a,
    const int32_t layer_norm_input_scale_b,
    const int32_t layer_norm_variance_guard,
    const int n_batch, const int n_input, const int n_output, const int n_cell,
    const TfLiteFusedActivation activation,
    int16_t* gate,
    CpuBackendContext* context,
    int32_t* scratch5) {
  const bool has_peephole = (cell_to_gate_weights != nullptr);
  const bool has_layer_norm = (layer_norm_coefficients != nullptr);
  // Start the accumulator at zero; the matmuls below add their own biases.
  std::fill_n(gate, n_batch * n_cell, 0);
  // Input contribution.
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      input, input_to_gate_bias, input_to_gate_weights, input_to_gate_scale_a,
      input_to_gate_scale_b, n_batch, n_input, n_cell, 0, scratch5, gate,
      context);
  // Recurrent contribution.
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      output_state, recurrent_to_gate_bias, recurrent_to_gate_weights,
      recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
      n_cell, 0, scratch5, gate, context);
  // Optional peephole contribution from the cell state.
  if (has_peephole) {
    tensor_utils::VectorBatchVectorCwiseProductAccumulate(
        cell_to_gate_weights, n_output, cell_state, n_batch,
        cell_to_gate_scale_a, cell_to_gate_scale_b, gate);
  }
  // Optional layer normalization (in place).
  if (has_layer_norm) {
    tensor_utils::ApplyLayerNorm(
        gate, layer_norm_coefficients, layer_norm_bias,
        layer_norm_input_scale_a, layer_norm_input_scale_b,
        layer_norm_variance_guard, n_batch, n_cell, gate);
  }
  // Gate activation; any other activation kind is a programming error.
  if (activation == kTfLiteActSigmoid) {
    tensor_utils::ApplySigmoid(gate, n_batch, n_cell, gate);
  } else if (activation == kTfLiteActTanh) {
    tensor_utils::ApplyTanh(3, gate, n_batch, n_cell, gate);
  } else {
    TFLITE_ASSERT_FALSE;
  }
}
// Updates the quantized LSTM cell state in place:
//   cell_state = clip(forget_gate .* cell_state + input_gate .* cell_gate)
// Under CIFG the input gate is derived as (1 - forget_gate).
// NOTE: `forget_gate` is clobbered — once it has been folded into
// cell_state it is reused as scratch for the update term.
void UpdateLstmCellInteger(int n_batch, int n_cell, int16_t* cell_state,
                           int32_t cell_state_scale, const int16_t* input_gate,
                           int16_t* forget_gate, const int16_t* cell_gate,
                           bool use_cifg, int16_t clip) {
  int16_t* update = forget_gate;
  // cell_state = forget_gate .* cell_state (shift 15).
  tensor_utils::CwiseMul(forget_gate, cell_state, n_batch, n_cell, 15,
                         cell_state);
  if (!use_cifg) {
    // update = input_gate .* cell_gate.
    tensor_utils::CwiseMul(input_gate, cell_gate, n_batch, n_cell,
                           30 + cell_state_scale, update);
  } else {
    // CIFG: synthesize the input gate as (1 - forget_gate), then multiply.
    tensor_utils::Sub1Vector(forget_gate, n_batch * n_cell, update);
    tensor_utils::CwiseMul(update, cell_gate, n_batch, n_cell,
                           30 + cell_state_scale, update);
  }
  tensor_utils::CwiseAdd(cell_state, update, n_batch, n_cell, cell_state);
  // Optional symmetric clipping of the new cell state.
  if (clip > 0) {
    tensor_utils::CwiseClipping(cell_state, n_batch * n_cell, clip);
  }
}
// Computes the output state for the 8x8_16 quantized LSTM:
//   output_state = [clip(projection(]output_gate .* tanh(cell_state)[))]
// scratch0 (int16) holds tanh(cell_state), scratch1 (int8) the quantized
// hidden value, scratch2 (int32) is matmul scratch.
void CalculateLstmOutputInteger8x8_16(
    int n_batch, int n_cell, int n_output, const int16_t* cell_state,
    int32_t cell_state_scale, const int16_t* output_gate,
    int32_t hidden_scale_a, int32_t hidden_scale_b, int32_t hidden_zp,
    const int8_t* projection_weights, int32_t proj_scale_a,
    int32_t proj_scale_b, const int32_t* projection_bias,
    int32_t output_state_zp, int8_t quantized_proj_clip, int8_t* output_state,
    CpuBackendContext* context, int16_t* scratch0, int8_t* scratch1,
    int32_t* scratch2) {
  // scratch0 = tanh(cell_state), rescaled by the cell-state exponent.
  tensor_utils::ApplyTanh(15 + cell_state_scale, cell_state, n_batch, n_cell,
                          scratch0);
  if (projection_weights == nullptr) {
    // No projection: the gated hidden value is the output state directly.
    tensor_utils::CwiseMul(output_gate, scratch0, hidden_scale_a,
                           hidden_scale_b, n_batch, n_cell, hidden_zp,
                           output_state);
    return;
  }
  // hidden (scratch1) = output_gate .* tanh(cell_state), quantized to int8.
  tensor_utils::CwiseMul(output_gate, scratch0, hidden_scale_a,
                         hidden_scale_b, n_batch, n_cell, -hidden_zp,
                         scratch1);
  // output_state = projection_weights * hidden + projection_bias.
  std::fill_n(output_state, n_batch * n_output, 0);
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      scratch1, projection_bias, projection_weights, proj_scale_a,
      proj_scale_b, n_batch, n_cell, n_output, output_state_zp, scratch2,
      output_state, context);
  // Optional clipping of the projected output.
  if (quantized_proj_clip > 0) {
    tensor_utils::CwiseClipping(output_state, n_batch * n_output,
                                quantized_proj_clip);
  }
}
// Computes one LSTM gate for the fully-integer 8x8_8 kernel:
//   gate = activation(layer_norm(input * W_in + output_state * W_rec))
// The gate bias is passed to the layer-norm step, which is applied
// unconditionally in this kernel. scratch0/scratch1 hold the two int8
// intermediate products.
void CalculateLstmGateInteger8x8_8(
    const int8_t* input, int32_t input_zp, const int8_t* input_to_gate_weight,
    const int32_t input_to_gate_scale_a, const int32_t input_to_gate_scale_b,
    const int32_t input_times_weights_scale_a,
    const int32_t input_times_weights_scale_b,
    const int32_t input_times_weights_zp,
    const int8_t* output_state, const int32_t output_state_zp,
    const int8_t* recurrent_to_gate_weight,
    const int32_t recurrent_to_gate_scale_a,
    const int32_t recurrent_to_gate_scale_b,
    const int32_t output_state_times_weights_scale_a,
    const int32_t output_state_times_weights_scale_b,
    const int32_t output_state_times_weights_zp,
    const int16_t* layer_norm_gate_weight,
    const int32_t layer_norm_gate_scale_a,
    const int32_t layer_norm_gate_scale_b, const int32_t* gate_bias,
    const int n_batch, const int n_input, const int n_output, const int n_cell,
    const TfLiteFusedActivation activation,
    int16_t* gate,
    int8_t* scratch0, int8_t* scratch1) {
  // Input contribution -> scratch0, requantized with the intermediate
  // zero point input_times_weights_zp.
  tensor_utils::MatrixBatchVectorMultiply(
      input, input_zp, input_to_gate_weight, input_to_gate_scale_a,
      input_to_gate_scale_b, n_batch, n_input, n_cell, scratch0,
      input_times_weights_zp);
  // Recurrent contribution -> scratch1.
  tensor_utils::MatrixBatchVectorMultiply(
      output_state, output_state_zp, recurrent_to_gate_weight,
      recurrent_to_gate_scale_a, recurrent_to_gate_scale_b, n_batch, n_output,
      n_cell, scratch1, output_state_times_weights_zp);
  // gate = saturating sum of the two contributions, widened to int16.
  tensor_utils::TwoGateSaturatingAdd(
      scratch0, input_times_weights_zp, scratch1, output_state_times_weights_zp,
      input_times_weights_scale_a, input_times_weights_scale_b,
      output_state_times_weights_scale_a, output_state_times_weights_scale_b,
      n_batch, n_cell, gate);
  // Layer normalization (with the gate bias), in place.
  tensor_utils::ApplyLayerNormFloat(
      gate, layer_norm_gate_weight, layer_norm_gate_scale_a,
      layer_norm_gate_scale_b, gate_bias, n_batch, n_cell, gate);
  // Gate activation, in place; only sigmoid and tanh are valid.
  if (activation == kTfLiteActSigmoid) {
    tensor_utils::ApplySigmoidFloat(gate, n_batch, n_cell, gate);
  } else if (activation == kTfLiteActTanh) {
    tensor_utils::ApplyTanhFloat(gate, n_batch, n_cell, -12, gate);
  } else {
    TFLITE_ASSERT_FALSE;
  }
}
// Computes the output state for the fully-integer 8x8_8 LSTM kernel:
//   output_state = clip(projection(output_gate .* tanh(cell_state)))
// `scratch` is an n_batch * n_cell int16 buffer. Unlike the 8x8_16 variant
// the projection here is unconditional (no nullptr fall-back path).
void CalculateLstmOutputInteger8x8_8(
    int n_batch, int n_cell, int n_output, const int16_t* cell_state,
    const int16_t* output_gate, const int8_t* projection_weights,
    int32_t proj_scale_a, int32_t proj_scale_b, const int32_t* projection_bias,
    int32_t output_state_zp, int32_t quantized_proj_clip, int8_t* output_state,
    int16_t* scratch) {
  // scratch = tanh(cell_state).
  tensor_utils::ApplyTanhFloat(cell_state, n_batch, n_cell, -15, scratch);
  // scratch = output_gate .* scratch. The shift is spelled 15 + 15 - 15,
  // presumably to document the fixed-point scales of the operands and
  // result — confirm against CwiseMul's contract before changing it.
  tensor_utils::CwiseMul(output_gate, scratch, n_batch, n_cell, 15 + 15 - 15,
                         scratch);
  // output_state = projection_weights * scratch + projection_bias,
  // requantized to int8 with zero point output_state_zp.
  tensor_utils::MatrixBatchVectorMultiply(
      scratch, projection_weights, proj_scale_a, proj_scale_b, projection_bias,
      n_batch, n_cell, n_output, output_state_zp, output_state);
  // Optional symmetric clipping of the projected output.
  if (quantized_proj_clip > 0) {
    tensor_utils::CwiseClipping(output_state, n_batch * n_output,
                                quantized_proj_clip);
  }
}
// Performs one float LSTM time step for a whole batch: computes the four
// gates, updates cell_state_ptr and output_state_ptr in place, and copies
// the new output state into output_ptr (stride output_batch_leading_dim
// per batch entry). CIFG mode (coupled input/forget gates) is selected by
// a null input_to_input_weights_ptr. scratch0..scratch3 hold the four gate
// activations; scratch4 is an accumulation buffer shared by the gate
// helpers.
inline void LstmStepFloat(
    const float* input_ptr, const float* input_to_input_weights_ptr,
    const float* input_to_forget_weights_ptr,
    const float* input_to_cell_weights_ptr,
    const float* input_to_output_weights_ptr, const float* aux_input_ptr,
    const float* aux_input_to_input_weights_ptr,
    const float* aux_input_to_forget_weights_ptr,
    const float* aux_input_to_cell_weights_ptr,
    const float* aux_input_to_output_weights_ptr,
    const float* recurrent_to_input_weights_ptr,
    const float* recurrent_to_forget_weights_ptr,
    const float* recurrent_to_cell_weights_ptr,
    const float* recurrent_to_output_weights_ptr,
    const float* cell_to_input_weights_ptr,
    const float* cell_to_forget_weights_ptr,
    const float* cell_to_output_weights_ptr,
    const float* input_layer_norm_coefficients_ptr,
    const float* forget_layer_norm_coefficients_ptr,
    const float* cell_layer_norm_coefficients_ptr,
    const float* output_layer_norm_coefficients_ptr,
    const float* input_gate_bias_ptr, const float* forget_gate_bias_ptr,
    const float* cell_gate_bias_ptr, const float* output_gate_bias_ptr,
    const float* projection_weights_ptr, const float* projection_bias_ptr,
    const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
    int n_aux_input, int n_output, int output_batch_leading_dim,
    float* output_state_ptr, float* cell_state_ptr, float* scratch0,
    float* scratch1, float* scratch2, float* scratch3, float* scratch4,
    float* output_ptr, bool recurrent_to_input_is_diag,
    bool recurrent_to_forget_is_diag, bool recurrent_to_cell_is_diag,
    bool recurrent_to_output_is_diag, CpuBackendContext* context) {
  ruy::profiler::ScopeLabel label("LstmStepFloat");
  // A null input-to-input weight matrix selects CIFG coupling.
  const bool use_cifg = (input_to_input_weights_ptr == nullptr);
  // Name the scratch buffers after the gates they hold.
  float* input_gate_scratch = scratch0;
  float* forget_gate_scratch = scratch1;
  float* cell_gate_scratch = scratch2;
  float* output_gate_scratch = scratch3;
  float* accumulation_scratch_buffer = scratch4;
  // All-zero inputs let the gate helpers skip the corresponding matmuls.
  const bool is_input_all_zeros =
      tensor_utils::IsZeroVector(input_ptr, n_batch * n_input);
  const bool is_aux_input_all_zeros =
      (aux_input_ptr == nullptr ||
       tensor_utils::IsZeroVector(aux_input_ptr, n_batch * n_aux_input));
  // Input gate (skipped entirely under CIFG).
  if (!use_cifg) {
    CalculateLstmGateFloat(
        input_ptr, input_to_input_weights_ptr, aux_input_ptr,
        aux_input_to_input_weights_ptr, output_state_ptr,
        recurrent_to_input_weights_ptr,
        cell_state_ptr, cell_to_input_weights_ptr,
        input_layer_norm_coefficients_ptr, input_gate_bias_ptr, n_batch,
        n_input, n_aux_input, n_output, n_cell,
        kTfLiteActSigmoid, input_gate_scratch,
        is_input_all_zeros, is_aux_input_all_zeros, accumulation_scratch_buffer,
        recurrent_to_input_is_diag, context);
  }
  // Forget gate.
  CalculateLstmGateFloat(
      input_ptr, input_to_forget_weights_ptr, aux_input_ptr,
      aux_input_to_forget_weights_ptr, output_state_ptr,
      recurrent_to_forget_weights_ptr,
      cell_state_ptr, cell_to_forget_weights_ptr,
      forget_layer_norm_coefficients_ptr, forget_gate_bias_ptr, n_batch,
      n_input, n_aux_input, n_output, n_cell,
      kTfLiteActSigmoid, forget_gate_scratch, is_input_all_zeros,
      is_aux_input_all_zeros, accumulation_scratch_buffer,
      recurrent_to_forget_is_diag, context);
  // Cell gate (modulation gate); no peephole connection here, hence the
  // two nullptr arguments for cell state / cell-to-gate weights.
  CalculateLstmGateFloat(
      input_ptr, input_to_cell_weights_ptr, aux_input_ptr,
      aux_input_to_cell_weights_ptr, output_state_ptr,
      recurrent_to_cell_weights_ptr,
      /*cell_state=*/nullptr,
      /*cell_to_gate_weights=*/nullptr, cell_layer_norm_coefficients_ptr,
      cell_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell,
      params->activation, cell_gate_scratch, is_input_all_zeros,
      is_aux_input_all_zeros, accumulation_scratch_buffer,
      recurrent_to_cell_is_diag, context);
  // cell_state = forget .* cell_state + input .* cell_gate (clipped).
  UpdateLstmCellFloat(n_batch, n_cell, cell_state_ptr, input_gate_scratch,
                      forget_gate_scratch, cell_gate_scratch, use_cifg,
                      params->cell_clip);
  // Output gate — computed after the cell update so its peephole term sees
  // the new cell state.
  CalculateLstmGateFloat(
      input_ptr, input_to_output_weights_ptr, aux_input_ptr,
      aux_input_to_output_weights_ptr, output_state_ptr,
      recurrent_to_output_weights_ptr,
      cell_state_ptr, cell_to_output_weights_ptr,
      output_layer_norm_coefficients_ptr, output_gate_bias_ptr, n_batch,
      n_input, n_aux_input, n_output, n_cell,
      kTfLiteActSigmoid, output_gate_scratch, is_input_all_zeros,
      is_aux_input_all_zeros, accumulation_scratch_buffer,
      recurrent_to_output_is_diag, context);
  // Output state (with optional projection). scratch2 is reused here: the
  // cell gate values are no longer needed after the cell update above.
  CalculateLstmOutputFloat(n_batch, n_cell, n_output, cell_state_ptr,
                           output_gate_scratch, params->activation,
                           projection_weights_ptr, projection_bias_ptr,
                           params->proj_clip, output_state_ptr, scratch2,
                           accumulation_scratch_buffer, context);
  // Copy the new output state into the (possibly strided) output tensor.
  for (int b = 0; b < n_batch; b++) {
    std::copy_n(output_state_ptr + b * n_output, n_output,
                output_ptr + b * output_batch_leading_dim);
  }
}
// Performs one hybrid (float activations, quantized int8 weights) LSTM time
// step for a whole batch. Float inputs/output state are quantized on the
// fly (per-batch scaling factors, optionally asymmetric), the gates are
// computed with int8 matmuls, and cell/output state are kept in float.
// CIFG mode is selected by a null input_to_input_weights_ptr. When
// asymmetric_quantize_inputs is set, per-row weight sums are cached in
// `row_sums` (laid out in n_cell-sized slices) and recomputed only while
// *compute_row_sums is true.
inline void LstmStepHybrid(
    const float* input_ptr, const int8_t* input_to_input_weights_ptr,
    const uint8_t* input_to_input_weights_ledger_ptr,
    float input_to_input_weights_scale,
    const int8_t* input_to_forget_weights_ptr,
    const uint8_t* input_to_forget_weights_ledger_ptr,
    float input_to_forget_weights_scale,
    const int8_t* input_to_cell_weights_ptr,
    const uint8_t* input_to_cell_weights_ledger_ptr,
    float input_to_cell_weights_scale,
    const int8_t* input_to_output_weights_ptr,
    const uint8_t* input_to_output_weights_ledger_ptr,
    float input_to_output_weights_scale, const float* aux_input_ptr,
    const int8_t* aux_input_to_input_weights_ptr,
    float aux_input_to_input_weights_scale,
    const int8_t* aux_input_to_forget_weights_ptr,
    float aux_input_to_forget_weights_scale,
    const int8_t* aux_input_to_cell_weights_ptr,
    float aux_input_to_cell_weights_scale,
    const int8_t* aux_input_to_output_weights_ptr,
    float aux_input_to_output_weights_scale,
    const int8_t* recurrent_to_input_weights_ptr,
    const float* recurrent_to_input_diag,
    const uint8_t* recurrent_to_input_weights_ledger_ptr,
    float recurrent_to_input_weights_scale,
    const int8_t* recurrent_to_forget_weights_ptr,
    const float* recurrent_to_forget_diag,
    const uint8_t* recurrent_to_forget_weights_ledger_ptr,
    float recurrent_to_forget_weights_scale,
    const int8_t* recurrent_to_cell_weights_ptr,
    const float* recurrent_to_cell_diag,
    const uint8_t* recurrent_to_cell_weights_ledger_ptr,
    float recurrent_to_cell_weights_scale,
    const int8_t* recurrent_to_output_weights_ptr,
    const float* recurrent_to_output_diag,
    const uint8_t* recurrent_to_output_weights_ledger_ptr,
    float recurrent_to_output_weights_scale,
    const int8_t* cell_to_input_weights_ptr, float cell_to_input_weights_scale,
    const int8_t* cell_to_forget_weights_ptr,
    float cell_to_forget_weights_scale,
    const int8_t* cell_to_output_weights_ptr,
    float cell_to_output_weights_scale,
    const float* input_layer_norm_coefficients_ptr,
    const float* forget_layer_norm_coefficients_ptr,
    const float* cell_layer_norm_coefficients_ptr,
    const float* output_layer_norm_coefficients_ptr,
    const float* input_gate_bias_ptr, const float* forget_gate_bias_ptr,
    const float* cell_gate_bias_ptr, const float* output_gate_bias_ptr,
    const int8_t* projection_weights_ptr,
    const uint8_t* projection_weights_ledger_ptr,
    float projection_weights_scale, const float* projection_bias_ptr,
    const TfLiteLSTMParams* params, int n_batch, int n_cell, int n_input,
    int n_aux_input, int n_output, int output_batch_leading_dim,
    float* scratch0, float* scratch1, float* scratch2, float* scratch3,
    float* input_sf, float* aux_input_sf, float* output_state_sf,
    float* scaling_factors_scratch, float* recovered_cell_weights,
    int8_t* quantized_input_ptr, int8_t* quantized_aux_input_ptr,
    int8_t* quantized_output_state_ptr, int8_t* quantized_output_scratch,
    float* output_state_ptr, float* cell_state_ptr, int32_t* accum_scratch_ptr,
    float* output_ptr, int32_t* input_zp, int32_t* aux_input_zp,
    int32_t* output_state_zp, int32_t* row_sums, int row_sums_size,
    bool* compute_row_sums, bool asymmetric_quantize_inputs,
    bool recurrent_to_input_is_diag, bool recurrent_to_forget_is_diag,
    bool recurrent_to_cell_is_diag, bool recurrent_to_output_is_diag,
    CpuBackendContext* context) {
  ruy::profiler::ScopeLabel label("LstmStepHybrid");
  // A null input-to-input weight matrix selects CIFG coupling.
  const bool use_cifg = (input_to_input_weights_ptr == nullptr);
  // Name the scratch buffers after the gates they hold.
  float* input_gate_scratch = scratch0;
  float* forget_gate_scratch = scratch1;
  float* cell_gate_scratch = scratch2;
  float* output_gate_scratch = scratch3;
  // Per-weight-matrix row-sum slices inside `row_sums`; remain null unless
  // asymmetric quantization is enabled below.
  int32_t* input_to_input_row_sums = nullptr;
  int32_t* input_to_forget_row_sums = nullptr;
  int32_t* input_to_cell_row_sums = nullptr;
  int32_t* input_to_output_row_sums = nullptr;
  int32_t* aux_input_to_input_row_sums = nullptr;
  int32_t* aux_input_to_forget_row_sums = nullptr;
  int32_t* aux_input_to_cell_row_sums = nullptr;
  int32_t* aux_input_to_output_row_sums = nullptr;
  int32_t* recurrent_to_input_row_sums = nullptr;
  int32_t* recurrent_to_forget_row_sums = nullptr;
  int32_t* recurrent_to_cell_row_sums = nullptr;
  int32_t* recurrent_to_output_row_sums = nullptr;
  int32_t* projection_weights_row_sums = nullptr;
  if (asymmetric_quantize_inputs) {
    // Sanity-check that the caller allocated exactly the expected number of
    // n_cell-sized row-sum slices. Under CIFG the input gate's slices are
    // absent (its pointers alias the forget gate's).
    int num_row_sums = use_cifg ? 6 : 8;
    if (aux_input_ptr != nullptr) {
      num_row_sums += use_cifg ? 3 : 4;
    }
    if (projection_weights_ptr != nullptr) {
      // Projection row sums span n_output entries, counted here in units of
      // n_cell slices, rounded up.
      num_row_sums += ceil(static_cast<float>(n_output) / n_cell);
    }
    TF_LITE_ASSERT(row_sums_size == num_row_sums);
    // Carve `row_sums` into consecutive n_cell-sized slices. Note the CIFG
    // aliasing: with no input gate, the "input" slice points at the same
    // storage as the following slice.
    input_to_input_row_sums = row_sums;
    input_to_forget_row_sums =
        use_cifg ? input_to_input_row_sums : input_to_input_row_sums + n_cell;
    input_to_cell_row_sums = input_to_forget_row_sums + n_cell;
    input_to_output_row_sums = input_to_cell_row_sums + n_cell;
    if (aux_input_ptr != nullptr) {
      aux_input_to_input_row_sums = input_to_output_row_sums + n_cell;
      aux_input_to_forget_row_sums = use_cifg
                                         ? aux_input_to_input_row_sums
                                         : aux_input_to_input_row_sums + n_cell;
      aux_input_to_cell_row_sums = aux_input_to_forget_row_sums + n_cell;
      aux_input_to_output_row_sums = aux_input_to_cell_row_sums + n_cell;
    }
    recurrent_to_input_row_sums = aux_input_ptr
                                      ? aux_input_to_output_row_sums + n_cell
                                      : input_to_output_row_sums + n_cell;
    recurrent_to_forget_row_sums = use_cifg
                                       ? recurrent_to_input_row_sums
                                       : recurrent_to_input_row_sums + n_cell;
    recurrent_to_cell_row_sums = recurrent_to_forget_row_sums + n_cell;
    recurrent_to_output_row_sums = recurrent_to_cell_row_sums + n_cell;
    if (projection_weights_ptr != nullptr) {
      projection_weights_row_sums = recurrent_to_output_row_sums + n_cell;
    }
    // Row sums depend only on the (constant) weights, so they are computed
    // once and cached across invocations.
    if (*compute_row_sums) {
      ComputeRowSums(
          input_to_input_row_sums, input_to_forget_row_sums,
          input_to_cell_row_sums, input_to_output_row_sums,
          aux_input_to_input_row_sums, aux_input_to_forget_row_sums,
          aux_input_to_cell_row_sums, aux_input_to_output_row_sums,
          recurrent_to_input_row_sums, recurrent_to_forget_row_sums,
          recurrent_to_cell_row_sums, recurrent_to_output_row_sums,
          projection_weights_row_sums, row_sums, n_cell, n_input, n_aux_input,
          n_output, input_to_input_weights_ptr, input_to_forget_weights_ptr,
          input_to_cell_weights_ptr, input_to_output_weights_ptr,
          aux_input_to_input_weights_ptr, aux_input_to_forget_weights_ptr,
          aux_input_to_cell_weights_ptr, aux_input_to_output_weights_ptr,
          recurrent_to_input_weights_ptr, recurrent_to_forget_weights_ptr,
          recurrent_to_cell_weights_ptr, recurrent_to_output_weights_ptr,
          projection_weights_ptr, use_cifg, aux_input_ptr,
          recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
          recurrent_to_cell_is_diag, recurrent_to_output_is_diag);
      *compute_row_sums = false;
    }
  }
  // All-zero float vectors need no quantization and let the gate helpers
  // skip the corresponding matmuls.
  const bool is_input_all_zeros =
      tensor_utils::IsZeroVector(input_ptr, n_batch * n_input);
  const bool is_aux_input_all_zeros =
      (aux_input_ptr == nullptr ||
       tensor_utils::IsZeroVector(aux_input_ptr, n_batch * n_aux_input));
  const bool is_output_state_all_zeros =
      tensor_utils::IsZeroVector(output_state_ptr, n_batch * n_output);
  // Quantize the non-zero float inputs to int8 with per-batch scales.
  if (!is_input_all_zeros) {
    tensor_utils::BatchQuantizeFloats(input_ptr, n_batch, n_input,
                                      quantized_input_ptr, input_sf, input_zp,
                                      asymmetric_quantize_inputs);
  }
  if (!is_aux_input_all_zeros) {
    tensor_utils::BatchQuantizeFloats(aux_input_ptr, n_batch, n_aux_input,
                                      quantized_aux_input_ptr, aux_input_sf,
                                      aux_input_zp, asymmetric_quantize_inputs);
  }
  if (!is_output_state_all_zeros) {
    tensor_utils::BatchQuantizeFloats(
        output_state_ptr, n_batch, n_output, quantized_output_state_ptr,
        output_state_sf, output_state_zp, asymmetric_quantize_inputs);
  }
  // Input gate (skipped entirely under CIFG).
  if (!use_cifg) {
    CalculateLstmGateHybrid(
        quantized_input_ptr, input_sf, input_zp, input_to_input_weights_ptr,
        input_to_input_weights_ledger_ptr, input_to_input_weights_scale,
        input_to_input_row_sums, quantized_aux_input_ptr, aux_input_sf,
        aux_input_zp, aux_input_to_input_weights_ptr,
        aux_input_to_input_weights_scale, aux_input_to_input_row_sums,
        quantized_output_state_ptr, output_state_ptr, output_state_sf,
        output_state_zp, recurrent_to_input_weights_ptr,
        recurrent_to_input_diag, recurrent_to_input_weights_ledger_ptr,
        recurrent_to_input_weights_scale, recurrent_to_input_row_sums,
        cell_state_ptr, cell_to_input_weights_ptr, cell_to_input_weights_scale,
        input_layer_norm_coefficients_ptr, input_gate_bias_ptr, n_batch,
        n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid,
        input_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros,
        is_output_state_all_zeros, compute_row_sums, context,
        scaling_factors_scratch, recovered_cell_weights, accum_scratch_ptr,
        recurrent_to_input_is_diag);
  }
  // Forget gate.
  CalculateLstmGateHybrid(
      quantized_input_ptr, input_sf, input_zp, input_to_forget_weights_ptr,
      input_to_forget_weights_ledger_ptr, input_to_forget_weights_scale,
      input_to_forget_row_sums, quantized_aux_input_ptr, aux_input_sf,
      aux_input_zp, aux_input_to_forget_weights_ptr,
      aux_input_to_forget_weights_scale, aux_input_to_forget_row_sums,
      quantized_output_state_ptr, output_state_ptr, output_state_sf,
      output_state_zp, recurrent_to_forget_weights_ptr,
      recurrent_to_forget_diag, recurrent_to_forget_weights_ledger_ptr,
      recurrent_to_forget_weights_scale, recurrent_to_forget_row_sums,
      cell_state_ptr, cell_to_forget_weights_ptr, cell_to_forget_weights_scale,
      forget_layer_norm_coefficients_ptr, forget_gate_bias_ptr, n_batch,
      n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid,
      forget_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros,
      is_output_state_all_zeros, compute_row_sums, context,
      scaling_factors_scratch, recovered_cell_weights, accum_scratch_ptr,
      recurrent_to_forget_is_diag);
  // Cell gate (modulation gate); no peephole connection, hence the nullptr
  // cell state / cell-to-gate weight arguments.
  CalculateLstmGateHybrid(
      quantized_input_ptr, input_sf, input_zp, input_to_cell_weights_ptr,
      input_to_cell_weights_ledger_ptr, input_to_cell_weights_scale,
      input_to_cell_row_sums, quantized_aux_input_ptr, aux_input_sf,
      aux_input_zp, aux_input_to_cell_weights_ptr,
      aux_input_to_cell_weights_scale, aux_input_to_cell_row_sums,
      quantized_output_state_ptr, output_state_ptr, output_state_sf,
      output_state_zp, recurrent_to_cell_weights_ptr, recurrent_to_cell_diag,
      recurrent_to_cell_weights_ledger_ptr, recurrent_to_cell_weights_scale,
      recurrent_to_cell_row_sums,
      /*cell_state=*/nullptr, /*cell_to_gate_weights=*/nullptr,
      /*cell_to_gate_weights_scale=*/0.0f, cell_layer_norm_coefficients_ptr,
      cell_gate_bias_ptr, n_batch, n_input, n_aux_input, n_output, n_cell,
      params->activation, cell_gate_scratch, is_input_all_zeros,
      is_aux_input_all_zeros, is_output_state_all_zeros, compute_row_sums,
      context, scaling_factors_scratch, recovered_cell_weights,
      accum_scratch_ptr, recurrent_to_cell_is_diag);
  // cell_state = forget .* cell_state + input .* cell_gate (clipped).
  UpdateLstmCellFloat(n_batch, n_cell, cell_state_ptr, input_gate_scratch,
                      forget_gate_scratch, cell_gate_scratch, use_cifg,
                      params->cell_clip);
  // Output gate — computed after the cell update so its peephole term sees
  // the new cell state.
  CalculateLstmGateHybrid(
      quantized_input_ptr, input_sf, input_zp, input_to_output_weights_ptr,
      input_to_output_weights_ledger_ptr, input_to_output_weights_scale,
      input_to_output_row_sums, quantized_aux_input_ptr, aux_input_sf,
      aux_input_zp, aux_input_to_output_weights_ptr,
      aux_input_to_output_weights_scale, aux_input_to_output_row_sums,
      quantized_output_state_ptr, output_state_ptr, output_state_sf,
      output_state_zp, recurrent_to_output_weights_ptr,
      recurrent_to_output_diag, recurrent_to_output_weights_ledger_ptr,
      recurrent_to_output_weights_scale, recurrent_to_output_row_sums,
      cell_state_ptr, cell_to_output_weights_ptr, cell_to_output_weights_scale,
      output_layer_norm_coefficients_ptr, output_gate_bias_ptr, n_batch,
      n_input, n_aux_input, n_output, n_cell, kTfLiteActSigmoid,
      output_gate_scratch, is_input_all_zeros, is_aux_input_all_zeros,
      is_output_state_all_zeros, compute_row_sums, context,
      scaling_factors_scratch, recovered_cell_weights, accum_scratch_ptr,
      recurrent_to_output_is_diag);
  // Output state (with optional quantized projection). scratch2 is reused:
  // the cell gate values are dead after the cell update above.
  CalculateLstmOutputHybrid(
      n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
      params->activation, projection_weights_ptr, projection_weights_ledger_ptr,
      projection_weights_scale, projection_bias_ptr, params->proj_clip,
      output_state_ptr, asymmetric_quantize_inputs, projection_weights_row_sums,
      compute_row_sums, context, scratch2, quantized_output_scratch, input_sf,
      input_zp, accum_scratch_ptr);
  // Copy the new output state into the (possibly strided) output tensor.
  for (int b = 0; b < n_batch; b++) {
    std::copy_n(output_state_ptr + b * n_output, n_output,
                output_ptr + b * output_batch_leading_dim);
  }
}
// Performs one fully-quantized (8x8_16: int8 weights/activations, int16
// cell state) LSTM time step for a whole batch. Gates are computed with
// the precomputed effective biases and per-tensor rescale parameters,
// cell_state_ptr and output_state_ptr are updated in place, and the new
// output state is copied verbatim into output_ptr (no per-batch stride in
// this variant). CIFG mode is selected by a null
// input_to_input_weight_ptr. scratch0..scratch3 hold the four int16 gate
// activations, scratch4 is int8 hidden-state scratch, scratch5 is int32
// matmul scratch.
inline void LstmStepInteger8x8_16(
    const int8_t* input_ptr, const int8_t* input_to_input_weight_ptr,
    int32_t effective_input_to_input_scale_a,
    int32_t effective_input_to_input_scale_b,
    const int8_t* input_to_forget_weight_ptr,
    int32_t effective_input_to_forget_scale_a,
    int32_t effective_input_to_forget_scale_b,
    const int8_t* input_to_cell_weight_ptr,
    int32_t effective_input_to_cell_scale_a,
    int32_t effective_input_to_cell_scale_b,
    const int8_t* input_to_output_weight_ptr,
    int32_t effective_input_to_output_scale_a,
    int32_t effective_input_to_output_scale_b,
    const int8_t* recurrent_to_input_weight_ptr,
    int32_t effective_recurrent_to_input_scale_a,
    int32_t effective_recurrent_to_input_scale_b,
    const int8_t* recurrent_to_forget_weight_ptr,
    int32_t effective_recurrent_to_forget_scale_a,
    int32_t effective_recurrent_to_forget_scale_b,
    const int8_t* recurrent_to_cell_weight_ptr,
    int32_t effective_recurrent_to_cell_scale_a,
    int32_t effective_recurrent_to_cell_scale_b,
    const int8_t* recurrent_to_output_weight_ptr,
    int32_t effective_recurrent_to_output_scale_a,
    int32_t effective_recurrent_to_output_scale_b,
    const int16_t* cell_to_input_weight_ptr,
    int32_t effective_cell_to_input_scale_a,
    int32_t effective_cell_to_input_scale_b,
    const int16_t* cell_to_forget_weight_ptr,
    int32_t effective_cell_to_forget_scale_a,
    int32_t effective_cell_to_forget_scale_b,
    const int16_t* cell_to_output_weight_ptr,
    int32_t effective_cell_to_output_scale_a,
    int32_t effective_cell_to_output_scale_b,
    const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
    int32_t effective_proj_scale_b, int32_t hidden_zp,
    int32_t effective_hidden_scale_a, int32_t effective_hidden_scale_b,
    const int16_t* layer_norm_input_weight_ptr,
    int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
    const int16_t* layer_norm_forget_weight_ptr,
    int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
    const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
    int32_t layer_norm_cell_scale_b,
    const int16_t* layer_norm_output_weight_ptr,
    int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
    const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
    const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
    int16_t quantized_cell_clip, int8_t quantized_proj_clip,
    int32_t cell_state_scale, int32_t input_variance_guard,
    int32_t forget_variance_guard, int32_t cell_variance_guard,
    int32_t output_variance_guard,
    const int32_t* input_to_forget_effective_bias,
    const int32_t* recurrent_to_forget_effective_bias,
    const int32_t* input_to_cell_effective_bias,
    const int32_t* recurrent_to_cell_effective_bias,
    const int32_t* input_to_output_effective_bias,
    const int32_t* recurrent_to_output_effective_bias,
    const int32_t* input_to_input_effective_bias,
    const int32_t* recurrent_to_input_effective_bias,
    const int32_t* projection_effective_bias, int n_batch, int n_cell,
    int n_input, int n_output, int8_t* output_state_ptr,
    int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
    int16_t* scratch0, int16_t* scratch1, int16_t* scratch2, int16_t* scratch3,
    int8_t* scratch4, int32_t* scratch5, CpuBackendContext* context) {
  ruy::profiler::ScopeLabel label("LstmStepInteger8x8_16");
  // Name the scratch buffers after the gates they hold.
  int16_t* input_gate_scratch = scratch0;
  int16_t* forget_gate_scratch = scratch1;
  int16_t* cell_gate_scratch = scratch2;
  int16_t* output_gate_scratch = scratch3;
  // A null input-to-input weight matrix selects CIFG coupling.
  const bool use_cifg = (input_to_input_weight_ptr == nullptr);
  // The effective biases are precomputed by the caller and are mandatory
  // for every gate that is active in this configuration.
  TFLITE_DCHECK(input_to_forget_effective_bias);
  TFLITE_DCHECK(recurrent_to_forget_effective_bias);
  TFLITE_DCHECK(input_to_cell_effective_bias);
  TFLITE_DCHECK(recurrent_to_cell_effective_bias);
  TFLITE_DCHECK(input_to_output_effective_bias);
  TFLITE_DCHECK(recurrent_to_output_effective_bias);
  if (!use_cifg) {
    TFLITE_DCHECK(input_to_input_effective_bias);
    TFLITE_DCHECK(recurrent_to_input_effective_bias);
  }
  const bool use_projection = (projection_weight_ptr != nullptr);
  if (use_projection) {
    TFLITE_DCHECK(projection_effective_bias);
  }
  // Input gate (skipped entirely under CIFG).
  if (!use_cifg) {
    CalculateLstmGateInteger8x8_16(
        input_ptr, input_to_input_weight_ptr, input_to_input_effective_bias,
        effective_input_to_input_scale_a, effective_input_to_input_scale_b,
        output_state_ptr, recurrent_to_input_weight_ptr,
        recurrent_to_input_effective_bias, effective_recurrent_to_input_scale_a,
        effective_recurrent_to_input_scale_b, cell_state_ptr,
        cell_to_input_weight_ptr, effective_cell_to_input_scale_a,
        effective_cell_to_input_scale_b, layer_norm_input_weight_ptr,
        input_gate_bias_ptr, layer_norm_input_scale_a, layer_norm_input_scale_b,
        input_variance_guard, n_batch, n_input, n_output, n_cell,
        kTfLiteActSigmoid, input_gate_scratch, context, scratch5);
  }
  // Forget gate.
  CalculateLstmGateInteger8x8_16(
      input_ptr, input_to_forget_weight_ptr, input_to_forget_effective_bias,
      effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
      output_state_ptr, recurrent_to_forget_weight_ptr,
      recurrent_to_forget_effective_bias, effective_recurrent_to_forget_scale_a,
      effective_recurrent_to_forget_scale_b, cell_state_ptr,
      cell_to_forget_weight_ptr, effective_cell_to_forget_scale_a,
      effective_cell_to_forget_scale_b, layer_norm_forget_weight_ptr,
      forget_gate_bias_ptr, layer_norm_forget_scale_a,
      layer_norm_forget_scale_b, forget_variance_guard, n_batch, n_input,
      n_output, n_cell, kTfLiteActSigmoid, forget_gate_scratch, context,
      scratch5);
  // Cell gate (tanh); no peephole connection, hence the nullptr weights and
  // zero scales.
  CalculateLstmGateInteger8x8_16(
      input_ptr, input_to_cell_weight_ptr, input_to_cell_effective_bias,
      effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
      output_state_ptr, recurrent_to_cell_weight_ptr,
      recurrent_to_cell_effective_bias, effective_recurrent_to_cell_scale_a,
      effective_recurrent_to_cell_scale_b, cell_state_ptr,
      /*cell_to_gate_weights=*/nullptr, /*cell_to_gate_scale_a=*/0,
      /*cell_to_gate_scale_b=*/0, layer_norm_cell_weight_ptr,
      cell_gate_bias_ptr, layer_norm_cell_scale_a, layer_norm_cell_scale_b,
      cell_variance_guard, n_batch, n_input, n_output, n_cell, kTfLiteActTanh,
      cell_gate_scratch, context, scratch5);
  // cell_state = forget .* cell_state + input .* cell_gate (clipped).
  UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr, cell_state_scale,
                        input_gate_scratch, forget_gate_scratch,
                        cell_gate_scratch, use_cifg, quantized_cell_clip);
  // Output gate — computed after the cell update so its peephole term sees
  // the new cell state.
  CalculateLstmGateInteger8x8_16(
      input_ptr, input_to_output_weight_ptr, input_to_output_effective_bias,
      effective_input_to_output_scale_a, effective_input_to_output_scale_b,
      output_state_ptr, recurrent_to_output_weight_ptr,
      recurrent_to_output_effective_bias, effective_recurrent_to_output_scale_a,
      effective_recurrent_to_output_scale_b, cell_state_ptr,
      cell_to_output_weight_ptr, effective_cell_to_output_scale_a,
      effective_cell_to_output_scale_b, layer_norm_output_weight_ptr,
      output_gate_bias_ptr, layer_norm_output_scale_a,
      layer_norm_output_scale_b, output_variance_guard, n_batch, n_input,
      n_output, n_cell, kTfLiteActSigmoid, output_gate_scratch, context,
      scratch5);
  // Output state (with optional projection). scratch0 is reused here: the
  // input gate values are dead after the cell update above.
  CalculateLstmOutputInteger8x8_16(
      n_batch, n_cell, n_output, cell_state_ptr, cell_state_scale,
      output_gate_scratch, effective_hidden_scale_a, effective_hidden_scale_b,
      hidden_zp, projection_weight_ptr, effective_proj_scale_a,
      effective_proj_scale_b, projection_effective_bias, output_state_zp,
      quantized_proj_clip, output_state_ptr, context, scratch0, scratch4,
      scratch5);
  // Copy the new output state to the output tensor (dense, no stride).
  std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
}
inline void LstmStepInteger8x8_8(
const int8_t* input_ptr, int32_t input_zp,
const int8_t* input_to_input_weight_ptr,
int32_t effective_input_to_input_scale_a,
int32_t effective_input_to_input_scale_b,
const int8_t* input_to_forget_weight_ptr,
int32_t effective_input_to_forget_scale_a,
int32_t effective_input_to_forget_scale_b,
const int8_t* input_to_cell_weight_ptr,
int32_t effective_input_to_cell_scale_a,
int32_t effective_input_to_cell_scale_b,
const int8_t* input_to_output_weight_ptr,
int32_t effective_input_to_output_scale_a,
int32_t effective_input_to_output_scale_b,
const int8_t* recurrent_to_input_weight_ptr,
int32_t effective_recurrent_to_input_scale_a,
int32_t effective_recurrent_to_input_scale_b,
const int8_t* recurrent_to_forget_weight_ptr,
int32_t effective_recurrent_to_forget_scale_a,
int32_t effective_recurrent_to_forget_scale_b,
const int8_t* recurrent_to_cell_weight_ptr,
int32_t effective_recurrent_to_cell_scale_a,
int32_t effective_recurrent_to_cell_scale_b,
const int8_t* recurrent_to_output_weight_ptr,
int32_t effective_recurrent_to_output_scale_a,
int32_t effective_recurrent_to_output_scale_b,
const int8_t* cell_to_input_weight_ptr,
int32_t effective_cell_to_input_scale_a,
int32_t effective_cell_to_input_scale_b,
const int8_t* cell_to_forget_weight_ptr,
int32_t effective_cell_to_forget_scale_a,
int32_t effective_cell_to_forget_scale_b,
const int8_t* cell_to_output_weight_ptr,
int32_t effective_cell_to_output_scale_a,
int32_t effective_cell_to_output_scale_b,
const int8_t* projection_weight_ptr, int32_t effective_proj_scale_a,
int32_t effective_proj_scale_b, const int16_t* layer_norm_input_weight_ptr,
int32_t layer_norm_input_scale_a, int32_t layer_norm_input_scale_b,
const int16_t* layer_norm_forget_weight_ptr,
int32_t layer_norm_forget_scale_a, int32_t layer_norm_forget_scale_b,
const int16_t* layer_norm_cell_weight_ptr, int32_t layer_norm_cell_scale_a,
int32_t layer_norm_cell_scale_b,
const int16_t* layer_norm_output_weight_ptr,
int32_t layer_norm_output_scale_a, int32_t layer_norm_output_scale_b,
const int32_t* input_gate_bias_ptr, const int32_t* forget_gate_bias_ptr,
const int32_t* cell_gate_bias_ptr, const int32_t* output_gate_bias_ptr,
const int32_t* projection_bias_ptr, const TfLiteLSTMParams* params,
const int32_t* intermediate_scale_a, const int32_t* intermediate_scale_b,
const int32_t* intermediate_zp, int16_t quantized_cell_clip,
int8_t quantized_proj_clip, int n_batch, int n_cell, int n_input,
int n_output, int output_batch_leading_dim, int8_t* output_state_ptr,
int32_t output_state_zp, int16_t* cell_state_ptr, int8_t* output_ptr,
int8_t* scratch0, int8_t* scratch1, int16_t* scratch2, int16_t* scratch3,
int16_t* scratch4, int16_t* scratch5, int16_t* scratch6,
int16_t* scratch7) {
ruy::profiler::ScopeLabel label("LstmStepInteger8x8_8");
int16_t* forget_gate_scratch = scratch2;
int16_t* cell_gate_scratch = scratch3;
int16_t* output_gate_scratch = scratch4;
CalculateLstmGateInteger8x8_8(
input_ptr, input_zp, input_to_forget_weight_ptr,
effective_input_to_forget_scale_a, effective_input_to_forget_scale_b,
intermediate_scale_a[2], intermediate_scale_b[2], intermediate_zp[4],
output_state_ptr, output_state_zp, recurrent_to_forget_weight_ptr,
effective_recurrent_to_forget_scale_a,
effective_recurrent_to_forget_scale_b, intermediate_scale_a[3],
intermediate_scale_b[3], intermediate_zp[5], layer_norm_forget_weight_ptr,
layer_norm_forget_scale_a, layer_norm_forget_scale_b,
forget_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
kTfLiteActSigmoid, forget_gate_scratch, scratch0, scratch1);
CalculateLstmGateInteger8x8_8(
input_ptr, input_zp, input_to_cell_weight_ptr,
effective_input_to_cell_scale_a, effective_input_to_cell_scale_b,
intermediate_scale_a[4], intermediate_scale_b[4], intermediate_zp[7],
output_state_ptr, output_state_zp, recurrent_to_cell_weight_ptr,
effective_recurrent_to_cell_scale_a, effective_recurrent_to_cell_scale_b,
intermediate_scale_a[5], intermediate_scale_b[5], intermediate_zp[8],
layer_norm_cell_weight_ptr, layer_norm_cell_scale_a,
layer_norm_cell_scale_b, cell_gate_bias_ptr, n_batch, n_input, n_output,
n_cell, kTfLiteActTanh, cell_gate_scratch, scratch0, scratch1);
UpdateLstmCellInteger(n_batch, n_cell, cell_state_ptr,
-15, nullptr,
forget_gate_scratch, cell_gate_scratch,
true, quantized_cell_clip);
CalculateLstmGateInteger8x8_8(
input_ptr, input_zp, input_to_output_weight_ptr,
effective_input_to_output_scale_a, effective_input_to_output_scale_b,
intermediate_scale_a[6], intermediate_scale_b[6], intermediate_zp[10],
output_state_ptr, output_state_zp, recurrent_to_output_weight_ptr,
effective_recurrent_to_output_scale_a,
effective_recurrent_to_output_scale_b, intermediate_scale_a[11],
intermediate_scale_b[7], intermediate_zp[7], layer_norm_output_weight_ptr,
layer_norm_output_scale_a, layer_norm_output_scale_b,
output_gate_bias_ptr, n_batch, n_input, n_output, n_cell,
kTfLiteActSigmoid, output_gate_scratch, scratch0, scratch1);
CalculateLstmOutputInteger8x8_8(
n_batch, n_cell, n_output, cell_state_ptr, output_gate_scratch,
projection_weight_ptr, effective_proj_scale_a, effective_proj_scale_b,
projection_bias_ptr, output_state_zp, quantized_proj_clip,
output_state_ptr, scratch2);
std::copy_n(output_state_ptr, n_batch * n_output, output_ptr);
}
}
// Runs an all-float LSTM forward pass over the whole input sequence by
// invoking LstmStepFloat() once per timestep.
//
// Input layout: `input` has 2 dims ([n_batch, n_input], a single timestep)
// or 3 dims ([max_time, n_batch, n_input] when `time_major`, otherwise
// [n_batch, max_time, n_input]).
// Optional tensors (input-gate weights for non-CIFG, peephole cell_to_*
// weights, layer-norm coefficients, aux input, projection) may be nullptr.
// `scratch_buffer` provides per-gate scratch slabs; `output_state` and
// `cell_state` are read and updated in place. `output_offset` shifts where
// results land within each output row. Each `*_is_diag` flag indicates the
// corresponding recurrent weight tensor is stored as a diagonal (1-D)
// vector rather than a full matrix.
// Always returns kTfLiteOk.
TfLiteStatus EvalFloat(
    const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
    const TfLiteTensor* input_to_forget_weights,
    const TfLiteTensor* input_to_cell_weights,
    const TfLiteTensor* input_to_output_weights,
    const TfLiteTensor* recurrent_to_input_weights,
    const TfLiteTensor* recurrent_to_forget_weights,
    const TfLiteTensor* recurrent_to_cell_weights,
    const TfLiteTensor* recurrent_to_output_weights,
    const TfLiteTensor* cell_to_input_weights,
    const TfLiteTensor* cell_to_forget_weights,
    const TfLiteTensor* cell_to_output_weights,
    const TfLiteTensor* input_layer_norm_coefficients,
    const TfLiteTensor* forget_layer_norm_coefficients,
    const TfLiteTensor* cell_layer_norm_coefficients,
    const TfLiteTensor* output_layer_norm_coefficients,
    const TfLiteTensor* aux_input,
    const TfLiteTensor* aux_input_to_input_weights,
    const TfLiteTensor* aux_input_to_forget_weights,
    const TfLiteTensor* aux_input_to_cell_weights,
    const TfLiteTensor* aux_input_to_output_weights,
    const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
    const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
    const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
    const TfLiteLSTMParams* params, bool forward_sequence, bool time_major,
    int output_offset, TfLiteTensor* scratch_buffer, TfLiteTensor* output_state,
    TfLiteTensor* cell_state, TfLiteTensor* output,
    bool recurrent_to_input_is_diag, bool recurrent_to_forget_is_diag,
    bool recurrent_to_cell_is_diag, bool recurrent_to_output_is_diag,
    CpuBackendContext* context) {
  TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
  int max_time, n_batch;
  if (input->dims->size == 3) {
    max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
    n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
  } else {
    // 2-D input: a single timestep for the whole batch.
    max_time = 1;
    n_batch = input->dims->data[0];
  }
  // Feature sizes come from the trailing dimension of the inputs.
  const int n_input = input->dims->data[input->dims->size - 1];
  const int aux_input_size =
      (aux_input) ? aux_input->dims->data[aux_input->dims->size - 1] : 0;
  // n_cell from the input->output weights; n_output from the recurrent
  // weights (dim 0 when stored as a diagonal vector, dim 1 otherwise).
  const int n_cell = input_to_output_weights->dims->data[0];
  const int n_output = recurrent_to_output_is_diag
                           ? recurrent_to_output_weights->dims->data[0]
                           : recurrent_to_output_weights->dims->data[1];
  // CIFG (coupled input/forget gate) variant is signalled by the absence of
  // the input-gate weights; it needs one fewer gate scratch slab.
  const bool use_cifg = (input_to_input_weights == nullptr);
  float* scratch_buffer_ptr = GetTensorData<float>(scratch_buffer);
  float* input_gate_scratch = nullptr;
  float* cell_gate_scratch = nullptr;
  float* forget_gate_scratch = nullptr;
  float* output_gate_scratch = nullptr;
  float* accumulation_scratch_buffer = nullptr;
  // Carve the flat scratch buffer into one [n_batch, n_cell] slab per gate,
  // with an accumulation area after the last gate slab.
  if (use_cifg) {
    cell_gate_scratch = scratch_buffer_ptr;
    forget_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
    output_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
    accumulation_scratch_buffer = scratch_buffer_ptr + 3 * n_cell * n_batch;
  } else {
    input_gate_scratch = scratch_buffer_ptr;
    cell_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
    forget_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
    output_gate_scratch = scratch_buffer_ptr + 3 * n_cell * n_batch;
    accumulation_scratch_buffer = scratch_buffer_ptr + 4 * n_cell * n_batch;
  }
  // Output rows may be wider than n_output (e.g. fused fw/bw outputs);
  // output_offset selects the slice this pass writes.
  const int output_batch_leading_dim =
      output->dims->data[output->dims->size - 1];
  if (time_major) {
    // Time-major: all batch entries of a timestep are contiguous, so each
    // LstmStepFloat call processes the full batch at once.
    const int input_step = n_batch * n_input;
    const int output_step = n_batch * output_batch_leading_dim;
    for (int t = 0; t < max_time; t++) {
      // Walk the sequence forwards or backwards.
      const int t_rel = forward_sequence ? t : max_time - t - 1;
      const float* input_ptr = GetTensorData<float>(input) + t_rel * input_step;
      const float* aux_input_ptr = nullptr;
      if (aux_input) {
        aux_input_ptr = GetTensorData<float>(aux_input) + t_rel * input_step;
      }
      float* output_ptr =
          GetTensorData<float>(output) + t_rel * output_step + output_offset;
      LstmStepFloat(
          input_ptr, GetTensorData<float>(input_to_input_weights),
          GetTensorData<float>(input_to_forget_weights),
          GetTensorData<float>(input_to_cell_weights),
          GetTensorData<float>(input_to_output_weights), aux_input_ptr,
          GetTensorData<float>(aux_input_to_input_weights),
          GetTensorData<float>(aux_input_to_forget_weights),
          GetTensorData<float>(aux_input_to_cell_weights),
          GetTensorData<float>(aux_input_to_output_weights),
          GetTensorData<float>(recurrent_to_input_weights),
          GetTensorData<float>(recurrent_to_forget_weights),
          GetTensorData<float>(recurrent_to_cell_weights),
          GetTensorData<float>(recurrent_to_output_weights),
          GetTensorData<float>(cell_to_input_weights),
          GetTensorData<float>(cell_to_forget_weights),
          GetTensorData<float>(cell_to_output_weights),
          GetTensorData<float>(input_layer_norm_coefficients),
          GetTensorData<float>(forget_layer_norm_coefficients),
          GetTensorData<float>(cell_layer_norm_coefficients),
          GetTensorData<float>(output_layer_norm_coefficients),
          GetTensorData<float>(input_gate_bias),
          GetTensorData<float>(forget_gate_bias),
          GetTensorData<float>(cell_gate_bias),
          GetTensorData<float>(output_gate_bias),
          GetTensorData<float>(projection_weights),
          GetTensorData<float>(projection_bias), params, n_batch, n_cell,
          n_input, aux_input_size, n_output, output_batch_leading_dim,
          GetTensorData<float>(output_state), GetTensorData<float>(cell_state),
          input_gate_scratch, forget_gate_scratch, cell_gate_scratch,
          output_gate_scratch, accumulation_scratch_buffer, output_ptr,
          recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
          recurrent_to_cell_is_diag, recurrent_to_output_is_diag, context);
    }
  } else {
    // Batch-major: timesteps of one sequence are contiguous, so process one
    // batch entry at a time with n_batch == 1 per step call.
    for (int b = 0; b < n_batch; b++) {
      const int input_step = n_input;
      const int output_step = output_batch_leading_dim;
      for (int t = 0; t < max_time; t++) {
        const int t_rel = forward_sequence ? t : max_time - t - 1;
        // Flat index of (batch b, timestep t_rel) in batch-major layout.
        const int time_offset = b * max_time + t_rel;
        const float* input_ptr =
            GetTensorData<float>(input) + time_offset * input_step;
        const float* aux_input_ptr = nullptr;
        if (aux_input) {
          aux_input_ptr =
              GetTensorData<float>(aux_input) + time_offset * input_step;
        }
        float* output_ptr = GetTensorData<float>(output) +
                            time_offset * output_step + output_offset;
        // Per-batch views into the persistent state and gate scratch.
        float* output_state_ptr =
            GetTensorData<float>(output_state) + b * output_batch_leading_dim;
        float* cell_state_ptr = GetTensorData<float>(cell_state) + b * n_cell;
        float* input_gate_scratch_ptr =
            input_gate_scratch ? input_gate_scratch + b * n_cell : nullptr;
        float* forget_gate_scratch_ptr = forget_gate_scratch + b * n_cell;
        float* cell_gate_scratch_ptr = cell_gate_scratch + b * n_cell;
        float* output_gate_scratch_ptr = output_gate_scratch + b * n_cell;
        LstmStepFloat(
            input_ptr, GetTensorData<float>(input_to_input_weights),
            GetTensorData<float>(input_to_forget_weights),
            GetTensorData<float>(input_to_cell_weights),
            GetTensorData<float>(input_to_output_weights), aux_input_ptr,
            GetTensorData<float>(aux_input_to_input_weights),
            GetTensorData<float>(aux_input_to_forget_weights),
            GetTensorData<float>(aux_input_to_cell_weights),
            GetTensorData<float>(aux_input_to_output_weights),
            GetTensorData<float>(recurrent_to_input_weights),
            GetTensorData<float>(recurrent_to_forget_weights),
            GetTensorData<float>(recurrent_to_cell_weights),
            GetTensorData<float>(recurrent_to_output_weights),
            GetTensorData<float>(cell_to_input_weights),
            GetTensorData<float>(cell_to_forget_weights),
            GetTensorData<float>(cell_to_output_weights),
            GetTensorData<float>(input_layer_norm_coefficients),
            GetTensorData<float>(forget_layer_norm_coefficients),
            GetTensorData<float>(cell_layer_norm_coefficients),
            GetTensorData<float>(output_layer_norm_coefficients),
            GetTensorData<float>(input_gate_bias),
            GetTensorData<float>(forget_gate_bias),
            GetTensorData<float>(cell_gate_bias),
            GetTensorData<float>(output_gate_bias),
            GetTensorData<float>(projection_weights),
            GetTensorData<float>(projection_bias), params, 1,
            n_cell, n_input, aux_input_size, n_output, output_batch_leading_dim,
            output_state_ptr, cell_state_ptr, input_gate_scratch_ptr,
            forget_gate_scratch_ptr, cell_gate_scratch_ptr,
            output_gate_scratch_ptr, accumulation_scratch_buffer, output_ptr,
            recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
            recurrent_to_cell_is_diag, recurrent_to_output_is_diag, context);
      }
    }
  }
  return kTfLiteOk;
}
// Runs a hybrid LSTM forward pass over the whole input sequence by invoking
// LstmStepHybrid() once per timestep: activations stay in float while the
// weights are quantized int8 (each weight tensor is paired with a "ledger"
// tensor — presumably sparsity metadata consumed by LstmStepHybrid; confirm
// against its implementation).
//
// Input layout mirrors EvalFloat: `input` is 2-D ([n_batch, n_input], a
// single timestep) or 3-D ([max_time, n_batch, n_input] when `time_major`,
// else [n_batch, max_time, n_input]).
// The *_sf / *_quantized / prod_scaling_factors / recovered_cell_weights
// temporaries are per-invocation work buffers for quantizing activations
// inside the step function. `row_sums` (with `row_sums_size` and
// `compute_row_sums`) caches weight-row sums used when
// params->asymmetric_quantize_inputs is set.
// `output_state` and `cell_state` are read and updated in place;
// `output_offset` shifts where results land within each output row. Each
// `*_is_diag` flag indicates the corresponding recurrent weight tensor is
// stored as a diagonal (1-D) vector rather than a full matrix.
// Always returns kTfLiteOk.
TfLiteStatus EvalHybrid(
    const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
    const TfLiteTensor* input_to_input_weights_ledger,
    const TfLiteTensor* input_to_forget_weights,
    const TfLiteTensor* input_to_forget_weights_ledger,
    const TfLiteTensor* input_to_cell_weights,
    const TfLiteTensor* input_to_cell_weights_ledger,
    const TfLiteTensor* input_to_output_weights,
    const TfLiteTensor* input_to_output_weights_ledger,
    const TfLiteTensor* recurrent_to_input_weights,
    const TfLiteTensor* recurrent_to_input_weights_ledger,
    const TfLiteTensor* recurrent_to_forget_weights,
    const TfLiteTensor* recurrent_to_forget_weights_ledger,
    const TfLiteTensor* recurrent_to_cell_weights,
    const TfLiteTensor* recurrent_to_cell_weights_ledger,
    const TfLiteTensor* recurrent_to_output_weights,
    const TfLiteTensor* recurrent_to_output_weights_ledger,
    const TfLiteTensor* cell_to_input_weights,
    const TfLiteTensor* cell_to_forget_weights,
    const TfLiteTensor* cell_to_output_weights,
    const TfLiteTensor* input_layer_norm_coefficients,
    const TfLiteTensor* forget_layer_norm_coefficients,
    const TfLiteTensor* cell_layer_norm_coefficients,
    const TfLiteTensor* output_layer_norm_coefficients,
    const TfLiteTensor* aux_input,
    const TfLiteTensor* aux_input_to_input_weights,
    const TfLiteTensor* aux_input_to_forget_weights,
    const TfLiteTensor* aux_input_to_cell_weights,
    const TfLiteTensor* aux_input_to_output_weights,
    const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
    const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
    const TfLiteTensor* projection_weights,
    const TfLiteTensor* projection_weights_ledger,
    const TfLiteTensor* projection_bias, const TfLiteLSTMParams* params,
    bool forward_sequence, bool time_major, int output_offset,
    TfLiteTensor* scratch_buffer, TfLiteTensor* input_sf,
    TfLiteTensor* aux_input_sf, TfLiteTensor* output_state_sf,
    TfLiteTensor* prod_scaling_factors, TfLiteTensor* recovered_cell_weights,
    TfLiteTensor* input_quantized, TfLiteTensor* aux_input_quantized,
    TfLiteTensor* output_state_quantized, TfLiteTensor* cell_state_quantized,
    TfLiteTensor* output_state, TfLiteTensor* cell_state,
    TfLiteTensor* output_scratch_buffer, TfLiteTensor* output,
    TfLiteTensor* input_zp, TfLiteTensor* aux_input_zp,
    TfLiteTensor* output_state_zp, TfLiteTensor* row_sums, int row_sums_size,
    bool* compute_row_sums, bool recurrent_to_input_is_diag,
    bool recurrent_to_forget_is_diag, bool recurrent_to_cell_is_diag,
    bool recurrent_to_output_is_diag, CpuBackendContext* context) {
  TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
  const int n_input = input->dims->data[input->dims->size - 1];
  int max_time, n_batch;
  if (input->dims->size == 2) {
    // 2-D input: a single timestep for the whole batch.
    max_time = 1;
    n_batch = input->dims->data[0];
  } else {
    max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
    n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
  }
  const int aux_input_size =
      (aux_input) ? aux_input->dims->data[aux_input->dims->size - 1] : 0;
  // n_cell from the input->output weights; n_output from the recurrent
  // weights (dim 0 when stored as a diagonal vector, dim 1 otherwise).
  const int n_cell = input_to_output_weights->dims->data[0];
  const int n_output = recurrent_to_output_is_diag
                           ? recurrent_to_output_weights->dims->data[0]
                           : recurrent_to_output_weights->dims->data[1];
  // CIFG (coupled input/forget gate) variant is signalled by the absence of
  // the input-gate weights; it needs one fewer gate scratch slab.
  const bool use_cifg = (input_to_input_weights == nullptr);
  float* scratch_buffer_ptr = GetTensorData<float>(scratch_buffer);
  float* input_gate_scratch = nullptr;
  float* cell_gate_scratch = nullptr;
  float* forget_gate_scratch = nullptr;
  float* output_gate_scratch = nullptr;
  // Carve the flat scratch buffer into one [n_batch, n_cell] slab per gate.
  if (use_cifg) {
    cell_gate_scratch = scratch_buffer_ptr;
    forget_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
    output_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
  } else {
    input_gate_scratch = scratch_buffer_ptr;
    cell_gate_scratch = scratch_buffer_ptr + n_cell * n_batch;
    forget_gate_scratch = scratch_buffer_ptr + 2 * n_cell * n_batch;
    output_gate_scratch = scratch_buffer_ptr + 3 * n_cell * n_batch;
  }
  // Output rows may be wider than n_output; output_offset selects the slice
  // this pass writes.
  const int output_batch_leading_dim =
      output->dims->data[output->dims->size - 1];
  // Zero points and cached row sums are only needed for asymmetric
  // activation quantization; otherwise pass nullptrs to the step function.
  int32_t* input_zp_ptr = nullptr;
  int32_t* aux_input_zp_ptr = nullptr;
  int32_t* output_state_zp_ptr = nullptr;
  int32_t* row_sums_ptr = nullptr;
  if (params->asymmetric_quantize_inputs) {
    input_zp_ptr = GetTensorData<int32_t>(input_zp);
    aux_input_zp_ptr = GetTensorData<int32_t>(aux_input_zp);
    output_state_zp_ptr = GetTensorData<int32_t>(output_state_zp);
    row_sums_ptr = GetTensorData<int32_t>(row_sums)
  }
  if (time_major) {
    // Time-major: all batch entries of a timestep are contiguous, so each
    // LstmStepHybrid call processes the full batch at once.
    const int input_step = n_batch * n_input;
    const int output_step = n_batch * output_batch_leading_dim;
    for (int t = 0; t < max_time; t++) {
      // Walk the sequence forwards or backwards.
      const int t_rel = forward_sequence ? t : max_time - t - 1;
      const float* input_ptr = GetTensorData<float>(input) + t_rel * input_step;
      const float* aux_input_ptr = nullptr;
      if (aux_input) {
        aux_input_ptr = GetTensorData<float>(aux_input) + t_rel * input_step;
      }
      float* output_ptr =
          GetTensorData<float>(output) + t_rel * output_step + output_offset;
      LstmStepHybrid(
          input_ptr, GetTensorData<int8_t>(input_to_input_weights),
          GetTensorData<uint8_t>(input_to_input_weights_ledger),
          GetTensorScale(input_to_input_weights),
          GetTensorData<int8_t>(input_to_forget_weights),
          GetTensorData<uint8_t>(input_to_forget_weights_ledger),
          GetTensorScale(input_to_forget_weights),
          GetTensorData<int8_t>(input_to_cell_weights),
          GetTensorData<uint8_t>(input_to_cell_weights_ledger),
          GetTensorScale(input_to_cell_weights),
          GetTensorData<int8_t>(input_to_output_weights),
          GetTensorData<uint8_t>(input_to_output_weights_ledger),
          GetTensorScale(input_to_output_weights), aux_input_ptr,
          GetTensorData<int8_t>(aux_input_to_input_weights),
          GetTensorScale(aux_input_to_input_weights),
          GetTensorData<int8_t>(aux_input_to_forget_weights),
          GetTensorScale(aux_input_to_forget_weights),
          GetTensorData<int8_t>(aux_input_to_cell_weights),
          GetTensorScale(aux_input_to_cell_weights),
          GetTensorData<int8_t>(aux_input_to_output_weights),
          GetTensorScale(aux_input_to_output_weights),
          GetTensorData<int8_t>(recurrent_to_input_weights),
          GetTensorData<float>(recurrent_to_input_weights),
          GetTensorData<uint8_t>(recurrent_to_input_weights_ledger),
          GetTensorScale(recurrent_to_input_weights),
          GetTensorData<int8_t>(recurrent_to_forget_weights),
          GetTensorData<float>(recurrent_to_forget_weights),
          GetTensorData<uint8_t>(recurrent_to_forget_weights_ledger),
          GetTensorScale(recurrent_to_forget_weights),
          GetTensorData<int8_t>(recurrent_to_cell_weights),
          GetTensorData<float>(recurrent_to_cell_weights),
          GetTensorData<uint8_t>(recurrent_to_cell_weights_ledger),
          GetTensorScale(recurrent_to_cell_weights),
          GetTensorData<int8_t>(recurrent_to_output_weights),
          GetTensorData<float>(recurrent_to_output_weights),
          GetTensorData<uint8_t>(recurrent_to_output_weights_ledger),
          GetTensorScale(recurrent_to_output_weights),
          GetTensorData<int8_t>(cell_to_input_weights),
          GetTensorScale(cell_to_input_weights),
          GetTensorData<int8_t>(cell_to_forget_weights),
          GetTensorScale(cell_to_forget_weights),
          GetTensorData<int8_t>(cell_to_output_weights),
          GetTensorScale(cell_to_output_weights),
          GetTensorData<float>(input_layer_norm_coefficients),
          GetTensorData<float>(forget_layer_norm_coefficients),
          GetTensorData<float>(cell_layer_norm_coefficients),
          GetTensorData<float>(output_layer_norm_coefficients),
          GetTensorData<float>(input_gate_bias),
          GetTensorData<float>(forget_gate_bias),
          GetTensorData<float>(cell_gate_bias),
          GetTensorData<float>(output_gate_bias),
          GetTensorData<int8_t>(projection_weights),
          GetTensorData<uint8_t>(projection_weights_ledger),
          GetTensorScale(projection_weights),
          GetTensorData<float>(projection_bias), params, n_batch, n_cell,
          n_input, aux_input_size, n_output, output_batch_leading_dim,
          input_gate_scratch, forget_gate_scratch, cell_gate_scratch,
          output_gate_scratch, GetTensorData<float>(input_sf),
          GetTensorData<float>(aux_input_sf),
          GetTensorData<float>(output_state_sf),
          GetTensorData<float>(prod_scaling_factors),
          GetTensorData<float>(recovered_cell_weights),
          GetTensorData<int8_t>(input_quantized),
          GetTensorData<int8_t>(aux_input_quantized),
          GetTensorData<int8_t>(output_state_quantized),
          GetTensorData<int8_t>(cell_state_quantized),
          GetTensorData<float>(output_state), GetTensorData<float>(cell_state),
          GetTensorData<int32_t>(output_scratch_buffer), output_ptr,
          input_zp_ptr, aux_input_zp_ptr, output_state_zp_ptr, row_sums_ptr,
          row_sums_size, compute_row_sums, params->asymmetric_quantize_inputs,
          recurrent_to_input_is_diag, recurrent_to_forget_is_diag,
          recurrent_to_cell_is_diag, recurrent_to_output_is_diag, context);
    }
  } else {
    // Batch-major: timesteps of one sequence are contiguous, so process one
    // batch entry at a time with n_batch == 1 per step call.
    for (int b = 0; b < n_batch; b++) {
      const int input_step = n_input;
      const int output_step = output_batch_leading_dim;
      for (int t = 0; t < max_time; t++) {
        const int t_rel = forward_sequence ? t : max_time - t - 1;
        // Flat index of (batch b, timestep t_rel) in batch-major layout.
        const int time_offset = b * max_time + t_rel;
        const float* input_ptr =
            GetTensorData<float>(input) + time_offset * input_step;
        const float* aux_input_ptr = nullptr;
        if (aux_input) {
          aux_input_ptr =
              GetTensorData<float>(aux_input) + time_offset * input_step;
        }
        float* output_ptr = GetTensorData<float>(output) +
                            time_offset * output_step + output_offset;
        // Per-batch views into the persistent state and gate scratch.
        float* output_state_ptr =
            GetTensorData<float>(output_state) + b * output_batch_leading_dim;
        float* cell_state_ptr = GetTensorData<float>(cell_state) + b * n_cell;
        float* input_gate_scratch_ptr =
            input_gate_scratch ? input_gate_scratch + b * n_cell : nullptr;
        float* forget_gate_scratch_ptr = forget_gate_scratch + b * n_cell;
        float* cell_gate_scratch_ptr = cell_gate_scratch + b * n_cell;
        float* output_gate_scratch_ptr = output_gate_scratch + b * n_cell;
        LstmStepHybrid(
            input_ptr, GetTensorData<int8_t>(input_to_input_weights),
            GetTensorData<uint8_t>(input_to_input_weights_ledger),
            GetTensorScale(input_to_input_weights),
            GetTensorData<int8_t>(input_to_forget_weights),
            GetTensorData<uint8_t>(input_to_forget_weights_ledger),
            GetTensorScale(input_to_forget_weights),
            GetTensorData<int8_t>(input_to_cell_weights),
            GetTensorData<uint8_t>(input_to_cell_weights_ledger),
            GetTensorScale(input_to_cell_weights),
            GetTensorData<int8_t>(input_to_output_weights),
            GetTensorData<uint8_t>(input_to_output_weights_ledger),
            GetTensorScale(input_to_output_weights), aux_input_ptr,
            GetTensorData<int8_t>(aux_input_to_input_weights),
            GetTensorScale(aux_input_to_input_weights),
            GetTensorData<int8_t>(aux_input_to_forget_weights),
            GetTensorScale(aux_input_to_forget_weights),
            GetTensorData<int8_t>(aux_input_to_cell_weights),
            GetTensorScale(aux_input_to_cell_weights),
            GetTensorData<int8_t>(aux_input_to_output_weights),
            GetTensorScale(aux_input_to_output_weights),
            GetTensorData<int8_t>(recurrent_to_input_weights),
            GetTensorData<float>(recurrent_to_input_weights),
            GetTensorData<uint8_t>(recurrent_to_input_weights_ledger),
            GetTensorScale(recurrent_to_input_weights),
            GetTensorData<int8_t>(recurrent_to_forget_weights),
            GetTensorData<float>(recurrent_to_forget_weights),
            GetTensorData<uint8_t>(recurrent_to_forget_weights_ledger),
            GetTensorScale(recurrent_to_forget_weights),
            GetTensorData<int8_t>(recurrent_to_cell_weights),
            GetTensorData<float>(recurrent_to_cell_weights),
            GetTensorData<uint8_t>(recurrent_to_cell_weights_ledger),
            GetTensorScale(recurrent_to_cell_weights),
            GetTensorData<int8_t>(recurrent_to_output_weights),
            GetTensorData<float>(recurrent_to_output_weights),
            GetTensorData<uint8_t>(recurrent_to_output_weights_ledger),
            GetTensorScale(recurrent_to_output_weights),
            GetTensorData<int8_t>(cell_to_input_weights),
            GetTensorScale(cell_to_input_weights),
            GetTensorData<int8_t>(cell_to_forget_weights),
            GetTensorScale(cell_to_forget_weights),
            GetTensorData<int8_t>(cell_to_output_weights),
            GetTensorScale(cell_to_output_weights),
            GetTensorData<float>(input_layer_norm_coefficients),
            GetTensorData<float>(forget_layer_norm_coefficients),
            GetTensorData<float>(cell_layer_norm_coefficients),
            GetTensorData<float>(output_layer_norm_coefficients),
            GetTensorData<float>(input_gate_bias),
            GetTensorData<float>(forget_gate_bias),
            GetTensorData<float>(cell_gate_bias),
            GetTensorData<float>(output_gate_bias),
            GetTensorData<int8_t>(projection_weights),
            GetTensorData<uint8_t>(projection_weights_ledger),
            GetTensorScale(projection_weights),
            GetTensorData<float>(projection_bias), params,
            1, n_cell, n_input, aux_input_size, n_output,
            output_batch_leading_dim, input_gate_scratch_ptr,
            forget_gate_scratch_ptr, cell_gate_scratch_ptr,
            output_gate_scratch_ptr, GetTensorData<float>(input_sf),
            GetTensorData<float>(aux_input_sf),
            GetTensorData<float>(output_state_sf),
            GetTensorData<float>(prod_scaling_factors),
            GetTensorData<float>(recovered_cell_weights),
            GetTensorData<int8_t>(input_quantized),
            GetTensorData<int8_t>(aux_input_quantized),
            GetTensorData<int8_t>(output_state_quantized),
            GetTensorData<int8_t>(cell_state_quantized), output_state_ptr,
            cell_state_ptr, GetTensorData<int32_t>(output_scratch_buffer),
            output_ptr, input_zp_ptr, aux_input_zp_ptr, output_state_zp_ptr,
            row_sums_ptr, row_sums_size, compute_row_sums,
            params->asymmetric_quantize_inputs, recurrent_to_input_is_diag,
            recurrent_to_forget_is_diag, recurrent_to_cell_is_diag,
            recurrent_to_output_is_diag, context);
      }
    }
  }
  return kTfLiteOk;
}
TfLiteStatus EvalInteger8x8_16(
const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
const TfLiteTensor* input_to_forget_weights,
const TfLiteTensor* input_to_cell_weights,
const TfLiteTensor* input_to_output_weights,
const TfLiteTensor* recurrent_to_input_weights,
const TfLiteTensor* recurrent_to_forget_weights,
const TfLiteTensor* recurrent_to_cell_weights,
const TfLiteTensor* recurrent_to_output_weights,
const TfLiteTensor* cell_to_input_weights,
const TfLiteTensor* cell_to_forget_weights,
const TfLiteTensor* cell_to_output_weights,
const TfLiteTensor* input_layer_norm_coefficients,
const TfLiteTensor* forget_layer_norm_coefficients,
const TfLiteTensor* cell_layer_norm_coefficients,
const TfLiteTensor* output_layer_norm_coefficients,
const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
const TfLiteLSTMParams* params, bool forward_sequence, bool time_major,
const lstm_eval::IntegerLstmParameter* integer_lstm_param,
TfLiteTensor* output_state, TfLiteTensor* cell_state, TfLiteTensor* output,
TfLiteTensor* scratch0, TfLiteTensor* scratch1, TfLiteTensor* scratch2,
TfLiteTensor* scratch3, TfLiteTensor* scratch4, TfLiteTensor* scratch5,
CpuBackendContext* context) {
TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
const int n_input = input->dims->data[input->dims->size - 1];
int max_time, n_batch;
if (input->dims->size == 2) {
max_time = 1;
n_batch = input->dims->data[0];
} else {
max_time = (time_major) ? input->dims->data[0] : input->dims->data[1];
n_batch = (time_major) ? input->dims->data[1] : input->dims->data[0];
}
const int n_cell = input_to_output_weights->dims->data[0];
const int n_output = recurrent_to_output_weights->dims->data[1];
int output_state_zp = output_state->params.zero_point;
const int output_batch_leading_dim =
output->dims->data[output->dims->size - 1];
if (time_major) {
const int input_step = n_batch * n_input;
const int output_step = n_batch * output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = t;
int8_t* output_ptr = GetTensorData<int8_t>(output) + t_rel * output_step;
const int8_t* input_ptr =
GetTensorData<int8_t>(input) + t_rel * input_step;
LstmStepInteger8x8_16(
input_ptr, GetTensorData<int8_t>(input_to_input_weights),
integer_lstm_param->effective_input_to_input_scale_a,
integer_lstm_param->effective_input_to_input_scale_b,
GetTensorData<int8_t>(input_to_forget_weights),
integer_lstm_param->effective_input_to_forget_scale_a,
integer_lstm_param->effective_input_to_forget_scale_b,
GetTensorData<int8_t>(input_to_cell_weights),
integer_lstm_param->effective_input_to_cell_scale_a,
integer_lstm_param->effective_input_to_cell_scale_b,
GetTensorData<int8_t>(input_to_output_weights),
integer_lstm_param->effective_input_to_output_scale_a,
integer_lstm_param->effective_input_to_output_scale_b,
GetTensorData<int8_t>(recurrent_to_input_weights),
integer_lstm_param->effective_recurrent_to_input_scale_a,
integer_lstm_param->effective_recurrent_to_input_scale_b,
GetTensorData<int8_t>(recurrent_to_forget_weights),
integer_lstm_param->effective_recurrent_to_forget_scale_a,
integer_lstm_param->effective_recurrent_to_forget_scale_b,
GetTensorData<int8_t>(recurrent_to_cell_weights),
integer_lstm_param->effective_recurrent_to_cell_scale_a,
integer_lstm_param->effective_recurrent_to_cell_scale_b,
GetTensorData<int8_t>(recurrent_to_output_weights),
integer_lstm_param->effective_recurrent_to_output_scale_a,
integer_lstm_param->effective_recurrent_to_output_scale_b,
GetTensorData<int16_t>(cell_to_input_weights),
integer_lstm_param->effective_cell_to_input_scale_a,
integer_lstm_param->effective_cell_to_input_scale_b,
GetTensorData<int16_t>(cell_to_forget_weights),
integer_lstm_param->effective_cell_to_forget_scale_a,
integer_lstm_param->effective_cell_to_forget_scale_b,
GetTensorData<int16_t>(cell_to_output_weights),
integer_lstm_param->effective_cell_to_output_scale_a,
integer_lstm_param->effective_cell_to_output_scale_b,
GetTensorData<int8_t>(projection_weights),
integer_lstm_param->effective_proj_scale_a,
integer_lstm_param->effective_proj_scale_b,
integer_lstm_param->hidden_zp,
integer_lstm_param->effective_hidden_scale_a,
integer_lstm_param->effective_hidden_scale_b,
GetTensorData<int16_t>(input_layer_norm_coefficients),
integer_lstm_param->layer_norm_input_scale_a,
integer_lstm_param->layer_norm_input_scale_b,
GetTensorData<int16_t>(forget_layer_norm_coefficients),
integer_lstm_param->layer_norm_forget_scale_a,
integer_lstm_param->layer_norm_forget_scale_b,
GetTensorData<int16_t>(cell_layer_norm_coefficients),
integer_lstm_param->layer_norm_cell_scale_a,
integer_lstm_param->layer_norm_cell_scale_b,
GetTensorData<int16_t>(output_layer_norm_coefficients),
integer_lstm_param->layer_norm_output_scale_a,
integer_lstm_param->layer_norm_output_scale_b,
GetTensorData<int32_t>(input_gate_bias),
GetTensorData<int32_t>(forget_gate_bias),
GetTensorData<int32_t>(cell_gate_bias),
GetTensorData<int32_t>(output_gate_bias),
integer_lstm_param->quantized_cell_clip,
integer_lstm_param->quantized_proj_clip,
integer_lstm_param->cell_scale,
integer_lstm_param->input_variance_guard,
integer_lstm_param->forget_variance_guard,
integer_lstm_param->cell_variance_guard,
integer_lstm_param->output_variance_guard,
integer_lstm_param->input_to_forget_effective_bias.get(),
integer_lstm_param->recurrent_to_forget_effective_bias.get(),
integer_lstm_param->input_to_cell_effective_bias.get(),
integer_lstm_param->recurrent_to_cell_effective_bias.get(),
integer_lstm_param->input_to_output_effective_bias.get(),
integer_lstm_param->recurrent_to_output_effective_bias.get(),
integer_lstm_param->input_to_input_effective_bias.get(),
integer_lstm_param->recurrent_to_input_effective_bias.get(),
integer_lstm_param->projection_effective_bias.get(), n_batch, n_cell,
n_input, n_output, GetTensorData<int8_t>(output_state),
output_state_zp, GetTensorData<int16_t>(cell_state), output_ptr,
GetTensorData<int16_t>(scratch0), GetTensorData<int16_t>(scratch1),
GetTensorData<int16_t>(scratch2), GetTensorData<int16_t>(scratch3),
GetTensorData<int8_t>(scratch4), GetTensorData<int32_t>(scratch5),
context);
}
} else {
for (int b = 0; b < n_batch; b++) {
const int input_step = n_input;
const int output_step = output_batch_leading_dim;
for (int t = 0; t < max_time; t++) {
const int t_rel = forward_sequence ? t : max_time - t - 1;
const int time_offset = b * max_time + t_rel;
const int8_t* input_ptr =
GetTensorData<int8_t>(input) + time_offset * input_step;
int8_t* output_ptr =
GetTensorData<int8_t>(output) + time_offset * output_step;
int8_t* output_state_ptr =
GetTensorData<int8_t>(output_state) + b * output_batch_leading_dim;
int16_t* cell_state_ptr =
GetTensorData<int16_t>(cell_state) + b * n_cell;
LstmStepInteger8x8_16(
input_ptr, GetTensorData<int8_t>(input_to_input_weights),
integer_lstm_param->effective_input_to_input_scale_a,
integer_lstm_param->effective_input_to_input_scale_b,
GetTensorData<int8_t>(input_to_forget_weights),
integer_lstm_param->effective_input_to_forget_scale_a,
integer_lstm_param->effective_input_to_forget_scale_b,
GetTensorData<int8_t>(input_to_cell_weights),
integer_lstm_param->effective_input_to_cell_scale_a,
integer_lstm_param->effective_input_to_cell_scale_b,
GetTensorData<int8_t>(input_to_output_weights),
integer_lstm_param->effective_input_to_output_scale_a,
integer_lstm_param->effective_input_to_output_scale_b,
GetTensorData<int8_t>(recurrent_to_input_weights),
integer_lstm_param->effective_recurrent_to_input_scale_a,
integer_lstm_param->effective_recurrent_to_input_scale_b,
GetTensorData<int8_t>(recurrent_to_forget_weights),
integer_lstm_param->effective_recurrent_to_forget_scale_a,
integer_lstm_param->effective_recurrent_to_forget_scale_b,
GetTensorData<int8_t>(recurrent_to_cell_weights),
integer_lstm_param->effective_recurrent_to_cell_scale_a,
integer_lstm_param->effective_recurrent_to_cell_scale_b,
GetTensorData<int8_t>(recurrent_to_output_weights),
integer_lstm_param->effective_recurrent_to_output_scale_a,
integer_lstm_param->effective_recurrent_to_output_scale_b,
GetTensorData<int16_t>(cell_to_input_weights),
integer_lstm_param->effective_cell_to_input_scale_a,
integer_lstm_param->effective_cell_to_input_scale_b,
GetTensorData<int16_t>(cell_to_forget_weights),
integer_lstm_param->effective_cell_to_forget_scale_a,
integer_lstm_param->effective_cell_to_forget_scale_b,
GetTensorData<int16_t>(cell_to_output_weights),
integer_lstm_param->effective_cell_to_output_scale_a,
integer_lstm_param->effective_cell_to_output_scale_b,
GetTensorData<int8_t>(projection_weights),
integer_lstm_param->effective_proj_scale_a,
integer_lstm_param->effective_proj_scale_b,
integer_lstm_param->hidden_zp,
integer_lstm_param->effective_hidden_scale_a,
integer_lstm_param->effective_hidden_scale_b,
GetTensorData<int16_t>(input_layer_norm_coefficients),
integer_lstm_param->layer_norm_input_scale_a,
integer_lstm_param->layer_norm_input_scale_b,
GetTensorData<int16_t>(forget_layer_norm_coefficients),
integer_lstm_param->layer_norm_forget_scale_a,
integer_lstm_param->layer_norm_forget_scale_b,
GetTensorData<int16_t>(cell_layer_norm_coefficients),
integer_lstm_param->layer_norm_cell_scale_a,
integer_lstm_param->layer_norm_cell_scale_b,
GetTensorData<int16_t>(output_layer_norm_coefficients),
integer_lstm_param->layer_norm_output_scale_a,
integer_lstm_param->layer_norm_output_scale_b,
GetTensorData<int32_t>(input_gate_bias),
GetTensorData<int32_t>(forget_gate_bias),
GetTensorData<int32_t>(cell_gate_bias),
GetTensorData<int32_t>(output_gate_bias),
integer_lstm_param->quantized_cell_clip,
integer_lstm_param->quantized_proj_clip,
integer_lstm_param->cell_scale,
integer_lstm_param->input_variance_guard,
integer_lstm_param->forget_variance_guard,
integer_lstm_param->cell_variance_guard,
integer_lstm_param->output_variance_guard,
integer_lstm_param->input_to_forget_effective_bias.get(),
integer_lstm_param->recurrent_to_forget_effective_bias.get(),
integer_lstm_param->input_to_cell_effective_bias.get(),
integer_lstm_param->recurrent_to_cell_effective_bias.get(),
integer_lstm_param->input_to_output_effective_bias.get(),
integer_lstm_param->recurrent_to_output_effective_bias.get(),
integer_lstm_param->input_to_input_effective_bias.get(),
integer_lstm_param->recurrent_to_input_effective_bias.get(),
integer_lstm_param->projection_effective_bias.get(), 1,
n_cell, n_input, n_output, output_state_ptr, output_state_zp,
cell_state_ptr, output_ptr, GetTensorData<int16_t>(scratch0),
GetTensorData<int16_t>(scratch1), GetTensorData<int16_t>(scratch2),
GetTensorData<int16_t>(scratch3), GetTensorData<int8_t>(scratch4),
GetTensorData<int32_t>(scratch5), context);
}
}
}
return kTfLiteOk;
}
// Fully-integer (8x8_8 variant) LSTM evaluation entry point.
//
// Iterates over every timestep of `input` and delegates the per-step math to
// lstm_eval::LstmStepInteger8x8_8.  All per-gate quantized multipliers/shifts,
// zero points, clip values and pre-computed effective biases arrive in
// `integer_lstm_param`; `scratch0`..`scratch7` are caller-allocated
// temporaries reused across timesteps.  `output_state` and `cell_state` are
// read and updated in place.  Always returns kTfLiteOk.
TfLiteStatus EvalInteger8x8_8(
    const TfLiteTensor* input, const TfLiteTensor* input_to_input_weights,
    const TfLiteTensor* input_to_forget_weights,
    const TfLiteTensor* input_to_cell_weights,
    const TfLiteTensor* input_to_output_weights,
    const TfLiteTensor* recurrent_to_input_weights,
    const TfLiteTensor* recurrent_to_forget_weights,
    const TfLiteTensor* recurrent_to_cell_weights,
    const TfLiteTensor* recurrent_to_output_weights,
    const TfLiteTensor* cell_to_input_weights,
    const TfLiteTensor* cell_to_forget_weights,
    const TfLiteTensor* cell_to_output_weights,
    const TfLiteTensor* input_layer_norm_coefficients,
    const TfLiteTensor* forget_layer_norm_coefficients,
    const TfLiteTensor* cell_layer_norm_coefficients,
    const TfLiteTensor* output_layer_norm_coefficients,
    const TfLiteTensor* input_gate_bias, const TfLiteTensor* forget_gate_bias,
    const TfLiteTensor* cell_gate_bias, const TfLiteTensor* output_gate_bias,
    const TfLiteTensor* projection_weights, const TfLiteTensor* projection_bias,
    const TfLiteLSTMParams* params, TfLiteTensor* output_state,
    TfLiteTensor* cell_state, TfLiteTensor* output,
    const lstm_eval::IntegerLstmParameter* integer_lstm_param,
    TfLiteTensor* scratch0, TfLiteTensor* scratch1, TfLiteTensor* scratch2,
    TfLiteTensor* scratch3, TfLiteTensor* scratch4, TfLiteTensor* scratch5,
    TfLiteTensor* scratch6, TfLiteTensor* scratch7) {
  // Input is either rank-2 {n_batch, n_input} (a single timestep) or rank-3
  // {max_time, n_batch, n_input}.
  TF_LITE_ASSERT(input->dims->size >= 2 && input->dims->size <= 3);
  const int n_input = input->dims->data[input->dims->size - 1];
  int max_time, n_batch;
  if (input->dims->size == 2) {
    max_time = 1;
    n_batch = input->dims->data[0];
  } else {
    max_time = input->dims->data[0];
    n_batch = input->dims->data[1];
  }
  // Cell/output sizes are inferred from the weight tensor shapes.
  const int n_cell = input_to_output_weights->dims->data[0];
  const int n_output = recurrent_to_output_weights->dims->data[1];
  const int32_t input_zp = input->params.zero_point;
  const int32_t output_state_zp = output_state->params.zero_point;
  // The output may be wider than n_output (e.g. when concatenated with other
  // data); step by its real innermost dimension.
  const int output_batch_leading_dim =
      output->dims->data[output->dims->size - 1];
  const int input_step = n_batch * n_input;
  const int output_step = n_batch * output_batch_leading_dim;
  // Forward-only time-major traversal: one whole-batch LSTM step per t.
  for (int t = 0; t < max_time; t++) {
    const int t_rel = t;
    int8_t* output_ptr = GetTensorData<int8_t>(output) + t_rel * output_step;
    const int8_t* input_ptr = GetTensorData<int8_t>(input) + t_rel * input_step;
    // Argument order below must match LstmStepInteger8x8_8 exactly: for each
    // weight tensor its (data, scale_a, scale_b) triple, then layer-norm
    // coefficients, gate biases, projection bias, runtime params, shape info,
    // state tensors and finally the scratch buffers.
    lstm_eval::LstmStepInteger8x8_8(
        input_ptr, input_zp,
        GetTensorData<int8_t>(input_to_input_weights),
        integer_lstm_param->effective_input_to_input_scale_a,
        integer_lstm_param->effective_input_to_input_scale_b,
        GetTensorData<int8_t>(input_to_forget_weights),
        integer_lstm_param->effective_input_to_forget_scale_a,
        integer_lstm_param->effective_input_to_forget_scale_b,
        GetTensorData<int8_t>(input_to_cell_weights),
        integer_lstm_param->effective_input_to_cell_scale_a,
        integer_lstm_param->effective_input_to_cell_scale_b,
        GetTensorData<int8_t>(input_to_output_weights),
        integer_lstm_param->effective_input_to_output_scale_a,
        integer_lstm_param->effective_input_to_output_scale_b,
        GetTensorData<int8_t>(recurrent_to_input_weights),
        integer_lstm_param->effective_recurrent_to_input_scale_a,
        integer_lstm_param->effective_recurrent_to_input_scale_b,
        GetTensorData<int8_t>(recurrent_to_forget_weights),
        integer_lstm_param->effective_recurrent_to_forget_scale_a,
        integer_lstm_param->effective_recurrent_to_forget_scale_b,
        GetTensorData<int8_t>(recurrent_to_cell_weights),
        integer_lstm_param->effective_recurrent_to_cell_scale_a,
        integer_lstm_param->effective_recurrent_to_cell_scale_b,
        GetTensorData<int8_t>(recurrent_to_output_weights),
        integer_lstm_param->effective_recurrent_to_output_scale_a,
        integer_lstm_param->effective_recurrent_to_output_scale_b,
        GetTensorData<int8_t>(cell_to_input_weights),
        integer_lstm_param->effective_cell_to_input_scale_a,
        integer_lstm_param->effective_cell_to_input_scale_b,
        GetTensorData<int8_t>(cell_to_forget_weights),
        integer_lstm_param->effective_cell_to_forget_scale_a,
        integer_lstm_param->effective_cell_to_forget_scale_b,
        GetTensorData<int8_t>(cell_to_output_weights),
        integer_lstm_param->effective_cell_to_output_scale_a,
        integer_lstm_param->effective_cell_to_output_scale_b,
        GetTensorData<int8_t>(projection_weights),
        integer_lstm_param->effective_proj_scale_a,
        integer_lstm_param->effective_proj_scale_b,
        GetTensorData<int16_t>(input_layer_norm_coefficients),
        integer_lstm_param->layer_norm_input_scale_a,
        integer_lstm_param->layer_norm_input_scale_b,
        GetTensorData<int16_t>(forget_layer_norm_coefficients),
        integer_lstm_param->layer_norm_forget_scale_a,
        integer_lstm_param->layer_norm_forget_scale_b,
        GetTensorData<int16_t>(cell_layer_norm_coefficients),
        integer_lstm_param->layer_norm_cell_scale_a,
        integer_lstm_param->layer_norm_cell_scale_b,
        GetTensorData<int16_t>(output_layer_norm_coefficients),
        integer_lstm_param->layer_norm_output_scale_a,
        integer_lstm_param->layer_norm_output_scale_b,
        GetTensorData<int32_t>(input_gate_bias),
        GetTensorData<int32_t>(forget_gate_bias),
        GetTensorData<int32_t>(cell_gate_bias),
        GetTensorData<int32_t>(output_gate_bias),
        GetTensorData<int32_t>(projection_bias),
        params, integer_lstm_param->intermediate_scale_a,
        integer_lstm_param->intermediate_scale_b,
        integer_lstm_param->intermediate_zp,
        integer_lstm_param->quantized_cell_clip,
        integer_lstm_param->quantized_proj_clip, n_batch, n_cell, n_input,
        n_output, output_batch_leading_dim, GetTensorData<int8_t>(output_state),
        output_state_zp, GetTensorData<int16_t>(cell_state), output_ptr,
        GetTensorData<int8_t>(scratch0), GetTensorData<int8_t>(scratch1),
        GetTensorData<int16_t>(scratch2), GetTensorData<int16_t>(scratch3),
        GetTensorData<int16_t>(scratch4), GetTensorData<int16_t>(scratch5),
        GetTensorData<int16_t>(scratch6), GetTensorData<int16_t>(scratch7));
  }
  return kTfLiteOk;
}
}
}
}
} | #include "tensorflow/lite/kernels/lstm_eval.h"
#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>

#include <gtest/gtest.h>

#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace {
// Returns true iff the first `size` elements of `result` and
// `expected_result` compare equal element-by-element (trivially true for
// size == 0).  Uses std::equal instead of a hand-rolled index loop.
template <typename T>
bool ArrayEq(const T* result, const T* expected_result, int size) {
  return std::equal(result, result + size, expected_result);
}
// Returns true iff every pair of corresponding elements in `result` and
// `expected_result` differs by at most `threshold` in absolute value
// (trivially true for size == 0).  Uses std::equal with a tolerance
// predicate instead of a hand-rolled index loop.
template <typename T>
bool ArrayFloatNear(const T* result, const T* expected_result, int size,
                    double threshold) {
  return std::equal(result, result + size, expected_result,
                    [threshold](T lhs, T rhs) {
                      return std::abs(lhs - rhs) <= threshold;
                    });
}
// Base test fixture for the LSTM eval tests.
//
// Owns the golden weight data (std::vectors) plus the TfLiteTensor shells
// handed to the kernel under test.  Each Get*() accessor packs the backing
// vector's shape into the tensor's dims array and points the tensor's data at
// the vector -- the tensor never owns the storage, so it is only valid while
// this fixture is alive.
//
// NOTE(review): the destructor is non-virtual.  Derived fixtures
// (QuantizedLstmParam, HybridLstmParam) are only used as concrete stack
// objects in this file; do not delete a derived object through a
// BaseLstmParam pointer.
class BaseLstmParam {
 public:
  // Input-to-gate weights, int8, shape {n_cell_, n_input_}.
  TfLiteTensor* Geti2i() {
    PackWeightToTensor(&i2i_tensor_, i2i_, i2i_size_);
    i2i_tensor_.data.int8 = i2i_.data();
    return &i2i_tensor_;
  }
  TfLiteTensor* Geti2f() {
    PackWeightToTensor(&i2f_tensor_, i2f_, i2f_size_);
    i2f_tensor_.data.int8 = i2f_.data();
    return &i2f_tensor_;
  }
  TfLiteTensor* Geti2c() {
    PackWeightToTensor(&i2c_tensor_, i2c_, i2c_size_);
    i2c_tensor_.data.int8 = i2c_.data();
    return &i2c_tensor_;
  }
  TfLiteTensor* Geti2o() {
    PackWeightToTensor(&i2o_tensor_, i2o_, i2o_size_);
    i2o_tensor_.data.int8 = i2o_.data();
    return &i2o_tensor_;
  }
  // Recurrent-to-gate weights, int8, shape {n_cell_, n_output_}.
  TfLiteTensor* Getr2i() {
    PackWeightToTensor(&r2i_tensor_, r2i_, r2i_size_);
    r2i_tensor_.data.int8 = r2i_.data();
    return &r2i_tensor_;
  }
  TfLiteTensor* Getr2f() {
    PackWeightToTensor(&r2f_tensor_, r2f_, r2f_size_);
    r2f_tensor_.data.int8 = r2f_.data();
    return &r2f_tensor_;
  }
  TfLiteTensor* Getr2c() {
    PackWeightToTensor(&r2c_tensor_, r2c_, r2c_size_);
    r2c_tensor_.data.int8 = r2c_.data();
    return &r2c_tensor_;
  }
  TfLiteTensor* Getr2o() {
    PackWeightToTensor(&r2o_tensor_, r2o_, r2o_size_);
    r2o_tensor_.data.int8 = r2o_.data();
    return &r2o_tensor_;
  }
  // Projection weights, int8, shape {n_cell_, n_output_}.
  TfLiteTensor* GetProjection() {
    PackWeightToTensor(&projection_tensor_, projection_, projection_size_);
    projection_tensor_.data.int8 = projection_.data();
    return &projection_tensor_;
  }
  // Frees only the dims arrays allocated by PackWeightToTensor; tensor
  // payloads are member vectors and release themselves.
  ~BaseLstmParam() {
    TfLiteIntArrayFree(input_tensor_.dims);
    TfLiteIntArrayFree(i2i_tensor_.dims);
    TfLiteIntArrayFree(i2f_tensor_.dims);
    TfLiteIntArrayFree(i2c_tensor_.dims);
    TfLiteIntArrayFree(i2o_tensor_.dims);
    TfLiteIntArrayFree(r2i_tensor_.dims);
    TfLiteIntArrayFree(r2f_tensor_.dims);
    TfLiteIntArrayFree(r2c_tensor_.dims);
    TfLiteIntArrayFree(r2o_tensor_.dims);
    TfLiteIntArrayFree(layer_norm_input_tensor_.dims);
    TfLiteIntArrayFree(layer_norm_forget_tensor_.dims);
    TfLiteIntArrayFree(layer_norm_cell_tensor_.dims);
    TfLiteIntArrayFree(layer_norm_output_tensor_.dims);
    TfLiteIntArrayFree(input_gate_bias_tensor_.dims);
    TfLiteIntArrayFree(forget_gate_bias_tensor_.dims);
    TfLiteIntArrayFree(cell_gate_bias_tensor_.dims);
    TfLiteIntArrayFree(output_gate_bias_tensor_.dims);
    TfLiteIntArrayFree(projection_tensor_.dims);
    TfLiteIntArrayFree(projection_bias_tensor_.dims);
    TfLiteIntArrayFree(activation_tensor_.dims);
    TfLiteIntArrayFree(cell_tensor_.dims);
    TfLiteIntArrayFree(output_tensor_.dims);
  }

 protected:
  // Fills tensor->dims from `dims` and, when `data` is still empty,
  // zero-fills it to the product of the dims so the tensor has valid backing
  // storage.  NOTE(review): each call allocates a fresh dims array without
  // freeing the previous one, so calling the same accessor twice leaks;
  // the tests call each accessor once.
  template <typename T>
  void PackWeightToTensor(TfLiteTensor* tensor, std::vector<T>& data,
                          std::vector<int32_t> dims) {
    if (data.empty()) {
      int total = 1;
      for (int i = 0; i < dims.size(); ++i) {
        total *= dims[i];
      }
      for (int i = 0; i < total; ++i) {
        data.push_back(0);
      }
    }
    tensor->dims = TfLiteIntArrayCreate(dims.size());
    for (int i = 0; i < dims.size(); ++i) {
      tensor->dims->data[i] = dims[i];
    }
  }

  // Problem dimensions shared by every test in this file.
  const int n_batch_ = 2;
  const int n_input_ = 18;
  const int n_cell_ = 10;
  const int n_output_ = 6;

  std::vector<int32_t> input_size_ = {n_batch_, n_input_};
  TfLiteTensor input_tensor_;

  // Golden int8 weight data; values are arbitrary but fixed so the expected
  // outputs in the tests stay stable.
  std::vector<int8_t> i2i_ = {
      18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
      8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
      1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 1, 2, 3, 14, 5, 6,
      1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
  };
  std::vector<int32_t> i2i_size_ = {n_cell_, n_input_};
  TfLiteTensor i2i_tensor_;
  std::vector<int8_t> i2f_ = {
      1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
      8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
      1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 11, 2, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, -6, 1, 2, 3, 14, 5, 6,
      1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 13, 2, 3, 4, 5, 6,
      1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
  };
  std::vector<int32_t> i2f_size_ = {n_cell_, n_input_};
  TfLiteTensor i2f_tensor_;
  std::vector<int8_t> i2c_ = {
      1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
      1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 16, 1, 2, 3, 14, 5, 6,
      1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6,
      18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
      8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
      1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
  };
  std::vector<int32_t> i2c_size_ = {n_cell_, n_input_};
  TfLiteTensor i2c_tensor_;
  std::vector<int8_t> i2o_ = {
      1, 2, 3, 4, 5, 6, 1, 2, 3, 4, -5, 6, 1, 7, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6, -1, 2, 3, 4, 5, 6,
      1, 2, 3, 4, 3, 6, 1, 2, 6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      8, 2, 3, 4, 5, 6, 7, 2, 3, 4, 5, 6, 1, 2, 3, 14, 5, 6,
      18, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, -6, 1, 2, 3, 4, 5, 6,
      8, 2, 3, 4, 5, 6, 3, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, 2, 3, 4, 5, 6, 5, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 0,
      8, 2, 3, 4, 3, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, -4, 5, 6,
      1, 2, 3, -4, 5, 6, 1, 2, 3, 4, 5, 6, -1, 2, 3, 4, 5, 6,
      1, -2, 2, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 8, 5, -6,
  };
  std::vector<int32_t> i2o_size_ = {n_cell_, n_input_};
  TfLiteTensor i2o_tensor_;
  std::vector<int8_t> r2i_ = {
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
  };
  std::vector<int32_t> r2i_size_ = {n_cell_, n_output_};
  TfLiteTensor r2i_tensor_;
  std::vector<int8_t> r2f_ = {
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
  };
  std::vector<int32_t> r2f_size_ = {n_cell_, n_output_};
  TfLiteTensor r2f_tensor_;
  std::vector<int8_t> r2c_ = {
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
  };
  std::vector<int32_t> r2c_size_ = {n_cell_, n_output_};
  TfLiteTensor r2c_tensor_;
  std::vector<int8_t> r2o_ = {
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
  };
  std::vector<int32_t> r2o_size_ = {n_cell_, n_output_};
  TfLiteTensor r2o_tensor_;

  // Layer-norm coefficient shapes (data lives in derived fixtures).
  std::vector<int32_t> layer_norm_input_size_ = {n_cell_};
  TfLiteTensor layer_norm_input_tensor_;
  TfLiteTensor layer_norm_forget_tensor_;
  std::vector<int32_t> layer_norm_forget_size_ = {n_cell_};
  std::vector<int32_t> layer_norm_cell_size_ = {n_cell_};
  TfLiteTensor layer_norm_cell_tensor_;
  std::vector<int32_t> layer_norm_output_size_ = {n_cell_};
  TfLiteTensor layer_norm_output_tensor_;

  // Per-gate bias shapes (data lives in derived fixtures).
  std::vector<int32_t> input_gate_bias_size_ = {n_cell_};
  TfLiteTensor input_gate_bias_tensor_;
  std::vector<int32_t> forget_gate_bias_size_ = {n_cell_};
  TfLiteTensor forget_gate_bias_tensor_;
  std::vector<int32_t> cell_gate_bias_size_ = {n_cell_};
  TfLiteTensor cell_gate_bias_tensor_;
  std::vector<int32_t> output_gate_bias_size_ = {n_cell_};
  TfLiteTensor output_gate_bias_tensor_;

  std::vector<int8_t> projection_ = {
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
      8, 2, 3, 4, 5, 6, 1, 2, 3, 4,
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
  };
  std::vector<int32_t> projection_size_ = {n_cell_, n_output_};
  TfLiteTensor projection_tensor_;
  std::vector<int32_t> projection_bias_ = {
      16, 4, 5, 6, 1, 1
  };
  std::vector<int32_t> projection_bias_size_ = {n_output_};
  TfLiteTensor projection_bias_tensor_;

  // State / output tensor shapes shared by derived fixtures.
  std::vector<int32_t> activation_size_ = {n_batch_, n_output_};
  TfLiteTensor activation_tensor_;
  std::vector<int32_t> cell_size_ = {n_batch_, n_cell_};
  TfLiteTensor cell_tensor_;
  std::vector<int32_t> output_size_ = {n_batch_, n_output_};
  TfLiteTensor output_tensor_;
};
// Fixture for the fully-integer (8x8_16) LSTM kernel: adds the int8 input,
// int16 layer-norm coefficients, int32 gate biases, pre-computed quantization
// parameters and the scratch tensors EvalInteger8x8_16 requires.
class QuantizedLstmParam : public BaseLstmParam {
 public:
  // Runtime input, int8, shape {n_batch_, n_input_}.
  TfLiteTensor* GetInput() {
    PackWeightToTensor(&input_tensor_, input_, input_size_);
    input_tensor_.data.int8 = input_.data();
    return &input_tensor_;
  }
  // Layer-norm coefficient tensors, int16, shape {n_cell_}.
  TfLiteTensor* GetInputLayerNorm() {
    PackWeightToTensor(&layer_norm_input_tensor_, layer_norm_input_,
                       layer_norm_input_size_);
    layer_norm_input_tensor_.data.i16 = layer_norm_input_.data();
    return &layer_norm_input_tensor_;
  }
  TfLiteTensor* GetForgetLayerNorm() {
    PackWeightToTensor(&layer_norm_forget_tensor_, layer_norm_forget_,
                       layer_norm_forget_size_);
    layer_norm_forget_tensor_.data.i16 = layer_norm_forget_.data();
    return &layer_norm_forget_tensor_;
  }
  TfLiteTensor* GetCellLayerNorm() {
    PackWeightToTensor(&layer_norm_cell_tensor_, layer_norm_cell_,
                       layer_norm_cell_size_);
    layer_norm_cell_tensor_.data.i16 = layer_norm_cell_.data();
    return &layer_norm_cell_tensor_;
  }
  TfLiteTensor* GetOutputLayerNorm() {
    PackWeightToTensor(&layer_norm_output_tensor_, layer_norm_output_,
                       layer_norm_output_size_);
    layer_norm_output_tensor_.data.i16 = layer_norm_output_.data();
    return &layer_norm_output_tensor_;
  }
  // Per-gate bias tensors, int32, shape {n_cell_}.
  TfLiteTensor* GetInputBias() {
    PackWeightToTensor(&input_gate_bias_tensor_, input_gate_bias_,
                       input_gate_bias_size_);
    input_gate_bias_tensor_.data.i32 = input_gate_bias_.data();
    return &input_gate_bias_tensor_;
  }
  TfLiteTensor* GetForgetBias() {
    PackWeightToTensor(&forget_gate_bias_tensor_, forget_gate_bias_,
                       forget_gate_bias_size_);
    forget_gate_bias_tensor_.data.i32 = forget_gate_bias_.data();
    return &forget_gate_bias_tensor_;
  }
  TfLiteTensor* GetCellBias() {
    PackWeightToTensor(&cell_gate_bias_tensor_, cell_gate_bias_,
                       cell_gate_bias_size_);
    cell_gate_bias_tensor_.data.i32 = cell_gate_bias_.data();
    return &cell_gate_bias_tensor_;
  }
  TfLiteTensor* GetOutputBias() {
    PackWeightToTensor(&output_gate_bias_tensor_, output_gate_bias_,
                       output_gate_bias_size_);
    output_gate_bias_tensor_.data.i32 = output_gate_bias_.data();
    return &output_gate_bias_tensor_;
  }
  TfLiteTensor* GetProjectionBias() {
    PackWeightToTensor(&projection_bias_tensor_, projection_bias_,
                       projection_bias_size_);
    projection_bias_tensor_.data.i32 = projection_bias_.data();
    return &projection_bias_tensor_;
  }
  // Builds the pre-computed quantization parameters (per-gate multiplier/shift
  // pairs, clips, variance guards and effective biases) that the kernel
  // normally derives at Prepare() time.  All values are hard-coded golden
  // inputs matched to the weight data above.
  ops::builtin::lstm_eval::IntegerLstmParameter* GetQuantParam() {
    integer_lstm_param_.effective_input_to_input_scale_a = 1808677632;
    integer_lstm_param_.effective_input_to_input_scale_b = -1;
    integer_lstm_param_.effective_recurrent_to_input_scale_a = 1078887680;
    integer_lstm_param_.effective_recurrent_to_input_scale_b = -1;
    integer_lstm_param_.effective_cell_to_input_scale_a = 1073741824;
    integer_lstm_param_.effective_cell_to_input_scale_b = 1;
    integer_lstm_param_.effective_input_to_forget_scale_a = 1845996800;
    integer_lstm_param_.effective_input_to_forget_scale_b = -3;
    integer_lstm_param_.effective_recurrent_to_forget_scale_a = 1477412736;
    integer_lstm_param_.effective_recurrent_to_forget_scale_b = -2;
    integer_lstm_param_.effective_cell_to_forget_scale_a = 1073741824;
    integer_lstm_param_.effective_cell_to_forget_scale_b = 1;
    integer_lstm_param_.effective_input_to_cell_scale_a = 1648385408;
    integer_lstm_param_.effective_input_to_cell_scale_b = -2;
    // NOTE(review): trailing ',' here is the comma operator, not a typo that
    // changes behavior -- the next assignment still executes.
    integer_lstm_param_.effective_recurrent_to_cell_scale_a = 1185544192,
    integer_lstm_param_.effective_recurrent_to_cell_scale_b = -1;
    integer_lstm_param_.effective_input_to_output_scale_a = 1328153600;
    integer_lstm_param_.effective_input_to_output_scale_b = -1;
    integer_lstm_param_.effective_recurrent_to_output_scale_a = 1479582592;
    integer_lstm_param_.effective_recurrent_to_output_scale_b = -1;
    integer_lstm_param_.effective_cell_to_output_scale_a = 1073741824,  // comma operator; behaves like ';'
    integer_lstm_param_.effective_cell_to_output_scale_b = 1;
    integer_lstm_param_.effective_proj_scale_a = 1105682560;
    integer_lstm_param_.effective_proj_scale_b = -8;
    integer_lstm_param_.effective_hidden_scale_a = 0;
    integer_lstm_param_.effective_hidden_scale_b = 0;
    integer_lstm_param_.layer_norm_input_scale_a = 2011617664;
    integer_lstm_param_.layer_norm_input_scale_b = -11;
    integer_lstm_param_.layer_norm_forget_scale_a = 1968024960;
    integer_lstm_param_.layer_norm_forget_scale_b = -13;
    integer_lstm_param_.layer_norm_cell_scale_a = 1097334528,  // comma operator; behaves like ';'
    integer_lstm_param_.layer_norm_cell_scale_b = -12;
    integer_lstm_param_.layer_norm_output_scale_a = 1837163008;
    integer_lstm_param_.layer_norm_output_scale_b = -12;
    integer_lstm_param_.quantized_cell_clip = 20480;
    integer_lstm_param_.quantized_proj_clip = 0;
    integer_lstm_param_.cell_scale = -11;
    integer_lstm_param_.input_variance_guard = 1;
    integer_lstm_param_.forget_variance_guard = 2;
    integer_lstm_param_.cell_variance_guard = 2;
    integer_lstm_param_.output_variance_guard = 1;
    integer_lstm_param_.hidden_zp = 0;
    // Effective biases: one int32 per cell (per output for the projection),
    // filled with constants below.
    integer_lstm_param_.input_to_forget_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.recurrent_to_forget_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.input_to_cell_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.recurrent_to_cell_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.input_to_output_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.recurrent_to_output_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.input_to_input_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.recurrent_to_input_effective_bias.reset(
        new int32_t[n_cell_]);
    integer_lstm_param_.projection_effective_bias.reset(new int32_t[n_output_]);
    std::fill_n(integer_lstm_param_.input_to_forget_effective_bias.get(),
                n_cell_, 152);
    std::fill_n(integer_lstm_param_.recurrent_to_forget_effective_bias.get(),
                n_cell_, 315);
    std::fill_n(integer_lstm_param_.input_to_cell_effective_bias.get(), n_cell_,
                165);
    std::fill_n(integer_lstm_param_.recurrent_to_cell_effective_bias.get(),
                n_cell_, 1165);
    std::fill_n(integer_lstm_param_.input_to_output_effective_bias.get(),
                n_cell_, 159);
    std::fill_n(integer_lstm_param_.recurrent_to_output_effective_bias.get(),
                n_cell_, 915);
    std::fill_n(integer_lstm_param_.input_to_input_effective_bias.get(),
                n_cell_, -15);
    std::fill_n(integer_lstm_param_.recurrent_to_input_effective_bias.get(),
                n_cell_, 315);
    std::fill_n(integer_lstm_param_.projection_effective_bias.get(), n_output_,
                115);
    return &integer_lstm_param_;
  }
  // Scratch tensors required by EvalInteger8x8_16; scratch0-3 are int16,
  // scratch4 int8, scratch5 int32 -- all shaped {n_batch_, n_cell_}.
  TfLiteTensor* GetScratch0() {
    PackWeightToTensor(&scratch0_tensor_, scratch0_, scratch0_size_);
    scratch0_tensor_.data.i16 = scratch0_.data();
    return &scratch0_tensor_;
  }
  TfLiteTensor* GetScratch1() {
    PackWeightToTensor(&scratch1_tensor_, scratch1_, scratch1_size_);
    scratch1_tensor_.data.i16 = scratch1_.data();
    return &scratch1_tensor_;
  }
  TfLiteTensor* GetScratch2() {
    PackWeightToTensor(&scratch2_tensor_, scratch2_, scratch2_size_);
    scratch2_tensor_.data.i16 = scratch2_.data();
    return &scratch2_tensor_;
  }
  TfLiteTensor* GetScratch3() {
    PackWeightToTensor(&scratch3_tensor_, scratch3_, scratch3_size_);
    scratch3_tensor_.data.i16 = scratch3_.data();
    return &scratch3_tensor_;
  }
  TfLiteTensor* GetScratch4() {
    PackWeightToTensor(&scratch4_tensor_, scratch4_, scratch4_size_);
    scratch4_tensor_.data.int8 = scratch4_.data();
    return &scratch4_tensor_;
  }
  TfLiteTensor* GetScratch5() {
    PackWeightToTensor(&scratch5_tensor_, scratch5_, scratch5_size_);
    scratch5_tensor_.data.i32 = scratch5_.data();
    return &scratch5_tensor_;
  }
  // Output state (activation), int8; zero point 50 is asserted against by the
  // golden expectations in the test.
  TfLiteTensor* GetActivation() {
    PackWeightToTensor(&activation_tensor_, activation_, activation_size_);
    activation_tensor_.data.int8 = activation_.data();
    activation_tensor_.params.zero_point = 50;
    return &activation_tensor_;
  }
  TfLiteTensor* GetOutput() {
    PackWeightToTensor(&output_tensor_, output_, output_size_);
    output_tensor_.data.int8 = output_.data();
    return &output_tensor_;
  }
  TfLiteTensor* GetCell() {
    PackWeightToTensor(&cell_tensor_, cell_, cell_size_);
    cell_tensor_.data.i16 = cell_.data();
    return &cell_tensor_;
  }
  // Frees the dims arrays for the scratch tensors; the base destructor frees
  // the rest.
  ~QuantizedLstmParam() {
    TfLiteIntArrayFree(scratch0_tensor_.dims);
    TfLiteIntArrayFree(scratch1_tensor_.dims);
    TfLiteIntArrayFree(scratch2_tensor_.dims);
    TfLiteIntArrayFree(scratch3_tensor_.dims);
    TfLiteIntArrayFree(scratch4_tensor_.dims);
    TfLiteIntArrayFree(scratch5_tensor_.dims);
  }

 private:
  // Golden input / coefficient / bias data.
  std::vector<int8_t> input_ = {
      8, 2, 3, 4, 5, 6, 1, -2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
      1, 2, -3, 4, 5, 6, 1, 2, 3, 4, 5, 6, 1, 2, 3, 4, 5, 6,
  };
  std::vector<int16_t> layer_norm_input_ = {8, 2, 3, 4, 5, 6, 1, 2, 3, 4};
  std::vector<int16_t> layer_norm_forget_ = {
      1, 2, 3, 4, 7, 3, 4, -5, 6, 3,
  };
  std::vector<int16_t> layer_norm_cell_ = {
      6, 4, 5, 6, 1, 2, 3, 4, -5, 6,
  };
  std::vector<int16_t> layer_norm_output_ = {
      16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
  };
  std::vector<int32_t> input_gate_bias_ = {
      16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
  };
  std::vector<int32_t> forget_gate_bias_ = {
      16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
  };
  std::vector<int32_t> cell_gate_bias_ = {
      16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
  };
  std::vector<int32_t> output_gate_bias_ = {
      16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
  };
  // Empty: zero-filled on first access by PackWeightToTensor.
  std::vector<int8_t> activation_;
  // Initial cell state.
  std::vector<int16_t> cell_ = {
      16, 4, 5, 6, 1, 1, 3, 4, -5, 6,
      1, 14, 5, 6, 1, 1, 3, 4, -5, 6,
  };
  std::vector<int8_t> output_ = {
      1, 1, 3, 4, -5, 6,
      1, 4, 3, 4, -5, 6,
  };
  ops::builtin::lstm_eval::IntegerLstmParameter integer_lstm_param_;
  std::vector<int16_t> scratch0_;
  std::vector<int32_t> scratch0_size_ = {n_batch_, n_cell_};
  TfLiteTensor scratch0_tensor_;
  std::vector<int16_t> scratch1_;
  std::vector<int32_t> scratch1_size_ = {n_batch_, n_cell_};
  TfLiteTensor scratch1_tensor_;
  std::vector<int16_t> scratch2_;
  std::vector<int32_t> scratch2_size_ = {n_batch_, n_cell_};
  TfLiteTensor scratch2_tensor_;
  std::vector<int16_t> scratch3_;
  std::vector<int32_t> scratch3_size_ = {n_batch_, n_cell_};
  TfLiteTensor scratch3_tensor_;
  std::vector<int8_t> scratch4_;
  std::vector<int32_t> scratch4_size_ = {n_batch_, n_cell_};
  TfLiteTensor scratch4_tensor_;
  std::vector<int32_t> scratch5_;
  std::vector<int32_t> scratch5_size_ = {n_batch_, n_cell_};
  TfLiteTensor scratch5_tensor_;
};
// Golden-value test for the fully-integer (8x8_16) LSTM kernel: runs one
// invocation over the fixture's hard-coded weights/quantization parameters
// and checks the resulting cell state, activation (output state) and output
// against precomputed expectations.
void TestOneFullyQuantizedLSTM() {
  CpuBackendContext context;
  QuantizedLstmParam one_parameter;
  auto activation = one_parameter.GetActivation();
  auto output = one_parameter.GetOutput();
  auto cell = one_parameter.GetCell();
  auto param = one_parameter.GetQuantParam();
  ops::builtin::lstm_eval::EvalInteger8x8_16(
      one_parameter.GetInput(), one_parameter.Geti2i(), one_parameter.Geti2f(),
      one_parameter.Geti2c(), one_parameter.Geti2o(), one_parameter.Getr2i(),
      one_parameter.Getr2f(), one_parameter.Getr2c(), one_parameter.Getr2o(),
      // NOTE(review): the three nullptrs are presumably the optional
      // cell_to_* (peephole) weight tensors -- confirm against the
      // EvalInteger8x8_16 signature in lstm_eval.h.
      nullptr, nullptr, nullptr, one_parameter.GetInputLayerNorm(),
      one_parameter.GetForgetLayerNorm(), one_parameter.GetCellLayerNorm(),
      one_parameter.GetOutputLayerNorm(), one_parameter.GetInputBias(),
      one_parameter.GetForgetBias(), one_parameter.GetCellBias(),
      one_parameter.GetOutputBias(), one_parameter.GetProjection(),
      one_parameter.GetProjectionBias(), nullptr, true,
      true, param, activation, cell, output,
      one_parameter.GetScratch0(), one_parameter.GetScratch1(),
      one_parameter.GetScratch2(), one_parameter.GetScratch3(),
      one_parameter.GetScratch4(), one_parameter.GetScratch5(), &context);
  // Expected cell state: n_batch(2) x n_cell(10) = 20 values.
  const std::vector<int16_t> expected_cell = {
      7, 1, 3, 2, 0, 1, 0, 2, -2, 4, 1, 6, 4, 3, 0, 1, 0, 2, -2, 4,
  };
  // Expected activation/output: n_batch(2) x n_output(6) = 12 values, all
  // equal to the activation tensor's zero point (50) set in GetActivation().
  const std::vector<int8_t> expected_activation = {
      50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50,
  };
  EXPECT_TRUE(ArrayEq(cell->data.i16, expected_cell.data(), 20));
  EXPECT_TRUE(ArrayEq(activation->data.int8, expected_activation.data(), 12));
  EXPECT_TRUE(ArrayEq(output->data.int8, expected_activation.data(), 12));
}
// gtest entry point: runs the fully-quantized (8x8_16) LSTM golden test.
TEST(TestOneFullyQuantizedLSTM, TestOneFullyQuantizedLSTM) {
  TestOneFullyQuantizedLSTM();
}
class HybridLstmParam : public BaseLstmParam {
public:
TfLiteTensor* GetFloatOutput() {
PackWeightToTensor(&output_tensor_, output_float_, output_size_);
output_tensor_.data.f = output_float_.data();
return &output_tensor_;
}
const TfLiteLSTMParams GetLSTMParam() {
return {kTfLiteActRelu, 0, 0, kTfLiteLSTMFullKernel, true};
}
TfLiteTensor* GetScratchBuffer() {
PackWeightToTensor(&scratch_buffer_tensor_, scratch_buffer_,
scratch_buffer_size_);
scratch_buffer_tensor_.data.f = scratch_buffer_.data();
return &scratch_buffer_tensor_;
}
TfLiteTensor* GetInputScalingFactors() {
PackWeightToTensor(&input_sf_tensor_, input_sf_,
quantization_extra_scratch_buffer_sizes_);
input_sf_tensor_.data.f = input_sf_.data();
return &input_sf_tensor_;
}
TfLiteTensor* GetAuxInputScalingFactors() {
PackWeightToTensor(&aux_input_sf_tensor_, aux_input_sf_,
quantization_extra_scratch_buffer_sizes_);
aux_input_sf_tensor_.data.f = aux_input_sf_.data();
return &aux_input_sf_tensor_;
}
TfLiteTensor* GetOutputStateScalingFactors() {
PackWeightToTensor(&output_state_sf_tensor_, output_state_sf_,
quantization_extra_scratch_buffer_sizes_);
output_state_sf_tensor_.data.f = output_state_sf_.data();
return &output_state_sf_tensor_;
}
TfLiteTensor* GetProdScalingFactors() {
PackWeightToTensor(&prod_scaling_factors_tensor_, prod_scaling_factors_,
quantization_extra_scratch_buffer_sizes_);
prod_scaling_factors_tensor_.data.f = prod_scaling_factors_.data();
return &prod_scaling_factors_tensor_;
}
TfLiteTensor* GetInputQuantized() {
PackWeightToTensor(&input_quantized_tensor_, input_quantized_, input_size_);
input_quantized_tensor_.data.int8 = input_quantized_.data();
return &input_quantized_tensor_;
}
TfLiteTensor* GetActivationStateQuantized() {
PackWeightToTensor(&activation_quantized_tensor_, activation_quantized_,
activation_size_);
activation_quantized_tensor_.data.int8 = activation_quantized_.data();
return &activation_quantized_tensor_;
}
TfLiteTensor* GetCellStateQuantized() {
PackWeightToTensor(&cell_quantized_tensor_, cell_quantized_, cell_size_);
cell_quantized_tensor_.data.int8 = cell_quantized_.data();
return &cell_quantized_tensor_;
}
TfLiteTensor* GetInputZeroPoints() {
PackWeightToTensor(&input_zp_tensor_, input_zp_,
quantization_extra_scratch_buffer_sizes_);
input_zp_tensor_.data.i32 = input_zp_.data();
return &input_zp_tensor_;
}
TfLiteTensor* GetAuxInputZeroPoints() {
PackWeightToTensor(&aux_input_zp_tensor_, aux_input_zp_,
quantization_extra_scratch_buffer_sizes_);
aux_input_zp_tensor_.data.i32 = aux_input_zp_.data();
return &aux_input_zp_tensor_;
}
TfLiteTensor* GetOutputStateZeroPoints() {
PackWeightToTensor(&output_state_zp_tensor_, output_state_zp_,
quantization_extra_scratch_buffer_sizes_);
output_state_zp_tensor_.data.i32 = output_state_zp_.data();
return &output_state_zp_tensor_;
}
TfLiteTensor* GetRowSums() {
PackWeightToTensor(&row_sums_tensor_, row_sums_, row_sums_size_);
row_sums_tensor_.data.i32 = row_sums_.data();
return &row_sums_tensor_;
}
TfLiteTensor* GetFloatInput() {
PackWeightToTensor(&input_tensor_, input_float_, input_size_);
input_tensor_.data.f = input_float_.data();
return &input_tensor_;
}
TfLiteTensor* GetActivation() {
PackWeightToTensor(&activation_tensor_, activation_state_,
activation_size_);
activation_tensor_.data.f = activation_state_.data();
return &activation_tensor_;
}
TfLiteTensor* GetCell() {
PackWeightToTensor(&cell_tensor_, cell_state_, cell_size_);
cell_tensor_.data.f = cell_state_.data();
return &cell_tensor_;
}
TfLiteTensor* GetAccumScratchBuffer() {
PackWeightToTensor(&accum_scratch_tensor_, accum_scratch_,
accum_scratch_size_);
accum_scratch_tensor_.data.i32 = accum_scratch_.data();
return &accum_scratch_tensor_;
}
TfLiteTensor* GetInputBias() {
PackWeightToTensor(&input_gate_bias_tensor_, input_float_bias_,
input_gate_bias_size_);
input_gate_bias_tensor_.data.f = input_float_bias_.data();
return &input_gate_bias_tensor_;
}
TfLiteTensor* GetForgetBias() {
PackWeightToTensor(&forget_gate_bias_tensor_, forget_float_bias_,
forget_gate_bias_size_);
forget_gate_bias_tensor_.data.f = forget_float_bias_.data();
return &forget_gate_bias_tensor_;
}
TfLiteTensor* GetCellBias() {
PackWeightToTensor(&cell_gate_bias_tensor_, cell_float_bias_,
cell_gate_bias_size_);
cell_gate_bias_tensor_.data.f = cell_float_bias_.data();
return &cell_gate_bias_tensor_;
}
TfLiteTensor* GetOutputBias() {
PackWeightToTensor(&output_gate_bias_tensor_, output_float_bias_,
output_gate_bias_size_);
output_gate_bias_tensor_.data.f = output_float_bias_.data();
return &output_gate_bias_tensor_;
}
TfLiteTensor* GetProjectionBias() {
PackWeightToTensor(&projection_bias_tensor_, projection_float_bias_,
projection_bias_size_);
projection_bias_tensor_.data.f = projection_float_bias_.data();
return &projection_bias_tensor_;
}
int GetNumRowSums() { return n_row_sums_; }
TfLiteTensor* GetInputLayerNorm() {
PackWeightToTensor(&layer_norm_input_tensor_, layer_norm_float_input_,
layer_norm_input_size_);
layer_norm_input_tensor_.data.f = layer_norm_float_input_.data();
return &layer_norm_input_tensor_;
}
TfLiteTensor* GetForgetLayerNorm() {
PackWeightToTensor(&layer_norm_forget_tensor_, layer_norm_float_forget_,
layer_norm_forget_size_);
layer_norm_forget_tensor_.data.f = layer_norm_float_forget_.data();
return &layer_norm_forget_tensor_;
}
TfLiteTensor* GetCellLayerNorm() {
PackWeightToTensor(&layer_norm_cell_tensor_, layer_norm_float_cell_,
layer_norm_cell_size_);
layer_norm_cell_tensor_.data.f = layer_norm_float_cell_.data();
return &layer_norm_cell_tensor_;
}
TfLiteTensor* GetOutputLayerNorm() {
PackWeightToTensor(&layer_norm_output_tensor_, layer_norm_float_output_,
layer_norm_output_size_);
layer_norm_output_tensor_.data.f = layer_norm_float_output_.data();
return &layer_norm_output_tensor_;
}
static TfLiteTensor* addScale(TfLiteTensor* t, float scale) {
t->params.scale = scale;
return t;
}
~HybridLstmParam() {
TfLiteIntArrayFree(scratch_buffer_tensor_.dims);
TfLiteIntArrayFree(accum_scratch_tensor_.dims);
TfLiteIntArrayFree(input_sf_tensor_.dims);
TfLiteIntArrayFree(aux_input_sf_tensor_.dims);
TfLiteIntArrayFree(output_state_sf_tensor_.dims);
TfLiteIntArrayFree(prod_scaling_factors_tensor_.dims);
TfLiteIntArrayFree(input_quantized_tensor_.dims);
TfLiteIntArrayFree(activation_quantized_tensor_.dims);
TfLiteIntArrayFree(cell_quantized_tensor_.dims);
TfLiteIntArrayFree(input_zp_tensor_.dims);
TfLiteIntArrayFree(aux_input_zp_tensor_.dims);
TfLiteIntArrayFree(output_state_zp_tensor_.dims);
TfLiteIntArrayFree(row_sums_tensor_.dims);
}
private:
const int n_row_sums_ = 9;
std::vector<float> scratch_buffer_;
std::vector<int32_t> scratch_buffer_size_ = {n_batch_, n_cell_ * 4};
TfLiteTensor scratch_buffer_tensor_;
std::vector<int32_t> quantization_extra_scratch_buffer_sizes_ = {n_batch_};
std::vector<float> input_sf_;
TfLiteTensor input_sf_tensor_;
std::vector<float> aux_input_sf_;
TfLiteTensor aux_input_sf_tensor_;
std::vector<float> output_state_sf_;
TfLiteTensor output_state_sf_tensor_;
std::vector<float> prod_scaling_factors_;
TfLiteTensor prod_scaling_factors_tensor_;
std::vector<int32_t> input_zp_;
TfLiteTensor input_zp_tensor_;
std::vector<int32_t> aux_input_zp_;
TfLiteTensor aux_input_zp_tensor_;
std::vector<int32_t> output_state_zp_;
TfLiteTensor output_state_zp_tensor_;
std::vector<int8_t> input_quantized_;
TfLiteTensor input_quantized_tensor_;
std::vector<int8_t> activation_quantized_;
TfLiteTensor activation_quantized_tensor_;
std::vector<int8_t> cell_quantized_;
TfLiteTensor cell_quantized_tensor_;
std::vector<float> cell_state_ = {
16, 4, 5, 6, 1, 1, 3, 4, -5, 6, 1, 14, 5, 6, 1, 1, 3, 4, -5, 6,
};
std::vector<int32_t> row_sums_;
std::vector<int32_t> row_sums_size_ = {n_row_sums_, n_cell_};
TfLiteTensor row_sums_tensor_;
std::vector<float> activation_state_;
std::vector<int32_t> accum_scratch_;
std::vector<int32_t> accum_scratch_size_ = {n_cell_, n_batch_};
TfLiteTensor accum_scratch_tensor_;
std::vector<float> output_float_ = {
1, 1, 3, 4, -5, 6,
1, 4, 3, 4, -5, 6,
};
std::vector<float> input_float_ = {
6.06, 7.66, 7.10, 9.32, 3.85, 0.33, 7.15, 1.56, 9.54,
5.30, 4.53, 0.19, 1.83, 4.60, 0.84, 5.08, 4.37, 9.92,
4.08, 3.79, 1.17, 8.99, 0.14, 9.22, 3.18, 2.97, 7.53,
0.59, 9.89, 9.13, 7.68, 0.63, 2.15, 4.31, 7.20, 4.09,
};
std::vector<float> input_float_bias_ = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
std::vector<float> forget_float_bias_ = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
std::vector<float> cell_float_bias_ = {
-11, -7, -4, -5, -1, -1, -2, -3.5, -3, -4,
};
std::vector<float> output_float_bias_ = {0.16, 0.4, 0.5, 0.6, 0.1,
0.1, 0.3, 0.4, -0.5, 0.6};
std::vector<float> projection_float_bias_ = {0, 0, 0, 0, 0, 0};
std::vector<float> layer_norm_float_input_ = {8, 2, 3, 4, 5, 6, 1, -2, 3, 4};
std::vector<float> layer_norm_float_forget_ = {
0.1, 0.2, 0.3, 0.4, 0.7, 0.3, 0.4, -0.5, 0.6, 0.3,
};
std::vector<float> layer_norm_float_cell_ = {
0.6, 0.4, 0.5, 0.6, 0.1, 0.2, 0.3, 0.4, -0.5, 0.6,
};
std::vector<float> layer_norm_float_output_ = {
0.6, 0.4, 0.5, 0.6, 0.1, 0.2, 0.3, 0.4, -0.5, 0.6,
};
};
void TestOneHybridAsymmLSTM() {
CpuBackendContext context;
HybridLstmParam one_parameter;
auto activation = one_parameter.GetActivation();
auto output = one_parameter.GetFloatOutput();
auto cell = one_parameter.GetCell();
auto param = one_parameter.GetLSTMParam();
bool compute_row_sums = true;
constexpr float kDefaultScale = 18.0;
ops::builtin::lstm_eval::EvalHybrid(
one_parameter.GetFloatInput(),
HybridLstmParam::addScale(one_parameter.Geti2i(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Geti2f(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Geti2c(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Geti2o(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2i(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2f(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2c(), kDefaultScale), nullptr,
HybridLstmParam::addScale(one_parameter.Getr2o(), kDefaultScale), nullptr,
nullptr,
nullptr,
nullptr, one_parameter.GetInputLayerNorm(),
one_parameter.GetForgetLayerNorm(), one_parameter.GetCellLayerNorm(),
one_parameter.GetOutputLayerNorm(),
nullptr,
nullptr,
nullptr,
nullptr,
nullptr, one_parameter.GetInputBias(),
one_parameter.GetForgetBias(), one_parameter.GetCellBias(),
one_parameter.GetOutputBias(),
HybridLstmParam::addScale(one_parameter.GetProjection(), 1.0), nullptr,
one_parameter.GetProjectionBias(), ¶m,
true,
true,
0, one_parameter.GetScratchBuffer(),
one_parameter.GetInputScalingFactors(),
one_parameter.GetAuxInputScalingFactors(),
one_parameter.GetOutputStateScalingFactors(),
one_parameter.GetProdScalingFactors(),
nullptr, one_parameter.GetInputQuantized(),
nullptr,
one_parameter.GetActivationStateQuantized(),
one_parameter.GetCellStateQuantized(), activation, cell,
one_parameter.GetAccumScratchBuffer(), output,
one_parameter.GetInputZeroPoints(), one_parameter.GetAuxInputZeroPoints(),
one_parameter.GetOutputStateZeroPoints(), one_parameter.GetRowSums(),
one_parameter.GetNumRowSums(), &compute_row_sums,
false,
false,
false,
false, &context);
const std::vector<float> expected_cell = {
7.83134, 1.96158, 2.18285, 3.28739, 0.483214,
0.618206, 1.21539, 1.4052, -3.17735, 2.24296,
0.498944, 6.91104, 1.74126, 3.28993, 0.580477,
0.489936, 1.2527, 1.50157, -3.71849, 2.76743,
};
const std::vector<float> expected_activation = {
53.0403, 59.3623, 24.8493, 53.0403, 59.3623, 24.8493,
36.7559, 57.5202, 29.7217, 36.7559, 57.5202, 29.7217,
};
EXPECT_TRUE(ArrayFloatNear(cell->data.f, expected_cell.data(), 20, 1e-2));
EXPECT_TRUE(
ArrayFloatNear(activation->data.f, expected_activation.data(), 12, 2e-4));
EXPECT_TRUE(
ArrayFloatNear(output->data.f, expected_activation.data(), 12, 2e-4));
}
TEST(TestOneHybridAsymmLSTM, TestOneHybridAsymmLSTM) {
TestOneHybridAsymmLSTM();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lstm_eval.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/lstm_eval_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5a011d6-a644-480a-9873-2b3562db0ce6 | cpp | google/arolla | slice_qtype | arolla/qtype/slice_qtype.cc | arolla/qtype/slice_qtype_test.cc | #include "arolla/qtype/slice_qtype.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/repr.h"
namespace arolla {
namespace {
std::string SliceQTypeName(QTypePtr start, QTypePtr stop, QTypePtr step) {
return absl::StrCat("slice<", JoinTypeNames({start, stop, step}), ">");
}
class SliceQType final : public BasicDerivedQType {
public:
SliceQType(QTypePtr start, QTypePtr stop, QTypePtr step)
: BasicDerivedQType(ConstructorArgs{
.name = SliceQTypeName(start, stop, step),
.base_qtype = MakeTupleQType({start, stop, step}),
.qtype_specialization_key =
std::string(GetSliceQTypeSpecializationKey()),
}) {}
ReprToken UnsafeReprToken(const void* source) const override {
return ReprToken{
absl::StrCat("slice", GetBaseQType()->UnsafeReprToken(source).str)};
}
};
class SliceQTypeRegistry {
public:
static SliceQTypeRegistry* instance() {
static absl::NoDestructor<SliceQTypeRegistry> result;
return result.get();
}
QTypePtr GetQType(QTypePtr start, QTypePtr stop, QTypePtr step)
ABSL_LOCKS_EXCLUDED(lock_) {
{
absl::ReaderMutexLock guard(&lock_);
if (const auto it = registry_.find({start, stop, step});
it != registry_.end()) {
return it->second.get();
}
}
auto slice_qtype = std::make_unique<SliceQType>(start, stop, step);
absl::MutexLock guard(&lock_);
return registry_.try_emplace({start, stop, step}, std::move(slice_qtype))
.first->second.get();
}
private:
using RegistryKey = std::tuple<QTypePtr, QTypePtr, QTypePtr>;
absl::Mutex lock_;
absl::flat_hash_map<RegistryKey, std::unique_ptr<SliceQType>> registry_
ABSL_GUARDED_BY(lock_);
};
}
bool IsSliceQType(const QType* qtype) {
return fast_dynamic_downcast_final<const SliceQType*>(qtype) != nullptr;
}
QTypePtr MakeSliceQType(QTypePtr start, QTypePtr stop, QTypePtr step) {
return SliceQTypeRegistry::instance()->GetQType(start, stop, step);
}
absl::string_view GetSliceQTypeSpecializationKey() {
return "::arolla::SliceQType";
}
} | #include "arolla/qtype/slice_qtype.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/bytes.h"
namespace arolla::testing {
namespace {
TEST(SliceQType, MakeSliceQType) {
auto start = GetQType<int32_t>();
auto stop = GetQType<double>();
auto step = GetQType<Bytes>();
auto qtype = MakeSliceQType(start, stop, step);
EXPECT_EQ(qtype->name(), "slice<INT32,FLOAT64,BYTES>");
auto derived_qtype_interface =
dynamic_cast<const DerivedQTypeInterface*>(qtype);
ASSERT_NE(derived_qtype_interface, nullptr);
auto tuple_qtype = MakeTupleQType({start, stop, step});
EXPECT_EQ(derived_qtype_interface->GetBaseQType(), tuple_qtype);
{
auto qtype2 = MakeSliceQType(start, stop, step);
EXPECT_EQ(qtype, qtype2);
}
{
auto qtype2 = MakeSliceQType(start, stop, start);
EXPECT_EQ(qtype2->name(), "slice<INT32,FLOAT64,INT32>");
EXPECT_NE(qtype, qtype2);
}
}
TEST(SliceQType, IsSliceQType) {
auto start = GetQType<int32_t>();
auto stop = GetQType<double>();
auto step = GetQType<Bytes>();
auto tuple_qtype = MakeTupleQType({start, stop, step});
EXPECT_FALSE(IsSliceQType(tuple_qtype));
auto qtype = MakeSliceQType(start, stop, step);
EXPECT_TRUE(IsSliceQType(qtype));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/slice_qtype.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/slice_qtype_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
138ea3dc-4705-493e-91f7-16b9d6230197 | cpp | abseil/abseil-cpp | atomic_hook | absl/base/internal/atomic_hook.h | absl/base/internal/atomic_hook_test.cc | #ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
#include <atomic>
#include <cassert>
#include <cstdint>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if defined(_MSC_VER) && !defined(__clang__)
#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
#else
#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
#endif
#if defined(_MSC_VER)
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
#else
#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
template <typename T>
class AtomicHook;
#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT
#else
#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
#endif
template <typename ReturnType, typename... Args>
class AtomicHook<ReturnType (*)(Args...)> {
public:
using FnPtr = ReturnType (*)(Args...);
constexpr AtomicHook() : AtomicHook(DummyFunction) {}
#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(default_fn), default_fn_(default_fn) {}
#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
explicit constexpr AtomicHook(FnPtr default_fn)
: hook_(kUninitialized), default_fn_(default_fn) {}
#else
explicit constexpr AtomicHook(FnPtr default_fn)
: default_fn_(default_fn) {
static_assert(kUninitialized == 0, "here we rely on zero-initialization");
}
#endif
void Store(FnPtr fn) {
bool success = DoStore(fn);
static_cast<void>(success);
assert(success);
}
template <typename... CallArgs>
ReturnType operator()(CallArgs&&... args) const {
return DoLoad()(std::forward<CallArgs>(args)...);
}
FnPtr Load() const {
FnPtr ptr = DoLoad();
return (ptr == DummyFunction) ? nullptr : ptr;
}
private:
static ReturnType DummyFunction(Args...) {
return ReturnType();
}
#if ABSL_HAVE_WORKING_ATOMIC_POINTER
FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
bool DoStore(FnPtr fn) {
assert(fn);
FnPtr expected = default_fn_;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
const bool same_value_already_stored = (expected == fn);
return store_succeeded || same_value_already_stored;
}
std::atomic<FnPtr> hook_;
#else
static constexpr intptr_t kUninitialized = 0;
static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
"intptr_t can't contain a function pointer");
FnPtr DoLoad() const {
const intptr_t value = hook_.load(std::memory_order_acquire);
if (value == kUninitialized) {
return default_fn_;
}
return reinterpret_cast<FnPtr>(value);
}
bool DoStore(FnPtr fn) {
assert(fn);
const auto value = reinterpret_cast<intptr_t>(fn);
intptr_t expected = kUninitialized;
const bool store_succeeded = hook_.compare_exchange_strong(
expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
const bool same_value_already_stored = (expected == value);
return store_succeeded || same_value_already_stored;
}
std::atomic<intptr_t> hook_;
#endif
const FnPtr default_fn_;
};
#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/base/internal/atomic_hook.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/internal/atomic_hook_test_helper.h"
namespace {
using ::testing::Eq;
int value = 0;
void TestHook(int x) { value = x; }
TEST(AtomicHookTest, NoDefaultFunction) {
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
void (*)(int)>
hook;
value = 0;
EXPECT_TRUE(hook.Load() == nullptr);
EXPECT_EQ(value, 0);
hook(1);
EXPECT_EQ(value, 0);
hook.Store(TestHook);
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 0);
hook(1);
EXPECT_EQ(value, 1);
hook.Store(TestHook);
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 1);
hook(2);
EXPECT_EQ(value, 2);
}
TEST(AtomicHookTest, WithDefaultFunction) {
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
void (*)(int)>
hook(TestHook);
value = 0;
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 0);
hook(1);
EXPECT_EQ(value, 1);
hook.Store(TestHook);
EXPECT_TRUE(hook.Load() == TestHook);
EXPECT_EQ(value, 1);
hook(2);
EXPECT_EQ(value, 2);
}
ABSL_CONST_INIT int override_func_calls = 0;
void OverrideFunc() { override_func_calls++; }
static struct OverrideInstaller {
OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); }
} override_installer;
TEST(AtomicHookTest, DynamicInitFromAnotherTU) {
EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0));
EXPECT_THAT(override_func_calls, Eq(0));
absl::atomic_hook_internal::func();
EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0));
EXPECT_THAT(override_func_calls, Eq(1));
EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/atomic_hook.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/internal/atomic_hook_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
c504c794-17d6-46c9-82b6-e56d0e7dac4f | cpp | tensorflow/tensorflow | serdes | third_party/xla/xla/python/ifrt/serdes.cc | third_party/xla/xla/python/ifrt/serdes_test.cc | #include "xla/python/ifrt/serdes.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
struct Registry {
absl::Mutex mu;
absl::flat_hash_map<const void*, SerDes*> type_id_to_serdes
ABSL_GUARDED_BY(mu);
absl::flat_hash_map<absl::string_view, SerDes*> name_to_serdes
ABSL_GUARDED_BY(mu);
};
Registry* registry() {
static auto* r = new Registry();
return r;
}
}
char Serializable::ID = 0;
char DeserializeOptions::ID = 0;
char SerDes::ID = 0;
void RegisterSerDes(const void* type_id, std::unique_ptr<SerDes> serdes) {
Registry* const r = registry();
absl::MutexLock l(&r->mu);
CHECK(r->type_id_to_serdes.insert({type_id, serdes.get()}).second)
<< "xla::ifrt::SerDes cannot be registered more than once for the same "
"type id: "
<< type_id;
const absl::string_view name = serdes->type_name();
CHECK(r->name_to_serdes.insert({name, serdes.get()}).second)
<< "xla::ifrt::SerDes cannot be registered more than once for the same "
"name: "
<< name;
serdes.release();
}
absl::StatusOr<Serialized> Serialize(Serializable& serializable) {
SerDes* serdes;
{
Registry* const r = registry();
absl::MutexLock l(&r->mu);
auto it = r->type_id_to_serdes.find(serializable.dynamicClassID());
if (it == r->type_id_to_serdes.end()) {
return absl::UnimplementedError(
"Serialize call failed. Serializable has no associated SerDes "
"implementation");
}
serdes = it->second;
}
TF_ASSIGN_OR_RETURN(std::string data, serdes->Serialize(serializable));
Serialized proto;
proto.set_type_name(std::string(serdes->type_name()));
proto.set_data(std::move(data));
return proto;
}
namespace serdes_internal {
absl::StatusOr<std::unique_ptr<Serializable>> DeserializeUnchecked(
const Serialized& serialized, std::unique_ptr<DeserializeOptions> options) {
SerDes* serdes;
{
Registry* const r = registry();
absl::MutexLock l(&r->mu);
auto it = r->name_to_serdes.find(serialized.type_name());
if (it == r->name_to_serdes.end()) {
return absl::UnimplementedError(absl::StrCat(
"Deserialize call failed. Serializable has no associated SerDes ",
"implementation. type_name: ", serialized.type_name()));
}
serdes = it->second;
}
return serdes->Deserialize(serialized.data(), std::move(options));
}
}
}
} | #include "xla/python/ifrt/serdes.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/serdes.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
using ::tsl::testing::StatusIs;
struct TestNumberDeserializeOptions;
struct TestNumber : llvm::RTTIExtends<TestNumber, Serializable> {
using DeserializeOptions = TestNumberDeserializeOptions;
int number;
explicit TestNumber(int number) : number(number) {}
static char ID;
};
[[maybe_unused]] char TestNumber::ID = 0;
struct TestNumberDeserializeOptions
: llvm::RTTIExtends<TestNumberDeserializeOptions, DeserializeOptions> {
absl::Status injected_failure;
static char ID;
};
[[maybe_unused]] char TestNumberDeserializeOptions::ID = 0;
class TestNumberSerDes : public llvm::RTTIExtends<TestNumberSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::TestNumber";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const TestNumber& obj = llvm::cast<TestNumber>(serializable);
return absl::StrCat(obj.number);
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
if (options != nullptr) {
auto* deserialize_options =
llvm::cast<TestNumberDeserializeOptions>(options.get());
TF_RETURN_IF_ERROR(deserialize_options->injected_failure);
}
int number;
if (!absl::SimpleAtoi(serialized, &number)) {
return absl::DataLossError("Unable to parse serialized TestNumber");
}
return std::make_unique<TestNumber>(number);
}
static char ID;
};
[[maybe_unused]] char TestNumberSerDes::ID = 0;
class TestNumberTest : public testing::Test {
protected:
static void SetUpTestSuite() {
RegisterSerDes<TestNumber>(std::make_unique<TestNumberSerDes>());
}
};
TEST_F(TestNumberTest, RoundTrip) {
auto obj = std::make_unique<TestNumber>(1234);
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*obj));
TF_ASSERT_OK_AND_ASSIGN(
auto deserialized,
Deserialize<TestNumber>(serialized, nullptr));
EXPECT_EQ(obj->number, deserialized->number);
}
TEST_F(TestNumberTest, WithOptions) {
auto obj = std::make_unique<TestNumber>(1234);
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(*obj));
auto options = std::make_unique<TestNumberDeserializeOptions>();
options->injected_failure = absl::InternalError("injected failure");
EXPECT_THAT(Deserialize<TestNumber>(serialized, std::move(options)),
StatusIs(absl::StatusCode::kInternal, "injected failure"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a766a7d9-fcbf-441a-9870-f271207b1bd0 | cpp | google/quiche | wire_serialization | quiche/common/wire_serialization.h | quiche/common/wire_serialization_test.cc | #ifndef QUICHE_COMMON_WIRE_SERIALIZATION_H_
#define QUICHE_COMMON_WIRE_SERIALIZATION_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_data_writer.h"
#include "quiche/common/quiche_status_utils.h"
namespace quiche {
template <typename T>
class QUICHE_NO_EXPORT SerializeIntoWriterStatus {
public:
static_assert(std::is_trivially_copyable_v<T> && sizeof(T) <= 32,
"The types passed into SerializeInto() APIs are passed by "
"value; if your type has non-trivial copy costs, it should be "
"wrapped into a type that carries a pointer");
using Type = decltype(std::declval<T>().SerializeIntoWriter(
std::declval<QuicheDataWriter&>()));
static constexpr bool kIsBool = std::is_same_v<Type, bool>;
static constexpr bool kIsStatus = std::is_same_v<Type, absl::Status>;
static_assert(
kIsBool || kIsStatus,
"SerializeIntoWriter() has to return either a bool or an absl::Status");
static ABSL_ATTRIBUTE_ALWAYS_INLINE Type OkValue() {
if constexpr (kIsStatus) {
return absl::OkStatus();
} else {
return true;
}
}
};
inline ABSL_ATTRIBUTE_ALWAYS_INLINE bool IsWriterStatusOk(bool status) {
return status;
}
inline ABSL_ATTRIBUTE_ALWAYS_INLINE bool IsWriterStatusOk(
const absl::Status& status) {
return status.ok();
}
template <typename T>
class QUICHE_EXPORT WireFixedSizeIntBase {
public:
using DataType = T;
static_assert(std::is_integral_v<DataType>,
"WireFixedSizeIntBase is only usable with integral types");
explicit WireFixedSizeIntBase(T value) { value_ = value; }
size_t GetLengthOnWire() const { return sizeof(T); }
T value() const { return value_; }
private:
T value_;
};
class QUICHE_EXPORT WireUint8 : public WireFixedSizeIntBase<uint8_t> {
public:
using WireFixedSizeIntBase::WireFixedSizeIntBase;
bool SerializeIntoWriter(QuicheDataWriter& writer) const {
return writer.WriteUInt8(value());
}
};
class QUICHE_EXPORT WireUint16 : public WireFixedSizeIntBase<uint16_t> {
public:
using WireFixedSizeIntBase::WireFixedSizeIntBase;
bool SerializeIntoWriter(QuicheDataWriter& writer) const {
return writer.WriteUInt16(value());
}
};
class QUICHE_EXPORT WireUint32 : public WireFixedSizeIntBase<uint32_t> {
public:
using WireFixedSizeIntBase::WireFixedSizeIntBase;
bool SerializeIntoWriter(QuicheDataWriter& writer) const {
return writer.WriteUInt32(value());
}
};
class QUICHE_EXPORT WireUint64 : public WireFixedSizeIntBase<uint64_t> {
public:
using WireFixedSizeIntBase::WireFixedSizeIntBase;
bool SerializeIntoWriter(QuicheDataWriter& writer) const {
return writer.WriteUInt64(value());
}
};
class QUICHE_EXPORT WireVarInt62 {
public:
using DataType = uint64_t;
explicit WireVarInt62(uint64_t value) { value_ = value; }
template <typename T>
explicit WireVarInt62(T value) {
static_assert(std::is_enum_v<T> || std::is_convertible_v<T, uint64_t>);
value_ = static_cast<uint64_t>(value);
}
size_t GetLengthOnWire() const {
return QuicheDataWriter::GetVarInt62Len(value_);
}
bool SerializeIntoWriter(QuicheDataWriter& writer) const {
return writer.WriteVarInt62(value_);
}
private:
uint64_t value_;
};
class QUICHE_EXPORT WireBytes {
public:
using DataType = absl::string_view;
explicit WireBytes(absl::string_view value) { value_ = value; }
size_t GetLengthOnWire() { return value_.size(); }
bool SerializeIntoWriter(QuicheDataWriter& writer) {
return writer.WriteStringPiece(value_);
}
private:
absl::string_view value_;
};
template <class LengthWireType>
class QUICHE_EXPORT WireStringWithLengthPrefix {
public:
using DataType = absl::string_view;
explicit WireStringWithLengthPrefix(absl::string_view value) {
value_ = value;
}
size_t GetLengthOnWire() {
return LengthWireType(value_.size()).GetLengthOnWire() + value_.size();
}
absl::Status SerializeIntoWriter(QuicheDataWriter& writer) {
if (!LengthWireType(value_.size()).SerializeIntoWriter(writer)) {
return absl::InternalError("Failed to serialize the length prefix");
}
if (!writer.WriteStringPiece(value_)) {
return absl::InternalError("Failed to serialize the string proper");
}
return absl::OkStatus();
}
private:
absl::string_view value_;
};
using WireStringWithVarInt62Length = WireStringWithLengthPrefix<WireVarInt62>;
// Wrapper that serializes the inner wire type only when the std::optional it
// holds has a value; otherwise it occupies zero bytes and serialization is a
// successful no-op.
template <typename WireType, typename InnerType = typename WireType::DataType>
class QUICHE_EXPORT WireOptional {
 public:
  using DataType = std::optional<InnerType>;
  // The status type (bool vs absl::Status) is inherited from WireType.
  using Status = SerializeIntoWriterStatus<WireType>;
  explicit WireOptional(DataType value) { value_ = value; }
  size_t GetLengthOnWire() const {
    return value_.has_value() ? WireType(*value_).GetLengthOnWire() : 0;
  }
  typename Status::Type SerializeIntoWriter(QuicheDataWriter& writer) const {
    if (value_.has_value()) {
      return WireType(*value_).SerializeIntoWriter(writer);
    }
    // Nothing to write; report success in WireType's own status type.
    return Status::OkValue();
  }
 private:
  DataType value_;
};
// Serializes a contiguous sequence of values back-to-back, each encoded with
// WireType.  No element count or delimiter is emitted; the caller is
// responsible for any framing of the span as a whole.
template <typename WireType,
          typename SpanElementType = typename WireType::DataType>
class QUICHE_EXPORT WireSpan {
 public:
  using DataType = absl::Span<const SpanElementType>;
  explicit WireSpan(DataType value) { value_ = value; }
  // Sum of the wire lengths of all individual elements.
  size_t GetLengthOnWire() const {
    size_t total = 0;
    for (const SpanElementType& value : value_) {
      total += WireType(value).GetLengthOnWire();
    }
    return total;
  }
  absl::Status SerializeIntoWriter(QuicheDataWriter& writer) const {
    for (size_t i = 0; i < value_.size(); i++) {
      auto status = WireType(value_[i]).SerializeIntoWriter(writer);
      if (IsWriterStatusOk(status)) {
        continue;
      }
      // On failure, annotate the error with the index of the element that
      // failed.  WireType may report errors either as absl::Status or bool.
      if constexpr (SerializeIntoWriterStatus<WireType>::kIsStatus) {
        return AppendToStatus(std::move(status),
                              " while serializing the value #", i);
      } else {
        return absl::InternalError(
            absl::StrCat("Failed to serialize vector value #", i));
      }
    }
    return absl::OkStatus();
  }
 private:
  DataType value_;
};
namespace wire_serialization_internal {
// Invokes data.SerializeIntoWriter(writer).  In debug builds, additionally
// verifies that the number of bytes actually written matches what
// GetLengthOnWire() promised; a mismatch indicates a buggy wire type.
template <typename T>
auto SerializeIntoWriterWrapper(QuicheDataWriter& writer, int argno, T data) {
#if defined(NDEBUG)
  (void)argno;
  (void)data;
  return data.SerializeIntoWriter(writer);
#else
  const size_t initial_offset = writer.length();
  const size_t expected_size = data.GetLengthOnWire();
  auto result = data.SerializeIntoWriter(writer);
  const size_t final_offset = writer.length();
  if (IsWriterStatusOk(result)) {
    QUICHE_DCHECK_EQ(initial_offset + expected_size, final_offset)
        << "while serializing field #" << argno;
  }
  return result;
#endif
}
// Overload for wire types whose SerializeIntoWriter() returns bool:
// converts a failure into an absl::Status naming the field number.
template <typename T>
std::enable_if_t<SerializeIntoWriterStatus<T>::kIsBool, absl::Status>
SerializeIntoWriterCore(QuicheDataWriter& writer, int argno, T data) {
  const bool success = SerializeIntoWriterWrapper(writer, argno, data);
  if (!success) {
    return absl::InternalError(
        absl::StrCat("Failed to serialize field #", argno));
  }
  return absl::OkStatus();
}
// Overload for wire types that already return absl::Status: annotates any
// error with the field number.
template <typename T>
std::enable_if_t<SerializeIntoWriterStatus<T>::kIsStatus, absl::Status>
SerializeIntoWriterCore(QuicheDataWriter& writer, int argno, T data) {
  return AppendToStatus(SerializeIntoWriterWrapper(writer, argno, data),
                        " while serializing field #", argno);
}
// Serializes the fields one at a time, recursively, stopping at the first
// failure; |argno| tracks the current field's position for diagnostics.
template <typename T1, typename... Ts>
absl::Status SerializeIntoWriterCore(QuicheDataWriter& writer, int argno,
                                     T1 data1, Ts... rest) {
  QUICHE_RETURN_IF_ERROR(SerializeIntoWriterCore(writer, argno, data1));
  return SerializeIntoWriterCore(writer, argno + 1, rest...);
}
// Recursion base case: no fields left to serialize.
inline absl::Status SerializeIntoWriterCore(QuicheDataWriter&, int) {
  return absl::OkStatus();
}
}  // namespace wire_serialization_internal
// Serializes all of |data...| into |writer|, in order.  Returns the first
// error encountered, annotated with the zero-based index of the failing
// field.
template <typename... Ts>
absl::Status SerializeIntoWriter(QuicheDataWriter& writer, Ts... data) {
  return wire_serialization_internal::SerializeIntoWriterCore(
      writer, 0, data...);
}
// Returns the total number of bytes the supplied wire objects will occupy
// once serialized, i.e. the sum of GetLengthOnWire() over all arguments
// (zero for an empty argument list).  A single variadic function with a
// C++17 fold expression replaces the previous recursive overload set; it
// handles the zero-, one- and N-argument cases uniformly, and the file
// already relies on C++17 (std::is_enum_v, if constexpr).
template <typename... Ts>
size_t ComputeLengthOnWire(Ts... data) {
  return (size_t{0} + ... + data.GetLengthOnWire());
}
// Computes the exact serialized size of |data...|, allocates a buffer of
// that size from |allocator|, and serializes into it.  Returns an empty
// buffer when there is nothing to write, and an error if serialization
// fails or does not fill the buffer exactly.
template <typename... Ts>
absl::StatusOr<QuicheBuffer> SerializeIntoBuffer(
    QuicheBufferAllocator* allocator, Ts... data) {
  size_t buffer_size = ComputeLengthOnWire(data...);
  if (buffer_size == 0) {
    return QuicheBuffer();
  }
  QuicheBuffer buffer(allocator, buffer_size);
  QuicheDataWriter writer(buffer.size(), buffer.data());
  QUICHE_RETURN_IF_ERROR(SerializeIntoWriter(writer, data...));
  if (writer.remaining() != 0) {
    // A wire type wrote fewer bytes than its GetLengthOnWire() promised.
    return absl::InternalError(absl::StrCat(
        "Excess ", writer.remaining(), " bytes allocated while serializing"));
  }
  return buffer;
}
// Same as SerializeIntoBuffer(), but returns the result as a std::string
// (empty string when there is nothing to write).
template <typename... Ts>
absl::StatusOr<std::string> SerializeIntoString(Ts... data) {
  size_t buffer_size = ComputeLengthOnWire(data...);
  if (buffer_size == 0) {
    return std::string();
  }
  std::string buffer;
  buffer.resize(buffer_size);
  QuicheDataWriter writer(buffer.size(), buffer.data());
  QUICHE_RETURN_IF_ERROR(SerializeIntoWriter(writer, data...));
  if (writer.remaining() != 0) {
    // A wire type wrote fewer bytes than its GetLengthOnWire() promised.
    return absl::InternalError(absl::StrCat(
        "Excess ", writer.remaining(), " bytes allocated while serializing"));
  }
  return buffer;
}
}
#endif | #include "quiche/common/wire_serialization.h"
#include <array>
#include <limits>
#include <optional>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_buffer_allocator.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/quiche_status_utils.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quiche::test {
namespace {
using ::testing::ElementsAre;
// 2^64 - 1 does not fit into the 62-bit varint encoding; used below to
// trigger serialization failures.
constexpr uint64_t kInvalidVarInt = std::numeric_limits<uint64_t>::max();
// Serializes the given wire objects into a buffer backed by the global
// SimpleBufferAllocator.
template <typename... Ts>
absl::StatusOr<quiche::QuicheBuffer> SerializeIntoSimpleBuffer(Ts... data) {
  return SerializeIntoBuffer(quiche::SimpleBufferAllocator::Get(), data...);
}
// Serializes |data...| and checks the result byte-for-byte against
// |expected|, reporting differences as hex with |description| as context.
template <typename... Ts>
void ExpectEncoding(const std::string& description, absl::string_view expected,
                    Ts... data) {
  absl::StatusOr<quiche::QuicheBuffer> actual =
      SerializeIntoSimpleBuffer(data...);
  QUICHE_ASSERT_OK(actual);
  quiche::test::CompareCharArraysWithHexError(description, actual->data(),
                                              actual->size(), expected.data(),
                                              expected.size());
}
// Like ExpectEncoding(), but the expected bytes are given as a hex string.
template <typename... Ts>
void ExpectEncodingHex(const std::string& description,
                       absl::string_view expected_hex, Ts... data) {
  std::string expected;
  ASSERT_TRUE(absl::HexStringToBytes(expected_hex, &expected));
  ExpectEncoding(description, expected, data...);
}
// WireBytes emits raw bytes; multiple fields are concatenated in order.
TEST(SerializationTest, SerializeStrings) {
  absl::StatusOr<quiche::QuicheBuffer> one_string =
      SerializeIntoSimpleBuffer(WireBytes("test"));
  QUICHE_ASSERT_OK(one_string);
  EXPECT_EQ(one_string->AsStringView(), "test");
  absl::StatusOr<quiche::QuicheBuffer> two_strings =
      SerializeIntoSimpleBuffer(WireBytes("Hello"), WireBytes("World"));
  QUICHE_ASSERT_OK(two_strings);
  EXPECT_EQ(two_strings->AsStringView(), "HelloWorld");
}
// Fixed-size integers are encoded big-endian by default.
TEST(SerializationTest, SerializeIntegers) {
  ExpectEncodingHex("one uint8_t value", "42", WireUint8(0x42));
  ExpectEncodingHex("two uint8_t values", "ab01", WireUint8(0xab),
                    WireUint8(0x01));
  ExpectEncodingHex("one uint16_t value", "1234", WireUint16(0x1234));
  ExpectEncodingHex("one uint32_t value", "12345678", WireUint32(0x12345678));
  ExpectEncodingHex("one uint64_t value", "123456789abcdef0",
                    WireUint64(UINT64_C(0x123456789abcdef0)));
  ExpectEncodingHex("mix of values", "aabbcc000000dd", WireUint8(0xaa),
                    WireUint16(0xbbcc), WireUint32(0xdd));
}
// A writer configured for host byte order produces little-endian output
// (assuming a little-endian host).
TEST(SerializationTest, SerializeLittleEndian) {
  char buffer[4];
  QuicheDataWriter writer(sizeof(buffer), buffer,
                          quiche::Endianness::HOST_BYTE_ORDER);
  QUICHE_ASSERT_OK(
      SerializeIntoWriter(writer, WireUint16(0x1234), WireUint16(0xabcd)));
  absl::string_view actual(writer.data(), writer.length());
  std::string expected;
  ASSERT_TRUE(absl::HexStringToBytes("3412cdab", &expected));
  EXPECT_EQ(actual, expected);
}
// Varint62 takes 1, 2, 4 or 8 bytes depending on the value's magnitude.
TEST(SerializationTest, SerializeVarInt62) {
  ExpectEncodingHex("1-byte varint", "25", WireVarInt62(37));
  ExpectEncodingHex("2-byte varint", "7bbd", WireVarInt62(15293));
  ExpectEncodingHex("4-byte varint", "9d7f3e7d", WireVarInt62(494878333));
  ExpectEncodingHex("8-byte varint", "c2197c5eff14e88c",
                    WireVarInt62(UINT64_C(151288809941952652)));
}
// Length-prefixed strings: varint length followed by the raw bytes.
TEST(SerializationTest, SerializeStringWithVarInt62Length) {
  ExpectEncodingHex("short string", "0474657374",
                    WireStringWithVarInt62Length("test"));
  const std::string long_string(15293, 'a');
  ExpectEncoding("long string", absl::StrCat("\x7b\xbd", long_string),
                 WireStringWithVarInt62Length(long_string));
  ExpectEncodingHex("empty string", "00", WireStringWithVarInt62Length(""));
}
// WireOptional: an absent value occupies zero bytes; a present value is
// encoded exactly as the wrapped wire type would encode it.
TEST(SerializationTest, SerializeOptionalValues) {
  std::optional<uint8_t> has_no_value;
  std::optional<uint8_t> has_value = 0x42;
  ExpectEncodingHex("optional without value", "00", WireUint8(0),
                    WireOptional<WireUint8>(has_no_value));
  ExpectEncodingHex("optional with value", "0142", WireUint8(1),
                    WireOptional<WireUint8>(has_value));
  ExpectEncodingHex("empty data", "", WireOptional<WireUint8>(has_no_value));
  std::optional<std::string> has_no_string;
  std::optional<std::string> has_string = "\x42";
  ExpectEncodingHex("optional no string", "",
                    WireOptional<WireStringWithVarInt62Length>(has_no_string));
  ExpectEncodingHex("optional string", "0142",
                    WireOptional<WireStringWithVarInt62Length>(has_string));
}
enum class TestEnum {
  kValue1 = 0x17,
  kValue2 = 0x19,
};
// Enum values are accepted by WireVarInt62's templated constructor.
TEST(SerializationTest, SerializeEnumValue) {
  ExpectEncodingHex("enum value", "17", WireVarInt62(TestEnum::kValue1));
}
// Exercises the variadic recursion with many fields at once.
TEST(SerializationTest, SerializeLotsOfValues) {
  ExpectEncodingHex("ten values", "00010203040506070809", WireUint8(0),
                    WireUint8(1), WireUint8(2), WireUint8(3), WireUint8(4),
                    WireUint8(5), WireUint8(6), WireUint8(7), WireUint8(8),
                    WireUint8(9));
}
// A full writer rejects further writes; errors name the failing field index.
TEST(SerializationTest, FailDueToLackOfSpace) {
  char buffer[4];
  QuicheDataWriter writer(sizeof(buffer), buffer);
  QUICHE_EXPECT_OK(SerializeIntoWriter(writer, WireUint32(0)));
  ASSERT_EQ(writer.remaining(), 0u);
  EXPECT_THAT(
      SerializeIntoWriter(writer, WireUint32(0)),
      StatusIs(absl::StatusCode::kInternal, "Failed to serialize field #0"));
  EXPECT_THAT(
      SerializeIntoWriter(writer, WireStringWithVarInt62Length("test")),
      StatusIs(
          absl::StatusCode::kInternal,
          "Failed to serialize the length prefix while serializing field #0"));
}
// A value too large for varint62 trips a QUICHE_BUG in the writer.
TEST(SerializationTest, FailDueToInvalidValue) {
  EXPECT_QUICHE_BUG(
      ExpectEncoding("invalid varint", "", WireVarInt62(kInvalidVarInt)),
      "too big for VarInt62");
}
// When a middle field fails, earlier fields stay written and the writer
// remains usable for subsequent writes.
TEST(SerializationTest, InvalidValueCausesPartialWrite) {
  char buffer[3] = {'\0'};
  QuicheDataWriter writer(sizeof(buffer), buffer);
  QUICHE_EXPECT_OK(SerializeIntoWriter(writer, WireBytes("a")));
  EXPECT_THAT(
      SerializeIntoWriter(writer, WireBytes("b"),
                          WireBytes("A considerably long string, writing which "
                                    "will most likely cause ASAN to crash"),
                          WireBytes("c")),
      StatusIs(absl::StatusCode::kInternal, "Failed to serialize field #1"));
  EXPECT_THAT(buffer, ElementsAre('a', 'b', '\0'));
  QUICHE_EXPECT_OK(SerializeIntoWriter(writer, WireBytes("z")));
  EXPECT_EQ(buffer[2], 'z');
}
// WireSpan concatenates the encodings of all elements with no delimiter.
TEST(SerializationTest, SerializeVector) {
  std::vector<absl::string_view> strs = {"foo", "test", "bar"};
  absl::StatusOr<quiche::QuicheBuffer> serialized =
      SerializeIntoSimpleBuffer(WireSpan<WireBytes>(absl::MakeSpan(strs)));
  QUICHE_ASSERT_OK(serialized);
  EXPECT_EQ(serialized->AsStringView(), "footestbar");
}
struct AwesomeStruct {
  uint64_t awesome_number;
  std::string awesome_text;
};
// Example of a user-defined wire type that composes the built-in ones.
class WireAwesomeStruct {
 public:
  using DataType = AwesomeStruct;
  WireAwesomeStruct(const AwesomeStruct& awesome) : awesome_(awesome) {}
  size_t GetLengthOnWire() {
    return quiche::ComputeLengthOnWire(WireUint16(awesome_.awesome_number),
                                       WireBytes(awesome_.awesome_text));
  }
  absl::Status SerializeIntoWriter(QuicheDataWriter& writer) {
    return AppendToStatus(::quiche::SerializeIntoWriter(
                              writer, WireUint16(awesome_.awesome_number),
                              WireBytes(awesome_.awesome_text)),
                          " while serializing AwesomeStruct");
  }
 private:
  const AwesomeStruct& awesome_;
};
TEST(SerializationTest, CustomStruct) {
  AwesomeStruct awesome;
  awesome.awesome_number = 0xabcd;
  awesome.awesome_text = "test";
  ExpectEncodingHex("struct", "abcd74657374", WireAwesomeStruct(awesome));
}
// User-defined wire types also compose with WireSpan.
TEST(SerializationTest, CustomStructSpan) {
  std::array<AwesomeStruct, 2> awesome;
  awesome[0].awesome_number = 0xabcd;
  awesome[0].awesome_text = "test";
  awesome[1].awesome_number = 0x1234;
  awesome[1].awesome_text = std::string(3, '\0');
  ExpectEncodingHex("struct", "abcd746573741234000000",
                    WireSpan<WireAwesomeStruct>(absl::MakeSpan(awesome)));
}
// Deliberately buggy wire type: claims s_.size() bytes but writes one less,
// to exercise the length-mismatch detection paths.
class WireFormatterThatWritesTooLittle {
 public:
  using DataType = absl::string_view;
  explicit WireFormatterThatWritesTooLittle(absl::string_view s) : s_(s) {}
  size_t GetLengthOnWire() const { return s_.size(); }
  bool SerializeIntoWriter(QuicheDataWriter& writer) {
    return writer.WriteStringPiece(s_.substr(0, s_.size() - 1));
  }
 private:
  absl::string_view s_;
};
// In release builds the mismatch surfaces as an "Excess N bytes" error from
// SerializeIntoBuffer; in debug builds the QUICHE_DCHECK in
// SerializeIntoWriterWrapper fires first.
TEST(SerializationTest, CustomStructWritesTooLittle) {
  absl::Status status;
#if defined(NDEBUG)
  constexpr absl::string_view kStr = "\xaa\xbb\xcc\xdd";
  status = SerializeIntoSimpleBuffer(WireFormatterThatWritesTooLittle(kStr))
               .status();
  EXPECT_THAT(status, StatusIs(absl::StatusCode::kInternal,
                               ::testing::HasSubstr("Excess 1 bytes")));
#elif GTEST_HAS_DEATH_TEST
  constexpr absl::string_view kStr = "\xaa\xbb\xcc\xdd";
  EXPECT_QUICHE_DEBUG_DEATH(
      status = SerializeIntoSimpleBuffer(WireFormatterThatWritesTooLittle(kStr))
                   .status(),
      "while serializing field #0");
  EXPECT_THAT(status, StatusIs(absl::StatusCode::kOk));
#endif
}
// Zero fields serialize to zero bytes.
TEST(SerializationTest, Empty) { ExpectEncodingHex("nothing", ""); }
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/wire_serialization.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/wire_serialization_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
41037164-37cc-4768-8671-a4dc89ea1e73 | cpp | tensorflow/tensorflow | elu | tensorflow/lite/kernels/internal/reference/elu.h | tensorflow/lite/delegates/xnnpack/elu_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Applies the ELU activation elementwise: the identity for non-negative
// inputs, and expm1(x) = e^x - 1 for negative inputs.  Input and output
// buffers must have matching flat sizes.
inline void Elu(const RuntimeShape& input_shape, const float* input_data,
                const RuntimeShape& output_shape, float* output_data) {
  const int size = MatchingFlatSize(input_shape, output_shape);
  for (int index = 0; index < size; ++index) {
    const float x = input_data[index];
    if (x < 0.0f) {
      output_data[index] = TfLiteExpm1(x);
    } else {
      output_data[index] = x;
    }
  }
}
}
}
#endif | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
// Delegates an ELU op over a random 4-D shape to XNNPACK and compares
// against the reference implementation.
TEST(Elu, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
// Same as above, for a random 3-D shape.
TEST(Elu, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, width, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
// Same as above, for a random 2-D shape.
TEST(Elu, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
// Same as above, for a random 1-D shape.
TEST(Elu, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_ELU,
                                               xnnpack_delegate.get());
}
// Exercises the delegate with a two-thread XNNPACK threadpool.
TEST(Elu, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_ELU, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/reference/elu.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/elu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0c8c31e5-79a6-45c5-8432-abce7d1be83d | cpp | tensorflow/tensorflow | topological_sort | tensorflow/compiler/mlir/tensorflow/utils/topological_sort.cc | tensorflow/core/grappler/utils/topological_sort_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/topological_sort.h"
#include <algorithm>
#include <queue>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
namespace mlir {
namespace TF {
// Sentinel meaning "no extra control dependencies".
ExtraDependenciesFunction no_extra_dependencies = nullptr;
// Topologically sorts the top-level operations of `block` using a
// Kahn-style worklist, breaking ties with `priorityFunction` (then original
// position, keeping the sort stable) and honoring the additional control
// edges supplied by `extraDependencies` (which may be null).  Returns the
// ops in a valid execution order; if a dependency cycle prevents progress,
// the returned vector contains only the ops scheduled so far.
std::vector<Operation*> SortBlockTopologically(
    Block& block, PriorityFunction priorityFunction,
    ExtraDependenciesFunction extraDependencies) {
  // Per top-level op: how many data / control predecessors in this block
  // are not yet scheduled.
  llvm::DenseMap<Operation*, int> remaining_incoming_data_edges;
  llvm::DenseMap<Operation*, int> remaining_incoming_ctrl_edges;
  // Original position of each op; the final tie-breaker below.
  llvm::DenseMap<Operation*, int> position;
  // Maps any (possibly nested) operation to the top-level op that contains
  // it, so edges into nested regions count against the top-level op.
  llvm::DenseMap<Operation*, Operation*> ancestor;
  SmallVector<Operation*> ready;
  llvm::SmallVector<mlir::Operation*, 4> empty_op_set;
  // Accessors for the extra control edges; fall back to an empty set when no
  // callback was provided.
  auto ctrlPredecessors =
      [&](Operation* op) -> llvm::SmallVector<mlir::Operation*, 4> const& {
    if (extraDependencies) {
      return extraDependencies(op, true);
    } else {
      return empty_op_set;
    }
  };
  auto ctrlSuccessors =
      [&](Operation* op) -> llvm::SmallVector<mlir::Operation*, 4> const& {
    if (extraDependencies) {
      return extraDependencies(op, false);
    } else {
      return empty_op_set;
    }
  };
  // Count incoming edges for every top-level op, walking nested operations
  // so that operands used inside regions are attributed to their ancestor.
  int i = 0;
  for (Operation& op : block.getOperations()) {
    int incoming_ctrl_edges = 0;
    int incoming_data_edges = 0;
    op.walk([&](Operation* child) {
      ancestor[child] = &op;
      for (Operation* predecessor : ctrlPredecessors(child)) {
        if (predecessor->getBlock() == &block) {
          incoming_ctrl_edges++;
        }
      }
      for (Value v : child->getOperands()) {
        if (v.getParentBlock() == &block) {
          incoming_data_edges++;
        }
      }
    });
    remaining_incoming_data_edges[&op] = incoming_data_edges;
    remaining_incoming_ctrl_edges[&op] = incoming_ctrl_edges;
    // Ops with no unsatisfied predecessors can be scheduled immediately.
    if (incoming_data_edges == 0 && incoming_ctrl_edges == 0) {
      ready.push_back(&op);
    }
    position[&op] = i++;
  }
  // Values that have become available; block arguments are available from
  // the start.
  std::queue<Value> todo;
  for (Value value : block.getArguments()) {
    todo.push(value);
  }
  std::vector<Operation*> result;
  Operation* previous_op = nullptr;
  while (!todo.empty() || !ready.empty()) {
    // Propagate newly available values: decrement the data-edge counts of
    // their users and move fully satisfied ops onto the ready list.
    while (!todo.empty()) {
      Value value = todo.front();
      todo.pop();
      for (OpOperand& operand : value.getUses()) {
        Operation* user = ancestor[operand.getOwner()];
        remaining_incoming_data_edges[user]--;
        if (remaining_incoming_data_edges[user] == 0 &&
            remaining_incoming_ctrl_edges[user] == 0) {
          ready.push_back(user);
        }
      }
    }
    // Preference order: non-terminators before terminators, then higher
    // priority, then earlier original position.
    auto better = [&](Operation* a, Operation* b) {
      if (a->hasTrait<OpTrait::IsTerminator>() !=
          b->hasTrait<OpTrait::IsTerminator>()) {
        return b->hasTrait<OpTrait::IsTerminator>();
      }
      int a_priority = priorityFunction(previous_op, a);
      int b_priority = priorityFunction(previous_op, b);
      if (a_priority != b_priority) {
        return a_priority > b_priority;
      } else {
        return position[a] < position[b];
      }
    };
    Operation* best = nullptr;
    for (Operation* op : ready) {
      if (best == nullptr || better(op, best)) {
        best = op;
      }
    }
    // No schedulable op left while work remains: a cycle; return the
    // partial order computed so far.
    if (!best) {
      assert(ready.empty());
      return result;
    }
    ready.erase(std::find(ready.begin(), ready.end(), best));
    previous_op = best;
    // Scheduling `best` makes its results available and satisfies its
    // outgoing extra control edges.
    for (Value result : best->getResults()) {
      todo.push(result);
    }
    for (Operation* successor : ctrlSuccessors(best)) {
      if (ancestor.find(successor) != ancestor.end()) {
        successor = ancestor[successor];
        remaining_incoming_ctrl_edges[successor]--;
        if (remaining_incoming_ctrl_edges[successor] == 0 &&
            remaining_incoming_data_edges[successor] == 0) {
          ready.push_back(successor);
        }
      }
    }
    result.push_back(best);
  }
  return result;
}
}
} | #include "tensorflow/core/grappler/utils/topological_sort.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/benchmark_testlib.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace grappler {
class TopologicalSortTest : public ::testing::Test {
 protected:
  // Minimal node description: name, optional op type, and input names.
  struct NodeConfig {
    NodeConfig(string name, std::vector<string> inputs)
        : name(std::move(name)), inputs(std::move(inputs)) {}
    NodeConfig(string name, string op, std::vector<string> inputs)
        : name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
    string name;
    string op;
    std::vector<string> inputs;
  };
  // Builds a GraphDef from the given node descriptions, preserving order.
  static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
    GraphDef graph;
    for (const NodeConfig& node : nodes) {
      NodeDef node_def;
      node_def.set_name(node.name);
      node_def.set_op(node.op);
      for (const string& input : node.inputs) {
        node_def.add_input(input);
      }
      *graph.add_node() = std::move(node_def);
    }
    return graph;
  }
};
// An acyclic graph sorts into a unique order; both ComputeTopologicalOrder
// and in-place TopologicalSort must agree with it.
TEST_F(TopologicalSortTest, NoLoop) {
  GraphDef graph = CreateGraph({
      {"2", {"5"}},
      {"0", {"5", "4"}},
      {"1", {"4", "3"}},
      {"3", {"2"}},
      {"5", {}},
      {"4", {}}
  });
  std::vector<const NodeDef*> topo_order;
  TF_EXPECT_OK(ComputeTopologicalOrder(graph, &topo_order));
  const std::vector<string> order = {"5", "4", "2", "0", "3", "1"};
  ASSERT_EQ(topo_order.size(), order.size());
  for (int i = 0; i < topo_order.size(); ++i) {
    const NodeDef* node = topo_order[i];
    EXPECT_EQ(node->name(), order[i]);
  }
  TF_EXPECT_OK(TopologicalSort(&graph));
  for (int i = 0; i < topo_order.size(); i++) {
    EXPECT_EQ(graph.node(i).name(), order[i]);
  }
}
// A Merge/Switch/NextIteration while-loop back edge is legal and must not be
// reported as a cycle.
TEST_F(TopologicalSortTest, WithLoop) {
  GraphDef graph = CreateGraph({
      {"2", "Merge", {"1", "5"}},
      {"3", "Switch", {"2"}},
      {"4", "Identity", {"3"}},
      {"5", "NextIteration", {"4"}},
      {"1", {}}
  });
  std::vector<const NodeDef*> topo_order;
  TF_EXPECT_OK(ComputeTopologicalOrder(graph, &topo_order));
  const std::vector<string> order = {"1", "2", "3", "4", "5"};
  ASSERT_EQ(topo_order.size(), order.size());
  for (int i = 0; i < topo_order.size(); ++i) {
    const NodeDef* node = topo_order[i];
    EXPECT_EQ(node->name(), order[i]);
  }
  TF_EXPECT_OK(TopologicalSort(&graph));
  for (int i = 0; i < order.size(); i++) {
    EXPECT_EQ(graph.node(i).name(), order[i]);
  }
}
// A plain cycle (no control-flow ops) must fail, leaving the graph
// unchanged.
TEST_F(TopologicalSortTest, WithIllegalLoop) {
  GraphDef graph = CreateGraph({
      {"2", {"1", "3"}},
      {"3", {"2"}},
      {"1", {}}
  });
  EXPECT_FALSE(TopologicalSort(&graph).ok());
  std::vector<string> order = {"2", "3", "1"};
  for (int i = 0; i < order.size(); i++) {
    EXPECT_EQ(graph.node(i).name(), order[i]);
  }
}
// The same input listed twice must be counted correctly.
TEST_F(TopologicalSortTest, DuplicatedInputs) {
  GraphDef graph = CreateGraph({
      {"2", {"1", "1"}},
      {"1", {}}
  });
  TF_EXPECT_OK(TopologicalSort(&graph));
  std::vector<string> order = {"1", "2"};
  for (int i = 0; i < order.size(); i++) {
    EXPECT_EQ(graph.node(i).name(), order[i]);
  }
}
// Sorting an already-sorted graph must leave it unchanged.
TEST_F(TopologicalSortTest, Idempotent) {
  GraphDef graph = CreateGraph({
      {"1", {}},
      {"2", {}},
      {"3", {"1", "2"}},
      {"4", {"1", "3"}},
      {"5", {"2", "3"}}
  });
  TF_EXPECT_OK(TopologicalSort(&graph));
  std::vector<string> order = {"1", "2", "3", "4", "5"};
  for (int i = 0; i < order.size(); i++) {
    EXPECT_EQ(graph.node(i).name(), order[i]);
  }
  TF_EXPECT_OK(TopologicalSort(&graph));
  for (int i = 0; i < order.size(); i++) {
    EXPECT_EQ(graph.node(i).name(), order[i]);
  }
}
// Extra (non-graph) dependencies constrain the order; a contradictory extra
// edge turns the graph cyclic and must fail.
TEST_F(TopologicalSortTest, ExtraDependencies) {
  GraphDef graph = CreateGraph({
      {"2", {"5"}},
      {"0", {"5", "4"}},
      {"1", {"4", "3"}},
      {"3", {"2"}},
      {"5", {}},
      {"4", {}}
  });
  std::vector<TopologicalDependency> extra_dependencies;
  extra_dependencies.push_back({&graph.node(5), &graph.node(4)});
  std::vector<const NodeDef*> topo_order;
  TF_EXPECT_OK(ComputeTopologicalOrder(graph, extra_dependencies, &topo_order));
  const std::vector<string> valid_order_1 = {"4", "5", "2", "0", "3", "1"};
  const std::vector<string> valid_order_2 = {"4", "5", "0", "2", "3", "1"};
  ASSERT_EQ(topo_order.size(), valid_order_1.size());
  std::vector<string> computed_order(6, "");
  for (int i = 0; i < topo_order.size(); ++i) {
    const NodeDef* node = topo_order[i];
    computed_order[i] = node->name();
  }
  EXPECT_TRUE(computed_order == valid_order_1 ||
              computed_order == valid_order_2);
  extra_dependencies.push_back({&graph.node(1), &graph.node(5)});
  EXPECT_FALSE(
      ComputeTopologicalOrder(graph, extra_dependencies, &topo_order).ok());
}
// Benchmarks ComputeTopologicalOrder on random graphs of increasing size
// (node count taken from the benchmark argument).
static void BM_ComputeTopologicalOrder(::testing::benchmark::State& state) {
  const int size = state.range(0);
  GraphDef graph = test::CreateRandomGraph(size);
  std::vector<const NodeDef*> topo_order;
  for (auto s : state) {
    topo_order.clear();
    Status st = ComputeTopologicalOrder(graph, &topo_order);
    CHECK(st.ok()) << "Failed to compute topological order";
  }
}
BENCHMARK(BM_ComputeTopologicalOrder)
    ->Arg(10)
    ->Arg(100)
    ->Arg(1000)
    ->Arg(10000)
    ->Arg(25000)
    ->Arg(50000)
    ->Arg(100000);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/topological_sort.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/topological_sort_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcf5ff3d-be74-493f-a8b0-af3125c467b6 | cpp | tensorflow/tensorflow | matrix_triangular_solve_op | tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc | tensorflow/core/kernels/linalg/matrix_triangular_solve_op_test.cc | #include <tuple>
#include <utility>
#include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/util/matmul_bcast.h"
namespace tensorflow {
namespace {
// XLA kernel for tf.linalg.triangular_solve: solves op(A) X = B where A is
// triangular, with batch dimensions broadcast between the two operands.
class MatrixTriangularSolveOp : public XlaOpKernel {
 public:
  explicit MatrixTriangularSolveOp(OpKernelConstruction* ctx)
      : XlaOpKernel(ctx) {
    OP_REQUIRES_OK(ctx, ctx->GetAttr("lower", &lower_));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("adjoint", &adjoint_));
  }

  void Compile(XlaOpKernelContext* ctx) override {
    const TensorShape lhs_shape = ctx->InputShape(0);
    const TensorShape rhs_shape = ctx->InputShape(1);
    // Validate that the batch dimensions of the two operands are
    // broadcast-compatible before doing anything else.
    MatMulBCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape));
    if (!bcast.IsValid()) {
      ctx->SetStatus(errors::InvalidArgument(
          "Incompatible shapes: ", lhs_shape.DebugString(), " vs. ",
          rhs_shape.DebugString()));
      return;
    }

    // The coefficient matrix must be square in its trailing two dims.
    auto lhs_size = lhs_shape.dims();
    OP_REQUIRES(
        ctx,
        lhs_shape.dim_size(lhs_size - 1) == lhs_shape.dim_size(lhs_size - 2),
        errors::InvalidArgument("The coefficient matrix must be square in "
                                "the inner-most two dimensions: ",
                                lhs_shape.DebugString()));

    xla::XlaOp a = ctx->Input(0);
    xla::XlaOp b = ctx->Input(1);
    // Explicitly broadcast both operands to the common batch shape; XLA's
    // TriangularSolve does not broadcast on its own.
    std::tie(a, b) = Broadcast(a, lhs_shape, b, rhs_shape, bcast);
    // Positional args per xla::TriangularSolve: left_side=true (A is on the
    // left of X), unit_diagonal=false — confirm against the XLA API if
    // these ever change.
    auto result = xla::TriangularSolve(
        a, b, true,
        lower_, false,
        adjoint_ ? xla::TriangularSolveOptions::ADJOINT
                 : xla::TriangularSolveOptions::NO_TRANSPOSE);
    ctx->SetOutput(0, result);
  }

 private:
  // Broadcasts |lhs| and |rhs| to the common batch shape computed by
  // |broadcast_helper|; on failure, returns an error op for both.
  static std::pair<xla::XlaOp, xla::XlaOp> Broadcast(
      xla::XlaOp lhs, const TensorShape& lhs_shape, xla::XlaOp rhs,
      const TensorShape& rhs_shape, const MatMulBCast& broadcast_helper);

  bool lower_;    // Solve against the lower (vs upper) triangle of A.
  bool adjoint_;  // Solve with the adjoint of A instead of A itself.
};
// Broadcasts both operands to the common batch shape: the lhs keeps its
// trailing [m, m] dims and the rhs its trailing [m, n] dims, while the
// leading batch dims come from the broadcast helper.  Any broadcast failure
// is reported through the builder and returned as an error op pair.
std::pair<xla::XlaOp, xla::XlaOp>
MatrixTriangularSolveOp::Broadcast(xla::XlaOp lhs, const TensorShape& lhs_shape,
                                   xla::XlaOp rhs, const TensorShape& rhs_shape,
                                   const MatMulBCast& broadcast_helper) {
  int64_t m = lhs_shape.dim_size(lhs_shape.dims() - 1);
  int64_t n = rhs_shape.dim_size(rhs_shape.dims() - 1);

  TensorShape lhs_broadcast_shape(broadcast_helper.output_batch_shape());
  lhs_broadcast_shape.AddDim(m);
  lhs_broadcast_shape.AddDim(m);
  auto lhs_output = BroadcastTo(lhs, lhs_broadcast_shape.dim_sizes());
  if (!lhs_output.ok()) {
    xla::XlaOp error = lhs.builder()->ReportError(lhs_output.status());
    return {error, error};
  }

  TensorShape rhs_broadcast_shape(broadcast_helper.output_batch_shape());
  rhs_broadcast_shape.AddDim(m);
  rhs_broadcast_shape.AddDim(n);
  auto rhs_output = BroadcastTo(rhs, rhs_broadcast_shape.dim_sizes());
  if (!rhs_output.ok()) {
    xla::XlaOp error = rhs.builder()->ReportError(rhs_output.status());
    return {error, error};
  }
  return {lhs_output.value(), rhs_output.value()};
}

REGISTER_XLA_OP(Name("MatrixTriangularSolve"), MatrixTriangularSolveOp);
}
} | #include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/kernels/broadcast_to_op.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
// Adds a BroadcastTo node to |g| that expands |input| to |shape|
// (shape given as an int64 tensor).
Node* BroadcastTo(Graph* g, Node* input, Node* shape) {
  Node* ret;
  TF_CHECK_OK(NodeBuilder(g->NewName("n"), "BroadcastTo")
                  .Input(input)
                  .Input(shape)
                  .Attr("Tidx", DT_INT64)
                  .Finalize(g, &ret));
  return ret;
}
// Adds a MatrixTriangularSolve node to |g| solving with the lower triangle
// of |in0| against |in1|.
Node* MatrixTriangularSolve(Graph* g, Node* in0, Node* in1, bool adjoint) {
  Node* ret;
  TF_CHECK_OK(NodeBuilder(g->NewName("n"), "MatrixTriangularSolve")
                  .Input(in0)
                  .Input(in1)
                  .Attr("lower", true)
                  .Attr("adjoint", adjoint)
                  .Finalize(g, &ret));
  return ret;
}
// Builds a benchmark graph that solves a batched triangular system with
// random inputs of shapes [b0, m, m] and [b1, m, n].  When
// |manual_broadcast| is set, explicit BroadcastTo nodes expand both batch
// dims to max(b0, b1); otherwise the op's implicit broadcasting is used.
template <typename T>
static Graph* MatrixTriangularSolveWithBroadcast(int64_t b0, int64_t b1,
                                                 int64_t m, int64_t n,
                                                 bool manual_broadcast,
                                                 DataType type) {
  Graph* g = new Graph(OpRegistry::Global());
  Tensor in0(type, TensorShape({b0, m, m}));
  in0.flat<T>().setRandom();
  auto matrix = Eigen::Map<
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>(
      in0.flat<T>().data(), in0.dim_size(1), in0.dim_size(2));
  // Shift the diagonal away from zero so the triangular system stays
  // well-conditioned.
  matrix.diagonal() =
      (matrix.diagonal().cwiseAbs().array() + static_cast<T>(0.5));
  Tensor in1(type, TensorShape({b1, m, n}));
  in1.flat<T>().setRandom();
  Tensor broadcasted_in0_shape(DT_INT64, TensorShape({3}));
  Tensor broadcasted_in1_shape(DT_INT64, TensorShape({3}));
  Node* in0_node = nullptr;
  Node* in1_node = nullptr;
  if (manual_broadcast) {
    // Both inputs are expanded to batch dim max(b0, b1); trailing matrix
    // dims are kept as-is.
    auto vec0 = broadcasted_in0_shape.vec<int64_t>();
    auto vec1 = broadcasted_in1_shape.vec<int64_t>();
    for (int i = 0; i < 3; ++i) {
      vec0(i) = (i == 0 ? std::max(b0, b1) : in0.shape().dim_size(i));
      vec1(i) = (i == 0 ? std::max(b0, b1) : in1.shape().dim_size(i));
    }
    in0_node = BroadcastTo(g, test::graph::Constant(g, in0),
                           test::graph::Constant(g, broadcasted_in0_shape));
    in1_node = BroadcastTo(g, test::graph::Constant(g, in1),
                           test::graph::Constant(g, broadcasted_in1_shape));
  } else {
    in0_node = test::graph::Constant(g, in0);
    in1_node = test::graph::Constant(g, in1);
  }
  MatrixTriangularSolve(g, in0_node, in1_node, false);
  return g;
}
#define BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, T, TT, D) \
static void \
BM_MatrixTriangularSolve##_##B1##_##B2##_##M##_##N##_##MB##_##TT##_##D( \
::testing::benchmark::State& state) { \
state.SetItemsProcessed(state.iterations() * std::max(B1, B2) * M * M * \
N * 2); \
test::Benchmark( \
#D, MatrixTriangularSolveWithBroadcast<T>(B1, B2, M, N, MB, TT), \
false) \
.Run(state); \
} \
BENCHMARK( \
BM_MatrixTriangularSolve##_##B1##_##B2##_##M##_##N##_##MB##_##TT##_##D);
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define BM_MatrixTriangularSolve(B1, B2, M, N, MB) \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, float, DT_FLOAT, cpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, double, DT_DOUBLE, cpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, float, DT_FLOAT, gpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, double, DT_DOUBLE, gpu);
#else
#define BM_MatrixTriangularSolve(B1, B2, M, N, MB) \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, float, DT_FLOAT, cpu); \
BM_MatrixTriangularSolveDev(B1, B2, M, N, MB, double, DT_DOUBLE, cpu);
#endif
BM_MatrixTriangularSolve(32, 32, 512, 512, true);
BM_MatrixTriangularSolve(32, 32, 512, 512, false);
BM_MatrixTriangularSolve(1, 32, 512, 512, true);
BM_MatrixTriangularSolve(1, 32, 512, 512, false);
BM_MatrixTriangularSolve(32, 1, 512, 512, true);
BM_MatrixTriangularSolve(32, 1, 512, 512, false);
BM_MatrixTriangularSolve(128, 128, 512, 512, true);
BM_MatrixTriangularSolve(128, 128, 512, 512, false);
BM_MatrixTriangularSolve(1, 128, 512, 512, true);
BM_MatrixTriangularSolve(1, 128, 512, 512, false);
BM_MatrixTriangularSolve(128, 1, 512, 512, true);
BM_MatrixTriangularSolve(128, 1, 512, 512, false);
BM_MatrixTriangularSolve(1, 128, 1024, 1024, true);
BM_MatrixTriangularSolve(1, 128, 1024, 1024, false);
BM_MatrixTriangularSolve(128, 1, 1024, 1024, true);
BM_MatrixTriangularSolve(128, 1, 1024, 1024, false);
BM_MatrixTriangularSolve(1, 128, 200, 1, true);
BM_MatrixTriangularSolve(1, 128, 200, 1, false);
BM_MatrixTriangularSolve(128, 1, 200, 1, true);
BM_MatrixTriangularSolve(128, 1, 200, 1, false);
BM_MatrixTriangularSolve(1, 128, 200, 10000, true);
BM_MatrixTriangularSolve(1, 128, 200, 10000, false);
BM_MatrixTriangularSolve(128, 1, 200, 10000, true);
BM_MatrixTriangularSolve(128, 1, 200, 10000, false);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/matrix_triangular_solve_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/linalg/matrix_triangular_solve_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33336fcd-e3c1-42aa-9201-e1fd29254023 | cpp | tensorflow/tensorflow | quantization | tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc | tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc | #include "tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace {
using ::mlir::quant::stablehlo::StaticRangePtqComponent;
using ::mlir::quant::stablehlo::WeightOnlyPtqComponent;
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::SignatureDef;
using ::tensorflow::quantization::PyFunctionLibrary;
absl::flat_hash_map<std::string, SignatureDef> GetSignatureDefMapFromBundle(
const SavedModelBundle& saved_model_bundle) {
const protobuf::Map<std::string, SignatureDef>& signatures =
saved_model_bundle.GetSignatures();
absl::flat_hash_map<std::string, SignatureDef> signature_def_map(
signatures.begin(), signatures.end());
signature_def_map.erase(kSavedModelInitOpSignatureKey);
return signature_def_map;
}
absl::flat_hash_map<std::string, std::string> GetFunctionAliases(
const SavedModelBundle& saved_model_bundle) {
const protobuf::Map<std::string, std::string>& function_aliases =
saved_model_bundle.meta_graph_def.meta_info_def().function_aliases();
return absl::flat_hash_map<std::string, std::string>(function_aliases.begin(),
function_aliases.end());
}
}
absl::StatusOr<mlir::ModuleOp> RunQuantization(
const SavedModelBundle* saved_model_bundle,
const absl::string_view saved_model_dir,
const std::unordered_set<std::string>& saved_model_tags,
const QuantizationConfig& quantization_config,
const PyFunctionLibrary* quantization_py_function_lib,
mlir::ModuleOp module_op) {
if (saved_model_bundle == nullptr) {
return absl::InvalidArgumentError(
"Failed to run quantization. `saved_model_bundle` should not be "
"nullptr.");
}
if (quantization_py_function_lib == nullptr) {
return absl::InvalidArgumentError(
"Failed to run quantization. `quantization_py_function_lib` should not "
"be nullptr.");
}
LOG(INFO) << "User-provided quantization config: "
<< quantization_config.DebugString();
const QuantizationConfig updated_config =
ExpandPresets(PopulateDefaults(quantization_config));
LOG(INFO) << "Updated quantization config: " << updated_config.DebugString();
const absl::flat_hash_map<std::string, SignatureDef> signature_def_map =
GetSignatureDefMapFromBundle(*saved_model_bundle);
std::vector<std::string> exported_names;
for (const auto& [key, value_unused] : signature_def_map) {
exported_names.push_back(key);
}
if (failed(mlir::tf_saved_model::FreezeVariables(
module_op, saved_model_bundle->GetSession()))) {
return absl::InternalError("Failed to freeze variables.");
}
mlir::PassManager pm(module_op.getContext());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
mlir::odml::AddLegalizeTFToStablehloPasses(pm, true,
false,
false);
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::createRemoveShardingCustomCallPass());
if (failed(pm.run(module_op))) {
return absl::InternalError("Failed to run legalize TF to StableHLO.");
}
absl::StatusOr<mlir::ModuleOp> quantized_module_op;
if (HasQuantizationMethod(updated_config.specs(),
Method::MethodCase::kStaticRangePtq)) {
StaticRangePtqComponent static_range_ptq_component(
module_op.getContext(), quantization_py_function_lib, saved_model_dir,
exported_names, saved_model_tags, signature_def_map,
GetFunctionAliases(*saved_model_bundle));
quantized_module_op =
static_range_ptq_component.Run(module_op, updated_config);
} else if (HasQuantizationMethod(updated_config.specs(),
Method::MethodCase::kWeightOnlyPtq)) {
WeightOnlyPtqComponent weight_only_ptq_component(module_op.getContext());
quantized_module_op =
weight_only_ptq_component.Run(module_op, updated_config);
} else {
return absl::InvalidArgumentError(
"Quantization config must have either static_range_ptq_preset or "
"weight_only_ptq_preset.");
}
if (!quantized_module_op.ok()) {
return absl::InternalError("Failed to run quantization. Status msg: " +
quantized_module_op.status().ToString());
}
return quantized_module_op;
}
} | #include "tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::CreateTmpDir;
using ::testing::HasSubstr;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(RunQuantizationTest,
WhenSavedModelBundleIsNullptrReturnsInvalidArgumentError) {
const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
ASSERT_THAT(tmp_saved_model_dir, IsOk());
QuantizationConfig config;
const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
nullptr, *tmp_saved_model_dir,
{}, config,
nullptr, {});
EXPECT_THAT(
quantized_module_op,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("`saved_model_bundle` should not be nullptr")));
}
TEST(RunQuantizationTest,
WhenPyFunctionLibIsNullptrReturnsInvalidArgumentError) {
const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
ASSERT_THAT(tmp_saved_model_dir, IsOk());
SavedModelBundle bundle{};
QuantizationConfig config;
const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
&bundle, *tmp_saved_model_dir,
{}, config,
nullptr, {});
EXPECT_THAT(
quantized_module_op,
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("`quantization_py_function_lib` should not be nullptr")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
420151f8-c17c-4762-bc14-ceeecde6a1bc | cpp | tensorflow/tensorflow | cuda_version_parser | third_party/xla/xla/stream_executor/cuda/cuda_version_parser.cc | third_party/xla/xla/stream_executor/cuda/cuda_version_parser_test.cc | #include "xla/stream_executor/cuda/cuda_version_parser.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/semantic_version.h"
namespace stream_executor {
absl::StatusOr<SemanticVersion> ParseCudaVersion(int cuda_version) {
if (cuda_version < 0) {
return absl::InvalidArgumentError("Version numbers cannot be negative!");
}
int major = cuda_version / 1000;
int minor = (cuda_version % 1000) / 10;
return SemanticVersion(major, minor, 0);
}
} | #include "xla/stream_executor/cuda/cuda_version_parser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(CudaVersionParserTest, ValidVersion) {
EXPECT_THAT(ParseCudaVersion(12040), IsOkAndHolds(SemanticVersion{12, 4, 0}));
}
TEST(CudaVersionParserTest, LeastSignificantDigitIsIgnored) {
EXPECT_THAT(ParseCudaVersion(12041), IsOkAndHolds(SemanticVersion{12, 4, 0}));
}
TEST(CudaVersionParserTest, NegativeIntegerIsNotAValidVersion) {
EXPECT_THAT(ParseCudaVersion(-42),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_version_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_version_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0edd6ffb-8765-4628-9b68-48bad086abf4 | cpp | google/arolla | codegen_operator | arolla/codegen/expr/codegen_operator.cc | arolla/codegen/expr/codegen_operator_test.cc | #include "arolla/codegen/expr/codegen_operator.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/flags/flag.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/algorithm/control_flow_graph.h"
#include "arolla/codegen/expr/optimizations.h"
#include "arolla/codegen/expr/types.h"
#include "arolla/expr/annotation_utils.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/derived_qtype_cast_operator.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/prepare_expression.h"
#include "arolla/expr/eval/side_output.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qexpr/operator_metadata.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/fast_dynamic_downcast_final.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/map.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
ABSL_FLAG(int64_t, arolla_codegen_min_local_variables_per_lambda, 50,
R"""(
Minimum number of local variables required in order to create lambda.
There are several things to consider for tuning this parameter.
1. maximum depth of braces is limited in C++, so we shouldn't create too
deep structure.
2. C++ compiler can be not good in optimizing too many lambda functions.
3. On other hand smaller number can eliminate stack usage more.
4. It is not clear whenever compiler can successfully reuse stack memory
for several variables with the same type.
)""");
ABSL_FLAG(int64_t, arolla_codegen_max_allowed_inline_depth, 50,
R"""(
Maximim depth in inlining function calls that used only once.
There are several things to consider for tuning this parameter.
1. Inlining may help compiler to optimize better and take advantage of
temporary variables, save stack pressure.
2. Inlining making code slightly more readable.
3. maximum depth of braces is limited in C++, so we shouldn't create too
deep structure.
)""");
namespace arolla::codegen {
namespace codegen_impl {
bool IsInlinableLiteralType(const QType* qtype) {
auto is_primitive_type = [](const QType* type) {
return IsScalarQType(type) && type != GetQType<Text>() &&
type != GetQType<Bytes>();
};
return qtype != nullptr && is_primitive_type(DecayOptionalQType(qtype));
}
}
namespace {
using expr::DecayRegisteredOperator;
using expr::ExprNodePtr;
using expr::ExprNodeType;
using expr::ExprOperatorPtr;
using expr::ExprOperatorSignature;
using expr::HasBackendExprOperatorTag;
using expr::UnnamedExprOperator;
using expr::eval_internal::InternalRootOperator;
using NodeId = AcyclicCFG::NodeId;
class InternalNamedOutputExportOperator final : public UnnamedExprOperator {
public:
explicit InternalNamedOutputExportOperator(int64_t export_id)
: UnnamedExprOperator(
ExprOperatorSignature({{"x"}}),
FingerprintHasher("codegen::InternalNamedOutputExportOperator")
.Combine(export_id)
.Finish()),
export_id_(export_id) {}
absl::StatusOr<QTypePtr> GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const final {
return input_qtypes[0];
}
int64_t ExportId() const { return export_id_; }
private:
int64_t export_id_;
};
std::optional<int64_t> MaybeGetExportId(const ExprNodePtr& node) {
if (auto* export_op =
fast_dynamic_downcast_final<const InternalNamedOutputExportOperator*>(
node->op().get())) {
return export_op->ExportId();
}
return std::nullopt;
}
absl::StatusOr<std::vector<QTypePtr>> DependencyTypes(
const ExprNodePtr& node,
std::function<absl::StatusOr<QTypePtr>(const ExprNodePtr&)>
qtype_from_expr_fn) {
std::vector<QTypePtr> result;
result.reserve(node->node_deps().size());
for (const ExprNodePtr& dep : node->node_deps()) {
ASSIGN_OR_RETURN(result.emplace_back(), qtype_from_expr_fn(dep));
}
return result;
}
absl::StatusOr<std::optional<QExprOperatorMetadata>> GetOperatorMetadata(
const QExprOperatorMetadataRegistry& op_registry, const ExprNodePtr& node,
std::function<absl::StatusOr<QTypePtr>(const ExprNodePtr&)>
qtype_from_expr_fn) {
ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op()));
if (op == InternalRootOperator()) {
return std::nullopt;
}
if (expr::IsQTypeAnnotation(node)) {
return std::nullopt;
}
if (auto export_id_opt = MaybeGetExportId(node); export_id_opt.has_value()) {
return std::nullopt;
}
if (typeid(*op) == typeid(expr::DerivedQTypeUpcastOperator) ||
typeid(*op) == typeid(expr::DerivedQTypeDowncastOperator)) {
return std::nullopt;
}
if (!HasBackendExprOperatorTag(op)) {
return absl::InvalidArgumentError(absl::StrCat(
node->op()->display_name(), " is not a backend ExprOperator"));
}
ASSIGN_OR_RETURN(auto dependency_types,
DependencyTypes(node, qtype_from_expr_fn));
ASSIGN_OR_RETURN(
auto metadata,
op_registry.LookupOperatorMetadata(op->display_name(), dependency_types),
_ << "while processing: " << expr::GetDebugSnippet(node));
return {metadata};
}
absl::StatusOr<std::pair<std::unique_ptr<AcyclicCFG>, std::vector<ExprNodePtr>>>
BuildEvalCfg(const ExprNodePtr& entry_node) {
auto nodes_order = expr::VisitorOrder(entry_node);
std::reverse(nodes_order.begin(), nodes_order.end());
absl::flat_hash_map<Fingerprint, NodeId> node_id;
node_id.reserve(nodes_order.size());
for (const auto& node : nodes_order) {
NodeId id = node_id.size();
node_id[node->fingerprint()] = id;
}
std::vector<std::vector<NodeId>> deps;
deps.reserve(nodes_order.size());
for (const auto& node : nodes_order) {
std::vector<NodeId> cur_deps;
cur_deps.reserve(node->node_deps().size());
for (const auto& dep : node->node_deps()) {
cur_deps.push_back(node_id[dep->fingerprint()]);
}
deps.push_back(std::move(cur_deps));
}
ASSIGN_OR_RETURN(auto graph, AcyclicCFG::Create(std::move(deps)));
return {std::pair{std::move(graph), std::move(nodes_order)}};
}
std::vector<bool> FindInlinableNodes(const AcyclicCFG& graph) {
std::vector<bool> inlinable(graph.num_nodes(), false);
std::vector<size_t> inline_depth(graph.num_nodes(), 0);
for (NodeId node_id = graph.num_nodes() - 1; node_id > 0; --node_id) {
bool used_once = graph.reverse_deps(node_id).size() == 1;
if (used_once) {
size_t max_inline_depth = 0;
for (NodeId dep : graph.deps(node_id)) {
max_inline_depth = std::max(max_inline_depth, inline_depth[dep]);
}
if (max_inline_depth <
absl::GetFlag(FLAGS_arolla_codegen_max_allowed_inline_depth)) {
inlinable[node_id] = true;
inline_depth[node_id] = max_inline_depth + 1;
}
}
}
inlinable[0] = true;
return inlinable;
}
class Codegen {
public:
Codegen(const QExprOperatorMetadataRegistry& op_registry,
const AcyclicCFG& graph, std::vector<ExprNodePtr> exprs,
absl::flat_hash_map<Fingerprint, QTypePtr> node_qtypes,
std::vector<std::string> side_output_names,
bool inputs_are_cheap_to_read)
: op_registry_(op_registry),
graph_(graph),
dominator_tree_(graph_),
exprs_(std::move(exprs)),
node_qtypes_(std::move(node_qtypes)),
side_output_names_(std::move(side_output_names)),
inputs_are_cheap_to_read_(inputs_are_cheap_to_read) {}
absl::StatusOr<OperatorCodegenData> Process() {
std::vector<bool> inlinable = FindInlinableNodes(graph_);
OperatorCodegenData data;
data.side_outputs.reserve(side_output_names_.size());
for (const auto& name : side_output_names_) {
data.side_outputs.emplace_back(name, -1);
}
for (NodeId node_id = graph_.num_nodes() - 1; node_id >= 0; --node_id) {
RETURN_IF_ERROR(ProcessSingleNode(node_id, inlinable[node_id], &data));
}
for (const auto& [name, assignment_id] : data.side_outputs) {
if (assignment_id == -1) {
return absl::InternalError(absl::StrFormat(
"named output `%s` is lost in transformations", name));
}
}
ASSIGN_OR_RETURN(data.functions, SplitOnFunctions(data));
FilterArgumentsAsFunction(data);
LambdifyFunctions(data);
ComputeLocalExprStatus(data);
data.output_id = ToAssignmentId(0);
return data;
}
private:
absl::StatusOr<QTypePtr> QTypeFromExpr(const ExprNodePtr& node) const {
DCHECK(node_qtypes_.contains(node->fingerprint()));
auto qtype = node_qtypes_.at(node->fingerprint());
if (qtype == nullptr) {
return absl::FailedPreconditionError(absl::StrFormat(
"unable to deduce QType for %s", expr::ToDebugString(node)));
}
return qtype;
}
LValueId ToAssignmentId(NodeId node_id) const {
return graph_.num_nodes() - node_id - 1;
}
NodeId ToNodeId(LValueId assignment_id) const {
return graph_.num_nodes() - assignment_id - 1;
}
bool IsLiteralNode(NodeId node_id) const {
return exprs_[node_id]->is_literal();
}
bool IsLeafNode(NodeId node_id) const { return exprs_[node_id]->is_leaf(); }
absl::StatusOr<std::vector<bool>> FindSeparableNodes() const {
int64_t n = graph_.num_nodes();
absl::flat_hash_set<NodeId> global_nodes;
for (int64_t node_id = 0; node_id != n; ++node_id) {
if (IsLiteralNode(node_id) ||
(inputs_are_cheap_to_read_ && IsLeafNode(node_id))) {
global_nodes.insert(node_id);
}
}
ASSIGN_OR_RETURN(auto externalized_graph,
ExternalizeNodes(graph_, dominator_tree_, global_nodes));
auto is_separable = FindVerticesWithEmptyDominanceFrontier(
*externalized_graph, dominator_tree_);
for (NodeId node_id = 0; node_id != n; ++node_id) {
if (IsLiteralNode(node_id) || IsLeafNode(node_id)) {
is_separable[node_id] = false;
}
}
return is_separable;
}
absl::StatusOr<std::vector<Function>> SplitOnFunctions(
OperatorCodegenData& data) const {
int64_t n = graph_.num_nodes();
ASSIGN_OR_RETURN(auto is_separable, FindSeparableNodes());
CHECK(is_separable[0] || IsLiteralNode(0) || IsLeafNode(0))
<< "InternalError: entry node should be always separable";
std::vector<Function> functions;
constexpr int64_t kUndefined = -1;
std::vector<int64_t> function_id(n, kUndefined);
for (NodeId node_id = n - 1; node_id >= 0; --node_id) {
if (is_separable[node_id]) {
function_id[node_id] = functions.size();
Function new_fn;
new_fn.output_id = ToAssignmentId(node_id);
new_fn.is_result_status_or = data.assignments[new_fn.output_id]
.lvalue()
.is_entire_expr_status_or;
functions.push_back(std::move(new_fn));
}
}
CHECK((function_id[0] != kUndefined) || IsLiteralNode(0) || IsLeafNode(0))
<< "InternalError: entry node should be assigned to the function";
for (NodeId node_id = 0; node_id != n; ++node_id) {
for (NodeId dep : graph_.deps(node_id)) {
if (function_id[dep] == kUndefined) {
function_id[dep] = function_id[node_id];
}
}
}
for (NodeId node_id = n - 1; node_id >= 0; --node_id) {
LValueId assignment_id = ToAssignmentId(node_id);
int64_t cur_function_id = function_id[node_id];
if (IsLiteralNode(node_id)) {
continue;
}
if ((inputs_are_cheap_to_read_ || node_id == 0) && IsLeafNode(node_id)) {
continue;
}
if (!is_separable[node_id]) {
functions[cur_function_id].assignment_ids.push_back(assignment_id);
for (NodeId rdep : graph_.reverse_deps(node_id)) {
CHECK_EQ(function_id[rdep], cur_function_id)
<< "InternalError: only separable nodes can be used by other "
"functions";
}
continue;
}
int64_t rdep_function = kUndefined;
for (NodeId rdep : graph_.reverse_deps(node_id)) {
if (function_id[rdep] != cur_function_id) {
if (rdep_function == kUndefined) {
rdep_function = function_id[rdep];
functions[rdep_function].assignment_ids.push_back(assignment_id);
} else {
CHECK_EQ(rdep_function, function_id[rdep])
<< "InternalError: non leaf function node must be used by not "
"more than one other function";
}
}
}
}
return functions;
}
void LambdifyFunctions(OperatorCodegenData& data) const {
for (Function& function : data.functions) {
LambdifyFunction(data, function);
}
}
void ComputeLocalExprStatus(OperatorCodegenData& data) const {
absl::flat_hash_map<LValueId, int64_t> id2lambda;
for (int64_t i = 0; i < data.lambdas.size(); ++i) {
id2lambda.emplace(data.lambdas[i].output_id, i);
}
absl::flat_hash_map<LValueId, int64_t> id2function;
for (int64_t i = 0; i < data.functions.size(); ++i) {
id2function.emplace(data.functions[i].output_id, i);
}
for (LValueId assignment_id = 0; assignment_id != data.assignments.size();
++assignment_id) {
auto& assignment = data.assignments[assignment_id];
bool is_local_expr_status_or =
assignment.rvalue().operator_returns_status_or;
if (id2function.contains(assignment_id)) {
is_local_expr_status_or =
data.functions[id2function[assignment_id]].is_result_status_or;
} else {
std::vector<LValueId> output_assignments =
DependencyArgs(ToNodeId(assignment_id));
for (LValueId dep_id : output_assignments) {
is_local_expr_status_or =
is_local_expr_status_or ||
(data.assignments[dep_id].is_inlinable() &&
data.assignments[dep_id].lvalue().is_local_expr_status_or);
}
if (id2lambda.contains(assignment_id)) {
Function& lambda = data.lambdas[id2lambda[assignment_id]];
for (LValueId assignment_id : lambda.assignment_ids) {
is_local_expr_status_or |= data.assignments[assignment_id]
.lvalue()
.is_local_expr_status_or;
}
lambda.is_result_status_or = is_local_expr_status_or;
}
}
assignment.lvalue().is_local_expr_status_or = is_local_expr_status_or;
}
}
void FilterArgumentsAsFunction(OperatorCodegenData& data) const {
for (Assignment& assignment : data.assignments) {
RValue& rvalue = assignment.rvalue();
if (rvalue.kind != RValueKind::kFunctionCall &&
rvalue.kind != RValueKind::kFunctionWithContextCall) {
continue;
}
if (rvalue.argument_as_function_offsets.empty()) {
continue;
}
auto new_end = std::remove_if(
rvalue.argument_as_function_offsets.begin(),
rvalue.argument_as_function_offsets.end(), [&](int offset) {
const Assignment& cur_assignment =
data.assignments[rvalue.argument_ids[offset]];
return !cur_assignment.is_inlinable() ||
cur_assignment.lvalue().kind == LValueKind::kLiteral;
});
rvalue.argument_as_function_offsets.erase(
new_end, rvalue.argument_as_function_offsets.end());
}
}
bool IsInlinableAsFunctionArgument(LValueId assignment_id,
const OperatorCodegenData& data) const {
auto& cur_assignment = data.assignments[assignment_id];
if (cur_assignment.lvalue().kind == LValueKind::kLiteral) {
return false;
}
if (!cur_assignment.is_inlinable()) {
return false;
}
NodeId dominator_node_id = dominator_tree_.parent(ToNodeId(assignment_id));
LValueId dominator_assignment_id = ToAssignmentId(dominator_node_id);
auto& parent_assignment = data.assignments[dominator_assignment_id];
const std::vector<LValueId>& parent_arg_ids =
parent_assignment.rvalue().argument_ids;
int arg_in_parent_id =
std::find(parent_arg_ids.begin(), parent_arg_ids.end(), assignment_id) -
parent_arg_ids.begin();
const std::vector<int>& argument_as_function_offsets =
parent_assignment.rvalue().argument_as_function_offsets;
return std::count(argument_as_function_offsets.begin(),
argument_as_function_offsets.end(),
arg_in_parent_id) != 0;
}
void LambdifyFunction(OperatorCodegenData& data, Function& function) const {
absl::flat_hash_map<int64_t, std::vector<LValueId>>
lambda_local_assignments;
for (LValueId assignment_id : function.assignment_ids) {
auto& cur_assignment = data.assignments[assignment_id];
NodeId node_id = ToNodeId(assignment_id);
NodeId dominator_node_id = dominator_tree_.parent(node_id);
LValueId dominator_assignment_id = ToAssignmentId(dominator_node_id);
auto cur_lambda_assignments =
std::move(lambda_local_assignments[assignment_id]);
auto& dominator_assignments =
lambda_local_assignments[dominator_assignment_id];
bool enough_assignments_for_lambda =
cur_lambda_assignments.size() >
absl::GetFlag(FLAGS_arolla_codegen_min_local_variables_per_lambda);
bool as_function_argument =
IsInlinableAsFunctionArgument(assignment_id, data);
if (enough_assignments_for_lambda ||
(as_function_argument && !cur_lambda_assignments.empty())) {
data.lambdas.push_back(
Function{.assignment_ids = std::move(cur_lambda_assignments),
.output_id = assignment_id,
.is_result_status_or = false});
cur_assignment.set_inlinable(as_function_argument);
} else {
dominator_assignments.insert(dominator_assignments.end(),
cur_lambda_assignments.begin(),
cur_lambda_assignments.end());
}
if (!cur_assignment.is_inlinable()) {
dominator_assignments.push_back(assignment_id);
}
}
function.assignment_ids =
std::move(lambda_local_assignments[function.output_id]);
}
std::vector<LValueId> DependencyArgs(NodeId node_id) const {
const auto deps = graph_.deps(node_id);
std::vector<LValueId> deps_vector = std::vector(deps.begin(), deps.end());
for (NodeId& id : deps_vector) {
id = ToAssignmentId(id);
}
return deps_vector;
}
auto DependencyTypes(NodeId node_id,
const OperatorCodegenData& out_data) const {
std::vector<QTypePtr> result;
result.reserve(graph_.deps(node_id).size());
for (NodeId dep : DependencyArgs(node_id)) {
result.push_back(out_data.assignments[dep].lvalue().qtype);
}
return result;
}
absl::Status ProcessInternalRootOperator(NodeId node_id, bool inlinable,
OperatorCodegenData* out_data) {
if (node_id != 0) {
return absl::InternalError(
"InternalRootOperator can be only in the first node");
}
const auto& node = exprs_[node_id];
ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node));
std::string type_name = CppTypeName(qtype).value_or("auto");
std::vector<LValueId> output_assignments = DependencyArgs(node_id);
bool is_entire_expr_status_or = false;
for (LValueId dep_id : output_assignments) {
is_entire_expr_status_or =
is_entire_expr_status_or ||
out_data->assignments[dep_id].lvalue().is_entire_expr_status_or;
}
out_data->assignments.push_back(
Assignment{LValue{.type_name = type_name,
.is_entire_expr_status_or = is_entire_expr_status_or,
.qtype = qtype,
.kind = LValueKind::kLocal},
RValue{.kind = RValueKind::kFirst,
.operator_returns_status_or = false,
.code = "",
.argument_ids = output_assignments},
inlinable});
if (output_assignments.size() < 2) {
return absl::InternalError(
absl::StrFormat("InternalRootOperator must have at least 2 arguments"
", found: %d",
output_assignments.size()));
}
return absl::OkStatus();
}
absl::Status ProcessInternalNamedOutputExportOperator(
NodeId node_id, int64_t export_id, bool inlinable,
OperatorCodegenData* out_data) {
const auto& node = exprs_[node_id];
ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node));
std::string type_name = CppTypeName(qtype).value_or("auto");
std::vector<LValueId> output_assignments = DependencyArgs(node_id);
if (output_assignments.size() != 1) {
return absl::InternalError(
"InternalNamedOutputExportOperator must have 1 argument");
}
out_data->assignments.push_back(
Assignment{LValue{.type_name = type_name,
.is_entire_expr_status_or =
out_data->assignments[output_assignments[0]]
.lvalue()
.is_entire_expr_status_or,
.qtype = qtype,
.kind = LValueKind::kLocal},
RValue{.kind = RValueKind::kOutput,
.operator_returns_status_or = false,
.code = std::to_string(export_id),
.argument_ids = output_assignments},
inlinable});
if (export_id < 0 || export_id >= side_output_names_.size()) {
return absl::InternalError(
absl::StrFormat("export_id is out of range: %d", export_id));
}
out_data->side_outputs[export_id].second = ToAssignmentId(node_id);
return absl::OkStatus();
}
absl::Status ProcessDerivedQTypeCastOperator(NodeId node_id, bool inlinable,
OperatorCodegenData* out_data) {
const auto& node = exprs_[node_id];
ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node));
qtype = DecayDerivedQType(qtype);
std::string type_name = CppTypeName(qtype).value_or("auto");
std::vector<LValueId> output_assignments = DependencyArgs(node_id);
if (output_assignments.size() != 1) {
return absl::InternalError(
"DerivedQTypeCastOperator must have 1 argument");
}
out_data->assignments.push_back(
Assignment{LValue{.type_name = type_name,
.is_entire_expr_status_or =
out_data->assignments[output_assignments[0]]
.lvalue()
.is_entire_expr_status_or,
.qtype = qtype,
.kind = LValueKind::kLocal},
RValue{.kind = RValueKind::kFirst,
.operator_returns_status_or = false,
.code = "",
.argument_ids = output_assignments},
inlinable});
return absl::OkStatus();
}
absl::Status ProcessSingleNode(NodeId node_id, bool inlinable,
OperatorCodegenData* out_data) {
const auto& node = exprs_[node_id];
ASSIGN_OR_RETURN(QTypePtr qtype, QTypeFromExpr(node));
std::string type_name = CppTypeName(qtype).value_or("auto");
switch (node->type()) {
case ExprNodeType::kLeaf: {
if (type_name == "auto") {
return absl::FailedPreconditionError(
absl::StrFormat("CppTypeName must be implemented for all inputs. "
"Leaf: %s; QType: %s",
node->leaf_key(), qtype->name()));
}
out_data->inputs[node->leaf_key()] = ToAssignmentId(node_id);
out_data->assignments.push_back(
Assignment{LValue{.type_name = type_name,
.is_entire_expr_status_or = false,
.qtype = qtype,
.kind = LValueKind::kInput},
RValue::CreateInput(),
inputs_are_cheap_to_read_ || inlinable});
return absl::OkStatus();
}
case ExprNodeType::kPlaceholder: {
return absl::FailedPreconditionError(
absl::StrFormat("operator generation doesn't support placeholders: "
"P.%s found",
node->placeholder_key()));
}
case ExprNodeType::kLiteral: {
auto value = node->qvalue();
DCHECK(value);
ASSIGN_OR_RETURN(std::string value_repr, CppLiteralRepr(*value));
out_data->assignments.push_back(
Assignment{LValue{.type_name = type_name,
.is_entire_expr_status_or = false,
.qtype = qtype,
.kind = LValueKind::kLiteral},
RValue::CreateLiteral(value_repr),
codegen_impl::IsInlinableLiteralType(value->GetType())});
return absl::OkStatus();
}
case ExprNodeType::kOperator: {
ASSIGN_OR_RETURN(auto op, DecayRegisteredOperator(node->op()));
if (op == InternalRootOperator()) {
return ProcessInternalRootOperator(node_id, inlinable, out_data);
}
if (auto export_id_opt = MaybeGetExportId(node);
export_id_opt.has_value()) {
return ProcessInternalNamedOutputExportOperator(
node_id, *export_id_opt, inlinable, out_data);
}
if (typeid(*op) == typeid(expr::DerivedQTypeUpcastOperator) ||
typeid(*op) == typeid(expr::DerivedQTypeDowncastOperator)) {
return ProcessDerivedQTypeCastOperator(node_id, inlinable, out_data);
}
if (!HasBackendExprOperatorTag(op)) {
return absl::InvalidArgumentError(absl::StrCat(
node->op()->display_name(), " is not a backend ExprOperator"));
}
std::string type_name = CppTypeName(qtype).value_or("auto");
ASSIGN_OR_RETURN(std::optional<QExprOperatorMetadata> op_metadata,
GetOperatorMetadata(op_registry_, node,
[&](const ExprNodePtr& node) {
return this->QTypeFromExpr(node);
}));
if (!op_metadata.has_value()) {
return absl::InternalError(absl::StrCat(node->op()->display_name(),
" metadata is not found"));
}
const BuildDetails& build_details = op_metadata->build_details;
out_data->headers.insert(build_details.hdrs.begin(),
build_details.hdrs.end());
out_data->deps.insert(build_details.deps.begin(),
build_details.deps.end());
const std::optional<OpClassDetails>& op_class_details =
build_details.op_class_details;
if (!op_class_details.has_value()) {
return absl::FailedPreconditionError(absl::StrFormat(
"codegen doesn't work with operator without OpClassDetails: %s",
op->display_name()));
}
std::vector<LValueId> dependency_args = DependencyArgs(node_id);
bool is_entire_expr_status_or = op_class_details->returns_status_or;
for (LValueId dep_id : dependency_args) {
const Assignment& assignment = out_data->assignments[dep_id];
is_entire_expr_status_or =
is_entire_expr_status_or ||
assignment.lvalue().is_entire_expr_status_or;
}
std::string op_class = build_details.op_class;
RValueKind function_kind = op_class_details->accepts_context
? RValueKind::kFunctionWithContextCall
: RValueKind::kFunctionCall;
out_data->assignments.push_back(Assignment{
LValue{.type_name = type_name,
.is_entire_expr_status_or = is_entire_expr_status_or,
.qtype = qtype,
.kind = LValueKind::kLocal},
RValue{.kind = function_kind,
.operator_returns_status_or =
op_class_details->returns_status_or,
.code = op_class + "{}",
.argument_ids = dependency_args,
.argument_as_function_offsets =
op_class_details->arg_as_function_ids,
.comment = node->op() != nullptr
? std::string(node->op()->display_name())
: ""},
inlinable});
return absl::OkStatus();
}
}
return absl::InternalError(absl::StrFormat("unexpected AstNodeType: %d",
static_cast<int>(node->type())));
}
private:
const QExprOperatorMetadataRegistry& op_registry_;
const AcyclicCFG& graph_;
DominatorTree dominator_tree_;
std::vector<ExprNodePtr> exprs_;
absl::flat_hash_map<Fingerprint, QTypePtr> node_qtypes_;
std::vector<std::string> side_output_names_;
bool inputs_are_cheap_to_read_;
};
absl::StatusOr<ExprNodePtr> AttachExportOperators(
const ExprNodePtr& expr,
const absl::flat_hash_map<Fingerprint, std::vector<int64_t>>&
export_ids_map) {
return PostOrderTraverse(
expr,
[&](const ExprNodePtr& node, absl::Span<const ExprNodePtr* const> visits)
-> absl::StatusOr<ExprNodePtr> {
ASSIGN_OR_RETURN(
auto new_node,
WithNewDependencies(node, DereferenceVisitPointers(visits)));
if (auto it = export_ids_map.find(node->fingerprint());
it != export_ids_map.end()) {
std::vector<int64_t> export_ids = it->second;
std::sort(export_ids.begin(), export_ids.end());
for (int64_t export_id : export_ids) {
ASSIGN_OR_RETURN(
new_node,
expr::CallOp(
std::make_shared<InternalNamedOutputExportOperator>(
export_id),
{new_node}));
}
}
return new_node;
});
}
absl::StatusOr<absl::flat_hash_set<int64_t>> FindUnconditionalExportIds(
const QExprOperatorMetadataRegistry& op_registry, const ExprNodePtr& expr) {
absl::flat_hash_set<int64_t> res;
std::vector<ExprNodePtr> visit_order = expr::VisitorOrder(expr);
if (visit_order.empty()) {
return absl::InternalError("visitor order is empty");
}
absl::flat_hash_set<Fingerprint> unconditional_nodes;
unconditional_nodes.insert(visit_order.back()->fingerprint());
for (int64_t node_id = static_cast<int64_t>(visit_order.size()) - 1;
node_id > 0; --node_id) {
const auto& node = visit_order[node_id];
if (!unconditional_nodes.contains(node->fingerprint()) || !node->is_op()) {
continue;
}
ASSIGN_OR_RETURN(
std::optional<QExprOperatorMetadata> op_metadata,
GetOperatorMetadata(op_registry, node,
[](const auto& node) { return node->qtype(); }));
std::vector<int> arg_as_function_ids;
if (op_metadata.has_value()) {
const BuildDetails& build_details = op_metadata->build_details;
const std::optional<OpClassDetails>& op_class_details =
build_details.op_class_details;
if (!op_class_details.has_value()) {
return absl::FailedPreconditionError(absl::StrFormat(
"codegen doesn't work with operator without OpClassDetails: %s",
node->op()->display_name()));
}
arg_as_function_ids = op_class_details->arg_as_function_ids;
}
for (int64_t arg_id = 0; arg_id != node->node_deps().size(); ++arg_id) {
if (std::count(arg_as_function_ids.begin(), arg_as_function_ids.end(),
arg_id) == 0) {
unconditional_nodes.insert(node->node_deps()[arg_id]->fingerprint());
}
}
}
for (const auto& node : visit_order) {
if (!unconditional_nodes.contains(node->fingerprint())) {
continue;
}
if (auto export_id_opt = MaybeGetExportId(node);
export_id_opt.has_value()) {
res.emplace(*export_id_opt);
}
}
return res;
}
absl::StatusOr<ExprNodePtr> AttachExportOperators(
const QExprOperatorMetadataRegistry& op_registry, ExprNodePtr expr) {
if (ExprOperatorPtr op = expr->op(); op != InternalRootOperator()) {
return absl::InternalError(
"expected InternalRootOperator in AttachExportOperators");
}
if (expr->node_deps().empty()) {
return absl::InternalError(
"empty argument list for InternalRootOperator in "
"AttachExportOperators");
}
auto main_output_expr = expr->node_deps()[0];
auto named_output_exprs =
absl::MakeConstSpan(expr->node_deps()).subspan(1);
absl::flat_hash_map<Fingerprint, std::vector<int64_t>> export_ids;
for (int64_t export_id = 0; export_id != named_output_exprs.size();
++export_id) {
export_ids[named_output_exprs[export_id]->fingerprint()].emplace_back(
export_id);
}
ASSIGN_OR_RETURN(expr, AttachExportOperators(expr, export_ids));
main_output_expr = expr->node_deps()[0];
named_output_exprs =
absl::MakeConstSpan(expr->node_deps()).subspan(1);
ASSIGN_OR_RETURN(absl::flat_hash_set<int64_t> inner_export_ids,
FindUnconditionalExportIds(op_registry, main_output_expr));
for (int64_t export_id = 0; export_id != named_output_exprs.size();
++export_id) {
if (inner_export_ids.contains(export_id)) {
continue;
}
ASSIGN_OR_RETURN(
absl::flat_hash_set<int64_t> new_export_ids,
FindUnconditionalExportIds(op_registry, named_output_exprs[export_id]));
new_export_ids.erase(export_id);
inner_export_ids.insert(new_export_ids.begin(), new_export_ids.end());
}
std::vector<ExprNodePtr> top_output_exprs = {main_output_expr};
for (int64_t export_id = 0; export_id != named_output_exprs.size();
++export_id) {
if (!inner_export_ids.contains(export_id)) {
top_output_exprs.push_back(named_output_exprs[export_id]);
}
}
if (top_output_exprs.size() == 1) {
return top_output_exprs[0];
}
return BindOp(InternalRootOperator(), top_output_exprs, {});
}
struct NodeWithSideOutputNames {
ExprNodePtr node;
std::vector<std::string> side_output_names;
};
absl::StatusOr<NodeWithSideOutputNames> Preprocess(
const QExprOperatorMetadataRegistry& op_registry, const ExprNodePtr& expr) {
ASSIGN_OR_RETURN((auto [stripped_expr, side_outputs]),
ExtractSideOutputs(expr));
ExprNodePtr new_expr = stripped_expr;
std::vector<std::string> side_output_names;
if (!side_outputs.empty()) {
side_output_names.reserve(side_outputs.size());
std::vector<ExprNodePtr> exprs = {new_expr};
exprs.reserve(side_outputs.size() + 1);
for (const auto& name : SortedMapKeys(side_outputs)) {
side_output_names.push_back(name);
exprs.push_back(side_outputs.at(name));
}
ASSIGN_OR_RETURN(new_expr, BindOp(InternalRootOperator(), exprs, {}));
}
ASSIGN_OR_RETURN(
auto optimizer,
GetOptimizer(absl::GetFlag(FLAGS_arolla_codegen_optimizer_name)));
ASSIGN_OR_RETURN(
new_expr,
expr::eval_internal::PrepareExpression(
new_expr, {},
expr::DynamicEvaluationEngineOptions{.optimizer = optimizer}));
if (!side_outputs.empty()) {
ASSIGN_OR_RETURN(new_expr, AttachExportOperators(op_registry, new_expr));
}
return NodeWithSideOutputNames{std::move(new_expr),
std::move(side_output_names)};
}
}
absl::StatusOr<std::string> LValue::QTypeConstruction() const {
return CppQTypeConstruction(qtype);
}
absl::StatusOr<OperatorCodegenData> GenerateOperatorCode(
ExprNodePtr expr, bool inputs_are_cheap_to_read) {
const QExprOperatorMetadataRegistry& op_registry =
QExprOperatorMetadataRegistry::GetInstance();
ASSIGN_OR_RETURN((auto [new_expr, side_output_names]),
Preprocess(op_registry, expr));
absl::flat_hash_map<Fingerprint, QTypePtr> node_qtypes;
ASSIGN_OR_RETURN(new_expr, expr::eval_internal::ExtractQTypesForCompilation(
new_expr, &node_qtypes));
ASSIGN_OR_RETURN(auto graph_exprs, BuildEvalCfg(new_expr));
const auto& [graph, exprs] = graph_exprs;
Codegen codegen(op_registry, *graph, exprs, node_qtypes, side_output_names,
inputs_are_cheap_to_read);
return codegen.Process();
}
} | #include "arolla/codegen/expr/codegen_operator.h"
#include <cstdint>
#include <initializer_list>
#include <set>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/lambda_expr_operator.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
namespace arolla::codegen {
namespace {
using ::arolla::expr::Leaf;
using ::arolla::expr::Literal;
using ::arolla::testing::WithExportAnnotation;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
int64_t MinUnused(std::set<int64_t> used) {
for (int64_t i = 0; i != used.size(); ++i) {
if (used.count(i) == 0) {
return i;
}
}
return used.size();
}
TEST(CodegenTest, IsInlinableLiteralTypeTest) {
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<int>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<float>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<double>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<int64_t>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<uint64_t>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<bool>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetQType<Unit>()));
EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetQType<Bytes>()));
EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetQType<Text>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<int>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<float>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<double>()));
EXPECT_TRUE(
codegen_impl::IsInlinableLiteralType(GetOptionalQType<int64_t>()));
EXPECT_TRUE(
codegen_impl::IsInlinableLiteralType(GetOptionalQType<uint64_t>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<bool>()));
EXPECT_TRUE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<Unit>()));
EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<Bytes>()));
EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetOptionalQType<Text>()));
EXPECT_FALSE(
codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<bool>()));
EXPECT_FALSE(codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<int>()));
EXPECT_FALSE(
codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<float>()));
EXPECT_FALSE(
codegen_impl::IsInlinableLiteralType(GetDenseArrayQType<double>()));
}
TEST(CodegenTest, SmokeTest) {
ASSERT_OK_AND_ASSIGN(
auto expr,
expr::CallOp("math.add",
{expr::CallOp("math.add", {WithQTypeAnnotation(
Leaf("x"), GetQType<float>()),
Literal(1.f)}),
WithQTypeAnnotation(Leaf("y"), GetQType<float>())}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, true));
EXPECT_THAT(op.headers,
ElementsAre(
"arolla/"
"qexpr/operators/math/arithmetic.h"));
EXPECT_THAT(op.deps,
ElementsAre("
"arolla/"
"qexpr/operators/math:lib"));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_TRUE(op.assignments[input_x_id].is_inlinable());
int64_t input_y_id = op.inputs["y"];
EXPECT_THAT(op.assignments[input_y_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_TRUE(op.assignments[input_x_id].is_inlinable());
EXPECT_EQ(op.assignments.size(), 3 + 2 );
int64_t literal_id = MinUnused({input_x_id, input_y_id});
ASSERT_LT(literal_id, op.assignments.size());
EXPECT_THAT(op.assignments[literal_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLiteral}));
EXPECT_THAT(op.assignments[literal_id].rvalue(),
Eq(RValue::CreateLiteral("float{1.}")));
int64_t tmp0_id = MinUnused({input_x_id, input_y_id, literal_id});
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {input_x_id, literal_id}}));
int64_t tmp1_id = 4;
EXPECT_THAT(op.assignments[tmp1_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp1_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {tmp0_id, input_y_id}}));
EXPECT_EQ(op.output_id, tmp1_id);
EXPECT_THAT(op.function_entry_points(),
UnorderedElementsAre(Pair(tmp0_id, 0), Pair(tmp1_id, 1)));
}
TEST(CodegenTest, SmokeWithNonGlobalInputsTest) {
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(
auto expr, expr::CallOp("math.add", {expr::CallOp("math.add", {x, x}),
WithQTypeAnnotation(
Leaf("y"), GetQType<float>())}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, false));
EXPECT_THAT(op.headers,
ElementsAre(
"arolla/"
"qexpr/operators/math/arithmetic.h"));
EXPECT_THAT(op.deps,
ElementsAre("
"arolla/"
"qexpr/operators/math:lib"));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_FALSE(op.assignments[input_x_id].is_inlinable());
int64_t input_y_id = op.inputs["y"];
EXPECT_THAT(op.assignments[input_y_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_TRUE(op.assignments[input_y_id].is_inlinable());
ASSERT_EQ(op.assignments.size(), 2 + 2 );
int64_t tmp0_id = 1;
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {input_x_id, input_x_id}}));
int64_t tmp1_id = 3;
EXPECT_THAT(op.assignments[tmp1_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp1_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {tmp0_id, input_y_id}}));
EXPECT_EQ(op.output_id, tmp1_id);
EXPECT_THAT(op.function_entry_points(),
UnorderedElementsAre(Pair(tmp0_id, 0), Pair(tmp1_id, 1)));
}
TEST(CodegenTest, SmokeWithStatusOrTest) {
ASSERT_OK_AND_ASSIGN(auto x,
WithQTypeAnnotation(Leaf("x"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto y,
WithQTypeAnnotation(Leaf("y"), GetQType<float>()));
ASSERT_OK_AND_ASSIGN(auto floor_div, expr::CallOp("math.floordiv", {x, y}));
ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {floor_div, y}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, true));
EXPECT_THAT(op.headers,
ElementsAre(
"arolla/"
"qexpr/operators/math/arithmetic.h"));
EXPECT_THAT(op.deps,
ElementsAre("
"arolla/"
"qexpr/operators/math:lib"));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.is_local_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
int64_t input_y_id = op.inputs["y"];
EXPECT_THAT(op.assignments[input_y_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.is_local_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_EQ(op.assignments.size(), 2 + 2 );
int64_t tmp0_id = 2;
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = true,
.is_local_expr_status_or = true,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = true,
.code = "::arolla::FloorDivOp{}",
.argument_ids = {input_x_id, input_y_id}}));
int64_t tmp1_id = 3;
ASSERT_LT(tmp1_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp1_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = true,
.is_local_expr_status_or = true,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp1_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {tmp0_id, input_y_id}}));
EXPECT_EQ(op.output_id, tmp1_id);
}
TEST(CodegenTest, SmokeWithContextTest) {
ASSERT_OK_AND_ASSIGN(
auto x, WithQTypeAnnotation(Leaf("x"), GetDenseArrayQType<float>()));
ASSERT_OK_AND_ASSIGN(
auto y, WithQTypeAnnotation(Leaf("y"), GetDenseArrayQType<float>()));
ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {x, y}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, true));
EXPECT_THAT(op.headers,
ElementsAre(
"arolla/"
"dense_array/qtype/types.h",
"arolla/"
"qexpr/operators/dense_array/lifter.h",
"arolla/"
"qexpr/operators/math/arithmetic.h"));
EXPECT_THAT(op.deps,
ElementsAre(
"
"arolla/"
"dense_array/qtype",
"
"arolla/"
"qexpr/operators/dense_array:lib",
"
"arolla/"
"qexpr/operators/math:lib"));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "::arolla::DenseArray<float>",
.is_entire_expr_status_or = false,
.is_local_expr_status_or = false,
.qtype = GetDenseArrayQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
int64_t input_y_id = op.inputs["y"];
EXPECT_THAT(op.assignments[input_y_id].lvalue(),
Eq(LValue{.type_name = "::arolla::DenseArray<float>",
.is_entire_expr_status_or = false,
.is_local_expr_status_or = false,
.qtype = GetDenseArrayQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_EQ(op.assignments.size(), 1 + 2 );
int64_t tmp0_id = 2;
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "::arolla::DenseArray<float>",
.is_entire_expr_status_or = true,
.is_local_expr_status_or = true,
.qtype = GetDenseArrayQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionWithContextCall,
.operator_returns_status_or = true,
.code = "::arolla::DenseArrayLifter<::arolla::AddOp, "
"::arolla::meta::type_list<float, float>, "
"true>{}",
.argument_ids = {input_x_id, input_y_id}}));
EXPECT_EQ(op.output_id, tmp0_id);
}
TEST(CodegenTest, SmokeTestWithExport) {
ASSERT_OK_AND_ASSIGN(
auto expr,
expr::CallOp(
"math.add",
{WithExportAnnotation(
expr::CallOp("math.add",
{WithQTypeAnnotation(Leaf("x"), GetQType<float>()),
Literal(1.f)}),
"output"),
WithQTypeAnnotation(Leaf("y"), GetQType<float>())}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, true));
EXPECT_THAT(op.headers,
ElementsAre(
"arolla/"
"qexpr/operators/math/arithmetic.h"));
EXPECT_THAT(op.deps,
ElementsAre("
"arolla/"
"qexpr/operators/math:lib"));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
int64_t input_y_id = op.inputs["y"];
EXPECT_THAT(op.assignments[input_y_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_EQ(op.assignments.size(), 4 + 2 );
int64_t literal_id = MinUnused({input_x_id, input_y_id});
ASSERT_LT(literal_id, op.assignments.size());
EXPECT_THAT(op.assignments[literal_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLiteral}));
EXPECT_THAT(op.assignments[literal_id].rvalue(),
Eq(RValue::CreateLiteral("float{1.}")));
int64_t tmp0_id = MinUnused({input_x_id, input_y_id, literal_id});
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable())
<< "used for output, but export is inside of the expression";
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {input_x_id, literal_id}}));
int64_t tmp1_id =
MinUnused({input_x_id, input_y_id, literal_id, tmp0_id});
ASSERT_LT(tmp1_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp1_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp1_id].rvalue(),
Eq(RValue{.kind = RValueKind::kOutput,
.operator_returns_status_or = false,
.code = "0",
.argument_ids = {tmp0_id}}));
int64_t tmp2_id = 5;
EXPECT_THAT(op.assignments[tmp2_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp2_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {tmp1_id, input_y_id}}));
EXPECT_EQ(op.output_id, tmp2_id);
EXPECT_THAT(op.side_outputs, ElementsAre(Pair("output", tmp1_id)));
}
TEST(CodegenTest, SmokeTestWithDerivedQTypeDowncast) {
ASSERT_OK_AND_ASSIGN(
auto expr,
expr::CallOp("derived_qtype.downcast",
{Literal(GetWeakFloatQType()),
WithQTypeAnnotation(Leaf("x"), GetQType<double>())}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, true));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "double",
.is_entire_expr_status_or = false,
.qtype = GetQType<double>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_EQ(op.assignments.size(), 1 + 1 );
int64_t tmp0_id = MinUnused({input_x_id});
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable())
<< "used for output, but export is inside of the expression";
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "double",
.is_entire_expr_status_or = false,
.qtype = GetQType<double>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFirst,
.operator_returns_status_or = false,
.code = "",
.argument_ids = {input_x_id}}));
EXPECT_EQ(op.output_id, tmp0_id);
}
TEST(CodegenTest, SmokeTestWithExportUnusedForMainOutput) {
ASSERT_OK_AND_ASSIGN(
auto get_first_op,
expr::MakeLambdaOperator(expr::ExprOperatorSignature({{"x"}, {"y"}}),
expr::Placeholder("x")));
ASSERT_OK_AND_ASSIGN(
auto expr,
expr::CallOp(
get_first_op,
{WithExportAnnotation(
WithQTypeAnnotation(Leaf("y"), GetQType<float>()),
"named_main_output"),
WithExportAnnotation(
expr::CallOp("math.add",
{WithQTypeAnnotation(Leaf("x"), GetQType<float>()),
Literal(1.f)}),
"output")}));
ASSERT_OK_AND_ASSIGN(
OperatorCodegenData op,
GenerateOperatorCode(expr, true));
EXPECT_THAT(op.headers,
ElementsAre(
"arolla/"
"qexpr/operators/math/arithmetic.h"));
EXPECT_THAT(op.deps,
ElementsAre("
"arolla/"
"qexpr/operators/math:lib"));
EXPECT_THAT(op.inputs, ElementsAre(Pair("x", _), Pair("y", _)));
int64_t input_x_id = op.inputs["x"];
EXPECT_THAT(op.assignments[input_x_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_x_id].rvalue(), Eq(RValue::CreateInput()));
int64_t input_y_id = op.inputs["y"];
EXPECT_THAT(op.assignments[input_y_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kInput}));
EXPECT_THAT(op.assignments[input_y_id].rvalue(), Eq(RValue::CreateInput()));
EXPECT_EQ(op.assignments.size(), 5 + 2 );
int64_t tmp0_id = MinUnused({input_x_id, input_y_id});
ASSERT_LT(tmp0_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp0_id].is_inlinable())
<< "used for output, but export is inside of the expression";
EXPECT_THAT(op.assignments[tmp0_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp0_id].rvalue(),
Eq(RValue{.kind = RValueKind::kOutput,
.operator_returns_status_or = false,
.code = "0",
.argument_ids = {input_y_id}}));
int64_t literal_id = MinUnused({input_x_id, input_y_id, tmp0_id});
ASSERT_LT(literal_id, op.assignments.size());
EXPECT_THAT(op.assignments[literal_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLiteral}));
EXPECT_THAT(op.assignments[literal_id].rvalue(),
Eq(RValue::CreateLiteral("float{1.}")));
int64_t tmp1_id = MinUnused({input_x_id, input_y_id, literal_id, tmp0_id});
ASSERT_LT(tmp1_id, op.assignments.size());
EXPECT_TRUE(op.assignments[tmp1_id].is_inlinable());
EXPECT_THAT(op.assignments[tmp1_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp1_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFunctionCall,
.operator_returns_status_or = false,
.code = "::arolla::AddOp{}",
.argument_ids = {input_x_id, literal_id}}));
int64_t tmp2_id = 5;
EXPECT_THAT(op.assignments[tmp2_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp2_id].rvalue(),
Eq(RValue{.kind = RValueKind::kOutput,
.operator_returns_status_or = false,
.code = "1",
.argument_ids = {tmp1_id}}));
int64_t tmp3_id = 6;
EXPECT_THAT(op.assignments[tmp3_id].lvalue(),
Eq(LValue{.type_name = "float",
.is_entire_expr_status_or = false,
.qtype = GetQType<float>(),
.kind = LValueKind::kLocal}));
EXPECT_THAT(op.assignments[tmp3_id].rvalue(),
Eq(RValue{.kind = RValueKind::kFirst,
.operator_returns_status_or = false,
.code = "",
.argument_ids = {tmp0_id, tmp2_id}}));
EXPECT_EQ(op.output_id, tmp3_id);
EXPECT_THAT(op.side_outputs, ElementsAre(Pair("named_main_output", tmp0_id),
Pair("output", tmp2_id)));
}
// Builds two long dependent chains of math ops and checks how codegen
// distributes the resulting assignments between functions and lambdas.
TEST(CodegenTest, LambdaAndFunctionSinityTest) {
  auto leaf_x = WithQTypeAnnotation(Leaf("x"), GetQType<float>());
  auto leaf_y = WithQTypeAnnotation(Leaf("y"), GetQType<float>());
  auto x = expr::CallOp("math.add", {leaf_x, leaf_y});
  auto y = expr::CallOp("math.subtract", {leaf_x, leaf_y});
  auto a = expr::CallOp("math.add", {x, y});
  auto b = expr::CallOp("math.subtract", {x, y});
  constexpr int64_t kChainLength = 500;
  // Grow two independent chains; every new node depends on the previous
  // two values of its chain, so nothing can be folded away.
  for (int64_t step = 0; step < kChainLength; ++step) {
    auto next_a = expr::CallOp("math.mod", {a, x});
    x = a;
    a = next_a;
    auto next_b = expr::CallOp("math.mod", {b, y});
    y = b;
    b = next_b;
  }
  ASSERT_OK_AND_ASSIGN(auto expr, expr::CallOp("math.add", {a, b}));
  ASSERT_OK_AND_ASSIGN(OperatorCodegenData op,
                       GenerateOperatorCode(expr, true));
  EXPECT_THAT(op.functions.size(), Eq(3));
  // The first two functions carry no assignments of their own.
  for (int fn_index : {0, 1}) {
    EXPECT_THAT(op.functions[fn_index].assignment_ids, IsEmpty()) << fn_index;
  }
  EXPECT_THAT(op.functions[2].assignment_ids.size(), Eq(4));
  // Each chain becomes one lambda holding almost the whole chain.
  EXPECT_THAT(op.lambdas.size(), Eq(2));
  EXPECT_THAT(op.lambdas[0].assignment_ids.size(), Eq(kChainLength - 1));
  EXPECT_THAT(op.lambdas[1].assignment_ids.size(), Eq(kChainLength - 1));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/expr/codegen_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/codegen/expr/codegen_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9850da70-eed9-4092-9462-9cdcc5013f48 | cpp | google/quiche | quic_time_accumulator | quiche/quic/core/quic_time_accumulator.h | quiche/quic/core/quic_time_accumulator_test.cc | #ifndef QUICHE_QUIC_CORE_QUIC_TIME_ACCUMULATOR_H_
#define QUICHE_QUIC_CORE_QUIC_TIME_ACCUMULATOR_H_
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/platform/api/quic_export.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
// Accumulates wall-clock time over multiple Start()/Stop() intervals.
// While an interval is open, the running total can still be observed via
// GetTotalElapsedTime(now) without closing it.
class QUICHE_EXPORT QuicTimeAccumulator {
  // QuicTime::Infinite() doubles as the "not running" marker, so no extra
  // boolean member is needed.
  static constexpr QuicTime NotRunningSentinel() {
    return QuicTime::Infinite();
  }

 public:
  // True iff Start() has been called without a matching Stop().
  bool IsRunning() const { return running_since_ != NotRunningSentinel(); }

  // Opens a new interval at |now|. Must not already be running.
  void Start(QuicTime now) {
    QUICHE_DCHECK(!IsRunning());
    running_since_ = now;
    QUICHE_DCHECK(IsRunning());
  }

  // Closes the current interval at |now|. Intervals where the clock moved
  // backwards (now <= start) contribute nothing.
  void Stop(QuicTime now) {
    QUICHE_DCHECK(IsRunning());
    if (now > running_since_) {
      accumulated_ = accumulated_ + (now - running_since_);
    }
    running_since_ = NotRunningSentinel();
    QUICHE_DCHECK(!IsRunning());
  }

  // Total time across all closed intervals only.
  QuicTime::Delta GetTotalElapsedTime() const { return accumulated_; }

  // Total time including the currently open interval, if any.
  QuicTime::Delta GetTotalElapsedTime(QuicTime now) const {
    if (!IsRunning() || now <= running_since_) {
      return accumulated_;
    }
    return accumulated_ + (now - running_since_);
  }

 private:
  QuicTime::Delta accumulated_ = QuicTime::Delta::Zero();
  QuicTime running_since_ = NotRunningSentinel();
};
}
#endif | #include "quiche/quic/core/quic_time_accumulator.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/mock_clock.h"
namespace quic {
namespace test {
// A freshly constructed accumulator is stopped and reports zero elapsed
// time regardless of how far the clock advances.
TEST(QuicTimeAccumulator, DefaultConstruct) {
  MockClock clock;
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  QuicTimeAccumulator accumulator;
  EXPECT_FALSE(accumulator.IsRunning());
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  EXPECT_EQ(QuicTime::Delta::Zero(), accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::Zero(),
            accumulator.GetTotalElapsedTime(clock.Now()));
}
// Closed intervals accumulate; the one-argument getter ignores the open
// interval while the two-argument getter includes it.
TEST(QuicTimeAccumulator, StartStop) {
  MockClock clock;
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
  QuicTimeAccumulator accumulator;
  accumulator.Start(clock.Now());
  EXPECT_TRUE(accumulator.IsRunning());
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
  accumulator.Stop(clock.Now());
  EXPECT_FALSE(accumulator.IsRunning());
  // Time passing while stopped is not counted.
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10),
            accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10),
            accumulator.GetTotalElapsedTime(clock.Now()));
  // Reopen: only the "as of now" overload sees the open interval grow.
  accumulator.Start(clock.Now());
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10),
            accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(15),
            accumulator.GetTotalElapsedTime(clock.Now()));
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10),
            accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20),
            accumulator.GetTotalElapsedTime(clock.Now()));
  // After Stop(), both overloads agree on the grand total.
  accumulator.Stop(clock.Now());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20),
            accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20),
            accumulator.GetTotalElapsedTime(clock.Now()));
}
// Intervals where the clock runs backwards must not subtract from (or add
// negative time to) the accumulated total.
TEST(QuicTimeAccumulator, ClockStepBackwards) {
  MockClock clock;
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
  QuicTimeAccumulator accumulator;
  accumulator.Start(clock.Now());
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(-10));
  accumulator.Stop(clock.Now());
  EXPECT_EQ(QuicTime::Delta::Zero(), accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::Zero(),
            accumulator.GetTotalElapsedTime(clock.Now()));
  // A good interval followed by a backwards open interval: the total keeps
  // the 50ms and the open interval contributes nothing.
  accumulator.Start(clock.Now());
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(50));
  accumulator.Stop(clock.Now());
  accumulator.Start(clock.Now());
  clock.AdvanceTime(QuicTime::Delta::FromMilliseconds(-80));
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(50),
            accumulator.GetTotalElapsedTime());
  EXPECT_EQ(QuicTime::Delta::FromMilliseconds(50),
            accumulator.GetTotalElapsedTime(clock.Now()));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_time_accumulator.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_time_accumulator_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
a8637437-dc05-47df-af5a-2291897bf2a2 | cpp | tensorflow/tensorflow | onednn_layer_norm | third_party/xla/xla/service/cpu/onednn_layer_norm.cc | third_party/xla/xla/service/cpu/tests/onednn_layer_norm_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_layer_norm.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <string>
#include <unordered_map>
#include <vector>
#define EIGEN_USE_THREADS
#include "absl/base/dynamic_annotations.h"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::layer_normalization_forward;
using dnnl::memory;
using dnnl::normalization_flags;
using dnnl::prop_kind;
using dnnl::stream;
}
// Runtime entry point called from XLA:CPU-generated code to execute a oneDNN
// layer normalization (forward inference, scale + shift).
//
// `result` describes the output memref. `args[0]` is skipped here; the
// payload starts at index 1: ExecutableRunOptions, serialized
// OneDnnNormConfig string, then the input, gamma (scale) and beta (shift)
// memrefs, in that order.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnLayerNorm(
    void* result, void** args) {
  // Payload begins at index 1; args[0] is not consumed here.
  int arg_indx = 1;
  const xla::ExecutableRunOptions* run_options =
      static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
  XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
  XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
  tsl::OneDnnThreadPool thread_pool(
      run_options->intra_op_thread_pool()->getPool(), false);
  engine cpu_engine(engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
  // Without OpenMP, hand XLA's Eigen-backed thread pool to oneDNN.
  auto onednn_stream =
      stream(dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
  auto onednn_stream = stream(cpu_engine);
#endif
  std::string config_str(static_cast<const char*>(args[arg_indx++]));
  OneDnnNormConfig ln_config;
  ln_config.ParseFromString(config_str);
  MemrefInfo layer_minfo(args[arg_indx++]);
  MemrefInfo gamma_minfo(args[arg_indx++]);
  MemrefInfo beta_minfo(args[arg_indx++]);
  MemrefInfo result_minfo(result);
  auto src_md = layer_minfo.GetOneDnnMemDesc();
  auto dst_md = result_minfo.GetOneDnnMemDesc();
  // Scale and shift share one descriptor; beta's layout is used for both.
  auto scaleshift_md = beta_minfo.GetOneDnnMemDesc();
  auto src_mem = memory(src_md, cpu_engine, layer_minfo.Data());
  auto dst_mem = memory(dst_md, cpu_engine, result_minfo.Data());
  auto scale_mem = memory(scaleshift_md, cpu_engine, gamma_minfo.Data());
  auto shift_mem = memory(scaleshift_md, cpu_engine, beta_minfo.Data());
  // The config carries epsilon as raw IEEE-754 bits in an int32 field.
  // Reassemble the float with memcpy; writing through a reinterpret_cast'd
  // int32_t* into a float object violates strict aliasing (UB).
  float epsilon;
  const int32_t epsilon_bits = ln_config.epsilon_typecast();
  static_assert(sizeof(epsilon) == sizeof(epsilon_bits),
                "epsilon must be bit-compatible with int32_t");
  std::memcpy(&epsilon, &epsilon_bits, sizeof(epsilon));
  auto lnorm_pd = layer_normalization_forward::primitive_desc(
      cpu_engine, prop_kind::forward_inference, src_md, dst_md, epsilon,
      normalization_flags::use_scale | normalization_flags::use_shift);
  auto lnorm_prim = layer_normalization_forward(lnorm_pd);
  std::unordered_map<int, memory> ln_args;
  ln_args.insert({DNNL_ARG_SRC, src_mem});
  ln_args.insert({DNNL_ARG_SCALE, scale_mem});
  ln_args.insert({DNNL_ARG_SHIFT, shift_mem});
  ln_args.insert({DNNL_ARG_DST, dst_mem});
  lnorm_prim.execute(onednn_stream, ln_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace {
// Fixture for oneDNN layer-norm rewrite tests. Provides the FileCheck
// pattern that matches the fused __onednn$layernorm custom call plus HLO
// snippets shared by several tests below.
class LayerNormTest : public HloTestBase {
 protected:
  // Disable the thunk runtime for these tests.
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
    debug_options.set_xla_cpu_use_thunk_runtime(false);
    return debug_options;
  }
  // FileCheck pattern: optimized HLO must contain a oneDNN layer-norm
  // custom call whose config uses SCALE_AND_SHIFT rescaling.
  const char* onednn_layer_norm_ =
      R"(
; CHECK: custom_call_target="__onednn$layernorm",
; CHECK: backend_config={
; CHECK-DAG: "onednn_layer_norm_config":{
; CHECK-DAG: "rescale":"SCALE_AND_SHIFT"
; CHECK-DAG: }
; CHECK: }
)";
  // Scalar-add reduction used by the mean/variance reduces below.
  std::string common_hlo_region_ =
      R"(
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
)";
  // Decomposed layer-norm body (mean, variance, rsqrt, scale, shift) that
  // the compiler is expected to re-fuse into a single oneDNN call.
  std::string common_hlo_entry_computation_block_ =
      R"(
Arg_0.2 = f32[768]{0} parameter(1), sharding={replicated}
Arg_0.3 = f32[768]{0} parameter(2), sharding={replicated}
convert.290 = f32[84,197,768]{2,1,0} convert(Arg_0.1)
constant.291 = f32[] constant(0)
convert.292 = f32[] convert(constant.291)
reduce.297 = f32[84,197]{1,0} reduce(convert.290, convert.292), dimensions={2}, to_apply=region_add
constant.298 = s32[] constant(768)
convert.299 = f32[] convert(constant.298)
broadcast.300 = f32[84,197]{1,0} broadcast(convert.299), dimensions={}
divide.301 = f32[84,197]{1,0} divide(reduce.297, broadcast.300)
convert.302 = f32[84,197]{1,0} convert(divide.301)
reshape.303 = f32[84,197,1]{2,1,0} reshape(convert.302)
reshape.304 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.305 = f32[84,197,768]{2,1,0} broadcast(reshape.304), dimensions={0,1}
subtract.306 = f32[84,197,768]{2,1,0} subtract(Arg_0.1, broadcast.305)
multiply.307 = f32[84,197,768]{2,1,0} multiply(subtract.306, subtract.306)
convert.308 = f32[84,197,768]{2,1,0} convert(multiply.307)
constant.309 = f32[] constant(0)
convert.310 = f32[] convert(constant.309)
reduce.315 = f32[84,197]{1,0} reduce(convert.308, convert.310), dimensions={2}, to_apply=region_add
constant.316 = s32[] constant(768)
convert.317 = f32[] convert(constant.316)
broadcast.318 = f32[84,197]{1,0} broadcast(convert.317), dimensions={}
divide.319 = f32[84,197]{1,0} divide(reduce.315, broadcast.318)
convert.320 = f32[84,197]{1,0} convert(divide.319)
reshape.321 = f32[84,197,1]{2,1,0} reshape(convert.320)
constant.322 = f32[] constant(1e-12)
broadcast.323 = f32[84,197,1]{2,1,0} broadcast(constant.322), dimensions={}
add.324 = f32[84,197,1]{2,1,0} add(reshape.321, broadcast.323)
rsqrt.325 = f32[84,197,1]{2,1,0} rsqrt(add.324)
reshape.328 = f32[84,197]{1,0} reshape(rsqrt.325)
broadcast.329 = f32[84,197,768]{2,1,0} broadcast(reshape.328), dimensions={0,1}
broadcast.327 = f32[84,197,768]{2,1,0} broadcast(Arg_0.2), dimensions={2}
multiply.330 = f32[84,197,768]{2,1,0} multiply(broadcast.329, broadcast.327)
multiply.331 = f32[84,197,768]{2,1,0} multiply(Arg_0.1, multiply.330)
broadcast.336 = f32[84,197,768]{2,1,0} broadcast(Arg_0.3), dimensions={2}
reshape.332 = f32[84,197]{1,0} reshape(reshape.303)
broadcast.333 = f32[84,197,768]{2,1,0} broadcast(reshape.332), dimensions={0,1}
multiply.334 = f32[84,197,768]{2,1,0} multiply(multiply.330, broadcast.333)
subtract.337 = f32[84,197,768]{2,1,0} subtract(broadcast.336, multiply.334)
)";
};
// FP32 layer norm: numerics must match the reference and the optimized HLO
// must contain the fused oneDNN layer-norm custom call.
TEST_F(LayerNormTest, LayerNormTest0_FP32) {
  std::string layer_norm_module_str =
      R"(HloModule layer_norm.test, entry_computation_layout={(f32[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f32[84,197,768]{2,1,0}})" +
      common_hlo_region_ + R"(
ENTRY main {
Arg_0.1 = f32[84,197,768]{2,1,0} parameter(0), sharding={replicated}
)" + common_hlo_entry_computation_block_ +
      R"(
ROOT add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
}
)";
  EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
// BF16 variant: inputs are converted to f32 for the decomposed body, and a
// looser error bound is used; skipped on CPUs without BF16 support.
TEST_F(LayerNormTest, LayerNormTest0_BF16) {
  if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
    GTEST_SKIP() << "CPU does not support BF16.";
  }
  std::string layer_norm_module_str =
      R"(HloModule layer_norm.test, entry_computation_layout={(bf16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->bf16[84,197,768]{2,1,0}})" +
      common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = bf16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
      R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = bf16[84,197,768]{2,1,0} convert(add.338)
}
)";
  EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
// F16 variant of the same module; skipped on CPUs without F16 support.
TEST_F(LayerNormTest, LayerNormTest0_F16) {
  if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
    GTEST_SKIP() << "CPU does not support F16.";
  }
  std::string layer_norm_module_str =
      R"(HloModule layer_norm.test, entry_computation_layout={(f16[84,197,768]{2,1,0}, f32[768]{0}, f32[768]{0})->f16[84,197,768]{2,1,0}})" +
      common_hlo_region_ + R"(
ENTRY main {
Arg_0.1.0 = f16[84,197,768]{2,1,0} parameter(0), sharding={replicated}
Arg_0.1 = f32[84,197,768]{2,1,0} convert(Arg_0.1.0)
)" + common_hlo_entry_computation_block_ +
      R"(
add.338 = f32[84,197,768]{2,1,0} add(multiply.331, subtract.337)
ROOT convert.339 = f16[84,197,768]{2,1,0} convert(add.338)
}
)";
  EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
// Self-contained F16 layer-norm module (small 2x4x8 shape, unit scale and
// shift constants) that must still be recognized and fused.
TEST_F(LayerNormTest, LayerNormTest1_F16) {
  if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
    GTEST_SKIP() << "CPU does not support F16.";
  }
  const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(Arg_2, broadcast_1)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_2 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_2, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(Arg_2, multiply_1)
constant_5 = f16[8]{0} constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
  EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
// Same as LayerNormTest1_F16 but with commuted operand orders (e.g.
// mean-minus-input, scale-times-input) to check pattern matching is not
// order sensitive.
TEST_F(LayerNormTest, LayerNormTest2_F16) {
  if (!xla::cpu::IsSupportedType(PrimitiveType::F16)) {
    GTEST_SKIP() << "CPU does not support F16.";
  }
  const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add_0 = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_2 = f16[2,4,8] parameter(0), sharding={replicated}
convert_0 = f32[2,4,8] convert(Arg_2)
constant_0 = f32[] constant(0)
convert_1 = f32[] convert(constant_0)
reduce_0 = f32[2,4] reduce(convert_0, convert_1), dimensions={2}, to_apply=region_add
constant_1 = s32[] constant(8)
convert_2 = f32[] convert(constant_1)
broadcast_0 = f32[2,4] broadcast(convert_2), dimensions={}
divide_0 = f32[2,4] divide(reduce_0, broadcast_0)
convert_3 = f16[2,4] convert(divide_0)
reshape_0 = f16[2,4,1] reshape(convert_3)
reshape_1 = f16[2,4] reshape(reshape_0)
broadcast_1 = f16[2,4,8] broadcast(reshape_1), dimensions={0,1}
subtract_0 = f16[2,4,8] subtract(broadcast_1, Arg_2)
multiply_0 = f16[2,4,8] multiply(subtract_0, subtract_0)
convert_4 = f32[2,4,8] convert(multiply_0)
constant_2 = f32[] constant(0)
convert_5 = f32[] convert(constant_2)
reduce_1 = f32[2,4] reduce(convert_4, convert_5), dimensions={2}, to_apply=region_add
constant_3 = s32[] constant(8)
convert_6 = f32[] convert(constant_3)
broadcast_2 = f32[2,4] broadcast(convert_6), dimensions={}
divide_1 = f32[2,4] divide(reduce_1, broadcast_2)
convert_7 = f16[2,4] convert(divide_1)
reshape_2 = f16[2,4,1] reshape(convert_7)
rsqrt_0 = f16[2,4,1] rsqrt(reshape_2)
reshape_3 = f16[2,4] reshape(rsqrt_0)
broadcast_3 = f16[2,4,8] broadcast(reshape_3), dimensions={0,1}
constant_4 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_4 = f16[2,4,8] broadcast(constant_4), dimensions={2}
multiply_1 = f16[2,4,8] multiply(broadcast_3, broadcast_4)
multiply_2 = f16[2,4,8] multiply(multiply_1, Arg_2)
constant_5 = f16[8] constant({1,1,1,1,1,1,1,1})
broadcast_5 = f16[2,4,8] broadcast(constant_5), dimensions={2}
reshape_4 = f16[2,4] reshape(reshape_0)
broadcast_6 = f16[2,4,8] broadcast(reshape_4), dimensions={0,1}
multiply_3 = f16[2,4,8] multiply(multiply_1, broadcast_6)
subtract_1 = f16[2,4,8] subtract(broadcast_5, multiply_3)
ROOT add_1 = f16[2,4,8] add(multiply_2, subtract_1)
}
)";
  EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
// BF16 layer norm with non-trivial gamma/beta parameters and an explicit
// epsilon; skipped on CPUs without BF16 support.
TEST_F(LayerNormTest, LayerNormTest1_BF16) {
  if (!xla::cpu::IsSupportedType(PrimitiveType::BF16)) {
    GTEST_SKIP() << "CPU does not support BF16.";
  }
  const char* layer_norm_module_str = R"(
HloModule layer_norm.test
region_add {
Arg_0.7555 = f32[] parameter(0)
Arg_1.7556 = f32[] parameter(1)
ROOT add.7557 = f32[] add(Arg_0.7555, Arg_1.7556)
}
ENTRY main {
Arg_0.1 = bf16[160,197,768] parameter(0), sharding={replicated}
Arg_0.2 = bf16[768] parameter(1), sharding={replicated}
Arg_0.3 = bf16[768] parameter(2), sharding={replicated}
convert.80 = f32[160,197,768] convert(Arg_0.1)
constant.81 = f32[] constant(0)
convert.82 = f32[] convert(constant.81)
reduce.87 = f32[160,197] reduce(convert.80, convert.82), dimensions={2}, to_apply=region_add
constant.88 = s32[] constant(768)
convert.89 = f32[] convert(constant.88)
broadcast.90 = f32[160,197] broadcast(convert.89), dimensions={}
divide.91 = f32[160,197] divide(reduce.87, broadcast.90)
convert.92 = bf16[160,197] convert(divide.91)
reshape.93 = bf16[160,197,1] reshape(convert.92)
reshape.94 = bf16[160,197] reshape(reshape.93)
broadcast.95 = bf16[160,197,768] broadcast(reshape.94), dimensions={0,1}
subtract.96 = bf16[160,197,768] subtract(Arg_0.1, broadcast.95)
multiply.97 = bf16[160,197,768] multiply(subtract.96, subtract.96)
convert.98 = f32[160,197,768] convert(multiply.97)
constant.99 = f32[] constant(0)
convert.100 = f32[] convert(constant.99)
reduce.105 = f32[160,197] reduce(convert.98, convert.100), dimensions={2}, to_apply=region_add
constant.106 = s32[] constant(768)
convert.107 = f32[] convert(constant.106)
broadcast.108 = f32[160,197] broadcast(convert.107), dimensions={}
divide.109 = f32[160,197] divide(reduce.105, broadcast.108)
convert.110 = bf16[160,197] convert(divide.109)
reshape.111 = bf16[160,197,1] reshape(convert.110)
constant.112 = bf16[] constant(1.002e-12)
broadcast.113 = bf16[160,197,1] broadcast(constant.112), dimensions={}
add.114 = bf16[160,197,1] add(reshape.111, broadcast.113)
rsqrt.115 = bf16[160,197,1] rsqrt(add.114)
reshape.118 = bf16[160,197] reshape(rsqrt.115)
broadcast.119 = bf16[160,197,768] broadcast(reshape.118), dimensions={0,1}
broadcast.117 = bf16[160,197,768] broadcast(Arg_0.2), dimensions={2}
multiply.120 = bf16[160,197,768] multiply(broadcast.119, broadcast.117)
multiply.121 = bf16[160,197,768] multiply(Arg_0.1, multiply.120)
broadcast.126 = bf16[160,197,768] broadcast(Arg_0.3), dimensions={2}
reshape.122 = bf16[160,197] reshape(reshape.93)
broadcast.123 = bf16[160,197,768] broadcast(reshape.122), dimensions={0,1}
multiply.124 = bf16[160,197,768] multiply(multiply.120, broadcast.123)
subtract.127 = bf16[160,197,768] subtract(broadcast.126, multiply.124)
ROOT add.128 = bf16[160,197,768] add(multiply.121, subtract.127)
}
)";
  EXPECT_TRUE(RunAndCompare(layer_norm_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(layer_norm_module_str, onednn_layer_norm_);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_layer_norm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_layer_norm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d20b59d0-6eb9-4929-88d1-5a612e569453 | cpp | abseil/abseil-cpp | log_severity | absl/base/log_severity.cc | absl/base/log_severity_test.cc | #include "absl/base/log_severity.h"
#include <ostream>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Streams the severity name for the canonical values; anything else prints
// as "absl::LogSeverity(<n>)".
std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) {
  if (s != absl::NormalizeLogSeverity(s)) {
    return os << "absl::LogSeverity(" << static_cast<int>(s) << ")";
  }
  return os << absl::LogSeverityName(s);
}
// Streams ">=<SEVERITY>" for finite thresholds and "INFINITY" for the
// never-log sentinel. Values outside the enumeration print nothing.
std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) {
  switch (s) {
    case absl::LogSeverityAtLeast::kInfinity:
      return os << "INFINITY";
    case absl::LogSeverityAtLeast::kInfo:
    case absl::LogSeverityAtLeast::kWarning:
    case absl::LogSeverityAtLeast::kError:
    case absl::LogSeverityAtLeast::kFatal:
      return os << ">=" << static_cast<absl::LogSeverity>(s);
  }
  // Out-of-range values fall through without output.
  return os;
}
// Streams "<=<SEVERITY>" for finite thresholds and "NEGATIVE_INFINITY" for
// the always-log sentinel. Values outside the enumeration print nothing.
std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) {
  switch (s) {
    case absl::LogSeverityAtMost::kNegativeInfinity:
      return os << "NEGATIVE_INFINITY";
    case absl::LogSeverityAtMost::kInfo:
    case absl::LogSeverityAtMost::kWarning:
    case absl::LogSeverityAtMost::kError:
    case absl::LogSeverityAtMost::kFatal:
      return os << "<=" << static_cast<absl::LogSeverity>(s);
  }
  // Out-of-range values fall through without output.
  return os;
}
ABSL_NAMESPACE_END
} | #include "absl/base/log_severity.h"
#include <cstdint>
#include <ios>
#include <limits>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/flags/internal/flag.h"
#include "absl/flags/marshalling.h"
#include "absl/strings/str_cat.h"
namespace {
using ::testing::Eq;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::TestWithParam;
using ::testing::Values;
// Formats `value` via operator<< and returns the resulting text.
template <typename T>
std::string StreamHelper(T value) {
  std::ostringstream sink;
  sink << value;
  return sink.str();
}
// Canonical severities stream as their names; out-of-range values stream
// as "absl::LogSeverity(<n>)".
TEST(StreamTest, Works) {
  EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(-100)),
              Eq("absl::LogSeverity(-100)"));
  EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO"));
  EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING"));
  EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR"));
  EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL"));
  EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(4)),
              Eq("absl::LogSeverity(4)"));
}
// absl::LogSeverity flags should fit in the lock-free flag storage.
static_assert(absl::flags_internal::FlagUseValueAndInitBitStorage<
                  absl::LogSeverity>::value,
              "Flags of type absl::LogSeverity ought to be lock-free.");
// Integers just outside the int range must be rejected by ParseFlag.
using ParseFlagFromOutOfRangeIntegerTest = TestWithParam<int64_t>;
INSTANTIATE_TEST_SUITE_P(
    Instantiation, ParseFlagFromOutOfRangeIntegerTest,
    Values(static_cast<int64_t>(std::numeric_limits<int>::min()) - 1,
           static_cast<int64_t>(std::numeric_limits<int>::max()) + 1));
TEST_P(ParseFlagFromOutOfRangeIntegerTest, ReturnsError) {
  const std::string to_parse = absl::StrCat(GetParam());
  absl::LogSeverity value;
  std::string error;
  EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value;
}
// The extreme values that still fit in int must parse (as out-of-enum
// severities).
using ParseFlagFromAlmostOutOfRangeIntegerTest = TestWithParam<int>;
INSTANTIATE_TEST_SUITE_P(Instantiation,
                         ParseFlagFromAlmostOutOfRangeIntegerTest,
                         Values(std::numeric_limits<int>::min(),
                                std::numeric_limits<int>::max()));
TEST_P(ParseFlagFromAlmostOutOfRangeIntegerTest, YieldsExpectedValue) {
  const auto expected = static_cast<absl::LogSeverity>(GetParam());
  const std::string to_parse = absl::StrCat(GetParam());
  absl::LogSeverity value;
  std::string error;
  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
  EXPECT_THAT(value, Eq(expected));
}
// Integer spellings (with whitespace, signs, leading zeros, hex) that land
// on an enumerator must parse to that enumerator.
using ParseFlagFromIntegerMatchingEnumeratorTest =
    TestWithParam<std::tuple<absl::string_view, absl::LogSeverity>>;
INSTANTIATE_TEST_SUITE_P(
    Instantiation, ParseFlagFromIntegerMatchingEnumeratorTest,
    Values(std::make_tuple("0", absl::LogSeverity::kInfo),
           std::make_tuple(" 0", absl::LogSeverity::kInfo),
           std::make_tuple("-0", absl::LogSeverity::kInfo),
           std::make_tuple("+0", absl::LogSeverity::kInfo),
           std::make_tuple("00", absl::LogSeverity::kInfo),
           std::make_tuple("0 ", absl::LogSeverity::kInfo),
           std::make_tuple("0x0", absl::LogSeverity::kInfo),
           std::make_tuple("1", absl::LogSeverity::kWarning),
           std::make_tuple("+1", absl::LogSeverity::kWarning),
           std::make_tuple("2", absl::LogSeverity::kError),
           std::make_tuple("3", absl::LogSeverity::kFatal)))
TEST_P(ParseFlagFromIntegerMatchingEnumeratorTest, YieldsExpectedValue) {
  const absl::string_view to_parse = std::get<0>(GetParam());
  const absl::LogSeverity expected = std::get<1>(GetParam());
  absl::LogSeverity value;
  std::string error;
  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
  EXPECT_THAT(value, Eq(expected));
}
// Integers outside the enumerator range still parse as raw severities.
using ParseFlagFromOtherIntegerTest =
    TestWithParam<std::tuple<absl::string_view, int>>;
INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromOtherIntegerTest,
                         Values(std::make_tuple("-1", -1),
                                std::make_tuple("4", 4),
                                std::make_tuple("010", 10),
                                std::make_tuple("0x10", 16)));
TEST_P(ParseFlagFromOtherIntegerTest, YieldsExpectedValue) {
  const absl::string_view to_parse = std::get<0>(GetParam());
  const auto expected = static_cast<absl::LogSeverity>(std::get<1>(GetParam()));
  absl::LogSeverity value;
  std::string error;
  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
  EXPECT_THAT(value, Eq(expected));
}
// Severity names parse case-insensitively, with or without the "k" prefix,
// including the DFATAL alias.
using ParseFlagFromEnumeratorTest =
    TestWithParam<std::tuple<absl::string_view, absl::LogSeverity>>;
INSTANTIATE_TEST_SUITE_P(
    Instantiation, ParseFlagFromEnumeratorTest,
    Values(std::make_tuple("INFO", absl::LogSeverity::kInfo),
           std::make_tuple("info", absl::LogSeverity::kInfo),
           std::make_tuple("kInfo", absl::LogSeverity::kInfo),
           std::make_tuple("iNfO", absl::LogSeverity::kInfo),
           std::make_tuple("kInFo", absl::LogSeverity::kInfo),
           std::make_tuple("WARNING", absl::LogSeverity::kWarning),
           std::make_tuple("warning", absl::LogSeverity::kWarning),
           std::make_tuple("kWarning", absl::LogSeverity::kWarning),
           std::make_tuple("WaRnInG", absl::LogSeverity::kWarning),
           std::make_tuple("KwArNiNg", absl::LogSeverity::kWarning),
           std::make_tuple("ERROR", absl::LogSeverity::kError),
           std::make_tuple("error", absl::LogSeverity::kError),
           std::make_tuple("kError", absl::LogSeverity::kError),
           std::make_tuple("eRrOr", absl::LogSeverity::kError),
           std::make_tuple("kErRoR", absl::LogSeverity::kError),
           std::make_tuple("FATAL", absl::LogSeverity::kFatal),
           std::make_tuple("fatal", absl::LogSeverity::kFatal),
           std::make_tuple("kFatal", absl::LogSeverity::kFatal),
           std::make_tuple("FaTaL", absl::LogSeverity::kFatal),
           std::make_tuple("KfAtAl", absl::LogSeverity::kFatal),
           std::make_tuple("DFATAL", absl::kLogDebugFatal),
           std::make_tuple("dfatal", absl::kLogDebugFatal),
           std::make_tuple("kLogDebugFatal", absl::kLogDebugFatal),
           std::make_tuple("dFaTaL", absl::kLogDebugFatal),
           std::make_tuple("kLoGdEbUgFaTaL", absl::kLogDebugFatal)));
TEST_P(ParseFlagFromEnumeratorTest, YieldsExpectedValue) {
  const absl::string_view to_parse = std::get<0>(GetParam());
  const absl::LogSeverity expected = std::get<1>(GetParam());
  absl::LogSeverity value;
  std::string error;
  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
  EXPECT_THAT(value, Eq(expected));
}
// Strings that are neither integers nor severity names must be rejected.
using ParseFlagFromGarbageTest = TestWithParam<absl::string_view>;
INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromGarbageTest,
                         Values("", "\0", " ", "garbage", "kkinfo", "I",
                                "kDFATAL", "LogDebugFatal", "lOgDeBuGfAtAl"));
TEST_P(ParseFlagFromGarbageTest, ReturnsError) {
  const absl::string_view to_parse = GetParam();
  absl::LogSeverity value;
  std::string error;
  EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value;
}
// Enumerators unparse to their canonical names, and that text reparses to
// the same value.
using UnparseFlagToEnumeratorTest =
    TestWithParam<std::tuple<absl::LogSeverity, absl::string_view>>;
INSTANTIATE_TEST_SUITE_P(
    Instantiation, UnparseFlagToEnumeratorTest,
    Values(std::make_tuple(absl::LogSeverity::kInfo, "INFO"),
           std::make_tuple(absl::LogSeverity::kWarning, "WARNING"),
           std::make_tuple(absl::LogSeverity::kError, "ERROR"),
           std::make_tuple(absl::LogSeverity::kFatal, "FATAL")));
TEST_P(UnparseFlagToEnumeratorTest, ReturnsExpectedValueAndRoundTrips) {
  const absl::LogSeverity to_unparse = std::get<0>(GetParam());
  const absl::string_view expected = std::get<1>(GetParam());
  const std::string stringified_value = absl::UnparseFlag(to_unparse);
  EXPECT_THAT(stringified_value, Eq(expected));
  absl::LogSeverity reparsed_value;
  std::string error;
  EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error),
              IsTrue());
  EXPECT_THAT(reparsed_value, Eq(to_unparse));
}
// Out-of-enum severities unparse to their integer spelling and round-trip.
using UnparseFlagToOtherIntegerTest = TestWithParam<int>;
INSTANTIATE_TEST_SUITE_P(Instantiation, UnparseFlagToOtherIntegerTest,
                         Values(std::numeric_limits<int>::min(), -1, 4,
                                std::numeric_limits<int>::max()));
TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) {
  const absl::LogSeverity to_unparse =
      static_cast<absl::LogSeverity>(GetParam());
  const std::string expected = absl::StrCat(GetParam());
  const std::string stringified_value = absl::UnparseFlag(to_unparse);
  EXPECT_THAT(stringified_value, Eq(expected));
  absl::LogSeverity reparsed_value;
  std::string error;
  EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error),
              IsTrue());
  EXPECT_THAT(reparsed_value, Eq(to_unparse));
}
// LogSeverity and the threshold enums compare against each other in both
// directions.
TEST(LogThresholdTest, LogSeverityAtLeastTest) {
  EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal);
  EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo);
  EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError);
  EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo);
}
TEST(LogThresholdTest, LogSeverityAtMostTest) {
  EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning);
  EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal);
  EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError);
  EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError);
}
// The infinity sentinels bound every real severity.
TEST(LogThresholdTest, Extremes) {
  EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity);
  EXPECT_GT(absl::LogSeverity::kInfo,
            absl::LogSeverityAtMost::kNegativeInfinity);
}
// Threshold streaming: ">=..."/"<=..." for finite values, the sentinel
// names otherwise.
TEST(LogThresholdTest, Output) {
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning),
              Eq(">=WARNING"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity),
              Eq("INFINITY"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL"));
  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity),
              Eq("NEGATIVE_INFINITY"));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/log_severity.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/base/log_severity_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
93b256e8-a954-4c10-a627-a30c48df4eab | cpp | tensorflow/tensorflow | model | tensorflow/lite/delegates/gpu/common/model.cc | tensorflow/lite/delegates/gpu/common/model_test.cc | #include "tensorflow/lite/delegates/gpu/common/model.h"
#include <stdint.h>
#include <algorithm>
#include <iterator>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
namespace tflite {
namespace gpu {
std::vector<Node*> GraphFloat32::nodes() const {
return FilterNodes([](const NodeDef&) { return true; });
}
std::vector<Value*> GraphFloat32::values() const {
return FilterValues([](const ValueDef&) { return true; });
}
std::vector<Value*> GraphFloat32::inputs() const {
return FilterValues([](const ValueDef& v) { return v.producer == nullptr; });
}
std::vector<Value*> GraphFloat32::variable_inputs() const {
return FilterValues(
[](const ValueDef& v) { return v.value->tensor.is_variable_input; });
}
std::vector<Value*> GraphFloat32::outputs() const {
std::vector<Value*> values;
std::vector<Value*> values_known_graph_outputs;
values.reserve(values_.size());
values_known_graph_outputs.reserve(values_.size());
for (auto& v : values_) {
auto value_ptr = v.value.get();
if (value_ptr == nullptr) continue;
if (v.consumers.empty()) {
values.push_back(v.value.get());
} else if (std::find(known_graph_outputs_.begin(),
known_graph_outputs_.end(),
value_ptr) != known_graph_outputs_.end()) {
values_known_graph_outputs.push_back(v.value.get());
}
}
values.insert(values.end(), values_known_graph_outputs.begin(),
values_known_graph_outputs.end());
return values;
}
// Inputs of node `id`; empty for an unknown id.
std::vector<Value*> GraphFloat32::FindInputs(NodeId id) const {
if (id >= nodes_.size()) return {};
return nodes_.at(id).inputs;
}
// Outputs of node `id`; empty for an unknown id.
std::vector<Value*> GraphFloat32::FindOutputs(NodeId id) const {
if (id >= nodes_.size()) return {};
return nodes_.at(id).outputs;
}
// True when value `id` exists and has no producer.
bool GraphFloat32::IsGraphInput(ValueId id) const {
return id < values_.size() && values_[id].producer == nullptr;
}
// True when value `id` exists and either was registered as a known graph
// output or has no consumers.
bool GraphFloat32::IsGraphOutput(ValueId id) const {
if (id >= values_.size()) return false;
const auto& def = values_[id];
const bool explicitly_known =
std::find(known_graph_outputs_.begin(), known_graph_outputs_.end(),
def.value.get()) != known_graph_outputs_.end();
return explicitly_known || def.consumers.empty();
}
// Producer of value `id`, or nullptr if the id is unknown or the value has
// no producer.
Node* GraphFloat32::FindProducer(ValueId id) const {
return id < values_.size() ? values_[id].producer : nullptr;
}
// Consumers of value `id`; empty for an unknown id.
std::vector<Node*> GraphFloat32::FindConsumers(ValueId id) const {
return id < values_.size() ? values_[id].consumers : std::vector<Node*>();
}
// Node with the given id, or nullptr if out of range.
Node* GraphFloat32::GetNode(NodeId id) const {
return id < nodes_.size() ? nodes_.at(id).node.get() : nullptr;
}
// Value with the given id, or nullptr if out of range.
Value* GraphFloat32::GetValue(ValueId id) const {
return id < values_.size() ? values_[id].value.get() : nullptr;
}
// Creates a node with the next free id and appends it to the execution plan.
Node* GraphFloat32::NewNode() {
const NodeId id = nodes_.size();
NodeDef def;
def.node = std::make_unique<Node>(Node{id, {}});
Node* result = def.node.get();
nodes_[id] = std::move(def);
execution_plan_.push_back(id);
return result;
}
// Creates a new node and inserts it into the execution plan immediately
// after node `id`. Returns OutOfRangeError when `id` does not name an
// existing node or is absent from the execution plan.
//
// Fix: the original scanned the execution plan with a manual `int idx`
// loop (signed/unsigned comparison against size()); use std::find instead.
absl::Status GraphFloat32::InsertNodeAfter(NodeId id, Node** new_node) {
if (id >= nodes_.size()) {
return absl::OutOfRangeError("NodeId is out of range");
}
auto it = std::find(execution_plan_.begin(), execution_plan_.end(), id);
if (it == execution_plan_.end()) {
return absl::OutOfRangeError("NodeId not in execution plan");
}
const NodeId new_id = nodes_.size();
NodeDef def;
def.node = std::make_unique<Node>(Node{new_id, {}});
*new_node = def.node.get();
nodes_[new_id] = std::move(def);
// `it` stays valid: only nodes_ was modified above.
execution_plan_.insert(it + 1, new_id);
return absl::OkStatus();
}
// Creates a value with the next free id.
Value* GraphFloat32::NewValue() {
const ValueId id = static_cast<ValueId>(values_.size());
ValueDef def;
def.value = std::make_unique<Value>(Value{id, {}});
Value* result = def.value.get();
values_.push_back(std::move(def));
return result;
}
// Makes `producer` the (single) producer of `value`.
// Fails with AlreadyExistsError if the node already produces the value and
// with InvalidArgumentError if the node consumes it (a node may not both
// produce and consume the same value). Any previous producer is detached
// from the value first.
absl::Status GraphFloat32::SetProducer(NodeId producer, ValueId value) {
  ValueDef* v;
  RETURN_IF_ERROR(LookupValue(value, &v));
  Value* value_ptr = v->value.get();
  NodeDef* n;
  RETURN_IF_ERROR(LookupNode(producer, &n));
  Node* node_ptr = n->node.get();
  // Reject a duplicate producer edge.
  if (node_ptr == v->producer) {
    return absl::AlreadyExistsError(absl::StrCat(
        "Node ", producer, " is already a producer of the value ", value));
  }
  // A value may not be both input and output of the same node.
  if (IsInput(producer, value)) {
    return absl::InvalidArgumentError("Node is a consumer of the value");
  }
  // Detach the old producer, if any, before attaching the new one.
  if (v->producer != nullptr) {
    Erase(&nodes_[v->producer->id].outputs, value_ptr);
  }
  v->producer = node_ptr;
  n->outputs.push_back(value_ptr);
  return absl::OkStatus();
}
// Detaches the producer from `value`, turning it into a graph input.
// Fails if the value currently has no producer.
absl::Status GraphFloat32::RemoveProducer(ValueId value) {
ValueDef* value_def;
RETURN_IF_ERROR(LookupValue(value, &value_def));
if (value_def->producer == nullptr) {
return absl::InvalidArgumentError("Value does not have a producer");
}
// Remove the value from the producing node's output list, then clear
// the back-pointer.
Erase(&nodes_[value_def->producer->id].outputs, value_def->value.get());
value_def->producer = nullptr;
return absl::OkStatus();
}
// Adds `consumer` as a consumer of `value`.
// Fails with InvalidArgumentError if the node produces the value (no
// self-loops) and with AlreadyExistsError if the edge already exists.
absl::Status GraphFloat32::AddConsumer(NodeId consumer, ValueId value) {
  ValueDef* v;
  RETURN_IF_ERROR(LookupValue(value, &v));
  Value* value_ptr = v->value.get();
  NodeDef* n;
  RETURN_IF_ERROR(LookupNode(consumer, &n));
  Node* node_ptr = n->node.get();
  // A node may not consume the value it produces.
  if (node_ptr == v->producer) {
    return absl::InvalidArgumentError("Node is a producer of the value");
  }
  // Duplicate consumer edges are rejected.
  if (IsInput(consumer, value)) {
    return absl::AlreadyExistsError(absl::StrCat(
        "Node ", consumer, " is already a consumer of the value ", value));
  }
  n->inputs.push_back(value_ptr);
  v->consumers.push_back(node_ptr);
  return absl::OkStatus();
}
// Replaces input `old_value` of `node` with `new_value`, preserving the
// position of the input in the node's input list. `old_value` must be an
// input of the node; `new_value` must be neither an input nor an output
// of it.
absl::Status GraphFloat32::ReplaceInput(NodeId node, ValueId old_value,
                                        ValueId new_value) {
  ValueDef* v_old;
  RETURN_IF_ERROR(LookupValue(old_value, &v_old));
  Value* value_old_ptr = v_old->value.get();
  ValueDef* v_new;
  RETURN_IF_ERROR(LookupValue(new_value, &v_new));
  Value* value_new_ptr = v_new->value.get();
  NodeDef* n;
  RETURN_IF_ERROR(LookupNode(node, &n));
  Node* node_ptr = n->node.get();
  if (!IsInput(node, old_value)) {
    return absl::InvalidArgumentError("old_value must be input of node.");
  }
  if (IsInput(node, new_value)) {
    return absl::InvalidArgumentError("new_value can not be input of node.");
  }
  if (node_ptr == v_new->producer) {
    return absl::InvalidArgumentError("new_value can not be output of node.");
  }
  // Swap the input in place so the input order is preserved.
  for (int i = 0; i < n->inputs.size(); ++i) {
    if (n->inputs[i] == value_old_ptr) {
      n->inputs[i] = value_new_ptr;
      break;
    }
  }
  v_new->consumers.push_back(node_ptr);
  Erase(&v_old->consumers, node_ptr);
  return absl::OkStatus();
}
// Removes the consumer edge between `consumer` and `value`.
// Fails if no such edge exists.
absl::Status GraphFloat32::RemoveConsumer(NodeId consumer, ValueId value) {
ValueDef* value_def;
RETURN_IF_ERROR(LookupValue(value, &value_def));
NodeDef* node_def;
RETURN_IF_ERROR(LookupNode(consumer, &node_def));
if (!IsInput(consumer, value)) {
return absl::InvalidArgumentError("Node is not a consumer of the value");
}
// Drop both directions of the edge.
Erase(&node_def->inputs, value_def->value.get());
Erase(&value_def->consumers, node_def->node.get());
return absl::OkStatus();
}
// Removes a node from the graph. Values it consumed lose this consumer;
// values it produced become producer-less (and therefore graph inputs).
// The NodeDef slot is kept with a null node, so node ids stay stable.
absl::Status GraphFloat32::DeleteNode(NodeId id) {
  NodeDef* n;
  RETURN_IF_ERROR(LookupNode(id, &n));
  Node* node_ptr = n->node.get();
  for (auto value : n->inputs) {
    Erase(&values_[value->id].consumers, node_ptr);
  }
  for (auto value : n->outputs) {
    values_[value->id].producer = nullptr;
  }
  n->inputs.clear();
  n->outputs.clear();
  n->node.reset();
  return absl::OkStatus();
}
// Removes a value from the graph, detaching it from its producer and all
// consumers. The ValueDef slot is kept with a null value, so value ids
// stay stable.
absl::Status GraphFloat32::DeleteValue(ValueId id) {
  ValueDef* v;
  RETURN_IF_ERROR(LookupValue(id, &v));
  Value* value_ptr = v->value.get();
  if (v->producer != nullptr) {
    Erase(&nodes_[v->producer->id].outputs, value_ptr);
  }
  if (!v->consumers.empty()) {
    for (auto node : v->consumers) {
      Erase(&nodes_[node->id].inputs, value_ptr);
    }
  }
  v->producer = nullptr;
  v->consumers.clear();
  v->value.reset();
  return absl::OkStatus();
}
// Rebuilds `model` as a deep copy of this graph.
// Pass 1 copies value slots (including empty/deleted slots so value ids
// stay aligned) and re-registers known graph outputs. Pass 2 copies nodes
// in execution-plan order. Pass 3 re-creates the producer/consumer edges
// through the public mutators so both sides of each edge stay consistent.
absl::Status GraphFloat32::MakeExactCopy(GraphFloat32* model) const {
  model->nodes_.clear();
  model->execution_plan_.clear();
  model->values_.clear();
  model->known_graph_outputs_.clear();
  for (auto& value_def : values_) {
    model->values_.push_back({});
    if (value_def.value) {
      model->values_.back().value = std::make_unique<Value>(*value_def.value);
      if (std::find(known_graph_outputs_.begin(), known_graph_outputs_.end(),
                    value_def.value.get()) != known_graph_outputs_.end()) {
        // Register the *copied* value, not the original pointer.
        model->known_graph_outputs_.push_back(
            model->values_.back().value.get());
      }
    }
  }
  for (auto node_id : execution_plan_) {
    model->execution_plan_.push_back(node_id);
    model->nodes_[node_id] = {};
    auto& node_def = nodes_.at(node_id);
    if (node_def.node) {
      model->nodes_[node_id].node = std::make_unique<Node>(*node_def.node);
    }
  }
  // Edges are re-created only after all nodes and values exist.
  for (auto node_id : execution_plan_) {
    auto& node_def = nodes_.at(node_id);
    if (node_def.node) {
      for (auto output : node_def.outputs) {
        RETURN_IF_ERROR(model->SetProducer(node_def.node->id, output->id));
      }
      for (auto input : node_def.inputs) {
        RETURN_IF_ERROR(model->AddConsumer(node_def.node->id, input->id));
      }
    }
  }
  return absl::OkStatus();
}
// True when value `value` appears in the input list of node `node`.
// Out-of-range ids and deleted nodes/values yield false.
bool GraphFloat32::IsInput(NodeId node, ValueId value) {
if (node >= nodes_.size() || value >= values_.size()) return false;
const NodeDef& node_def = nodes_[node];
const ValueDef& value_def = values_[value];
if (!node_def.node || !value_def.value) return false;
const auto& ins = node_def.inputs;
return std::find(ins.begin(), ins.end(), value_def.value.get()) != ins.end();
}
// Resolves `id` to its NodeDef, rejecting out-of-range and deleted nodes.
absl::Status GraphFloat32::LookupNode(NodeId id, NodeDef** node_def) {
if (id >= nodes_.size()) {
return absl::OutOfRangeError("NodeId is out of range");
}
NodeDef& candidate = nodes_[id];
if (candidate.node == nullptr) {
return absl::OutOfRangeError("Node is already deleted");
}
*node_def = &candidate;
return absl::OkStatus();
}
// Resolves `id` to its ValueDef, rejecting out-of-range and deleted values.
absl::Status GraphFloat32::LookupValue(ValueId id, ValueDef** value_def) {
if (id >= values_.size()) {
return absl::OutOfRangeError("ValueId is out of range");
}
ValueDef& candidate = values_[id];
if (candidate.value == nullptr) {
return absl::OutOfRangeError("Value is already deleted");
}
*value_def = &candidate;
return absl::OkStatus();
}
// Fuses `to_remove` into its successor `to_keep`: the inputs of
// `to_remove` become inputs of `to_keep`, its output values are deleted,
// and the node itself is removed. Fails if any output of `to_remove`
// feeds a node other than `to_keep`.
absl::Status RemovePrecedingNode(GraphFloat32* graph, const Node* to_remove,
                                 const Node* to_keep) {
  // Every output of to_remove may only be consumed by to_keep.
  for (auto output : graph->FindOutputs(to_remove->id)) {
    auto consumers = graph->FindConsumers(output->id);
    if (consumers.size() > 1 ||
        (consumers.size() == 1 && consumers[0] != to_keep)) {
      return absl::InvalidArgumentError(
          "Output from to_remove node has other consumers");
    }
  }
  for (auto input : graph->FindInputs(to_remove->id)) {
    RETURN_IF_ERROR(graph->AddConsumer(to_keep->id, input->id));
  }
  for (auto output : graph->FindOutputs(to_remove->id)) {
    RETURN_IF_ERROR(graph->DeleteValue(output->id));
  }
  return graph->DeleteNode(to_remove->id);
}
// Fuses `to_remove` into its predecessor `to_keep`: the values between the
// two nodes are deleted, the outputs of `to_remove` become outputs of
// `to_keep`, and the node itself is removed. Fails if `to_remove` has any
// input that is not produced by `to_keep`.
//
// Fix: FindProducer() returns nullptr for a graph-input value; the
// original dereferenced the result unconditionally, crashing when
// `to_remove` consumed a graph input. Treat that case as "other inputs".
absl::Status RemoveFollowingNode(GraphFloat32* graph, const Node* to_remove,
                                 const Node* to_keep) {
  for (auto input : graph->FindInputs(to_remove->id)) {
    Node* producer = graph->FindProducer(input->id);
    if (producer == nullptr || producer->id != to_keep->id) {
      return absl::InvalidArgumentError("To_remove node has other inputs");
    }
  }
  for (auto input : graph->FindInputs(to_remove->id)) {
    RETURN_IF_ERROR(graph->DeleteValue(input->id));
  }
  for (auto output : graph->FindOutputs(to_remove->id)) {
    RETURN_IF_ERROR(graph->SetProducer(to_keep->id, output->id));
  }
  return graph->DeleteNode(to_remove->id);
}
// Removes a 1-input/1-output node, rewiring consumers of its output to
// read the node's input value directly; the output value is deleted. If
// the input value ends up fully disconnected it is deleted as well.
absl::Status RemoveSimpleNodeKeepInput(GraphFloat32* graph,
                                       const Node* simple_node) {
  const auto inputs = graph->FindInputs(simple_node->id);
  const auto outputs = graph->FindOutputs(simple_node->id);
  if (inputs.size() != 1 || outputs.size() != 1) {
    return absl::FailedPreconditionError(
        "simple_node node must have 1 input and 1 output");
  }
  const auto input_id = inputs[0]->id;
  const auto output_id = outputs[0]->id;
  // Captured before the node is deleted, since deletion clears the edges.
  const Node* producer = graph->FindProducer(input_id);
  const auto consumers = graph->FindConsumers(output_id);
  RETURN_IF_ERROR(graph->DeleteNode(simple_node->id));
  for (auto& consumer : consumers) {
    RETURN_IF_ERROR(graph->ReplaceInput(consumer->id, output_id, input_id));
  }
  RETURN_IF_ERROR(graph->DeleteValue(output_id));
  // Input had no producer and the output had no consumers: nothing keeps
  // the input alive.
  if (!producer && consumers.empty()) {
    RETURN_IF_ERROR(graph->DeleteValue(input_id));
  }
  return absl::OkStatus();
}
// Removes a 1-input/1-output node, keeping its output value: the node's
// producer (if any) is rewired to produce the output directly and the
// intermediate input value is deleted. The node must be the only consumer
// of its input.
absl::Status RemoveSimpleNodeKeepOutput(GraphFloat32* graph,
                                        const Node* simple_node) {
  const auto inputs = graph->FindInputs(simple_node->id);
  const auto outputs = graph->FindOutputs(simple_node->id);
  if (inputs.size() != 1 || outputs.size() != 1) {
    return absl::FailedPreconditionError(
        "simple_node must have 1 input and 1 output");
  }
  const auto input_id = inputs[0]->id;
  const auto output_id = outputs[0]->id;
  const Node* producer = graph->FindProducer(input_id);
  const auto input_consumers = graph->FindConsumers(input_id);
  if (input_consumers.size() != 1) {
    return absl::FailedPreconditionError(
        "simple_node should be the only consumer on the node.");
  }
  RETURN_IF_ERROR(graph->DeleteNode(simple_node->id));
  if (producer) {
    RETURN_IF_ERROR(graph->RemoveProducer(input_id));
    RETURN_IF_ERROR(graph->SetProducer(producer->id, output_id));
  }
  RETURN_IF_ERROR(graph->DeleteValue(input_id));
  // With no producer and no remaining consumers the output is orphaned.
  const auto output_consumers = graph->FindConsumers(output_id);
  if (!producer && output_consumers.empty()) {
    RETURN_IF_ERROR(graph->DeleteValue(output_id));
  }
  return absl::OkStatus();
}
// Creates a fresh value produced by `from_node` and returns it via
// `output`.
absl::Status AddOutput(GraphFloat32* graph, const Node* from_node,
                       Value** output) {
Value* new_value = graph->NewValue();
RETURN_IF_ERROR(graph->SetProducer(from_node->id, new_value->id));
*output = new_value;
return absl::OkStatus();
}
// Connects `from_node` to `to_node`. When `*output` is non-null it is
// reused as the connecting value (it must be produced by `from_node` or
// have no producer); otherwise a fresh value produced by `from_node` is
// created and returned through `*output`.
absl::Status ConnectTwoNodes(GraphFloat32* graph, const Node* from_node,
                             const Node* to_node, Value** output) {
  const Node* output_producer =
      *output ? graph->FindProducer((*output)->id) : nullptr;
  // Check if the value is already produced by a different node.
  if (*output && output_producer && output_producer->id != from_node->id) {
    return absl::InvalidArgumentError("Wrong output is passed.");
  }
  if (*output) {
    RETURN_IF_ERROR(graph->AddConsumer(to_node->id, (*output)->id));
  } else {
    Value* link;
    RETURN_IF_ERROR(AddOutput(graph, from_node, &link));
    RETURN_IF_ERROR(graph->AddConsumer(to_node->id, link->id));
    *output = link;
  }
  return absl::OkStatus();
}
// Verifies that every value in the model has the same batch dimension as
// the first value; returns InvalidArgumentError on the first mismatch.
//
// Fix: the original called model.values() up to three times, and each call
// builds a fresh std::vector; hoist a single call.
absl::Status CheckBatchSizeForAllValues(const GraphFloat32& model) {
  const auto values = model.values();
  if (values.empty()) return absl::OkStatus();
  const int32_t expected_b = values[0]->tensor.shape.b;
  for (const Value* value : values) {
    if (value->tensor.shape.b != expected_b) {
      return absl::InvalidArgumentError(
          absl::StrCat("Batch size mismatch, expected ", expected_b,
                       " but got ", value->tensor.shape.b));
    }
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/lite/delegates/gpu/common/model.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
namespace tflite {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
// Smallest possible graph (input -> node -> output); exercises every query
// the graph API exposes for it.
TEST(Model, SingleNode) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node->id, graph_output->id).ok());
  EXPECT_THAT(graph.nodes(), ElementsAre(node));
  EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_input, graph_output));
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
  EXPECT_THAT(graph.FindInputs(node->id), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.FindOutputs(node->id), UnorderedElementsAre(graph_output));
  EXPECT_THAT(graph.FindConsumers(graph_input->id), UnorderedElementsAre(node));
  EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(node));
  EXPECT_THAT(graph.FindConsumers(graph_output->id), UnorderedElementsAre());
  EXPECT_THAT(graph.FindProducer(graph_input->id), ::testing::Eq(nullptr));
}
// A node may produce several values at once.
TEST(Model, SingleNodeMultipleOutputs) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output1 = graph.NewValue();
  Value* graph_output2 = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node->id, graph_output1->id).ok());
  ASSERT_TRUE(graph.SetProducer(node->id, graph_output2->id).ok());
  EXPECT_THAT(graph.FindOutputs(node->id),
              UnorderedElementsAre(graph_output1, graph_output2));
  EXPECT_THAT(graph.FindProducer(graph_output1->id), ::testing::Eq(node));
  EXPECT_THAT(graph.FindProducer(graph_output2->id), ::testing::Eq(node));
}
// Registering the same consumer edge twice must fail.
TEST(Model, SetSameConsumer) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* graph_input = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node->id, graph_input->id).ok());
  EXPECT_FALSE(graph.AddConsumer(node->id, graph_input->id).ok());
}
// Removing a consumer detaches the edge and turns the value into a graph
// output; removing the same edge again must fail.
TEST(Model, RemoveConsumer) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* graph_input1 = graph.NewValue();
  Value* graph_input2 = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node->id, graph_input1->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node->id, graph_input2->id).ok());
  EXPECT_THAT(graph.FindConsumers(graph_input1->id),
              UnorderedElementsAre(node));
  EXPECT_THAT(graph.FindConsumers(graph_input2->id),
              UnorderedElementsAre(node));
  EXPECT_THAT(graph.FindInputs(node->id),
              UnorderedElementsAre(graph_input1, graph_input2));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre());
  ASSERT_TRUE(graph.RemoveConsumer(node->id, graph_input1->id).ok());
  EXPECT_THAT(graph.FindConsumers(graph_input1->id), UnorderedElementsAre());
  EXPECT_THAT(graph.FindInputs(node->id), UnorderedElementsAre(graph_input2));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_input1));
  ASSERT_FALSE(graph.RemoveConsumer(node->id, graph_input1->id).ok());
}
// Registering the same producer edge twice must fail.
TEST(Model, SetSameProducer) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* graph_output = graph.NewValue();
  ASSERT_TRUE(graph.SetProducer(node->id, graph_output->id).ok());
  EXPECT_FALSE(graph.SetProducer(node->id, graph_output->id).ok());
}
// ReplaceInput swaps one input in place, preserving input order.
TEST(Model, ReplaceInput) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* v0 = graph.NewValue();
  Value* v1 = graph.NewValue();
  Value* v2 = graph.NewValue();
  Value* v3 = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node->id, v0->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node->id, v1->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node->id, v2->id).ok());
  EXPECT_THAT(graph.FindInputs(node->id), ElementsAre(v0, v1, v2));
  ASSERT_TRUE(graph.ReplaceInput(node->id, v1->id, v3->id).ok());
  EXPECT_THAT(graph.FindInputs(node->id), ElementsAre(v0, v3, v2));
}
// Removing a producer turns the value into a graph input; removing it
// again must fail.
TEST(Model, RemoveProducer) {
  GraphFloat32 graph;
  Node* node = graph.NewNode();
  Value* graph_output = graph.NewValue();
  ASSERT_TRUE(graph.SetProducer(node->id, graph_output->id).ok());
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre());
  EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(node));
  ASSERT_TRUE(graph.RemoveProducer(graph_output->id).ok());
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_output));
  EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(nullptr));
  ASSERT_FALSE(graph.RemoveProducer(graph_output->id).ok());
}
// Fixture: a single node with one graph input and one graph output.
class OneNodeModel : public testing::Test {
 protected:
  void SetUp() override {
    node_ = graph_.NewNode();
    Value* graph_input = graph_.NewValue();
    Value* graph_output = graph_.NewValue();
    ASSERT_TRUE(graph_.AddConsumer(node_->id, graph_input->id).ok());
    ASSERT_TRUE(graph_.SetProducer(node_->id, graph_output->id).ok());
    EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input));
    EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output));
    EXPECT_THAT(graph_.nodes(), ElementsAre(node_));
  }
  GraphFloat32 graph_;
  Node* node_;
};
// Removing the only node empties the graph entirely, either way around.
TEST_F(OneNodeModel, DeleteNodeKeepInput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, node_).ok());
  EXPECT_TRUE(graph_.inputs().empty());
  EXPECT_TRUE(graph_.outputs().empty());
  EXPECT_TRUE(graph_.nodes().empty());
}
TEST_F(OneNodeModel, DeleteNodeKeepOutput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, node_).ok());
  EXPECT_TRUE(graph_.inputs().empty());
  EXPECT_TRUE(graph_.outputs().empty());
  EXPECT_TRUE(graph_.nodes().empty());
}
// Fixture: two nodes chained through an intermediate value.
class TwoNodesModel : public testing::Test {
 protected:
  void SetUp() override {
    graph_input_ = graph_.NewValue();
    first_node_ = graph_.NewNode();
    value_ = graph_.NewValue();
    second_node_ = graph_.NewNode();
    graph_output_ = graph_.NewValue();
    ASSERT_TRUE(graph_.AddConsumer(first_node_->id, graph_input_->id).ok());
    ASSERT_TRUE(graph_.SetProducer(first_node_->id, value_->id).ok());
    ASSERT_TRUE(graph_.AddConsumer(second_node_->id, value_->id).ok());
    ASSERT_TRUE(graph_.SetProducer(second_node_->id, graph_output_->id).ok());
    EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
    EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
    EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_, second_node_));
  }
  GraphFloat32 graph_;
  Node* first_node_;
  Node* second_node_;
  Value* graph_input_;
  Value* value_;
  Value* graph_output_;
};
// Removing either node collapses the chain; which value survives depends
// on the KeepInput/KeepOutput variant used.
TEST_F(TwoNodesModel, DeleteFirstNodeKeepInput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, first_node_).ok());
  EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
  EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
  EXPECT_THAT(graph_.nodes(), ElementsAre(second_node_));
}
TEST_F(TwoNodesModel, DeleteFirstNodeKeepOutput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, first_node_).ok());
  EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(value_));
  EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
  EXPECT_THAT(graph_.nodes(), ElementsAre(second_node_));
}
TEST_F(TwoNodesModel, DeleteSecondNodeKeepInput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, second_node_).ok());
  EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
  EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(value_));
  EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_));
}
TEST_F(TwoNodesModel, DeleteSecondNodeKeepOutput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, second_node_).ok());
  EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
  EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
  EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_));
}
// Fixture: three nodes chained through two intermediate values.
class ThreeNodesModel : public testing::Test {
 protected:
  void SetUp() override {
    first_node_ = graph_.NewNode();
    second_node_ = graph_.NewNode();
    third_node_ = graph_.NewNode();
    graph_input_ = graph_.NewValue();
    value0_ = graph_.NewValue();
    value1_ = graph_.NewValue();
    graph_output_ = graph_.NewValue();
    ASSERT_TRUE(graph_.AddConsumer(first_node_->id, graph_input_->id).ok());
    ASSERT_TRUE(graph_.SetProducer(first_node_->id, value0_->id).ok());
    ASSERT_TRUE(graph_.AddConsumer(second_node_->id, value0_->id).ok());
    ASSERT_TRUE(graph_.SetProducer(second_node_->id, value1_->id).ok());
    ASSERT_TRUE(graph_.AddConsumer(third_node_->id, value1_->id).ok());
    ASSERT_TRUE(graph_.SetProducer(third_node_->id, graph_output_->id).ok());
    EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
    EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
    EXPECT_THAT(graph_.nodes(),
                ElementsAre(first_node_, second_node_, third_node_));
  }
  GraphFloat32 graph_;
  Node* first_node_;
  Node* second_node_;
  Node* third_node_;
  Value* graph_input_;
  Value* value0_;
  Value* value1_;
  Value* graph_output_;
};
// Removing the middle node keeps value0_ or value1_ depending on variant.
TEST_F(ThreeNodesModel, DeleteMiddleNodeKeepInput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph_, second_node_).ok());
  EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
  EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
  EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_, third_node_));
  EXPECT_THAT(graph_.values(),
              UnorderedElementsAre(graph_input_, value0_, graph_output_));
}
TEST_F(ThreeNodesModel, DeleteMiddleNodeKeepOutput) {
  ASSERT_TRUE(RemoveSimpleNodeKeepOutput(&graph_, second_node_).ok());
  EXPECT_THAT(graph_.inputs(), UnorderedElementsAre(graph_input_));
  EXPECT_THAT(graph_.outputs(), UnorderedElementsAre(graph_output_));
  EXPECT_THAT(graph_.nodes(), ElementsAre(first_node_, third_node_));
  EXPECT_THAT(graph_.values(),
              UnorderedElementsAre(graph_input_, value1_, graph_output_));
}
// Non-trivial topology: n1's input v1 is shared with n0, so removing n1
// (KeepInput) must keep v1 alive; KeepOutput is rejected because n1 is not
// v1's only consumer.
TEST(Model, RemoveSimpleNodeKeepInputComplexCase) {
  GraphFloat32 graph;
  Node* n0 = graph.NewNode();
  Node* n1 = graph.NewNode();
  Node* n2 = graph.NewNode();
  Value* v0 = graph.NewValue();
  Value* v1 = graph.NewValue();
  Value* v2 = graph.NewValue();
  Value* v3 = graph.NewValue();
  Value* o1 = graph.NewValue();
  Value* o2 = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(n0->id, v0->id).ok());
  ASSERT_TRUE(graph.AddConsumer(n0->id, v1->id).ok());
  ASSERT_TRUE(graph.SetProducer(n0->id, o1->id).ok());
  ASSERT_TRUE(graph.AddConsumer(n1->id, v1->id).ok());
  ASSERT_TRUE(graph.SetProducer(n1->id, v2->id).ok());
  ASSERT_TRUE(graph.AddConsumer(n2->id, v2->id).ok());
  ASSERT_TRUE(graph.AddConsumer(n2->id, v3->id).ok());
  ASSERT_TRUE(graph.SetProducer(n2->id, o2->id).ok());
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(v0, v1, v3));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre(o1, o2));
  EXPECT_THAT(graph.nodes(), ElementsAre(n0, n1, n2));
  ASSERT_FALSE(RemoveSimpleNodeKeepOutput(&graph, n1).ok());
  ASSERT_TRUE(RemoveSimpleNodeKeepInput(&graph, n1).ok());
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(v0, v1, v3));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre(o1, o2));
  EXPECT_THAT(graph.nodes(), ElementsAre(n0, n2));
  EXPECT_THAT(graph.values(), UnorderedElementsAre(v0, v1, v3, o1, o2));
  EXPECT_THAT(graph.FindInputs(n0->id), ElementsAre(v0, v1));
  EXPECT_THAT(graph.FindInputs(n2->id), ElementsAre(v1, v3));
}
// A value may not be both produced and consumed by the same node,
// regardless of which edge is added first.
TEST(Model, CircularDependency) {
  {
    GraphFloat32 graph;
    Node* node = graph.NewNode();
    Value* value = graph.NewValue();
    ASSERT_TRUE(graph.AddConsumer(node->id, value->id).ok());
    EXPECT_FALSE(graph.SetProducer(node->id, value->id).ok());
  }
  {
    GraphFloat32 graph;
    Node* node = graph.NewNode();
    Value* value = graph.NewValue();
    ASSERT_TRUE(graph.SetProducer(node->id, value->id).ok());
    EXPECT_FALSE(graph.AddConsumer(node->id, value->id).ok());
  }
}
// Setting a new producer on a value silently detaches the previous one.
TEST(Model, ReassignValue) {
  GraphFloat32 graph;
  Node* node1 = graph.NewNode();
  Node* node2 = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node1->id, graph_output->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node2->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
  EXPECT_THAT(graph.FindInputs(node1->id), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.FindOutputs(node1->id), UnorderedElementsAre());
  EXPECT_THAT(graph.FindOutputs(node2->id), UnorderedElementsAre(graph_output));
  EXPECT_THAT(graph.FindConsumers(graph_input->id),
              UnorderedElementsAre(node1, node2));
  EXPECT_THAT(graph.FindProducer(graph_output->id), ::testing::Eq(node2));
  EXPECT_THAT(graph.FindConsumers(graph_output->id), UnorderedElementsAre());
}
// Deleting a value detaches it everywhere; deleting graph input/output
// values updates inputs()/outputs() accordingly.
TEST(Model, DeleteValue) {
  GraphFloat32 graph;
  Node* node1 = graph.NewNode();
  Node* node2 = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output = graph.NewValue();
  Value* value = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node1->id, value->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node2->id, value->id).ok());
  ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
  EXPECT_THAT(graph.values(),
              UnorderedElementsAre(graph_input, graph_output, value));
  EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
  EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(node1));
  EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(value));
  EXPECT_THAT(graph.FindOutputs(node1->id), UnorderedElementsAre(value));
  ASSERT_TRUE(graph.DeleteValue(value->id).ok());
  value = nullptr;
  EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_input, graph_output));
  EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre());
  EXPECT_THAT(graph.FindOutputs(node1->id), UnorderedElementsAre());
  ASSERT_TRUE(graph.DeleteValue(graph_input->id).ok());
  graph_input = nullptr;
  EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_output));
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre());
  EXPECT_THAT(graph.FindInputs(node1->id), UnorderedElementsAre());
  ASSERT_TRUE(graph.DeleteValue(graph_output->id).ok());
  graph_output = nullptr;
  EXPECT_THAT(graph.values(), UnorderedElementsAre());
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre());
  EXPECT_THAT(graph.FindOutputs(node2->id), UnorderedElementsAre());
}
// Deleting nodes progressively turns dangling values into graph
// inputs/outputs.
TEST(Model, DeleteNode) {
  GraphFloat32 graph;
  Node* node1 = graph.NewNode();
  Node* node2 = graph.NewNode();
  Node* node3 = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output = graph.NewValue();
  Value* graph_output2 = graph.NewValue();
  Value* value = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node1->id, value->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node2->id, value->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node3->id, value->id).ok());
  ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
  ASSERT_TRUE(graph.SetProducer(node3->id, graph_output2->id).ok());
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2, node3));
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.outputs(),
              UnorderedElementsAre(graph_output, graph_output2));
  EXPECT_THAT(graph.FindConsumers(value->id),
              UnorderedElementsAre(node2, node3));
  EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(node1));
  EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(value));
  EXPECT_THAT(graph.FindInputs(node3->id), UnorderedElementsAre(value));
  ASSERT_TRUE(graph.DeleteNode(node3->id).ok());
  node3 = nullptr;
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input, graph_output2));
  EXPECT_THAT(graph.outputs(),
              UnorderedElementsAre(graph_output, graph_output2));
  EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
  ASSERT_TRUE(graph.DeleteNode(node1->id).ok());
  node1 = nullptr;
  EXPECT_THAT(graph.nodes(), ElementsAre(node2));
  EXPECT_THAT(graph.inputs(),
              UnorderedElementsAre(value, graph_output2, graph_input));
  EXPECT_THAT(graph.outputs(),
              UnorderedElementsAre(graph_input, graph_output, graph_output2));
  EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
  EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(nullptr));
  ASSERT_TRUE(graph.DeleteNode(node2->id).ok());
  node2 = nullptr;
  EXPECT_THAT(graph.nodes(), ElementsAre());
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_output, graph_output2,
                                                   graph_input, value));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output, graph_output2,
                                                    graph_input, value));
  EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre());
  EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(nullptr));
}
// InsertNodeAfter places the new node right after `id` in the execution
// plan and rejects unknown ids with kOutOfRange.
TEST(Model, InsertNodeAfter) {
  GraphFloat32 graph;
  Node* node1 = graph.NewNode();
  Node* node2 = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output = graph.NewValue();
  Value* value = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node1->id, value->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node2->id, value->id).ok());
  ASSERT_TRUE(graph.SetProducer(node2->id, graph_output->id).ok());
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output));
  EXPECT_THAT(graph.FindConsumers(value->id), UnorderedElementsAre(node2));
  EXPECT_THAT(graph.FindProducer(value->id), ::testing::Eq(node1));
  EXPECT_THAT(graph.FindInputs(node2->id), UnorderedElementsAre(value));
  Node* new_node1;
  absl::Status status = graph.InsertNodeAfter(node1->id, &new_node1);
  ASSERT_TRUE(status.ok());
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, new_node1, node2));
  Node* new_node2;
  status = graph.InsertNodeAfter(100, &new_node2);
  EXPECT_EQ(status.code(), absl::StatusCode::kOutOfRange);
  status = graph.InsertNodeAfter(node2->id, &new_node2);
  ASSERT_TRUE(status.ok());
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, new_node1, node2, new_node2));
}
// CheckBatchSizeForAllValues: trivially OK for an empty graph.
TEST(BatchMatchingTest, EmptyGraph) {
  GraphFloat32 graph;
  EXPECT_TRUE(CheckBatchSizeForAllValues(graph).ok());
}
// All values share batch size 1 -> OK.
TEST(BatchMatchingTest, AllMatch) {
  GraphFloat32 graph;
  Value* a = graph.NewValue();
  Value* b = graph.NewValue();
  a->tensor.shape = BHWC(1, 1, 1, 1);
  b->tensor.shape = BHWC(1, 1, 1, 1);
  EXPECT_TRUE(CheckBatchSizeForAllValues(graph).ok());
}
// Mismatching batch sizes -> kInvalidArgument.
TEST(BatchMatchingTest, NotAllMatch) {
  GraphFloat32 graph;
  Value* a = graph.NewValue();
  Value* b = graph.NewValue();
  a->tensor.shape = BHWC(1, 1, 1, 1);
  b->tensor.shape = BHWC(2, 1, 1, 1);
  EXPECT_EQ(CheckBatchSizeForAllValues(graph).code(),
            absl::StatusCode::kInvalidArgument);
}
// Values registered via AddKnownGraphOutput count as graph outputs even
// when they still have consumers; consumer-less outputs are listed first.
TEST(Model, KnownGraphOutput) {
  GraphFloat32 graph;
  Node* node1 = graph.NewNode();
  Node* node2 = graph.NewNode();
  Value* graph_input = graph.NewValue();
  Value* graph_output1 = graph.NewValue();
  Value* graph_output2 = graph.NewValue();
  ASSERT_TRUE(graph.AddConsumer(node1->id, graph_input->id).ok());
  ASSERT_TRUE(graph.SetProducer(node1->id, graph_output1->id).ok());
  ASSERT_TRUE(graph.AddConsumer(node2->id, graph_output1->id).ok());
  ASSERT_TRUE(graph.SetProducer(node2->id, graph_output2->id).ok());
  graph.AddKnownGraphOutput(graph_output1);
  graph.AddKnownGraphOutput(graph_output2);
  EXPECT_THAT(graph.nodes(), ElementsAre(node1, node2));
  EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input));
  EXPECT_THAT(graph.outputs(), ElementsAre(graph_output2, graph_output1));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37dadc8c-122b-4df4-a56e-0b8662e07769 | cpp | tensorflow/tensorflow | convolution_transposed_3x3 | tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.cc | tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h"
namespace tflite {
namespace gpu {
// Configures the operation for the target GPU: picks a per-vendor weights
// upload strategy and weights layout, then generates the kernel source.
// The work group size / launch order are fixed and must match the values
// hard-coded into the generated code (reqd_work_group_size(8, 4, 1)).
ConvolutionTransposed3x3::ConvolutionTransposed3x3(
    const OperationDef& definition, const GpuInfo& gpu_info, int2 padding)
    : GPUOperation(definition), padding_(padding) {
  work_group_size_ = int3(8, 4, 1);
  work_group_launch_order_ = int3(2, 0, 1);
  if (gpu_info.IsApple()) {
    if (gpu_info.apple_info.IsBionic()) {
      weights_upload_type_ = WeightsUploadType::GLOBAL_MEM;
    } else {
      weights_upload_type_ = WeightsUploadType::LOCAL_MEM_BY_THREADS;
    }
  } else if (gpu_info.IsPowerVR()) {
    weights_upload_type_ = WeightsUploadType::LOCAL_MEM_ASYNC;
  } else if (gpu_info.IsNvidia() || gpu_info.IsIntel()) {
    weights_upload_type_ = WeightsUploadType::LOCAL_MEM_BY_THREADS;
  } else if (gpu_info.IsAMD()) {
    weights_upload_type_ = WeightsUploadType::CONSTANT_MEM;
  } else {
    weights_upload_type_ = WeightsUploadType::GLOBAL_MEM;
  }
  // Apple GPUs use the O4I4 spatial layout; everyone else uses I4O4.
  if (gpu_info.IsApple()) {
    weights_layout_ = WeightsLayout::kOICustomSpatialO4I4;
  } else {
    weights_layout_ = WeightsLayout::kOICustomSpatialI4O4;
  }
  code_ = GenerateConvolutionTransposedCode(gpu_info, definition_,
                                            weights_upload_type_, padding_,
                                            work_group_launch_order_);
  // PowerVR F16 builds benefit from relaxed math in the CL compiler.
  if (definition_.precision == CalculationsPrecision::F16 &&
      gpu_info.IsPowerVR()) {
    compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath);
  }
}
// Emits the OpenCL/Metal kernel source for a stride-2 3x3 transposed
// convolution. Each work item computes a 2x2 patch of output pixels
// (accumulators r0..r3); per source slice, 36 FLT4 weight rows
// (9 spatial taps * 4 rows) are staged according to `weights_upload_type`.
std::string ConvolutionTransposed3x3::GenerateConvolutionTransposedCode(
    const GpuInfo& gpu_info, const OperationDef& op_def,
    ConvolutionTransposed3x3::WeightsUploadType weights_upload_type,
    int2 padding, int3 work_group_launch_order) {
  auto src_desc = op_def.src_tensors[0];
  AddSrcTensor("src_tensor", src_desc);
  AddDstTensor("dst_tensor", op_def.src_tensors[0]);
  // A second source tensor means the weights arrive at runtime as a buffer.
  if (op_def.src_tensors.size() == 2) {
    BufferDescriptor desc;
    desc.element_type = op_def.src_tensors[1].GetDataType();
    desc.element_size = 4;
    desc.memory_type =
        weights_upload_type ==
                ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM
            ? MemoryType::CONSTANT
            : MemoryType::GLOBAL;
    AddSrcBuffer("weights", desc);
  }
  args_.AddInt("filter_offset");
  args_.AddInt("padding_x");
  args_.AddInt("padding_y");
  const bool need_local_mem =
      weights_upload_type ==
          ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_BY_THREADS ||
      weights_upload_type ==
          ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_ASYNC;
  std::string c;
  // CONV(R, SRC, F): accumulate SRC against 4 consecutive weight rows into R.
  if (GetWeightsDescription().IsI4O4()) {
    switch (op_def.precision) {
      case CalculationsPrecision::F32:
      case CalculationsPrecision::F16:
        c += "#define CONV(R, SRC, F) \\\n";
        c += " R += SRC.x * weights_cache[F]; \\\n";
        c += " R += SRC.y * weights_cache[F + 1]; \\\n";
        c += " R += SRC.z * weights_cache[F + 2]; \\\n";
        c += " R += SRC.w * weights_cache[F + 3]; \n";
        break;
      case CalculationsPrecision::F32_F16:
        c += "#define CONV(R, SRC, F) \\\n";
        c += " R += TO_ACCUM_TYPE(SRC.x * weights_cache[F] + SRC.y * "
             "weights_cache[F + 1] + SRC.z * weights_cache[F + 2] + SRC.w * "
             "weights_cache[F + 3]);\n";
        break;
    }
  } else {
    c += "#define CONV(R, SRC, F) \\\n";
    c += " R.x += dot(SRC, weights_cache[F]); \\\n";
    c += " R.y += dot(SRC, weights_cache[F + 1]); \\\n";
    c += " R.z += dot(SRC, weights_cache[F + 2]); \\\n";
    c += " R.w += dot(SRC, weights_cache[F + 3]); \n";
  }
  // A 32-wide work group on a 32-lane wave can use the cheaper SIMD barrier.
  const int wg_total_size =
      work_group_size_.x * work_group_size_.y * work_group_size_.z;
  const std::string barrier =
      wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32()
          ? "SIMD_LOCAL_MEM_BARRIER"
          : "LOCAL_MEM_BARRIER";
  const std::string weights_space =
      weights_upload_type ==
              ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM
          ? "__constant"
          : "__global";
  if (gpu_info.IsApiOpenCl()) {
    c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n";
  }
  c += "MAIN_FUNCTION($0) {\n";
  // launch_remap inverts the permuted work-group launch order so global ids
  // can be reconstructed from group/local ids when the axes are swapped.
  int3 launch_remap;
  launch_remap[work_group_launch_order.x] = 0;
  launch_remap[work_group_launch_order.y] = 1;
  launch_remap[work_group_launch_order.z] = 2;
  auto GetGlobalID = [&](int id) {
    std::string result;
    const std::string sid = std::to_string(id);
    if (work_group_launch_order[id] == id) {
      return "GLOBAL_ID_" + sid;
    } else {
      return "GROUP_ID_" + std::to_string(launch_remap[id]) + " * GROUP_SIZE_" +
             sid + " + LOCAL_ID_" + sid;
    }
  };
  if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) {
    c += " int linear_id = " + GetGlobalID(0) + ";\n";
    c += " int X = linear_id / args.dst_tensor.Batch();\n";
    c += " int B = linear_id % args.dst_tensor.Batch();\n";
    c += " args.src_tensor.SetBatchRef(B);\n";
    c += " args.dst_tensor.SetBatchRef(B);\n";
  } else {
    c += " int X = " + GetGlobalID(0) + ";\n";
  }
  c += " int DST_X = X * 2;\n";
  c += " int SRC_X = X + args.padding_x;\n";
  c += " int Y = " + GetGlobalID(1) + ";\n";
  c += " int DST_Y = Y * 2;\n";
  c += " int SRC_Y = Y + args.padding_y;\n";
  c += " int Z = " + GetGlobalID(2) + ";\n";
  // Without local memory we can early-out; with it, all threads must reach
  // the barriers, so the bounds check is deferred until after the loop.
  if (!need_local_mem) {
    c += " if (DST_X >= args.dst_tensor.Width() || DST_Y >= "
         "args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;\n";
  }
  c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n";
  c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n";
  c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n";
  c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n";
  c += " int f_offset = Z * args.filter_offset;\n";
  if (need_local_mem) {
    c += " __local FLT4 weights_cache[36];\n";
  }
  if (weights_upload_type ==
      ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_BY_THREADS) {
    c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n";
  }
  // Emit in-bounds predicates only when the tensor cannot clamp reads itself.
  if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) {
    c += " bool in_x0 = SRC_X >= 0 && SRC_X < args.src_tensor.Width();\n";
    c += " bool in_x1 = SRC_X + 1 >= 0 && SRC_X + 1 < "
         "args.src_tensor.Width();\n";
  }
  if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) {
    c += " bool in_y0 = SRC_Y >= 0 && SRC_Y < args.src_tensor.Height();\n";
    c += " bool in_y1 = SRC_Y + 1 >= 0 && SRC_Y + 1 < "
         "args.src_tensor.Height();\n";
  }
  // Builds the "in_xN && in_yM" guard expression for a given 2x2 read offset.
  auto generate_check = [&](int x, int y) {
    std::string check;
    const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT};
    const std::vector<std::string> names{"in_x" + std::to_string(x),
                                         "in_y" + std::to_string(y)};
    for (int i = 0; i < axes.size(); ++i) {
      const auto& axis = axes[i];
      if (src_desc.HasAxis(axis) &&
          !src_desc.SupportsZeroClamp(axis, gpu_info)) {
        if (!check.empty()) {
          check += " && ";
        }
        check += names[i];
      }
    }
    return check;
  };
  // For linear storage, precompute the four source addresses (one per corner
  // of the 2x2 read window) and the per-slice strides.
  if (src_desc.IsLinear()) {
    if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
      c += " int addr_0 = args.src_tensor.GetAddress(SRC_X, SRC_Y, 0);\n";
      c += " int addr_1 = args.src_tensor.GetAddress(SRC_X + 1, SRC_Y, 0);\n";
      c += " int addr_2 = args.src_tensor.GetAddress(SRC_X, SRC_Y + 1, 0);\n";
      c += " int addr_3 = args.src_tensor.GetAddress(SRC_X+1, SRC_Y+1, 0);\n";
      c += " addr_0 = select(-1, addr_0, (in_x0 && in_y0));\n";
      c += " addr_1 = select(-1, addr_1, (in_x1 && in_y0));\n";
      c += " addr_2 = select(-1, addr_2, (in_x0 && in_y1));\n";
      c += " addr_3 = select(-1, addr_3, (in_x1 && in_y1));\n";
      c += " int dz_0 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
           "in_y0));\n";
      c += " int dz_1 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
           "in_y0));\n";
      c += " int dz_2 = select(0, args.src_tensor.SliceStride(), (in_x0 && "
           "in_y1));\n";
      c += " int dz_3 = select(0, args.src_tensor.SliceStride(), (in_x1 && "
           "in_y1));\n";
    } else {
      c += " int xc0 = clamp(SRC_X, 0, args.src_tensor.Width() - 1);\n";
      c += " int xc1 = clamp(SRC_X + 1, 0, args.src_tensor.Width() - 1);\n";
      c += " int yc0 = clamp(SRC_Y, 0, args.src_tensor.Height() - 1);\n";
      c += " int yc1 = clamp(SRC_Y + 1, 0, args.src_tensor.Height() - 1);\n";
      c += " int addr_0 = args.src_tensor.GetAddress(xc0, yc0, 0);\n";
      c += " int addr_1 = args.src_tensor.GetAddress(xc1, yc0, 0);\n";
      c += " int addr_2 = args.src_tensor.GetAddress(xc0, yc1, 0);\n";
      c += " int addr_3 = args.src_tensor.GetAddress(xc1, yc1, 0);\n";
      c += " int dz = args.src_tensor.SliceStride();\n";
    }
  }
  // Emits the read of source pixel (x, y), masked to zero when out of bounds,
  // and advances the precomputed address for the next slice when linear.
  auto read_src = [&](int x, int y) {
    if (src_desc.IsLinear()) {
      const std::string id = std::to_string(y * 2 + x);
      const std::string addr = "addr_" + std::to_string(y * 2 + x);
      if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) {
        return "args.src_tensor.Read(" + addr + "); " + addr + " += dz_" + id +
               ";\n";
      } else {
        return "args.src_tensor.Read(" + addr + ") * INIT_FLT(in_x" +
               std::to_string(x) + " && in_y" + std::to_string(y) + "); " +
               addr + " += dz;\n";
      }
    } else {
      std::string check = generate_check(x, y);
      if (!check.empty()) {
        check = " * INIT_FLT(" + check + ")";
      }
      return "args.src_tensor.Read(SRC_X + " + std::to_string(x) +
             ", SRC_Y + " + std::to_string(y) + ", s)" + check + ";\n";
    }
  };
  // permutation[i] = {dst accumulator, src pixel} for the i-th weight tap;
  // which taps touch which outputs depends on the padding parity.
  const int padding_x_rem = abs(padding.x) % 2;
  const int padding_y_rem = abs(padding.y) % 2;
  std::vector<std::pair<int, int>> permutation;
  if (padding_x_rem == 1 && padding_y_rem == 1) {
    permutation = {{0, 0}, {1, 0}, {1, 1}, {2, 0}, {2, 2},
                   {3, 0}, {3, 1}, {3, 2}, {3, 3}};
  } else if (padding_x_rem == 0 && padding_y_rem == 1) {
    permutation = {{0, 0}, {0, 1}, {1, 1}, {2, 0}, {2, 1},
                   {2, 2}, {2, 3}, {3, 1}, {3, 3}};
  } else if (padding_x_rem == 1 && padding_y_rem == 0) {
    permutation = {{0, 0}, {0, 2}, {1, 0}, {1, 1}, {1, 2},
                   {1, 3}, {2, 2}, {3, 2}, {3, 3}};
  } else {
    permutation = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 1},
                   {1, 3}, {2, 2}, {2, 3}, {3, 3}};
  }
  // Main loop over source slices: stage 36 weight rows, read the 2x2 window,
  // then apply the 9 taps via CONV.
  c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n";
  if (need_local_mem) {
    c += " " + barrier + ";\n";
  }
  if (weights_upload_type ==
      ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_ASYNC) {
    c += " async_work_group_copy(weights_cache, "
         "args.weights.GetPtr(f_offset), 36, "
         "0);\n";
  } else if (weights_upload_type ==
             ConvolutionTransposed3x3::WeightsUploadType::
                 LOCAL_MEM_BY_THREADS) {
    c += " weights_cache[local_id] = args.weights.Read(f_offset + "
         "local_id);\n";
    c += " if (local_id < 4) {\n";
    c += " weights_cache[local_id + 32] = args.weights.Read(f_offset + "
         "local_id + "
         "32);\n";
    c += " };\n";
  } else {
    c += " " + weights_space +
         " FLT4* weights_cache = args.weights.GetPtr(f_offset);\n";
  }
  c += " FLT4 src0 = " + read_src(0, 0);
  c += " FLT4 src1 = " + read_src(1, 0);
  c += " FLT4 src2 = " + read_src(0, 1);
  c += " FLT4 src3 = " + read_src(1, 1);
  c += " f_offset += 36;\n";
  if (need_local_mem) {
    c += " " + barrier + ";\n";
  }
  for (int i = 0; i < 9; ++i) {
    const std::string r_name = "r" + std::to_string(permutation[i].first);
    const std::string s_name = "src" + std::to_string(permutation[i].second);
    const std::string w_name = std::to_string(i * 4);
    c += " CONV(" + r_name + ", " + s_name + ", " + w_name + ");\n";
  }
  c += " }\n";
  if (need_local_mem) {
    c += " if (DST_X >= args.dst_tensor.Width() || DST_Y >= "
         "args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;\n";
  }
  // Add bias and write each of the 2x2 output pixels that is in bounds.
  c += " FLT4 bias_val = args.biases.Read(Z);\n";
  for (int y = 0; y < 2; ++y) {
    for (int x = 0; x < 2; ++x) {
      const std::string s_x = std::to_string(x);
      const std::string s_y = std::to_string(y);
      const std::string id = std::to_string(y * 2 + x);
      const std::string x_c = "DST_X + " + s_x;
      const std::string y_c = "DST_Y + " + s_y;
      c += " if (" + x_c + " < args.dst_tensor.Width() && " + y_c +
           " < args.dst_tensor.Height()) {\n";
      c += " FLT4 res0 = TO_FLT4(r" + id + ") + bias_val;\n";
      c += " args.dst_tensor.Write(res0, " + x_c + ", " + y_c + ", Z);\n";
      c += " }\n";
    }
  }
  c += "}\n";
  return c;
}
// Binds the runtime scalar arguments: the per-slice weights offset and the
// halved (stride-2) paddings used by the generated kernel.
absl::Status ConvolutionTransposed3x3::BindArguments(ArgumentsBinder* args) {
  // 9 spatial taps, 4 FLT4 rows each, per source slice.
  RETURN_IF_ERROR(args->SetInt("filter_offset", 4 * 9 * src_[0]->Slices()));
  // Floor-divide the (possibly non-positive) padding by the stride of 2.
  auto half_padding = [](int p) { return p >= 1 ? (p - 1) / 2 : (p - 2) / 2; };
  RETURN_IF_ERROR(args->SetInt("padding_x", half_padding(padding_.x)));
  return args->SetInt("padding_y", half_padding(padding_.y));
}
// Reports candidate work group sizes for tuning. Local-memory upload modes
// are pinned to the fixed work group size baked into the generated kernel.
void ConvolutionTransposed3x3::GetPossibleKernelWorkGroups(
    TuningType tuning_type, const GpuInfo& gpu_info,
    const KernelInfo& kernel_info, std::vector<int3>* work_groups) const {
  const bool uses_local_mem =
      weights_upload_type_ == WeightsUploadType::LOCAL_MEM_ASYNC ||
      weights_upload_type_ == WeightsUploadType::LOCAL_MEM_BY_THREADS;
  if (uses_local_mem) {
    work_groups->push_back(work_group_size_);
  } else {
    GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_,
                              work_groups);
  }
}
// Each work item produces a 2x2 output patch, so the grid covers half the
// destination width/height (times batch) and every output slice.
int3 ConvolutionTransposed3x3::GetGridSize() const {
  return int3(DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch(),
              DivideRoundUp(dst_[0]->Height(), 2), dst_[0]->Slices());
}
// Returns the permutation that orders the 9 spatial taps of the 3x3 filter
// into the upload order expected by the generated kernel. The order depends
// only on the parity of the padding in each dimension.
std::vector<int> ConvolutionTransposed3x3::GetSpatialWeightsRemap() const {
  const int padding_x_rem = abs(padding_.x) % 2;
  const int padding_y_rem = abs(padding_.y) % 2;
  // Note: the previous version declared an unused local vector here.
  if (padding_x_rem == 1 && padding_y_rem == 1) {
    return std::vector<int>{4, 5, 3, 7, 1, 8, 6, 2, 0};
  } else if (padding_x_rem == 0 && padding_y_rem == 1) {
    return std::vector<int>{5, 3, 4, 8, 6, 2, 0, 7, 1};
  } else if (padding_x_rem == 1 && padding_y_rem == 0) {
    return std::vector<int>{7, 1, 8, 6, 2, 0, 4, 5, 3};
  } else {
    return std::vector<int>{8, 6, 2, 0, 7, 1, 5, 3, 4};
  }
}
// Rearranges the OHWI filter into the kernel's custom spatial layout and
// registers it with the op as a buffer object.
void ConvolutionTransposed3x3::UploadWeights(
    const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights) {
  const auto layout_desc = GetWeightsDescription();
  BufferDescriptor buf_desc;
  buf_desc.element_type = layout_desc.type;
  buf_desc.element_size = 4;
  // Constant memory is used only when the upload strategy asks for it.
  buf_desc.memory_type =
      weights_upload_type_ ==
              ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM
          ? MemoryType::CONSTANT
          : MemoryType::GLOBAL;
  const int flt_count =
      GetTotalElementsCountForLayout(layout_desc, weights.shape);
  buf_desc.size = flt_count * SizeOf(buf_desc.element_type);
  buf_desc.data.resize(buf_desc.size);
  RearrangeWeights(weights, layout_desc, absl::MakeSpan(buf_desc.data));
  args_.AddObject("weights",
                  std::make_unique<BufferDescriptor>(std::move(buf_desc)));
}
// The specialized kernel handles only 3x3 filters with stride 2 in both dims.
bool IsConvolutionTransposed3x3Supported(
    const OperationDef& definition,
    const ConvolutionTransposedAttributes& attr) {
  const bool kernel_is_3x3 =
      attr.weights.shape.w == 3 && attr.weights.shape.h == 3;
  const bool stride_is_2x2 = attr.stride.w == 2 && attr.stride.h == 2;
  return kernel_is_3x3 && stride_is_2x2;
}
// Builds the op with constant (compile-time) weights and biases baked in.
ConvolutionTransposed3x3 CreateConvolutionTransposed3x3(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const ConvolutionTransposedAttributes& attr) {
  ConvolutionTransposed3x3 op(
      definition, gpu_info,
      int2(attr.padding.prepended.w, attr.padding.prepended.h));
  op.UploadWeights(attr.weights);
  TensorDescriptor bias_desc = CreateConstantLinearTensorDescriptor(
      gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
  op.args_.AddObject("biases",
                     std::make_unique<TensorDescriptor>(std::move(bias_desc)));
  return op;
}
// As CreateConvolutionTransposed3x3, but the filter weights are supplied at
// runtime as a second source tensor instead of being baked into the op.
ConvolutionTransposed3x3 CreateConvolutionTransposed3x3DynamicWeights(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const ConvolutionTransposedAttributes& attr) {
  OperationDef new_def = definition;
  new_def.src_tensors = {
      definition.src_tensors[0]};
  const DataType weights_type = definition.GetDataType();
  // Weights arrive as a linear buffer in HWC layout.
  new_def.src_tensors.push_back(
      {weights_type, TensorStorageType::BUFFER, Layout::HWC});
  const int2 padding = int2(attr.padding.prepended.w, attr.padding.prepended.h);
  ConvolutionTransposed3x3 result(new_def, gpu_info, padding);
  TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor(
      gpu_info, definition.src_tensors[0].GetDataType(), attr.bias);
  result.args_.AddObject("biases", std::make_unique<TensorDescriptor>(
                                       std::move(bias_tensor_desc)));
  return result;
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
// Runs the shared ConvolutionTransposed3x3 conformance test on OpenCL.
TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3) {
  const auto status = ConvolutionTransposed3x3Test(&exec_env_);
  ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
80d3466f-ab27-4cba-9f89-e18caad37262 | cpp | google/arolla | factory_ops | arolla/qexpr/operators/dense_array/factory_ops.cc | arolla/qexpr/operators/dense_array/factory_ops_test.cc | #include "arolla/qexpr/operators/dense_array/factory_ops.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/derived_qtype.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/unit.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
// QExpr operator that packs `tuple_size` scalar inputs of type T (typically
// OptionalValue<...>) into a DenseArray<strip_optional_t<T>> of that length.
template <typename T>
class MakeDenseArrayOperator : public QExprOperator {
 public:
  // Signature: (T, T, ..., T) -> DenseArray<strip_optional_t<T>>.
  explicit MakeDenseArrayOperator(size_t tuple_size)
      : QExprOperator(QExprOperatorSignature::Get(
            std::vector<QTypePtr>(tuple_size, ::arolla::GetQType<T>()),
            GetDenseArrayQType<strip_optional_t<T>>())) {}
 private:
  absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
      absl::Span<const TypedSlot> input_slots,
      TypedSlot output_slot) const final {
    return MakeBoundOperator(
        [input_slots =
             std::vector<TypedSlot>(input_slots.begin(), input_slots.end()),
         output_slot](EvaluationContext* ctx, FramePtr frame) {
          DenseArrayBuilder<strip_optional_t<T>> builder(
              input_slots.size(), &ctx->buffer_factory());
          for (size_t i = 0; i < input_slots.size(); ++i) {
            const T& value = frame.Get(input_slots[i].UnsafeToSlot<T>());
            if constexpr (is_optional_v<T>) {
              // A missing optional leaves the corresponding array slot unset.
              if (value.present) {
                builder.Add(i, value.value);
              }
            } else {
              builder.Add(i, value);
            }
          }
          frame.Set(output_slot.UnsafeToSlot<DenseArray<strip_optional_t<T>>>(),
                    std::move(builder).Build());
        });
  }
};
// Instantiates MakeDenseArrayOperator<T> for the supported optional scalar
// value types; any other value type is rejected with kUnimplemented.
absl::StatusOr<OperatorPtr> ConstructMakeDenseArrayOperator(QTypePtr value_type,
                                                            size_t size) {
#define CONSTRUCT_OPERATOR_IF(t) \
  if (value_type == GetQType<t>()) { \
    return OperatorPtr(std::make_shared<MakeDenseArrayOperator<t>>(size)); \
  }
  CONSTRUCT_OPERATOR_IF(OptionalValue<Unit>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<bool>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<int32_t>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<int64_t>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<uint64_t>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<float>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<double>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<Bytes>);
  CONSTRUCT_OPERATOR_IF(OptionalValue<Text>);
#undef CONSTRUCT_OPERATOR_IF
  return absl::UnimplementedError(
      absl::StrFormat("array.make_dense_array operator is not implemented "
                      "for %s arguments",
                      value_type->name()));
}
}
// Derives the argument type from the requested DenseArray output type: each
// of the `input_types.size()` inputs must be the optional form of the
// array's value type.
absl::StatusOr<OperatorPtr> MakeDenseArrayOperatorFamily::DoGetOperator(
    absl::Span<const QTypePtr> input_types, QTypePtr output_type) const {
  QTypePtr value_qtype = DecayDerivedQType(output_type)->value_qtype();
  if (value_qtype == nullptr) {
    // The requested output is not an array-like type at all.
    return absl::InvalidArgumentError(absl::StrFormat(
        "unexpected return type for array.make_dense_array operator: %s",
        output_type->name()));
  }
  ASSIGN_OR_RETURN(auto arg_type, ToOptionalQType(value_qtype));
  return ConstructMakeDenseArrayOperator(arg_type, input_types.size());
}
} | #include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/buffer.h"
#include "arolla/qexpr/operators.h"
#include "arolla/util/unit.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
// core._array_shape_of returns the length of a dense array as a shape.
TEST(FactoryOpsTest, DenseArrayShapeOfOp) {
  EXPECT_THAT(InvokeOperator<DenseArrayShape>("core._array_shape_of",
                                              DenseArray<Unit>{VoidBuffer(3)}),
              IsOkAndHolds(DenseArrayShape{3}));
}
// const_with_shape broadcasts a scalar to an array of the given shape.
TEST(FactoryOpsTest, DenseArrayConstWithShapeOp) {
  ASSERT_OK_AND_ASSIGN(auto res, InvokeOperator<DenseArray<int>>(
                                     "core.const_with_shape._array_shape",
                                     DenseArrayShape{3}, 57));
  EXPECT_THAT(res, ElementsAre(57, 57, 57));
}
// array_shape_size exposes the element count of a shape as int64.
TEST(FactoryOpsTest, ArrayShapeSize_DenseArray) {
  EXPECT_THAT(
      InvokeOperator<int64_t>("array.array_shape_size", DenseArrayShape{3}),
      IsOkAndHolds(Eq(3)));
}
// resize_array_shape accepts non-negative sizes and rejects negative ones.
TEST(FactoryOpsTest, ResizeArrayShape_DenseArray) {
  EXPECT_THAT(InvokeOperator<DenseArrayShape>("array.resize_array_shape",
                                              DenseArrayShape{3}, int64_t{5}),
              IsOkAndHolds(DenseArrayShape{5}));
  EXPECT_THAT(InvokeOperator<DenseArrayShape>("array.resize_array_shape",
                                              DenseArrayShape{3}, int64_t{-1}),
              StatusIs(absl::StatusCode::kInvalidArgument, "bad size: -1"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/dense_array/factory_ops.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/dense_array/factory_ops_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
aa61fef8-ae7b-4e7f-b284-21e5c94ec56b | cpp | tensorflow/tensorflow | where | tensorflow/lite/kernels/where.cc | tensorflow/lite/kernels/where_test.cc | #include <stdint.h>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace where {
// Tensor indices within the node's input/output arrays.
constexpr int kInputConditionTensor = 0;
constexpr int kOutputTensor = 0;
// Shapes `output_tensor` as [num_true, rank(cond)]: one row of int64
// coordinates for every true (non-zero) element of the condition tensor.
template <typename T>
TfLiteStatus ResizeOutputTensor(TfLiteContext* context,
                                const TfLiteTensor* cond_tensor,
                                TfLiteTensor* output_tensor) {
  const RuntimeShape& cond_shape = GetTensorShape(cond_tensor);
  const T* cond_data = GetTensorData<T>(cond_tensor);
  const int flat_size = cond_shape.FlatSize();
  int num_true = 0;
  for (int i = 0; i < flat_size; ++i) {
    num_true += (cond_data[i] != T(0)) ? 1 : 0;
  }
  // ResizeTensor takes ownership of the dims array.
  TfLiteIntArray* output_dims = TfLiteIntArrayCreate(2);
  output_dims->data[0] = num_true;
  output_dims->data[1] = cond_shape.DimensionsCount();
  return context->ResizeTensor(context, output_tensor, output_dims);
}
// Fixes the output element type to int64 coordinates. If the condition is a
// constant/persistent tensor its contents can be inspected now; otherwise the
// output stays dynamic and is sized during Eval.
template <typename T>
TfLiteStatus PrepareOutput(TfLiteContext* context,
                           const TfLiteTensor* cond_tensor,
                           TfLiteTensor* output) {
  output->type = kTfLiteInt64;
  if (IsConstantOrPersistentTensor(cond_tensor)) {
    return ResizeOutputTensor<T>(context, cond_tensor, output);
  }
  SetTensorToDynamic(output);
  return kTfLiteOk;
}
// Validates node arity and dispatches on the condition tensor's element type
// to set up the int64 output. Unsupported condition types fail preparation.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* cond_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputConditionTensor,
                                          &cond_tensor));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Any non-zero condition value counts as "true" for non-bool types.
  switch (cond_tensor->type) {
    case kTfLiteBool:
      return PrepareOutput<bool>(context, cond_tensor, output);
    case kTfLiteFloat32:
      return PrepareOutput<float>(context, cond_tensor, output);
    case kTfLiteInt64:
      return PrepareOutput<int64_t>(context, cond_tensor, output);
    case kTfLiteInt32:
      return PrepareOutput<int32_t>(context, cond_tensor, output);
    case kTfLiteInt8:
      return PrepareOutput<int8_t>(context, cond_tensor, output);
    case kTfLiteUInt8:
      return PrepareOutput<uint8_t>(context, cond_tensor, output);
    case kTfLiteUInt32:
      return PrepareOutput<uint32_t>(context, cond_tensor, output);
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Condition tensor has unsupported type: '%s'.",
                         TfLiteTypeGetName(cond_tensor->type));
      return kTfLiteError;
  }
}
// Resizes the (dynamic) output to match the current condition contents, then
// writes the int64 coordinates of every true/non-zero condition element.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* cond_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputConditionTensor,
                                          &cond_tensor));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Dynamic outputs were deferred in Prepare; size them now.
  if (IsDynamicTensor(output)) {
    switch (cond_tensor->type) {
      case kTfLiteBool:
        TF_LITE_ENSURE_OK(
            context, ResizeOutputTensor<bool>(context, cond_tensor, output));
        break;
      case kTfLiteFloat32:
        TF_LITE_ENSURE_OK(
            context, ResizeOutputTensor<float>(context, cond_tensor, output));
        break;
      case kTfLiteInt64:
        TF_LITE_ENSURE_OK(
            context, ResizeOutputTensor<int64_t>(context, cond_tensor, output));
        break;
      case kTfLiteInt32:
        TF_LITE_ENSURE_OK(
            context, ResizeOutputTensor<int32_t>(context, cond_tensor, output));
        break;
      case kTfLiteInt8:
        TF_LITE_ENSURE_OK(
            context, ResizeOutputTensor<int8_t>(context, cond_tensor, output));
        break;
      case kTfLiteUInt8:
        TF_LITE_ENSURE_OK(
            context, ResizeOutputTensor<uint8_t>(context, cond_tensor, output));
        break;
      case kTfLiteUInt32:
        TF_LITE_ENSURE_OK(context, ResizeOutputTensor<uint32_t>(
                                       context, cond_tensor, output));
        break;
      default:
        TF_LITE_KERNEL_LOG(context,
                           "Condition tensor has unsupported type: '%s'.",
                           TfLiteTypeGetName(cond_tensor->type));
        return kTfLiteError;
    }
  }
  // Scalar (rank-0) conditions are rejected: output rows need a rank >= 1.
  TfLiteIntArray* dims = cond_tensor->dims;
  if (dims->size == 0) {
    TF_LITE_KERNEL_LOG(context, "Where op requires condition w/ rank > 0");
    return kTfLiteError;
  }
  switch (cond_tensor->type) {
    case kTfLiteBool:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<bool>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    case kTfLiteFloat32:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<float>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    case kTfLiteInt64:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<int64_t>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    case kTfLiteInt32:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<int32_t>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    case kTfLiteInt8:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<int8_t>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    case kTfLiteUInt8:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<uint8_t>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    case kTfLiteUInt32:
      reference_ops::SelectTrueCoords(GetTensorShape(cond_tensor),
                                      GetTensorData<uint32_t>(cond_tensor),
                                      GetTensorData<int64_t>(output));
      break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Condition tensor has unsupported type: '%s'.",
                         TfLiteTypeGetName(cond_tensor->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}
}
// Returns the WHERE kernel registration (no per-node init/free state).
TfLiteRegistration* Register_WHERE() {
  static TfLiteRegistration registration = {/*init=*/nullptr, /*free=*/nullptr,
                                            where::Prepare, where::Eval};
  return &registration;
}
}
}
} | #include <stdint.h>
#include <list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
using ::testing::Test;
// Builds a single-op WHERE model with one (runtime-populated) input tensor.
class BaseWhereOpModel : public SingleOpModel {
 public:
  BaseWhereOpModel(const TensorData& input, const TensorData& output) {
    input_ = AddInput(input);
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_WHERE, BuiltinOptions_WhereOptions,
                 CreateWhereOptions(builder_).Union());
    BuildInterpreter({GetShape(input_)});
  }
  int input() { return input_; }
 protected:
  int input_;
  int output_;
};
// Convenience wrapper exposing the int64 coordinate output.
class IntegerWhereOpModel : public BaseWhereOpModel {
 public:
  using BaseWhereOpModel::BaseWhereOpModel;
  std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
};
// WHERE model whose condition is a constant scalar (rank-0) input.
template <typename T1>
class ConstInputWhereOpModel : public SingleOpModel {
 public:
  ConstInputWhereOpModel(T1 constant_values, const TensorData& output) {
    input_ = AddConstInput(GetTensorType<T1>(), {constant_values}, {});
    output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_WHERE, BuiltinOptions_WhereOptions,
                 CreateWhereOptions(builder_).Union());
    BuildInterpreter({{}});
  }
  int input() { return input_; }
  std::vector<int64_t> GetOutput() { return ExtractVector<int64_t>(output_); }
 protected:
  int input_;
  int output_;
};
// Maps a C++ scalar type to the corresponding TFLite schema tensor type so
// the typed tests below can build models for every supported condition dtype.
template <typename T>
TensorType GetTfLiteType();
template <>
TensorType GetTfLiteType<bool>() {
  return TensorType_BOOL;
}
template <>
TensorType GetTfLiteType<float>() {
  return TensorType_FLOAT32;
}
template <>
TensorType GetTfLiteType<int8_t>() {
  return TensorType_INT8;
}
template <>
TensorType GetTfLiteType<uint8_t>() {
  return TensorType_UINT8;
}
template <>
TensorType GetTfLiteType<int32_t>() {
  return TensorType_INT32;
}
template <>
TensorType GetTfLiteType<uint32_t>() {
  return TensorType_UINT32;
}
template <>
TensorType GetTfLiteType<int64_t>() {
  return TensorType_INT64;
}
// Converts a list of booleans into a vector of T, mapping true -> T(1) and
// false -> T(0), so the same truth pattern can drive every condition dtype.
template <typename T>
std::vector<T> GetCompatibleData(const std::initializer_list<bool>& data) {
  std::vector<T> result;
  result.reserve(data.size());  // single allocation instead of repeated growth
  for (const bool item : data) {
    result.push_back(item ? T(1) : T(0));
  }
  return result;
}
// Typed test fixture; instantiated once per supported condition element type.
template <typename T>
class WhereOpTest : public Test {
 public:
  using List = std::list<T>;
  static T shared_;
  T value_;
};
// Element types the WHERE kernel accepts for its condition tensor.
using MyTypes =
    ::testing::Types<bool, float, int32_t, uint32_t, int64_t, int8_t, uint8_t>;
TYPED_TEST_SUITE(WhereOpTest, MyTypes);
// A rank-0 condition is rejected at Eval time.
TYPED_TEST(WhereOpTest, ScalarValueFail) {
  ConstInputWhereOpModel<bool> m(false, {TensorType_INT64, {}});
  EXPECT_EQ(m.Invoke(), kTfLiteError);
}
// An all-false condition yields an empty coordinate list.
TYPED_TEST(WhereOpTest, SelectFromVectorNoResult) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({false, false, false}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput().size(), 0);
}
// 1-D condition: the output lists the indices of the true elements.
TYPED_TEST(WhereOpTest, SelectFromVector) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({true, false, true}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 2}));
}
// 2-D, all false: empty output.
TYPED_TEST(WhereOpTest, SelectFromMatrixNoResult) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 3}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({false, false, false,
                                               false, false, false,
                                               false, false, false}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_EQ(m.GetOutput().size(), 0);
}
// 2-D column vector: each output row is a (row, col) coordinate pair.
TYPED_TEST(WhereOpTest, SelectFromMatrix1) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 1}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({true, false, true}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0,
                                               2, 0}));
}
// 3x3 condition: coordinates come out in row-major scan order.
TYPED_TEST(WhereOpTest, SelectFromMatrix2) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 3}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({true, true, false,
                                               true, false, false,
                                               true, false, true}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0,
                                               0, 1,
                                               1, 0,
                                               2, 0,
                                               2, 2}));
}
// Non-square 3x5 condition exercises differing row/column extents.
TYPED_TEST(WhereOpTest, SelectFromMatrix3) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {3, 5}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(),
      GetCompatibleData<TypeParam>({true, false, false, true, true,
                                    false, true, true, false, false,
                                    true, false, true, false, false}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0,
                                               0, 3,
                                               0, 4,
                                               1, 1,
                                               1, 2,
                                               2, 0,
                                               2, 2}));
}
// Rank-3, all false: empty output.
TYPED_TEST(WhereOpTest, SelectFromRank3TensorNoResult) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 2, 2}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({false, false, false, false,
                                               false, false, false, false}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_EQ(m.GetOutput().size(), 0);
}
// Rank-3 condition: each output row is a 3-element coordinate triple.
TYPED_TEST(WhereOpTest, SelectFromRank3Tensor1) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 1, 3}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({true, false, true,
                                               false, false, true}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0,
                                               0, 0, 2,
                                               1, 0, 2}));
}
// 2x2x2 condition with mixed values.
TYPED_TEST(WhereOpTest, SelectFromRank3Tensor2) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 2, 2}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(), GetCompatibleData<TypeParam>({true, true, false, true,
                                               false, false, true, true}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0,
                                               0, 0, 1,
                                               0, 1, 1,
                                               1, 1, 0,
                                               1, 1, 1}));
}
// 2x3x2 condition exercises unequal dimension extents at rank 3.
TYPED_TEST(WhereOpTest, SelectFromRank3Tensor3) {
  IntegerWhereOpModel m({GetTfLiteType<TypeParam>(), {2, 3, 2}},
                        {TensorType_INT64, {}});
  m.PopulateTensor<TypeParam>(
      m.input(),
      GetCompatibleData<TypeParam>({true, true, false, true, false, false,
                                    false, false, true, false, true, true}));
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0,
                                               0, 0, 1,
                                               0, 1, 1,
                                               1, 1, 0,
                                               1, 2, 0,
                                               1, 2, 1}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/where.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/where_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4e99a540-5aba-4af7-a26a-89e32aee742d | cpp | google/cel-cpp | proto_util | internal/proto_util.cc | internal/proto_util_test.cc | #include "internal/proto_util.h"
#include "google/protobuf/any.pb.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/struct.pb.h"
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/wrappers.pb.h"
#include "absl/status/status.h"
#include "internal/status_macros.h"
namespace google {
namespace api {
namespace expr {
namespace internal {
// Validates that every well-known protobuf type used by CEL (Any, the
// wrapper types, Duration, Timestamp, Struct/Value/ListValue) is present in
// `descriptor_pool` and passes ValidateStandardMessageType<T>. Returns the
// first error encountered; otherwise OkStatus.
absl::Status ValidateStandardMessageTypes(
    const google::protobuf::DescriptorPool& descriptor_pool) {
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::Any>(descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::BoolValue>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::BytesValue>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::DoubleValue>(
          descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::Duration>(descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::FloatValue>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::Int32Value>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::Int64Value>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::ListValue>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::StringValue>(
          descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::Struct>(descriptor_pool));
  CEL_RETURN_IF_ERROR(ValidateStandardMessageType<google::protobuf::Timestamp>(
      descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::UInt32Value>(
          descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::UInt64Value>(
          descriptor_pool));
  CEL_RETURN_IF_ERROR(
      ValidateStandardMessageType<google::protobuf::Value>(descriptor_pool));
  return absl::OkStatus();
}
}
}
}
} | #include "internal/proto_util.h"
#include "google/protobuf/duration.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "google/protobuf/descriptor.h"
#include "eval/public/structs/cel_proto_descriptor_pool_builder.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
using google::api::expr::internal::ValidateStandardMessageType;
using google::api::expr::internal::ValidateStandardMessageTypes;
using google::api::expr::runtime::AddStandardMessageTypesToDescriptorPool;
using google::api::expr::runtime::GetStandardMessageTypesFileDescriptorSet;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
// A pool populated with the standard message types validates cleanly.
TEST(ProtoUtil, ValidateStandardMessageTypesOk) {
  google::protobuf::DescriptorPool descriptor_pool;
  ASSERT_OK(AddStandardMessageTypesToDescriptorPool(descriptor_pool));
  EXPECT_OK(ValidateStandardMessageTypes(descriptor_pool));
}
// An empty pool must be rejected with NotFound for the missing types.
TEST(ProtoUtil, ValidateStandardMessageTypesRejectsMissing) {
  google::protobuf::DescriptorPool descriptor_pool;
  EXPECT_THAT(ValidateStandardMessageTypes(descriptor_pool),
              StatusIs(absl::StatusCode::kNotFound,
                       HasSubstr("not found in descriptor pool")));
}
// A pool containing a structurally different google.protobuf.Duration
// (field "nanos" renamed to "millis") must fail validation with
// FailedPrecondition mentioning the difference.
TEST(ProtoUtil, ValidateStandardMessageTypesRejectsIncompatible) {
  google::protobuf::DescriptorPool descriptor_pool;
  google::protobuf::FileDescriptorSet standard_fds =
      GetStandardMessageTypesFileDescriptorSet();
  const google::protobuf::Descriptor* descriptor =
      google::protobuf::DescriptorPool::generated_pool()->FindMessageTypeByName(
          "google.protobuf.Duration");
  ASSERT_NE(descriptor, nullptr);
  google::protobuf::FileDescriptorProto file_descriptor_proto;
  descriptor->file()->CopyTo(&file_descriptor_proto);
  // Rebuild the Duration message with "nanos" renamed to "millis".
  google::protobuf::FieldDescriptorProto seconds_desc_proto;
  google::protobuf::FieldDescriptorProto nanos_desc_proto;
  descriptor->FindFieldByName("seconds")->CopyTo(&seconds_desc_proto);
  descriptor->FindFieldByName("nanos")->CopyTo(&nanos_desc_proto);
  nanos_desc_proto.set_name("millis");
  file_descriptor_proto.mutable_message_type(0)->clear_field();
  *file_descriptor_proto.mutable_message_type(0)->add_field() =
      seconds_desc_proto;
  *file_descriptor_proto.mutable_message_type(0)->add_field() =
      nanos_desc_proto;
  descriptor_pool.BuildFile(file_descriptor_proto);
  EXPECT_THAT(
      ValidateStandardMessageType<google::protobuf::Duration>(descriptor_pool),
      StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("differs")));
}
// json_name differences must NOT trip validation: mutate the json_name of
// Duration.seconds before building the pool and expect validation to pass.
TEST(ProtoUtil, ValidateStandardMessageTypesIgnoredJsonName) {
  google::protobuf::DescriptorPool descriptor_pool;
  google::protobuf::FileDescriptorSet standard_fds =
      GetStandardMessageTypesFileDescriptorSet();
  bool modified = false;
  for (int i = 0; i < standard_fds.file_size(); ++i) {
    if (standard_fds.file(i).name() == "google/protobuf/duration.proto") {
      google::protobuf::FileDescriptorProto* fdp = standard_fds.mutable_file(i);
      for (int j = 0; j < fdp->message_type_size(); ++j) {
        if (fdp->message_type(j).name() == "Duration") {
          google::protobuf::DescriptorProto* dp = fdp->mutable_message_type(j);
          for (int k = 0; k < dp->field_size(); ++k) {
            if (dp->field(k).name() == "seconds") {
              dp->mutable_field(k)->set_json_name("FOOBAR");
              modified = true;
            }
          }
        }
      }
    }
  }
  // Guard against the fixture silently losing the Duration message.
  ASSERT_TRUE(modified);
  for (int i = 0; i < standard_fds.file_size(); ++i) {
    descriptor_pool.BuildFile(standard_fds.file(i));
  }
  EXPECT_OK(ValidateStandardMessageTypes(descriptor_pool));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_util.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/proto_util_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c2483977-5d43-4a8a-b8d1-d284e3bc2329 | cpp | tensorflow/tensorflow | c_api_opaque_internal | tensorflow/lite/c/c_api_opaque_internal.cc | tensorflow/lite/c/c_api_opaque_internal_test.cc | #include "tensorflow/lite/c/c_api_opaque_internal.h"
#include <memory>
#include <unordered_map>
#include <utility>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/c/operator.h"
#include "tensorflow/lite/core/subgraph.h"
namespace tflite {
namespace internal {
namespace {
// Allocates a TfLiteOperator mirroring `registration` (builtin code, custom
// name, version) and stamps it with `node_index` so the opaque API can map
// the operator back to its node. Ownership passes to the caller's cache.
TfLiteOperator* MakeOperator(const TfLiteRegistration* registration,
                             int node_index) {
  auto* registration_external = TfLiteOperatorCreate(
      static_cast<TfLiteBuiltinOperator>(registration->builtin_code),
      registration->custom_name, registration->version,
      nullptr);
  registration_external->node_index = node_index;
  return registration_external;
}
}
// Returns the cached TfLiteOperator for (builtin_code, custom_name, version),
// creating and inserting one on a cache miss. Note: `node_index` is only
// recorded for a newly created operator; a cache hit keeps the node index of
// the first insertion (see the CachingWorks test below).
TfLiteOperator* CommonOpaqueConversionUtil::CachedObtainOperator(
    OperatorsCache* registration_externals_cache,
    const TfLiteRegistration* registration, int node_index) {
  OpResolver::OpId op_id{registration->builtin_code, registration->custom_name,
                         registration->version};
  auto it = registration_externals_cache->find(op_id);
  if (it != registration_externals_cache->end()) {
    return it->second.get();
  }
  auto* registration_external = MakeOperator(registration, node_index);
  // Hinted insert reuses the lookup position from find().
  registration_externals_cache->insert(
      it, std::make_pair(op_id, registration_external));
  return registration_external;
}
// Entry point used with a TfLiteContext: lazily creates the per-subgraph
// operator cache, then delegates to CachedObtainOperator.
TfLiteOperator* CommonOpaqueConversionUtil::ObtainOperator(
    TfLiteContext* context, const TfLiteRegistration* registration,
    int node_index) {
  auto* subgraph = static_cast<tflite::Subgraph*>(context->impl_);
  if (!subgraph->registration_externals_) {
    subgraph->registration_externals_ = std::make_shared<OperatorsCache>();
  }
  return CachedObtainOperator(subgraph->registration_externals_.get(),
                              registration, node_index);
}
}
} | #include "tensorflow/lite/c/c_api_opaque_internal.h"
#include <memory>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/interpreter_builder.h"
#include "tensorflow/lite/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model_builder.h"
using tflite::FlatBufferModel;
using tflite::Interpreter;
using tflite::InterpreterBuilder;
using tflite::internal::CommonOpaqueConversionUtil;
using tflite::ops::builtin::BuiltinOpResolver;
// ObtainOperator must copy builtin code, version, custom name from the
// registration and record the supplied node index.
TEST(ObtainRegistrationFromContext, ProducesValidResult) {
  BuiltinOpResolver op_resolver;
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<FlatBufferModel> model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/add.bin");
  ASSERT_NE(model, nullptr);
  InterpreterBuilder builder(*model, op_resolver);
  ASSERT_EQ(builder(&interpreter), kTfLiteOk);
  ASSERT_NE(interpreter, nullptr);
  TfLiteContext* context = interpreter->primary_subgraph().context();
  const TfLiteRegistration* registration = tflite::ops::builtin::Register_ADD();
  TfLiteOperator* registration_external =
      CommonOpaqueConversionUtil::ObtainOperator(context, registration, 42);
  ASSERT_EQ(registration_external->builtin_code, kTfLiteBuiltinAdd);
  ASSERT_EQ(registration_external->version, registration->version);
  ASSERT_EQ(registration_external->custom_name, registration->custom_name);
  ASSERT_EQ(registration_external->node_index, 42);
}
// Two lookups with the same registration (even with different node indices)
// must return the same cached TfLiteOperator pointer.
TEST(ObtainRegistrationFromContext, CachingWorks) {
  BuiltinOpResolver op_resolver;
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<FlatBufferModel> model = FlatBufferModel::BuildFromFile(
      "tensorflow/lite/testdata/add.bin");
  ASSERT_NE(model, nullptr);
  InterpreterBuilder builder(*model, op_resolver);
  ASSERT_EQ(builder(&interpreter), kTfLiteOk);
  ASSERT_NE(interpreter, nullptr);
  TfLiteContext* context = interpreter->primary_subgraph().context();
  const TfLiteRegistration* registration = tflite::ops::builtin::Register_ADD();
  TfLiteOperator* registration_external1 =
      CommonOpaqueConversionUtil::ObtainOperator(context, registration, 0);
  TfLiteOperator* registration_external2 =
      CommonOpaqueConversionUtil::ObtainOperator(context, registration, 1);
  ASSERT_EQ(registration_external1, registration_external2);
}
b2be1087-b57b-4417-9ce3-1d4c196ea137 | cpp | google/tensorstore | file_key_value_store | tensorstore/kvstore/file/file_key_value_store.cc | tensorstore/kvstore/file/file_key_value_store_test.cc | #include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/functional/function_ref.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/batch.h"
#include "tensorstore/context.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/context_binding.h"
#include "tensorstore/internal/file_io_concurrency_resource.h"
#include "tensorstore/internal/flat_cord_builder.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/internal/metrics/counter.h"
#include "tensorstore/internal/metrics/metadata.h"
#include "tensorstore/internal/os/error_code.h"
#include "tensorstore/internal/os/unique_handle.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/batch_util.h"
#include "tensorstore/kvstore/byte_range.h"
#include "tensorstore/kvstore/common_metrics.h"
#include "tensorstore/kvstore/file/file_resource.h"
#include "tensorstore/kvstore/file/util.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/read_result.h"
#include "tensorstore/kvstore/registry.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/supported_features.h"
#include "tensorstore/kvstore/url_registry.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/executor.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/internal/os/file_lister.h"
#include "tensorstore/internal/os/file_util.h"
using ::tensorstore::internal::OsErrorCode;
using ::tensorstore::internal_file_util::IsKeyValid;
using ::tensorstore::internal_file_util::LongestDirectoryPrefix;
using ::tensorstore::internal_os::FileDescriptor;
using ::tensorstore::internal_os::FileInfo;
using ::tensorstore::internal_os::kLockSuffix;
using ::tensorstore::internal_os::UniqueFileDescriptor;
using ::tensorstore::kvstore::ListEntry;
using ::tensorstore::kvstore::ListReceiver;
using ::tensorstore::kvstore::ReadResult;
using ::tensorstore::kvstore::SupportedFeatures;
namespace tensorstore {
namespace internal_file_kvstore {
namespace {
namespace jb = tensorstore::internal_json_binding;
// Metrics for the "file" kvstore driver: the common kvstore counters plus
// two file-specific ones (read opens and write-lock contention).
struct FileMetrics : public internal_kvstore::CommonMetrics {
  internal_metrics::Counter<int64_t>& open_read;
  internal_metrics::Counter<int64_t>& lock_contention;
};

// Process-wide metric instances, registered once at static-init time.
auto file_metrics = []() -> FileMetrics {
  return {TENSORSTORE_KVSTORE_COMMON_METRICS(file),
          TENSORSTORE_KVSTORE_COUNTER_IMPL(
              file, open_read, "Number of times a file is opened for reading"),
          TENSORSTORE_KVSTORE_COUNTER_IMPL(file, lock_contention,
                                           " kvstore::Write lock contention")};
}();
ABSL_CONST_INIT internal_log::VerboseFlag file_logging("file");
// Returns true if `path` is acceptable as a file-kvstore root path. The
// empty path and "/" are always valid; otherwise one trailing directory
// separator is tolerated and the remainder must satisfy IsKeyValid (which
// also rejects paths ending in the reserved lock suffix).
bool IsFileKvstorePathValid(std::string_view path) {
  if (path.empty() || path == "/") return true;
  const char last = path.back();
  if (last == '/' || last == '\\') path.remove_suffix(1);
  return IsKeyValid(path, kLockSuffix);
}
// JSON-bindable spec data for the "file" driver: the shared I/O concurrency
// pool and the fsync-on-write toggle.
struct FileKeyValueStoreSpecData {
  Context::Resource<internal::FileIoConcurrencyResource> file_io_concurrency;
  Context::Resource<FileIoSyncResource> file_io_sync;
  // ApplyMembers drives cache-key computation and context binding.
  constexpr static auto ApplyMembers = [](auto& x, auto f) {
    return f(x.file_io_concurrency, x.file_io_sync);
  };
  constexpr static auto default_json_binder = jb::Object(
      jb::Member(
          internal::FileIoConcurrencyResource::id,
          jb::Projection<&FileKeyValueStoreSpecData::file_io_concurrency>()),
      jb::Member(FileIoSyncResource::id,
                 jb::Projection<&FileKeyValueStoreSpecData::file_io_sync>())
  );
};
// Registered driver spec for "file://" kvstore URLs.
class FileKeyValueStoreSpec
    : public internal_kvstore::RegisteredDriverSpec<FileKeyValueStoreSpec,
                                                    FileKeyValueStoreSpecData> {
 public:
  static constexpr char id[] = "file";
  // Rejects invalid root paths and lexically normalizes the rest
  // (collapses "." / ".." components and duplicate separators).
  absl::Status NormalizeSpec(std::string& path) override {
    if (!IsFileKvstorePathValid(path)) {
      return absl::InvalidArgumentError(
          absl::StrCat("Invalid file path: ", QuoteString(path)));
    }
    path = internal::LexicalNormalizePath(path);
    return absl::OkStatus();
  }
  Future<kvstore::DriverPtr> DoOpen() const override;
  // Renders the spec back to a "file://<path>" URL. Fix: the scheme
  // separator string literal had been truncated (unterminated `":`),
  // leaving the function uncompilable; restore the "://" separator and
  // percent-encode the path so round-tripping through ParseFileUrl works.
  Result<std::string> ToUrl(std::string_view path) const override {
    return absl::StrCat(id, "://", internal::PercentEncodeUriPath(path));
  }
};
// The "file" kvstore driver: each key maps to a regular file under the
// spec's root path; writes are staged via ".__lock" files and renamed into
// place (see WriteTask below).
class FileKeyValueStore
    : public internal_kvstore::RegisteredDriver<FileKeyValueStore,
                                                FileKeyValueStoreSpec> {
 public:
  Future<ReadResult> Read(Key key, ReadOptions options) override;
  Future<TimestampedStorageGeneration> Write(Key key,
                                             std::optional<Value> value,
                                             WriteOptions options) override;
  Future<const void> DeleteRange(KeyRange range) override;
  void ListImpl(ListOptions options, ListReceiver receiver) override;
  // All filesystem work is dispatched onto this shared executor.
  const Executor& executor() { return spec_.file_io_concurrency->executor; }
  std::string DescribeKey(std::string_view key) override {
    return absl::StrCat("local file ", QuoteString(key));
  }
  absl::Status GetBoundSpecData(FileKeyValueStoreSpecData& spec) const {
    spec = spec_;
    return absl::OkStatus();
  }
  SupportedFeatures GetSupportedFeatures(
      const KeyRange& key_range) const final {
    return SupportedFeatures::kSingleKeyAtomicReadModifyWrite |
           SupportedFeatures::kAtomicWriteWithoutOverwrite;
  }
  // Whether writes fsync file and parent directory before completing.
  bool sync() const { return *spec_.file_io_sync; }
  SpecData spec_;
};
// Returns InvalidArgument for keys that are not usable as file paths or
// that end with the reserved lock-file suffix.
absl::Status ValidateKey(std::string_view key) {
  if (IsKeyValid(key, kLockSuffix)) return absl::OkStatus();
  return absl::InvalidArgumentError(
      absl::StrCat("Invalid key: ", QuoteString(key)));
}
// Validates a key range by validating its longest common directory prefix;
// an empty prefix (range spanning the whole root) is always acceptable.
absl::Status ValidateKeyRange(const KeyRange& range) {
  auto prefix = LongestDirectoryPrefix(range);
  return prefix.empty() ? absl::OkStatus() : ValidateKey(prefix);
}
// Derives a storage generation from (device id, inode/file id, mtime in
// nanoseconds) so that any replacement of the file changes the generation.
StorageGeneration GetFileGeneration(const FileInfo& info) {
  return StorageGeneration::FromValues(
      internal_os::GetDeviceId(info), internal_os::GetFileId(info),
      absl::ToUnixNanos(internal_os::GetMTime(info)));
}
// Opens (creating as needed) the parent directory of `path`, returning a
// descriptor for it. Works in two phases on the mutable `path` buffer:
// phase 1 walks UP the path, temporarily replacing '/' separators with
// '\0' until an existing ancestor directory opens; phase 2 walks back DOWN,
// creating each missing directory and restoring the '\0' markers to '/'.
Result<UniqueFileDescriptor> OpenParentDirectory(std::string path) {
  size_t end_pos = path.size();
  Result<UniqueFileDescriptor> fd;
  while (true) {
    size_t separator_pos = end_pos;
    while (separator_pos != 0 &&
           !internal_os::IsDirSeparator(path[separator_pos - 1])) {
      --separator_pos;
    }
    // NOTE: when no separator remains, this wraps size_t 0 to npos, which
    // the branch below interprets as "parent is the current directory".
    --separator_pos;
    const char* dir_path;
    if (separator_pos == std::string::npos) {
      dir_path = ".";
    } else if (separator_pos == 0) {
      dir_path = "/";
    } else {
      // Truncate in place at the separator to name the ancestor directory.
      path[separator_pos] = '\0';
      dir_path = path.c_str();
      end_pos = separator_pos;
    }
    fd = internal_os::OpenDirectoryDescriptor(dir_path);
    if (!fd.ok()) {
      if (absl::IsNotFound(fd.status())) {
        // Ancestor missing: retry one level higher. "." and "/" always
        // exist, so NotFound implies we truncated inside the path.
        assert(separator_pos != 0 && separator_pos != std::string::npos);
        end_pos = separator_pos - 1;
        continue;
      }
      return fd.status();
    }
    // Undo the in-place truncation for the level that succeeded.
    if (dir_path == path.c_str()) path[separator_pos] = '/';
    break;
  }
  // Phase 2: create each missing component, restoring '\0' -> '/' as we go.
  while (true) {
    size_t separator_pos = path.find('\0', end_pos);
    if (separator_pos == std::string::npos) {
      return fd;
    }
    TENSORSTORE_RETURN_IF_ERROR(internal_os::MakeDirectory(path));
    fd = internal_os::OpenDirectoryDescriptor(path);
    TENSORSTORE_RETURN_IF_ERROR(fd.status());
    path[separator_pos] = '/';
    end_pos = separator_pos + 1;
  }
}
// Opens the value file at `path` for reading. On success stores the file's
// generation in `*generation` (and size in `*size` if requested). A missing
// file is not an error: returns an invalid descriptor with
// StorageGeneration::NoValue(). A non-regular file is FailedPrecondition.
Result<UniqueFileDescriptor> OpenValueFile(const std::string& path,
                                           StorageGeneration* generation,
                                           int64_t* size = nullptr) {
  auto fd = internal_os::OpenExistingFileForReading(path);
  if (!fd.ok()) {
    if (absl::IsNotFound(fd.status())) {
      *generation = StorageGeneration::NoValue();
      return UniqueFileDescriptor{};
    }
    return fd;
  }
  FileInfo info;
  TENSORSTORE_RETURN_IF_ERROR(internal_os::GetFileInfo(fd->get(), &info));
  if (!internal_os::IsRegularFile(info)) {
    return absl::FailedPreconditionError(
        absl::StrCat("Not a regular file: ", QuoteString(path)));
  }
  if (size) *size = internal_os::GetSize(info);
  *generation = GetFileGeneration(info);
  return fd;
}
// Reads exactly `byte_range` from `fd` into a flat Cord, looping over short
// reads. A zero-byte read before the range is satisfied means the file was
// truncated concurrently, reported as Unavailable. Updates read metrics.
Result<absl::Cord> ReadFromFileDescriptor(FileDescriptor fd,
                                          ByteRange byte_range) {
  file_metrics.batch_read.Increment();
  absl::Time start_time = absl::Now();
  internal::FlatCordBuilder buffer(byte_range.size(), false);
  size_t offset = 0;
  while (offset < buffer.size()) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto n, internal_os::ReadFromFile(fd, buffer.data() + offset,
                                          buffer.size() - offset,
                                          byte_range.inclusive_min + offset));
    if (n > 0) {
      file_metrics.bytes_read.IncrementBy(n);
      offset += n;
      buffer.set_inuse(offset);
      continue;
    }
    if (n == 0) {
      // Premature EOF: the file shrank between stat and read.
      return absl::UnavailableError("Length changed while reading");
    }
  }
  file_metrics.read_latency_ms.Observe(
      absl::ToInt64Milliseconds(absl::Now() - start_time));
  return std::move(buffer).Build();
}
class BatchReadTask;
// Batch entry keyed by the file path; all reads of the same path within one
// batch share a single open() and stat().
using BatchReadTaskBase = internal_kvstore_batch::BatchReadEntry<
    FileKeyValueStore,
    internal_kvstore_batch::ReadRequest<kvstore::ReadGenerationConditions>,
    std::string >;

// Services all batched read requests for one file: opens the file once,
// validates generations/byte ranges against the stat result, then issues
// either a single read or coalesced reads on the executor.
class BatchReadTask final
    : public BatchReadTaskBase,
      public internal::AtomicReferenceCount<BatchReadTask> {
 private:
  // Fields below are set by ProcessBatch before any coalesced reads run.
  TimestampedStorageGeneration stamp_;
  UniqueFileDescriptor fd_;
  int64_t size_;

 public:
  // Starts with one reference; Submit adopts it into the executor closure.
  BatchReadTask(BatchEntryKey&& batch_entry_key_)
      : BatchReadTaskBase(std::move(batch_entry_key_)),
        internal::AtomicReferenceCount<BatchReadTask>(1) {
  }
  void Submit(Batch::View batch) final {
    if (request_batch.requests.empty()) return;
    driver().executor()(
        [self = internal::IntrusivePtr<BatchReadTask>(
             this, internal::adopt_object_ref)] { self->ProcessBatch(); });
  }
  // Performs one contiguous read and wraps it as a ReadResult with the
  // stamp captured at open time.
  Result<kvstore::ReadResult> DoByteRangeRead(ByteRange byte_range) {
    absl::Cord value;
    TENSORSTORE_ASSIGN_OR_RETURN(
        value, ReadFromFileDescriptor(fd_.get(), byte_range),
        tensorstore::MaybeAnnotateStatus(_, "Error reading from open file"));
    return kvstore::ReadResult::Value(std::move(value), stamp_);
  }
  void ProcessBatch() {
    ABSL_LOG_IF(INFO, file_logging)
        << "BatchReadTask " << std::get<std::string>(batch_entry_key);
    stamp_.time = absl::Now();
    file_metrics.open_read.Increment();
    auto& requests = request_batch.requests;
    TENSORSTORE_ASSIGN_OR_RETURN(
        fd_,
        OpenValueFile(std::get<std::string>(batch_entry_key),
                      &stamp_.generation, &size_),
        internal_kvstore_batch::SetCommonResult(requests, std::move(_)));
    if (!fd_.valid()) {
      // File absent: every request resolves to Missing with the same time.
      internal_kvstore_batch::SetCommonResult(
          requests, kvstore::ReadResult::Missing(stamp_.time));
      return;
    }
    // Drops requests whose generation conditions or byte ranges fail.
    internal_kvstore_batch::ValidateGenerationsAndByteRanges(requests, stamp_,
                                                            size_);
    if (requests.empty()) return;
    if (requests.size() == 1) {
      // Fast path: no coalescing needed for a single surviving request.
      auto& byte_range_request =
          std::get<internal_kvstore_batch::ByteRangeReadRequest>(requests[0]);
      byte_range_request.promise.SetResult(
          DoByteRangeRead(byte_range_request.byte_range.AsByteRange()));
      return;
    }
    const auto& executor = driver().executor();
    internal_kvstore_batch::CoalescingOptions coalescing_options;
    // Merge requests separated by at most this many extra bytes.
    coalescing_options.max_extra_read_bytes = 255;
    internal_kvstore_batch::ForEachCoalescedRequest<Request>(
        requests, coalescing_options,
        [&](ByteRange coalesced_byte_range,
            tensorstore::span<Request> coalesced_requests) {
          auto self = internal::IntrusivePtr<BatchReadTask>(this);
          executor([self = std::move(self), coalesced_byte_range,
                    coalesced_requests] {
            self->ProcessCoalescedRead(coalesced_byte_range,
                                       coalesced_requests);
          });
        });
  }
  // Reads one coalesced range and slices the result back out to each
  // member request.
  void ProcessCoalescedRead(ByteRange coalesced_byte_range,
                            tensorstore::span<Request> coalesced_requests) {
    TENSORSTORE_ASSIGN_OR_RETURN(auto read_result,
                                 DoByteRangeRead(coalesced_byte_range),
                                 internal_kvstore_batch::SetCommonResult(
                                     coalesced_requests, std::move(_)));
    internal_kvstore_batch::ResolveCoalescedRequests(
        coalesced_byte_range, coalesced_requests, std::move(read_result));
  }
};
// Queues a read as a batch entry keyed by the file path; actual I/O happens
// in BatchReadTask::ProcessBatch on the executor.
Future<ReadResult> FileKeyValueStore::Read(Key key, ReadOptions options) {
  file_metrics.read.Increment();
  TENSORSTORE_RETURN_IF_ERROR(ValidateKey(key));
  auto [promise, future] = PromiseFuturePair<kvstore::ReadResult>::Make();
  BatchReadTask::MakeRequest<BatchReadTask>(
      *this, {std::move(key)}, options.batch, options.staleness_bound,
      BatchReadTask::Request{{std::move(promise), options.byte_range},
                             std::move(options.generation_conditions)});
  return std::move(future);
}
// Writes `value` to the open file `fd` (at `fd_path`), optionally fsyncs,
// stats it for the new generation, then atomically renames it over
// `rename_path`. The generation is computed BEFORE the rename, from the
// same inode that ends up at `rename_path`.
Result<StorageGeneration> WriteWithSyncAndRename(
    FileDescriptor fd, const std::string& fd_path, absl::Cord value, bool sync,
    const std::string& rename_path) {
  auto start_write = absl::Now();
  // Loop over short writes until the whole cord is written.
  while (!value.empty()) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto n, internal_os::WriteCordToFile(fd, value),
        MaybeAnnotateStatus(
            _, absl::StrCat("Failed writing: ", QuoteString(fd_path))));
    file_metrics.bytes_written.IncrementBy(n);
    if (n == value.size()) break;
    value.RemovePrefix(n);
  }
  if (sync) {
    TENSORSTORE_RETURN_IF_ERROR(internal_os::FsyncFile(fd));
  }
  FileInfo info;
  TENSORSTORE_RETURN_IF_ERROR(internal_os::GetFileInfo(fd, &info));
  TENSORSTORE_RETURN_IF_ERROR(
      internal_os::RenameOpenFile(fd, fd_path, rename_path));
#if 0
  // Debug check: rename must not change the file's identity/generation.
  FileInfo debug_info;
  ABSL_CHECK_OK(internal_os::GetFileInfo(fd, &debug_info));
  ABSL_CHECK_EQ(GetFileGeneration(info), GetFileGeneration(debug_info));
#endif
  file_metrics.write_latency_ms.Observe(
      absl::ToInt64Milliseconds(absl::Now() - start_write));
  return GetFileGeneration(info);
}
// Opens (creating if needed) the lock file at `path` for writing and fills
// `*info` with its stat data; rejects non-regular files.
Result<UniqueFileDescriptor> OpenLockFile(const std::string& path,
                                          FileInfo* info) {
  auto fd = internal_os::OpenFileForWriting(path);
  if (!fd.ok()) return fd;
  TENSORSTORE_RETURN_IF_ERROR(internal_os::GetFileInfo(fd->get(), info));
  if (!internal_os::IsRegularFile(*info)) {
    return absl::FailedPreconditionError(
        absl::StrCat("Not a regular file: ", path));
  }
  return fd;
}
// RAII helper managing the "<key>.__lock" file used to serialize writers of
// a single key. The destructor releases the lock (but does not delete the
// lock file; callers decide whether to Delete or rename it into place).
struct WriteLockHelper {
  std::string lock_path;
  UniqueFileDescriptor lock_fd;
  std::optional<internal_os::UnlockFn> unlock_fn;
  WriteLockHelper(const std::string& path)
      : lock_path(absl::StrCat(path, kLockSuffix)) {}
  ~WriteLockHelper() { Unlock(); }
  // Creates the lock file and acquires an fd lock on it, looping to guard
  // against the race where another writer deletes/replaces the lock file
  // between our open and our lock acquisition: after locking, the file is
  // re-opened and the two stat results (ping-ponging between `a` and `b`)
  // must name the same inode, otherwise retry on the fresh descriptor.
  absl::Status CreateAndAcquire() {
    FileInfo a, b;
    FileInfo* info = &a;
    TENSORSTORE_ASSIGN_OR_RETURN(lock_fd, OpenLockFile(lock_path, info));
    while (true) {
      TENSORSTORE_ASSIGN_OR_RETURN(
          unlock_fn, internal_os::AcquireFdLock(lock_fd.get()),
          MaybeAnnotateStatus(_,
                              absl::StrCat("Failed to acquire lock on file: ",
                                           QuoteString(lock_path))));
      FileInfo* other_info = info == &a ? &b : &a;
      TENSORSTORE_ASSIGN_OR_RETURN(UniqueFileDescriptor other_fd,
                                   OpenLockFile(lock_path, other_info));
      if (internal_os::GetDeviceId(a) == internal_os::GetDeviceId(b) &&
          internal_os::GetFileId(a) == internal_os::GetFileId(b)) {
        return absl::OkStatus();
      }
      // Lock file was replaced under us: drop our lock, keep the newer
      // descriptor, and try again.
      Unlock();
      info = other_info;
      lock_fd = std::move(other_fd);
      file_metrics.lock_contention.Increment();
    }
  }
  // Deletes the lock file; a concurrent deletion (NotFound) is fine.
  absl::Status Delete() {
    auto status = internal_os::DeleteOpenFile(lock_fd.get(), lock_path);
    if (status.ok() || absl::IsNotFound(status)) {
      return absl::OkStatus();
    }
    return MaybeAnnotateStatus(std::move(status), "Failed to clean lock file");
  }
  // Releases the fd lock if held; idempotent.
  void Unlock() {
    if (unlock_fn) {
      std::move (*unlock_fn)(lock_fd.get());
      unlock_fn = std::nullopt;
    }
  }
};
// Executor task implementing a (conditionally) atomic write: writes the
// value into the key's lock file while holding its fd lock, then renames
// the lock file over the destination. On success the lock file BECOMES the
// value file, so it must not be deleted; on failure it is cleaned up.
struct WriteTask {
  std::string full_path;
  absl::Cord value;
  kvstore::WriteOptions options;
  bool sync;

  Result<TimestampedStorageGeneration> operator()() const {
    ABSL_LOG_IF(INFO, file_logging) << "WriteTask " << full_path;
    TimestampedStorageGeneration r;
    r.time = absl::Now();
    TENSORSTORE_ASSIGN_OR_RETURN(auto dir_fd, OpenParentDirectory(full_path));
    WriteLockHelper lock_helper(full_path);
    TENSORSTORE_RETURN_IF_ERROR(lock_helper.CreateAndAcquire());
    bool delete_lock_file = true;
    FileDescriptor fd = lock_helper.lock_fd.get();
    const std::string& lock_path = lock_helper.lock_path;
    auto generation_result = [&]() -> Result<StorageGeneration> {
      // Conditional write: check the current generation under the lock.
      if (!StorageGeneration::IsUnknown(
              options.generation_conditions.if_equal)) {
        StorageGeneration generation;
        TENSORSTORE_ASSIGN_OR_RETURN(UniqueFileDescriptor value_fd,
                                     OpenValueFile(full_path, &generation));
        if (generation != options.generation_conditions.if_equal) {
          // Precondition failed; signaled as Unknown generation.
          return StorageGeneration::Unknown();
        }
      }
      TENSORSTORE_ASSIGN_OR_RETURN(
          auto generation,
          WriteWithSyncAndRename(fd, lock_path, value, sync, full_path));
      // The lock file was renamed to the value file; keep it.
      delete_lock_file = false;
      if (sync) {
        // Make the rename itself durable.
        TENSORSTORE_RETURN_IF_ERROR(
            internal_os::FsyncDirectory(dir_fd.get()),
            MaybeAnnotateStatus(
                _, absl::StrCat("Error calling fsync on parent directory of: ",
                                full_path)));
      }
      lock_helper.Unlock();
      return generation;
    }();
    if (delete_lock_file) {
      lock_helper.Delete().IgnoreError();
    }
    if (!generation_result) {
      return std::move(generation_result).status();
    }
    r.generation = *std::move(generation_result);
    return r;
  }
};
// Executor task implementing a (conditionally) atomic single-key delete,
// performed while holding the key's lock file. The lock file is always
// removed afterwards; the parent directory is fsynced only on a successful
// delete with sync enabled.
struct DeleteTask {
  std::string full_path;
  kvstore::WriteOptions options;
  bool sync;

  Result<TimestampedStorageGeneration> operator()() const {
    ABSL_LOG_IF(INFO, file_logging) << "DeleteTask " << full_path;
    TimestampedStorageGeneration r;
    r.time = absl::Now();
    WriteLockHelper lock_helper(full_path);
    TENSORSTORE_ASSIGN_OR_RETURN(auto dir_fd, OpenParentDirectory(full_path));
    TENSORSTORE_RETURN_IF_ERROR(lock_helper.CreateAndAcquire());
    bool fsync_directory = false;
    auto generation_result = [&]() -> Result<StorageGeneration> {
      // Conditional delete: check the current generation under the lock.
      if (!StorageGeneration::IsUnknown(
              options.generation_conditions.if_equal)) {
        StorageGeneration generation;
        TENSORSTORE_ASSIGN_OR_RETURN(UniqueFileDescriptor value_fd,
                                     OpenValueFile(full_path, &generation));
        if (generation != options.generation_conditions.if_equal) {
          return StorageGeneration::Unknown();
        }
      }
      auto status = internal_os::DeleteFile(full_path);
      // Deleting an already-missing key succeeds (idempotent delete).
      if (!status.ok() && !absl::IsNotFound(status)) {
        return status;
      }
      fsync_directory = sync;
      return StorageGeneration::NoValue();
    }();
    TENSORSTORE_RETURN_IF_ERROR(lock_helper.Delete());
    if (fsync_directory) {
      TENSORSTORE_RETURN_IF_ERROR(
          internal_os::FsyncDirectory(dir_fd.get()),
          MaybeAnnotateStatus(
              _, absl::StrCat("Error calling fsync on parent directory of: ",
                              QuoteString(full_path))));
    }
    if (!generation_result) {
      return std::move(generation_result).status();
    }
    r.generation = *std::move(generation_result);
    return r;
  }
};
// Dispatches to WriteTask (value present) or DeleteTask (nullopt value,
// i.e. a delete) on the shared executor.
Future<TimestampedStorageGeneration> FileKeyValueStore::Write(
    Key key, std::optional<Value> value, WriteOptions options) {
  file_metrics.write.Increment();
  TENSORSTORE_RETURN_IF_ERROR(ValidateKey(key))
  if (value) {
    return MapFuture(executor(), WriteTask{std::move(key), *std::move(value),
                                           std::move(options), this->sync()});
  } else {
    return MapFuture(executor(), DeleteTask{std::move(key), std::move(options),
                                            this->sync()});
  }
}
// Deletes every key in `range` by recursively walking the filesystem under
// the range's longest directory prefix. Individual per-entry failures other
// than benign races are accumulated into `delete_status`.
struct DeleteRangeTask {
  KeyRange range;

  // Runs on the executor; resolves `promise` when the traversal completes
  // (or immediately on traversal failure).
  void operator()(Promise<void> promise) {
    ABSL_LOG_IF(INFO, file_logging) << "DeleteRangeTask " << range;
    std::string prefix(internal_file_util::LongestDirectoryPrefix(range));
    absl::Status delete_status;
    auto status = internal_os::RecursiveFileList(
        prefix,
        [&](std::string_view path) {
          // Only descend into subtrees that can intersect the range.
          return tensorstore::IntersectsPrefix(range, path);
        },
        [&](auto entry) -> absl::Status {
          if (!promise.result_needed()) return absl::CancelledError("");
          bool do_delete = false;
          if (entry.IsDirectory()) {
            // A directory is removed only when every key under it lies in
            // `range`; otherwise surviving children are handled one by one.
            do_delete = tensorstore::ContainsPrefix(range, entry.GetFullPath());
          } else {
            do_delete = tensorstore::Contains(range, entry.GetFullPath());
          }
          if (do_delete) {
            auto s = entry.Delete();
            // NotFound / FailedPrecondition indicate benign races with
            // concurrent writers (e.g. directory became non-empty).
            if (!s.ok() && !absl::IsNotFound(s) &&
                !absl::IsFailedPrecondition(s)) {
              ABSL_LOG_IF(INFO, file_logging) << s;
              delete_status.Update(s);
            }
          }
          return absl::OkStatus();
        });
    if (!status.ok()) {
      // Traversal itself failed: report that error and stop. The previous
      // code fell through to a second SetResult below; that was harmless
      // only because a promise accepts a single result, so make the early
      // exit explicit.
      promise.SetResult(MakeResult(std::move(status)));
      return;
    }
    promise.SetResult(MakeResult(std::move(delete_status)));
  }
};
// Validates the range and runs DeleteRangeTask on the executor; an empty
// range completes immediately.
Future<const void> FileKeyValueStore::DeleteRange(KeyRange range) {
  file_metrics.delete_range.Increment();
  if (range.empty()) return absl::OkStatus();
  TENSORSTORE_RETURN_IF_ERROR(ValidateKeyRange(range));
  return PromiseFuturePair<void>::Link(
             WithExecutor(executor(), DeleteRangeTask{std::move(range)}))
      .future;
}
// Executor task streaming directory entries within `options.range` to
// `receiver`, honoring cancellation and skipping lock files.
struct ListTask {
  kvstore::ListOptions options;
  ListReceiver receiver;

  void operator()() {
    ABSL_LOG_IF(INFO, file_logging) << "ListTask " << options.range;
    std::atomic<bool> cancelled = false;
    // The cancel callback may run from any thread; checked per entry below.
    execution::set_starting(receiver, [&cancelled] {
      cancelled.store(true, std::memory_order_relaxed);
    });
    std::string prefix(
        internal_file_util::LongestDirectoryPrefix(options.range));
    auto status = internal_os::RecursiveFileList(
        prefix,
        [&](std::string_view path) {
          return tensorstore::IntersectsPrefix(options.range, path);
        },
        [&](auto entry) -> absl::Status {
          if (cancelled.load(std::memory_order_relaxed)) {
            return absl::CancelledError("");
          }
          if (entry.IsDirectory()) return absl::OkStatus();
          std::string_view path = entry.GetFullPath();
          // Lock files are an implementation detail; never listed.
          if (tensorstore::Contains(options.range, path) &&
              !absl::EndsWith(path, kLockSuffix)) {
            path.remove_prefix(options.strip_prefix_length);
            execution::set_value(receiver,
                                 ListEntry{std::string(path), entry.GetSize()});
          }
          return absl::OkStatus();
        });
    // A cancellation-induced error is reported as normal completion.
    if (!status.ok() && !cancelled.load(std::memory_order_relaxed)) {
      execution::set_error(receiver, std::move(status));
      execution::set_stopping(receiver);
      return;
    }
    execution::set_done(receiver);
    execution::set_stopping(receiver);
  }
};
// Validates the range then runs ListTask on the executor. Empty or invalid
// ranges complete the receiver synchronously without touching the disk.
void FileKeyValueStore::ListImpl(ListOptions options, ListReceiver receiver) {
  file_metrics.list.Increment();
  if (options.range.empty()) {
    execution::set_starting(receiver, [] {});
    execution::set_done(receiver);
    execution::set_stopping(receiver);
    return;
  }
  if (auto error = ValidateKeyRange(options.range); !error.ok()) {
    execution::set_starting(receiver, [] {});
    execution::set_error(receiver, std::move(error));
    execution::set_stopping(receiver);
    return;
  }
  executor()(ListTask{std::move(options), std::move(receiver)});
}
// Opening is trivial for the file driver: just bind the spec data to a new
// driver instance; no filesystem access happens here.
Future<kvstore::DriverPtr> FileKeyValueStoreSpec::DoOpen() const {
  auto driver_ptr = internal::MakeIntrusivePtr<FileKeyValueStore>();
  driver_ptr->spec_ = data_;
  return driver_ptr;
}
// Parses a "file://<path>" URL into a kvstore::Spec with default context
// resources. Query strings and fragments are rejected; the path is
// percent-decoded.
Result<kvstore::Spec> ParseFileUrl(std::string_view url) {
  auto parsed = internal::ParseGenericUri(url);
  assert(parsed.scheme == internal_file_kvstore::FileKeyValueStoreSpec::id);
  if (!parsed.query.empty()) {
    return absl::InvalidArgumentError("Query string not supported");
  }
  if (!parsed.fragment.empty()) {
    return absl::InvalidArgumentError("Fragment identifier not supported");
  }
  std::string path = internal::PercentDecode(parsed.authority_and_path);
  auto driver_spec = internal::MakeIntrusivePtr<FileKeyValueStoreSpec>();
  driver_spec->data_.file_io_concurrency =
      Context::Resource<internal::FileIoConcurrencyResource>::DefaultSpec();
  driver_spec->data_.file_io_sync =
      Context::Resource<FileIoSyncResource>::DefaultSpec();
  return {std::in_place, std::move(driver_spec), std::move(path)};
}
}
}
}
// FileKeyValueStore owns no traced references, so garbage-collection
// support is a no-op.
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_NOT_REQUIRED(
    tensorstore::internal_file_kvstore::FileKeyValueStore)
namespace {
// Registers the "file" driver and its URL scheme at static-initialization
// time so that kvstore::Open and Spec::FromUrl can find it.
const tensorstore::internal_kvstore::DriverRegistration<
    tensorstore::internal_file_kvstore::FileKeyValueStoreSpec>
    registration;
const tensorstore::internal_kvstore::UrlSchemeRegistration
    url_scheme_registration{
        tensorstore::internal_file_kvstore::FileKeyValueStoreSpec::id,
        tensorstore::internal_file_kvstore::ParseFileUrl};
} | #include <errno.h>
#include <stddef.h>
#include <cstring>
#include <fstream>
#include <string>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/synchronization/notification.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context.h"
#include "tensorstore/internal/os/filesystem.h"
#include "tensorstore/internal/testing/scoped_directory.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/spec.h"
#include "tensorstore/kvstore/test_matchers.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/status_testutil.h"
#ifndef _WIN32
#include <sys/stat.h>
#include <unistd.h>
#endif
namespace {
namespace kvstore = tensorstore::kvstore;
using ::tensorstore::CompletionNotifyingReceiver;
using ::tensorstore::IsOkAndHolds;
using ::tensorstore::KeyRange;
using ::tensorstore::KvStore;
using ::tensorstore::MatchesStatus;
using ::tensorstore::StorageGeneration;
using ::tensorstore::internal::MatchesKvsReadResultNotFound;
using ::tensorstore::internal::MatchesListEntry;
using ::tensorstore::internal::MatchesTimestampedStorageGeneration;
using ::tensorstore::internal_os::GetDirectoryContents;
using ::tensorstore::internal_testing::ScopedCurrentWorkingDirectory;
using ::tensorstore::internal_testing::ScopedTemporaryDirectory;
using ::testing::HasSubstr;
KvStore GetStore(std::string root) {
return kvstore::Open({{"driver", "file"}, {"path", root + "/"}}).value();
}
TEST(FileKeyValueStoreTest, Basic) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(FileKeyValueStoreTest, InvalidKey) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
EXPECT_THAT(kvstore::Read(store, "this_is_a_long_key").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(
kvstore::Read(store, "").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, std::string("\0", 1)).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Write(store, "", {}).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "/").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, ".").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "..").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/./b").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/../b").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a.__lock").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "a/b.__lock/c").result(),
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
EXPECT_THAT(
kvstore::Read(store, "
MatchesStatus(absl::StatusCode::kInvalidArgument, "Invalid key: .*"));
}
TEST(FileKeyValueStoreTest, LockFiles) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
TENSORSTORE_ASSERT_OK(
kvstore::Write(store, "a/foo", absl::Cord("xyz"),
{StorageGeneration::NoValue()})
.result());
EXPECT_THAT(GetDirectoryContents(root),
::testing::UnorderedElementsAre("a", "a/foo"));
EXPECT_THAT(
kvstore::Write(store, "a/foo", absl::Cord("qqq"),
{StorageGeneration::NoValue()})
.result(),
MatchesTimestampedStorageGeneration(StorageGeneration::Unknown()));
EXPECT_THAT(GetDirectoryContents(root),
::testing::UnorderedElementsAre("a", "a/foo"));
{ std::ofstream x(root + "/a/foo.__lock"); }
EXPECT_THAT(GetDirectoryContents(root),
::testing::UnorderedElementsAre("a", "a/foo", "a/foo.__lock"));
EXPECT_THAT(
ListFuture(store).result(),
IsOkAndHolds(::testing::UnorderedElementsAre(MatchesListEntry("a/foo"))));
TENSORSTORE_ASSERT_OK(
kvstore::Write(store, "a/foo", absl::Cord("xyz")).result());
{ std::ofstream x(root + "/a/foo.__lock"); }
TENSORSTORE_EXPECT_OK(kvstore::DeleteRange(store, KeyRange::Prefix("a/")));
EXPECT_THAT(GetDirectoryContents(root), ::testing::UnorderedElementsAre("a"));
}
TEST(FileKeyValueStoreTest, NestedDirectories) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "a/foo", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(
kvstore::Write(store, "a/ba/ccc/dddd", absl::Cord("xyz")));
TENSORSTORE_EXPECT_OK(
kvstore::Write(store, "a/ba/ccc/foo", absl::Cord("xyz")));
EXPECT_THAT(
kvstore::Write(store, "a/ba/ccc", absl::Cord("xyz")).result(),
::testing::AnyOf(MatchesStatus(absl::StatusCode::kPermissionDenied),
MatchesStatus(absl::StatusCode::kFailedPrecondition)));
}
TEST(FileKeyValueStoreTest, ConcurrentWrites) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
tensorstore::internal::TestConcurrentWritesOptions options;
options.get_store = [&] { return GetStore(root); };
tensorstore::internal::TestConcurrentWrites(options);
}
#ifndef _WIN32
TEST(FileKeyValueStoreTest, Permissions) {
if (::geteuid() == 0) {
return;
}
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
TENSORSTORE_ASSERT_OK(
kvstore::Write(store, "foo", absl::Cord("xyz")).result());
ASSERT_EQ(0, ::chmod(root.c_str(), 0500))
<< "Error " << errno << ": " << ::strerror(errno);
struct RestoreWritePermission {
std::string path;
~RestoreWritePermission() {
EXPECT_EQ(0, ::chmod(path.c_str(), 0700))
<< "Error " << errno << ": " << ::strerror(errno);
}
};
RestoreWritePermission restore{root};
EXPECT_EQ("xyz", kvstore::Read(store, "foo").value().value);
EXPECT_THAT(kvstore::Write(store, "foo", absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
EXPECT_EQ("xyz", kvstore::Read(store, "foo").value().value);
EXPECT_THAT(kvstore::Write(store, "bar", absl::Cord("abc")).result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
EXPECT_THAT(kvstore::Read(store, "bar").result(),
MatchesKvsReadResultNotFound());
EXPECT_THAT(kvstore::Delete(store, "foo").result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
ASSERT_EQ(0, ::chmod((root + "/foo").c_str(), 0))
<< "Error " << errno << ": " << ::strerror(errno);
EXPECT_THAT(kvstore::Read(store, "foo").result(),
MatchesStatus(absl::StatusCode::kPermissionDenied));
}
#endif
TEST(FileKeyValueStoreTest, DeletePrefix) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST(FileKeyValueStoreTest, DeleteRange) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST(FileKeyValueStoreTest, DeleteRangeToEnd) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST(FileKeyValueStoreTest, DeleteRangeFromBeginning) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
#if 0
TEST(FileKeyValueStoreTest, CopyRange) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreCopyRange(store);
}
#endif
TEST(FileKeyValueStoreTest, ListErrors) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
{
absl::Notification notification;
std::vector<std::string> log;
tensorstore::execution::submit(
kvstore::List(store, {KeyRange::Prefix("a
CompletionNotifyingReceiver{¬ification,
tensorstore::LoggingReceiver{&log}});
notification.WaitForNotification();
EXPECT_THAT(log,
::testing::ElementsAre(
"set_starting",
HasSubstr("set_error: INVALID_ARGUMENT: Invalid key: "),
"set_stopping"));
}
}
TEST(FileKeyValueStoreTest, List) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto store = GetStore(root);
tensorstore::internal::TestKeyValueStoreList(store, false);
}
TEST(FileKeyValueStoreTest, SpecRoundtrip) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {{"driver", "file"}, {"path", root}};
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(FileKeyValueStoreTest, SpecRoundtripSync) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
tensorstore::internal::KeyValueStoreSpecRoundtripOptions options;
options.full_spec = {
{"driver", "file"},
{"path", root},
{"file_io_sync", false},
{"context",
{
{"file_io_concurrency", ::nlohmann::json::object_t()},
}},
};
options.spec_request_options.Set(tensorstore::retain_context).IgnoreError();
tensorstore::internal::TestKeyValueStoreSpecRoundtrip(options);
}
TEST(FileKeyValueStoreTest, InvalidSpec) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
auto context = tensorstore::Context::Default();
EXPECT_THAT(
kvstore::Open({{"driver", "file"}, {"path", root}, {"extra", "key"}},
context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
kvstore::Open({{"driver", "file"}, {"path", 5}}, context).result(),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(kvstore::Open({{"driver", "file"}, {"path", "/a/../b/"}}, context)
.result(),
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid file path.*"));
}
TEST(FileKeyValueStoreTest, UrlRoundtrip) {
tensorstore::internal::TestKeyValueStoreUrlRoundtrip({{"driver", "file"}},
"file:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "file"}, {"path", "/abc/"}}, "file:
tensorstore::internal::TestKeyValueStoreUrlRoundtrip(
{{"driver", "file"}, {"path", "/abc def/"}}, "file:
}
TEST(FileKeyValueStoreTest, UrlOpen) {
ScopedTemporaryDirectory tempdir;
std::string root = tempdir.path() + "/root";
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto store,
kvstore::Open("file:
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST(FileKeyValueStoreTest, InvalidUri) {
EXPECT_THAT(kvstore::Spec::FromUrl("file:
EXPECT_THAT(kvstore::Spec::FromUrl("file:
EXPECT_THAT(kvstore::Spec::FromUrl("file:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Query string not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("file:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*: Fragment identifier not supported"));
EXPECT_THAT(kvstore::Spec::FromUrl("file:
MatchesStatus(absl::StatusCode::kInvalidArgument,
".*Invalid file path.*"));
}
TEST(FileKeyValueStoreTest, RelativePath) {
ScopedTemporaryDirectory tempdir;
ScopedCurrentWorkingDirectory scoped_cwd(tempdir.path());
auto store = GetStore("tmp/dataset");
TENSORSTORE_EXPECT_OK(kvstore::Write(store, "abc", {}).result());
}
TEST(FileKeyValueStoreTest, BatchRead) {
ScopedTemporaryDirectory tempdir;
auto store = GetStore(tempdir.path());
tensorstore::internal::BatchReadGenericCoalescingTestOptions options;
options.coalescing_options.max_extra_read_bytes = 255;
options.metric_prefix = "/tensorstore/kvstore/file/";
options.has_file_open_metric = true;
tensorstore::internal::TestBatchReadGenericCoalescing(store, options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/file_key_value_store.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/file/file_key_value_store_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
faca4e48-e6e7-4077-b1d1-06e856a81276 | cpp | tensorflow/tensorflow | tiled_hlo_computation | third_party/xla/xla/service/gpu/model/tiled_hlo_computation.cc | third_party/xla/xla/service/gpu/model/tiled_hlo_computation_test.cc | #include "xla/service/gpu/model/tiled_hlo_computation.h"
#include <sstream>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/name_uniquer.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
std::string TiledHloComputation::ToString() const {
std::stringstream ss;
NameUniquer name_uniquer("_");
absl::flat_hash_map<const TiledHloInstruction*, std::string> tile_names;
for (const auto* tiled_hlo : instructions()) {
std::string tile_name = name_uniquer.GetUniqueName(
absl::StrCat(tiled_hlo->hlo()->name(), ".tile_0"));
tile_names[tiled_hlo] = tile_name;
absl::InlinedVector<std::string, 4> operand_names;
for (const auto& operand : tiled_hlo->operands()) {
operand_names.push_back(tile_names.at(operand));
}
ss << tile_name << " = " << HloOpcodeString(tiled_hlo->hlo()->opcode())
<< "(" << absl::StrJoin(operand_names, ", ") << ")\n";
ss << tiled_hlo->ToString() << "\n";
}
return ss.str();
}
}
} | #include "xla/service/gpu/model/tiled_hlo_computation.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
// Verifies proto -> struct conversion preserves tile sizes and warp count.
TEST(BlockLevelParametersTest,
     BlockLevelParametersCanBeParsedFromBlockLevelFusionConfig) {
  BlockLevelFusionConfig block_level_fusion_config;
  block_level_fusion_config.mutable_output_tile_sizes()->Add(18);
  block_level_fusion_config.mutable_output_tile_sizes()->Add(19);
  block_level_fusion_config.set_num_warps(12);
  BlockLevelParameters block_level_parameters =
      BlockLevelParameters::FromBlockLevelFusionConfig(
          block_level_fusion_config);
  EXPECT_THAT(block_level_parameters.output_tile_sizes, ElementsAre(18, 19));
  EXPECT_THAT(block_level_parameters.num_warps, 12);
}
// Verifies the inverse, struct -> proto conversion.
TEST(BlockLevelParametersTest,
     BlockLevelParametersCanBeConvertedToBlockLevelFusionConfig) {
  BlockLevelParameters block_level_parameters;
  block_level_parameters.output_tile_sizes = {18, 19};
  block_level_parameters.num_warps = 12;
  BlockLevelFusionConfig block_level_fusion_config =
      block_level_parameters.ToBlockLevelFusionConfig();
  EXPECT_THAT(block_level_fusion_config.output_tile_sizes(),
              ElementsAre(18, 19));
  EXPECT_THAT(block_level_fusion_config.num_warps(), 12);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/tiled_hlo_computation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/tiled_hlo_computation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa7a940d-b8e5-4e31-bdf3-0fefd4811866 | cpp | google/tensorstore | utf8 | tensorstore/internal/utf8.cc | tensorstore/internal/utf8_test.cc | #include "tensorstore/internal/utf8.h"
#include <cstdint>
#include <string_view>
namespace tensorstore {
namespace internal {
namespace {
namespace utf8_decode {
using State = uint32_t;
constexpr State kAccept = 0;
#if 0
constexpr State kReject = 1;
#endif
const uint8_t utf8d[400] = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
0xa,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x3,0x4,0x3,0x3,
0xb,0x6,0x6,0x6,0x5,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,0x8,
0x0,0x1,0x2,0x3,0x5,0x8,0x7,0x1,0x1,0x1,0x4,0x6,0x1,0x1,0x1,0x1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,1,1,
1,2,1,1,1,1,1,2,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,
1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,1,1,1,3,1,3,1,1,1,1,1,1,
1,3,1,1,1,1,1,3,1,3,1,1,1,1,1,1,1,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
};
inline State Decode(State* state, char32_t* codep, uint8_t byte) {
uint32_t type = utf8d[byte];
*codep = (*state != kAccept) ? (byte & 0x3fu) | (*codep << 6)
: (0xff >> type) & (byte);
*state = utf8d[256 + *state * 16 + type];
return *state;
}
}
}
bool IsValidUtf8(std::string_view code_units) {
using utf8_decode::kAccept;
utf8_decode::State state = utf8_decode::kAccept;
char32_t codep;
for (const char x : code_units) {
utf8_decode::Decode(&state, &codep, x);
}
return state == kAccept;
}
}
} | #include "tensorstore/internal/utf8.h"
#include <string_view>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::IsValidUtf8;
// Empty input is trivially valid.
TEST(IsValidUtf8Test, Empty) {
  EXPECT_TRUE(IsValidUtf8(""));
}
// Plain ASCII, including an embedded NUL byte, is valid.
TEST(IsValidUtf8Test, Ascii) {
  EXPECT_TRUE(IsValidUtf8("ascii"));
  EXPECT_TRUE(IsValidUtf8(std::string_view("\0", 1)));
}
// Well-formed two-byte sequences, alone and mixed with ASCII.
TEST(IsValidUtf8Test, TwoByte) {
  EXPECT_TRUE(IsValidUtf8("\xc2\x80"));
  EXPECT_TRUE(IsValidUtf8("\xc2\x80hello\xc2\xbf"));
}
// Shortest well-formed three-byte sequence (U+0800).
TEST(IsValidUtf8Test, ThreeByte) {
  EXPECT_TRUE(IsValidUtf8("\xe0\xa0\x80"));
}
// Shortest well-formed four-byte sequence (U+10000).
TEST(IsValidUtf8Test, FourByte) {
  EXPECT_TRUE(IsValidUtf8("\xf0\x90\x80\x80"));
}
// CESU-8-style encoded surrogates are ill-formed UTF-8.
TEST(IsValidUtf8Test, Surrogate) {
  EXPECT_FALSE(IsValidUtf8("\xed\xa0\x80"));
  EXPECT_FALSE(IsValidUtf8("\xed\xb0\x80"));
  EXPECT_FALSE(IsValidUtf8("\xed\xa0\x80\xed\xb0\x80"));
}
// Bytes that can never start a sequence: continuation bytes and 0xC0/0xC1,
// 0xF5-0xFF.
TEST(IsValidUtf8Test, IllFormedFirstByte) {
  EXPECT_FALSE(IsValidUtf8("\x80"));
  EXPECT_FALSE(IsValidUtf8("\xC1"));
  EXPECT_FALSE(IsValidUtf8("\xF5"));
  EXPECT_FALSE(IsValidUtf8("\xFF"));
}
// Overlong encodings of U+0000 must be rejected at every length.
TEST(IsValidUtf8Test, OverlongNul) {
  EXPECT_FALSE(IsValidUtf8("\xc0\x80"));
  EXPECT_FALSE(IsValidUtf8("\xe0\x80\x80"));
  EXPECT_FALSE(IsValidUtf8("\xf0\x80\x80\x80"));
  EXPECT_FALSE(IsValidUtf8("\xf8\x80\x80\x80\x80"));
  EXPECT_FALSE(IsValidUtf8("\xfc\x80\x80\x80\x80\x80"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/utf8.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/utf8_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
bbcb9a75-88c7-401a-a921-5db53309d6d1 | cpp | tensorflow/tensorflow | kernel_util | tensorflow/lite/kernels/kernel_util.cc | tensorflow/lite/kernels/kernel_util_test.cc | #include "tensorflow/lite/kernels/kernel_util.h"
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <complex>
#include <limits>
#include <memory>
#ifndef TF_LITE_STATIC_MEMORY
#include <string>
#include "tensorflow/lite/array.h"
#endif
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#if defined(__APPLE__)
#include "TargetConditionals.h"
#endif
namespace tflite {
namespace {
// Resolves `tensor_index` to a tensor, preferring the context's flat
// `tensors` array and falling back to the GetTensor callback when the array
// is not populated.
inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context,
                                      int tensor_index) {
  return context->tensors != nullptr
             ? &context->tensors[tensor_index]
             : context->GetTensor(context, tensor_index);
}
// Resolves `index` (position within `tensor_indices`) to a tensor index,
// logging and returning kTfLiteError when the position is out of
// [0, max_size) or refers to an absent optional tensor.
inline TfLiteStatus ValidateTensorIndexingSafe(const TfLiteContext* context,
                                               int index, int max_size,
                                               const int* tensor_indices,
                                               int* tensor_index) {
  if (index < 0 || index >= max_size) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid tensor index %d (not in [0, %d))\n", index,
                       max_size);
    return kTfLiteError;
  }
  if (tensor_indices[index] == kTfLiteOptionalTensor) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Tensor at index %d was optional but was expected\n",
                       index);
    return kTfLiteError;
  }
  *tensor_index = tensor_indices[index];
  return kTfLiteOk;
}
// Non-logging variant of the above: returns the resolved tensor index, or
// -1 when the position is out of range or the tensor is optional/absent.
inline int ValidateTensorIndexing(const TfLiteContext* context, int index,
                                  int max_size, const int* tensor_indices) {
  if (index >= 0 && index < max_size) {
    const int tensor_index = tensor_indices[index];
    if (tensor_index != kTfLiteOptionalTensor) {
      return tensor_index;
    }
  }
  return -1;
}
// Returns the node's input tensor at `index`, or nullptr when the index is
// invalid or the input is optional and absent.
inline TfLiteTensor* GetMutableInput(const TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->inputs->size, node->inputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}
// Error-returning counterpart of GetMutableInput: reports failures through
// the TfLiteStatus/logging machinery instead of returning nullptr.
inline TfLiteStatus GetMutableInputSafe(const TfLiteContext* context,
                                        const TfLiteNode* node, int index,
                                        const TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->inputs->size,
                                          node->inputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
}
// Returns the node's input tensor at `index` (const view), or nullptr when
// the index is invalid or the input is optional and absent.
const TfLiteTensor* GetInput(const TfLiteContext* context,
                             const TfLiteNode* node, int index) {
  return GetMutableInput(context, node, index);
}
// Status-returning variant of GetInput; logs on failure.
TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node,
                          int index, const TfLiteTensor** tensor) {
  return GetMutableInputSafe(context, node, index, tensor);
}
// Returns the input tensor at `index` only if it is a variable tensor;
// nullptr otherwise (missing, optional, or non-variable).
TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  TfLiteTensor* tensor = GetMutableInput(context, node, index);
  if (tensor == nullptr) return nullptr;
  return tensor->is_variable ? tensor : nullptr;
}
// Returns the node's output tensor at `index`, or nullptr on an invalid
// index.
TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                        int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->outputs->size, node->outputs->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}
// Status-returning variant of GetOutput; logs on failure.
TfLiteStatus GetOutputSafe(const TfLiteContext* context, const TfLiteNode* node,
                           int index, TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(
      context, ValidateTensorIndexingSafe(context, index, node->outputs->size,
                                          node->outputs->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
// Kept for source compatibility: identical to GetInput, which already
// returns nullptr for absent optional inputs.
const TfLiteTensor* GetOptionalInputTensor(const TfLiteContext* context,
                                           const TfLiteNode* node, int index) {
  return GetInput(context, node, index);
}
// Temporaries/intermediates accessors are compiled out for static-memory
// (micro) builds, where nodes do not carry these arrays.
#ifndef TF_LITE_STATIC_MEMORY
// Returns the node's temporary tensor at `index`, or nullptr on an invalid
// index.
TfLiteTensor* GetTemporary(TfLiteContext* context, const TfLiteNode* node,
                           int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->temporaries->size, node->temporaries->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}
// Status-returning variant of GetTemporary; logs on failure.
TfLiteStatus GetTemporarySafe(const TfLiteContext* context,
                              const TfLiteNode* node, int index,
                              TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->temporaries->size,
                                 node->temporaries->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
// Returns the node's intermediate tensor at `index`, or nullptr on an
// invalid index.
const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                     const TfLiteNode* node, int index) {
  const int tensor_index = ValidateTensorIndexing(
      context, index, node->intermediates->size, node->intermediates->data);
  if (tensor_index < 0) {
    return nullptr;
  }
  return GetTensorAtIndex(context, tensor_index);
}
// Status-returning variant of GetIntermediates; logs on failure.
TfLiteStatus GetIntermediatesSafe(const TfLiteContext* context,
                                  const TfLiteNode* node, int index,
                                  TfLiteTensor** tensor) {
  int tensor_index;
  TF_LITE_ENSURE_OK(context, ValidateTensorIndexingSafe(
                                 context, index, node->intermediates->size,
                                 node->intermediates->data, &tensor_index));
  *tensor = GetTensorAtIndex(context, tensor_index);
  return kTfLiteOk;
}
#endif
// Convenience overload: derives the channel count from the filter's affine
// quantization scale array and delegates to the full overload below.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift) {
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  return PopulateConvolutionQuantizationParams(
      context, input, filter, bias, output, activation, multiplier, shift,
      output_activation_min, output_activation_max, per_channel_multiplier,
      per_channel_shift, affine_quantization->scale->size);
}
// Computes the fixed-point requantization parameters for a (possibly
// per-channel) quantized convolution: per-channel multiplier/shift pairs,
// the legacy whole-tensor multiplier/shift for uint8, and the clamped
// activation range for the output.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int32_t* per_channel_shift,
    int num_channels) {
  // Both input and filter must carry affine quantization metadata.
  TF_LITE_ENSURE_EQ(context, input->quantization.type,
                    kTfLiteAffineQuantization);
  TF_LITE_ENSURE_EQ(context, filter->quantization.type,
                    kTfLiteAffineQuantization);
  const auto* affine_quantization =
      reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params);
  TF_LITE_ENSURE(context, affine_quantization);
  TF_LITE_ENSURE(context, affine_quantization->scale);
  // More than one filter scale implies per-channel quantization.
  const bool is_per_channel = affine_quantization->scale->size > 1;
  if (is_per_channel) {
    // Per-channel is only defined for int8/int16 activations with
    // int4/int8 filters, and the scale count must match the filter's
    // quantized dimension.
    TF_LITE_ENSURE(context,
                   input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
    TF_LITE_ENSURE(context,
                   filter->type == kTfLiteInt8 || filter->type == kTfLiteInt4);
    TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels);
    TF_LITE_ENSURE_EQ(
        context, num_channels,
        filter->dims->data[affine_quantization->quantized_dimension]);
  }
  // Populate one (multiplier, shift) pair per output channel; for
  // whole-tensor quantization every channel reuses filter_scales[0].
  const float input_scale = input->params.scale;
  const float output_scale = output->params.scale;
  const float* filter_scales = affine_quantization->scale->data;
  for (int i = 0; i < num_channels; ++i) {
    const float scale = is_per_channel ? filter_scales[i] : filter_scales[0];
    const double filter_scale = static_cast<double>(scale);
    const double effective_output_scale = static_cast<double>(input_scale) *
                                          filter_scale /
                                          static_cast<double>(output_scale);
    int32_t significand;
    int channel_shift;
    QuantizeMultiplier(effective_output_scale, &significand, &channel_shift);
    per_channel_multiplier[i] = significand;
    per_channel_shift[i] = channel_shift;
  }
  // Legacy uint8 path additionally fills the whole-tensor multiplier/shift.
  if (input->type == kTfLiteUInt8) {
    double real_multiplier = 0.0;
    TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler(
        context, input, filter, bias, output, &real_multiplier));
    int exponent;
    QuantizeMultiplier(real_multiplier, multiplier, &exponent);
    *shift = -exponent;
  }
  // Quantized types also need the fused-activation clamp range.
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 ||
      input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized(
        context, activation, output, output_activation_min,
        output_activation_max));
  }
  return kTfLiteOk;
}
// Computes the effective output multiplier (input_scale * filter_scale /
// output_scale), first checking that the bias scale (when a bias exists)
// matches input_scale * filter_scale to within 2% of the output scale.
// NOTE: "Multipler" is a long-standing typo in the public name; it cannot
// be renamed without breaking callers.
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  const double input_product_scale = static_cast<double>(input->params.scale) *
                                     static_cast<double>(filter->params.scale);
  if (bias) {
    // The quantization scheme requires bias_scale == input_scale *
    // filter_scale; tolerate small deviations relative to the output scale.
    const double bias_scale = static_cast<double>(bias->params.scale);
    const double scale_diff = std::abs(input_product_scale - bias_scale);
    const double output_scale = static_cast<double>(output->params.scale);
    TF_LITE_ENSURE(context, scale_diff / output_scale <= 0.02);
  }
  return GetQuantizedConvolutionMultipler(context, input, filter, output,
                                          multiplier);
}
// Computes the effective output multiplier (input_scale * filter_scale /
// output_scale) used to rescale the int32 accumulator into the output's
// quantized domain.
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier) {
  // Widen each scale to double *before* multiplying, matching the
  // bias-checking overload above; the previous code multiplied the two
  // float scales first and only then widened, losing precision.
  const double input_product_scale = static_cast<double>(input->params.scale) *
                                     static_cast<double>(filter->params.scale);
  TF_LITE_ENSURE(context, input_product_scale >= 0);
  *multiplier = input_product_scale / static_cast<double>(output->params.scale);
  return kTfLiteOk;
}
namespace {
// Quantizes real value `f` as q = zero_point + round(f / scale), failing
// (rather than overflowing) when the rounded value does not fit in int32.
inline TfLiteStatus Quantize(TfLiteContext* context, float scale,
                             int32_t zero_point, float f, int32_t& q) {
  const float tmp = TfLiteRound(f / scale);
  // Guard the float->int32 cast: casting an out-of-range float is UB.
  const bool no_integer_overflow_from_quantization =
      (tmp >= static_cast<float>(std::numeric_limits<int32_t>::min()) &&
       tmp <= static_cast<float>(std::numeric_limits<int32_t>::max()));
  TF_LITE_ENSURE(context, no_integer_overflow_from_quantization);
  q = zero_point + static_cast<int32_t>(tmp);
  return kTfLiteOk;
}
// Narrows [qmin, qmax] to the quantized range implied by the fused
// activation: Relu clamps below at q(0), Relu6 to [q(0), q(6)], ReluN1To1
// to [q(-1), q(1)]; any other activation leaves the full range.
TfLiteStatus CalculateActivationRangeQuantizedImpl(
    TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin,
    int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) {
  const auto scale = output->params.scale;
  const auto zero_point = output->params.zero_point;
  int32_t tmp_q;
  if (activation == kTfLiteActRelu) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    *act_max = qmax;
  } else if (activation == kTfLiteActRelu6) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 0.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 6.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else if (activation == kTfLiteActReluN1To1) {
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, -1.0, tmp_q));
    *act_min = std::max(qmin, tmp_q);
    TF_LITE_ENSURE_OK(context,
                      Quantize(context, scale, zero_point, 1.0, tmp_q));
    *act_max = std::min(qmax, tmp_q);
  } else {
    *act_min = qmin;
    *act_max = qmax;
  }
  return kTfLiteOk;
}
}
// Computes the clamped activation range for a quantized output tensor:
// starts from the full representable range of the output's integer type
// and narrows it according to the fused activation.  Fails for
// non-quantized output types.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max) {
  int32_t qmin = 0;
  int32_t qmax = 0;
  switch (output->type) {
    case kTfLiteUInt8:
      qmin = std::numeric_limits<uint8_t>::min();
      qmax = std::numeric_limits<uint8_t>::max();
      break;
    case kTfLiteInt8:
      qmin = std::numeric_limits<int8_t>::min();
      qmax = std::numeric_limits<int8_t>::max();
      break;
    case kTfLiteInt16:
      qmin = std::numeric_limits<int16_t>::min();
      qmax = std::numeric_limits<int16_t>::max();
      break;
    default:
      // Unsupported output type for quantized activation ranges.
      TF_LITE_ENSURE(context, false);
  }
  return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax,
                                               output, act_min, act_max);
}
// Returns true iff the two tensors have identical dims arrays.
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) {
  return TfLiteIntArrayEqual(input1->dims, input2->dims);
}
#ifndef TF_LITE_STATIC_MEMORY
// Converts a 1-D int32 shape tensor into a freshly allocated TfLiteIntArray
// (ownership passes to the caller).  Errors out on non-1-D input.
TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context,
                                     const TfLiteTensor* input,
                                     TfLiteIntArray** output_shape) {
  if (NumDimensions(input) != 1) {
    TF_LITE_KERNEL_LOG(const_cast<TfLiteContext*>(context),
                       "Invalid %dD input tensor (must be a 1D tensor).",
                       NumDimensions(input));
    return kTfLiteError;
  }
  const int output_dims = SizeOfDimension(input, 0);
  // Build under a unique_ptr so the array is freed on any early exit.
  IntArrayUniquePtr shape(TfLiteIntArrayCreate(output_dims));
  for (int i = 0; i < output_dims; i++) {
    shape->data[i] = input->data.i32[i];
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
// Renders a TfLiteIntArray as a human-readable shape string, e.g. "[2,3]".
// An empty (size-0) shape renders as "[]".
std::string GetShapeDebugString(const TfLiteIntArray* shape) {
  std::string result = "[";
  const char* separator = "";
  for (int d = 0; d < shape->size; ++d) {
    result += separator;
    result += std::to_string(shape->data[d]);
    separator = ",";  // Only between elements, never before the first.
  }
  result += "]";
  return result;
}
// Renders a brief multi-line summary of a tensor: its type name and its
// dims (via GetShapeDebugString). The data payload is elided as "{...}".
std::string GetTensorDebugString(const TfLiteTensor* tensor) {
  return std::string("{\n  type: ") + TfLiteTypeGetName(tensor->type) +
         "\n  data: {...}\n  dims: " + GetShapeDebugString(tensor->dims) +
         "\n}";
}
// Computes the numpy-style broadcast shape of two tensors into a freshly
// allocated TfLiteIntArray (`*output_shape`, caller owns). Dimensions are
// compared right-aligned: missing leading dims are treated as 1. Returns an
// error (with both shapes logged) if any dimension pair is incompatible.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int out_dims = std::max(dims1, dims2);
  IntArrayUniquePtr shape(TfLiteIntArrayCreate(out_dims));
  // i counts dimensions from the trailing (innermost) end.
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    // Compatible iff equal, or one side is 1 (broadcast).
    if (!(d1 == d2 || d1 == 1 || d2 == 1)) {
      TF_LITE_KERNEL_LOG(context,
                         "Given shapes, %s and %s, are not broadcastable.",
                         GetShapeDebugString(input1->dims).c_str(),
                         GetShapeDebugString(input2->dims).c_str());
      return kTfLiteError;
    }
    if (d1 == 0 || d2 == 0) {
      // A zero-sized dimension propagates as zero in the result.
      shape->data[out_dims - i - 1] = 0;
    } else {
      shape->data[out_dims - i - 1] = std::max(d1, d2);
    }
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
// Three-input overload: computes the broadcast shape of three tensors into a
// freshly allocated TfLiteIntArray (`*output_shape`, caller owns). Each
// right-aligned dimension triple must consist of values that are either 1 or
// equal to the triple's maximum; otherwise an error is logged and returned.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        const TfLiteTensor* input3,
                                        TfLiteIntArray** output_shape) {
  const int dims1 = NumDimensions(input1);
  const int dims2 = NumDimensions(input2);
  const int dims3 = NumDimensions(input3);
  const int out_dims = std::max(std::max(dims1, dims2), dims3);
  IntArrayUniquePtr shape(TfLiteIntArrayCreate(out_dims));
  // i counts dimensions from the trailing (innermost) end; absent leading
  // dimensions are treated as 1.
  for (int i = 0; i < out_dims; ++i) {
    const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1);
    const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1);
    const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1);
    const int min_value = std::min(std::min(d1, d2), d3);
    int max_value = std::max(std::max(d1, d2), d3);
    // If any dimension is 0, the broadcast result dimension is 0.
    if (min_value == 0) max_value = 0;
    if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) ||
        !(d3 == 1 || d3 == max_value)) {
      TF_LITE_KERNEL_LOG(context,
                         "Given shapes, %s, %s and %s, are not broadcastable.",
                         GetShapeDebugString(input1->dims).c_str(),
                         GetShapeDebugString(input2->dims).c_str(),
                         GetShapeDebugString(input3->dims).c_str());
      return kTfLiteError;
    }
    shape->data[out_dims - i - 1] = max_value;
  }
  *output_shape = shape.release();
  return kTfLiteOk;
}
#endif
// Returns the size in bytes of a single element of `type`, or 0 for any type
// not listed below. The static_asserts pin the host-type sizes the returned
// constants rely on.
int TfLiteTypeGetSize(TfLiteType type) {
  switch (type) {
    case kTfLiteUInt8:
      static_assert(sizeof(uint8_t) == 1, "");
      return 1;
    case kTfLiteInt8:
      static_assert(sizeof(int8_t) == 1, "");
      return 1;
    case kTfLiteBool:
      return sizeof(bool);
    case kTfLiteUInt16:
      static_assert(sizeof(uint16_t) == 2, "");
      return 2;
    case kTfLiteInt16:
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat16:
      // int16_t is used as a same-size (2-byte) stand-in for the half type.
      static_assert(sizeof(int16_t) == 2, "");
      return 2;
    case kTfLiteFloat32:
      static_assert(sizeof(float) == 4, "");
      return 4;
    case kTfLiteInt32:
      static_assert(sizeof(int32_t) == 4, "");
      return 4;
    case kTfLiteUInt32:
      static_assert(sizeof(uint32_t) == 4, "");
      return 4;
    case kTfLiteInt64:
      static_assert(sizeof(int64_t) == 8, "");
      return 8;
    case kTfLiteUInt64:
      static_assert(sizeof(uint64_t) == 8, "");
      return 8;
    case kTfLiteFloat64:
      static_assert(sizeof(double) == 8, "");
      return 8;
    case kTfLiteComplex64:
      static_assert(sizeof(std::complex<float>) == 8, "");
      return 8;
    case kTfLiteComplex128:
      static_assert(sizeof(std::complex<double>) == 16, "");
      return 16;
    default:
      // Unknown or variable-size types have no fixed element size.
      return 0;
  }
}
// Compile-time platform check: true when built for Android or for iOS
// (device or simulator), false everywhere else.
bool IsMobilePlatform() {
#if defined(ANDROID) || defined(__ANDROID__)
  return true;
#elif defined(__APPLE__) && (TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE)
  return true;
#else
  return false;
#endif
}
// Returns true if any entry of the tensor's dims_signature is -1, i.e. a
// dimension left unspecified at model-build time. Always false in
// TF_LITE_STATIC_MEMORY builds (dims_signature is not consulted there).
bool HasUnspecifiedDimension(const TfLiteTensor* tensor) {
#ifndef TF_LITE_STATIC_MEMORY
  if (tensor->dims_signature) {
    for (int i : TfLiteIntArrayView(tensor->dims_signature)) {
      if (i == -1) return true;
    }
  }
#endif
  return false;
}
} | #include "tensorflow/lite/kernels/kernel_util.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/match.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
// TfLiteContext subclass that records the most recently reported error
// message so tests can assert on it.
struct TestContext : public TfLiteContext {
  string error;
};
// ReportError callback installed on TestContext: formats the varargs message
// into a fixed 1 KiB buffer and stores it in TestContext::error.
void ReportError(TfLiteContext* context, const char* format, ...) {
  TestContext* c = static_cast<TestContext*>(context);
  const size_t kBufferSize = 1024;
  char temp_buffer[kBufferSize];
  va_list args;
  va_start(args, format);
  vsnprintf(temp_buffer, kBufferSize, format, args);
  va_end(args);
  c->error = temp_buffer;
}
// Base fixture: provides a TestContext wired to the error-capturing
// ReportError, plus a helper to build dynamically allocated int32 tensors.
class TestWithTfLiteContext : public ::testing::Test {
 public:
  TestWithTfLiteContext() { context_.ReportError = ReportError; }
  // Builds a kTfLiteInt32 tensor with the given dims and kTfLiteDynamic
  // allocation.
  TensorUniquePtr BuildTfLiteTensorForTest(std::initializer_list<int> dims) {
    return BuildTfLiteTensor(kTfLiteInt32, dims, kTfLiteDynamic);
  }
 protected:
  TestContext context_;
};
// Tests for HaveSameShapes().
class HaveSameShapeTest : public TestWithTfLiteContext {};
// Two tensors whose dims are both null compare as same-shaped.
TEST_F(HaveSameShapeTest, NullPointerIsSameShape) {
  TensorUniquePtr t1 = BuildTfLiteTensor();
  t1->dims = nullptr;
  TensorUniquePtr t2 = BuildTfLiteTensor();
  t2->dims = nullptr;
  EXPECT_TRUE(HaveSameShapes(t1.get(), t2.get()));
}
// Different shapes (even different ranks) compare unequal.
TEST_F(HaveSameShapeTest, NotSameShapeFalse) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({2, 3});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({3});
  EXPECT_FALSE(HaveSameShapes(t1.get(), t2.get()));
}
// Two default-built (empty-shape) tensors compare equal.
TEST_F(HaveSameShapeTest, EmptyShapeEqualTrue) {
  TensorUniquePtr t1 = BuildTfLiteTensor();
  TensorUniquePtr t2 = BuildTfLiteTensor();
  EXPECT_TRUE(HaveSameShapes(t1.get(), t2.get()));
}
// Tests for both CalculateShapeForBroadcast overloads (2- and 3-input).
class BroadcastShapeTest : public TestWithTfLiteContext {};
// Incompatible trailing dims (2 vs 3) fail and leave the output null.
TEST_F(BroadcastShapeTest, IncompatibleDimNullptr) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
  TfLiteIntArray* output = nullptr;
  EXPECT_NE(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &output));
  EXPECT_EQ(output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,2] and [1,3], are not broadcastable.");
}
// A 0-dim against a non-1 dim is also incompatible.
TEST_F(BroadcastShapeTest, IncompatibleDimWithZeroNullptr) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 0});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
  TfLiteIntArray* output = nullptr;
  EXPECT_NE(kTfLiteOk,
            CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &output));
  EXPECT_EQ(output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,0] and [1,3], are not broadcastable.");
}
// A 1-dim broadcasts up to the other tensor's dimension.
TEST_F(BroadcastShapeTest, BroadCastSecondDimension) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
  TfLiteIntArray* raw_output;
  auto status =
      CalculateShapeForBroadcast(&context_, t1.get(), t2.get(), &raw_output);
  ASSERT_EQ(kTfLiteOk, status);
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({1, 3}));
}
// A scalar broadcasts against a 2-D tensor, yielding the 2-D shape.
TEST_F(BroadcastShapeTest, ScalarAnd2dBroadcastsTo2d) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({1, 2}));
}
// The result takes the higher of the two ranks.
TEST_F(BroadcastShapeTest, DifferentRankBroadcastsToHigherRank) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 2});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({3, 1, 2}));
}
// Zero-sized dims survive broadcasting with different ranks.
TEST_F(BroadcastShapeTest, ZeroDimDifferentRankBroadcastsToHigherRank) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 0, 2});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
// Zero dims broadcast against 1-dims; other dims broadcast normally.
TEST_F(BroadcastShapeTest, ZeroDimSameRankBroadcastsToHigherRank) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 0, 1});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
// Three-input overload: mutually incompatible dims fail with all three
// shapes in the error message.
TEST_F(BroadcastShapeTest, IncompatibleDimOnThreeTensorsNullptr) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 4});
  TfLiteIntArray* raw_output = nullptr;
  EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  EXPECT_EQ(raw_output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,2], [1,3] and [1,4], are not broadcastable.");
}
// Three-input overload: a 0-dim against a non-1 dim is incompatible.
TEST_F(BroadcastShapeTest, IncompatibleDimWithZeroOnThreeTensorsNullptr) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 0});
  TfLiteIntArray* raw_output = nullptr;
  EXPECT_NE(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  EXPECT_EQ(raw_output, nullptr);
  EXPECT_EQ(context_.error,
            "Given shapes, [1,1], [1,3] and [1,0], are not broadcastable.");
}
// Three 1-dims broadcast up to the largest value among them.
TEST_F(BroadcastShapeTest, ThreeTensorsBroadcastToLarger2ndDim) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 1});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 1});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 3});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({1, 3}));
}
// Two scalars broadcast against a 2-D tensor, yielding the 2-D shape.
TEST_F(BroadcastShapeTest, TwoScalarsBroadcastTo2d) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({1, 2}));
}
// Mixed ranks broadcast to the largest rank with per-dim maxima.
TEST_F(BroadcastShapeTest, DifferentSizesOnThreeTensorsBroadcastToLargerRank) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 1});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({3, 1});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({3, 3, 2}));
}
// Same, with a rank-4 participant.
TEST_F(BroadcastShapeTest,
       DifferentSizesOnThreeTensors4dBroadcastToLargerRank) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({3, 4});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({1, 3, 1});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({1, 2, 1, 1});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({1, 2, 3, 4}));
}
// A zero dim among three tensors produces a zero output dim.
TEST_F(BroadcastShapeTest, ZeroOnThreeTensorsBroadcastToLargerRank) {
  TensorUniquePtr t1 = BuildTfLiteTensorForTest({1, 2});
  TensorUniquePtr t2 = BuildTfLiteTensorForTest({3, 1, 1});
  TensorUniquePtr t3 = BuildTfLiteTensorForTest({0, 1});
  TfLiteIntArray* raw_output;
  EXPECT_EQ(kTfLiteOk, CalculateShapeForBroadcast(&context_, t1.get(), t2.get(),
                                                  t3.get(), &raw_output));
  IntArrayUniquePtr output(raw_output);
  EXPECT_THAT(output.get(), DimsAre({3, 0, 2}));
}
// GetShapeDebugString renders "[]" for rank-0 and comma-joined dims
// otherwise.
TEST(GetShapeDebugStringTest, GetShapeDebugString) {
  IntArrayUniquePtr dims0 = BuildTfLiteArray({});
  EXPECT_EQ("[]", GetShapeDebugString(dims0.get()));
  IntArrayUniquePtr dims1 = BuildTfLiteArray({1});
  dims1->data[0] = 1;
  EXPECT_EQ("[1]", GetShapeDebugString(dims1.get()));
  IntArrayUniquePtr dims2 = BuildTfLiteArray({2, 3});
  dims2->data[0] = 2;
  dims2->data[1] = 3;
  EXPECT_EQ("[2,3]", GetShapeDebugString(dims2.get()));
  IntArrayUniquePtr dims3 = BuildTfLiteArray({4, 5, 6});
  dims3->data[0] = 4;
  dims3->data[1] = 5;
  dims3->data[2] = 6;
  EXPECT_EQ("[4,5,6]", GetShapeDebugString(dims3.get()));
}
// Tests for PopulateConvolutionQuantizationParams and
// CalculateActivationRangeQuantized.
class QuantizationParamsTest : public TestWithTfLiteContext {};
// Per-channel int8 conv: filter/bias carry 3-channel scales; verifies the
// derived per-channel fixed-point multipliers and shifts.
TEST_F(QuantizationParamsTest, PerChannelConvolution) {
  // Input: int8, scale 0.5, zero point 5.
  TensorUniquePtr input = BuildTfLiteTensor();
  input->type = kTfLiteInt8;
  input->allocation_type = kTfLiteArenaRw;
  input->dims = TfLiteIntArrayCreate(1);
  input->dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {0.5, 5};
  input->params = input_quant;
  input->quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 0.5;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input->quantization.params = reinterpret_cast<void*>(input_params);
  // Filter: int8, per-channel scales {0.25, 0.125, 0.25} on dimension 0.
  TensorUniquePtr filter = BuildTfLiteTensor();
  filter->type = kTfLiteInt8;
  filter->allocation_type = kTfLiteArenaRw;
  filter->dims = TfLiteIntArrayCreate(4);
  filter->dims->data[0] = 3;
  filter->dims->data[1] = 4;
  filter->dims->data[2] = 5;
  filter->dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {0.25, 0};
  filter->params = filter_quant;
  filter->quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(3);
  filter_params->scale->data[0] = 0.25;
  filter_params->scale->data[1] = 0.125;
  filter_params->scale->data[2] = 0.25;
  filter_params->zero_point = TfLiteIntArrayCreate(3);
  filter_params->zero_point->data[0] = 0;
  filter_params->zero_point->data[1] = 0;
  filter_params->zero_point->data[2] = 0;
  filter_params->quantized_dimension = 0;
  filter->quantization.params = reinterpret_cast<void*>(filter_params);
  // Bias: int32, per-channel scales {0.125, 0.0625, 0.125}.
  TensorUniquePtr bias = BuildTfLiteTensor();
  bias->type = kTfLiteInt32;
  bias->allocation_type = kTfLiteArenaRw;
  bias->dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {0.125, 9};
  bias->params = bias_quant;
  bias->quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(3);
  bias_params->scale->data[0] = 0.125;
  bias_params->scale->data[1] = 0.0625;
  bias_params->scale->data[2] = 0.125;
  bias_params->zero_point = TfLiteIntArrayCreate(3);
  bias_params->zero_point->data[0] = 11;
  bias_params->zero_point->data[1] = 12;
  bias_params->zero_point->data[2] = 15;
  bias->quantization.params = reinterpret_cast<void*>(bias_params);
  // Output: int8, scale 0.5, zero point -128.
  TensorUniquePtr output = BuildTfLiteTensor();
  output->type = kTfLiteInt8;
  output->allocation_type = kTfLiteArenaRw;
  output->dims = nullptr;
  TfLiteQuantizationParams output_quant = {0.5, -128};
  output->params = output_quant;
  output->quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 0.5;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output->quantization.params = reinterpret_cast<void*>(output_params);
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int32_t> per_channel_shift(3);
  auto status = PopulateConvolutionQuantizationParams(
      &context_, input.get(), filter.get(), bias.get(), output.get(),
      kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
      &output_activation_max, per_channel_multiplier.data(),
      per_channel_shift.data());
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-1, -2, -1));
}
// uint8 conv with a single filter scale but a 3-element channel count:
// verifies the single scale is replicated across per-channel outputs and the
// scalar multiplier/shift are populated too.
TEST_F(QuantizationParamsTest, CheckAndPopulateShift) {
  // Input: uint8, scale 0.5, zero point 5.
  TensorUniquePtr input = BuildTfLiteTensor();
  input->type = kTfLiteUInt8;
  input->allocation_type = kTfLiteArenaRw;
  input->dims = TfLiteIntArrayCreate(1);
  input->dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {0.5, 5};
  input->params = input_quant;
  input->quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 0.5;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input->quantization.params = reinterpret_cast<void*>(input_params);
  // Filter: uint8 with one shared scale 0.25.
  TensorUniquePtr filter = BuildTfLiteTensor();
  filter->type = kTfLiteUInt8;
  filter->allocation_type = kTfLiteArenaRw;
  filter->dims = TfLiteIntArrayCreate(4);
  filter->dims->data[0] = 3;
  filter->dims->data[1] = 4;
  filter->dims->data[2] = 5;
  filter->dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {0.25, 0};
  filter->params = filter_quant;
  filter->quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(1);
  filter_params->scale->data[0] = 0.25;
  filter_params->zero_point = TfLiteIntArrayCreate(1);
  filter_params->zero_point->data[0] = 0;
  filter_params->quantized_dimension = 0;
  filter->quantization.params = reinterpret_cast<void*>(filter_params);
  // Bias: per-channel scales {0.125, 0.0625, 0.125}.
  TensorUniquePtr bias = BuildTfLiteTensor();
  bias->type = kTfLiteUInt8;
  bias->allocation_type = kTfLiteArenaRw;
  bias->dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {0.125, 9};
  bias->params = bias_quant;
  bias->quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(3);
  bias_params->scale->data[0] = 0.125;
  bias_params->scale->data[1] = 0.0625;
  bias_params->scale->data[2] = 0.125;
  bias_params->zero_point = TfLiteIntArrayCreate(3);
  bias_params->zero_point->data[0] = 11;
  bias_params->zero_point->data[1] = 12;
  bias_params->zero_point->data[2] = 15;
  bias->quantization.params = reinterpret_cast<void*>(bias_params);
  // Output: uint8, scale 0.5, zero point 128.
  TensorUniquePtr output = BuildTfLiteTensor();
  output->type = kTfLiteUInt8;
  output->allocation_type = kTfLiteArenaRw;
  output->dims = nullptr;
  TfLiteQuantizationParams output_quant = {0.5, 128};
  output->params = output_quant;
  output->quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 0.5;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = 128;
  output->quantization.params = reinterpret_cast<void*>(output_params);
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);
  // Explicit channel count (3) with a single filter scale.
  EXPECT_EQ(kTfLiteOk,
            PopulateConvolutionQuantizationParams(
                &context_, input.get(), filter.get(), bias.get(), output.get(),
                kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
                &output_activation_max, per_channel_multiplier.data(),
                per_channel_shift.data(), 3));
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-1, -1, -1));
  EXPECT_EQ(shift, 1);
  EXPECT_EQ(multiplier, 1073741824);
}
#ifndef __APPLE__
// Extremely small per-channel scales (2^-31 .. 2^-33): the smallest one
// underflows and must produce a zero multiplier/shift for that channel.
TEST_F(QuantizationParamsTest, CheckAndPopulateZeroValue) {
  // Input: int8, scale 1, zero point 5.
  auto input = BuildTfLiteTensor();
  input->type = kTfLiteInt8;
  input->allocation_type = kTfLiteArenaRw;
  input->dims = TfLiteIntArrayCreate(1);
  input->dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {1, 5};
  input->params = input_quant;
  input->quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 1;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input->quantization.params = reinterpret_cast<void*>(input_params);
  // Filter: per-channel scales 2^-31, 2^-32, 2^-33 via std::ldexp.
  auto filter = BuildTfLiteTensor();
  filter->type = kTfLiteInt8;
  filter->allocation_type = kTfLiteArenaRw;
  filter->dims = TfLiteIntArrayCreate(4);
  filter->dims->data[0] = 3;
  filter->dims->data[1] = 4;
  filter->dims->data[2] = 5;
  filter->dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
  filter->params = filter_quant;
  filter->quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(3);
  filter_params->scale->data[0] = std::ldexp(1.0f, -31);
  filter_params->scale->data[1] = std::ldexp(1.0f, -32);
  filter_params->scale->data[2] = std::ldexp(1.0f, -33);
  filter_params->zero_point = TfLiteIntArrayCreate(3);
  filter_params->zero_point->data[0] = 0;
  filter_params->zero_point->data[1] = 0;
  filter_params->zero_point->data[2] = 0;
  filter_params->quantized_dimension = 0;
  filter->quantization.params = reinterpret_cast<void*>(filter_params);
  // Bias: matching tiny per-channel scales.
  auto bias = BuildTfLiteTensor();
  bias->type = kTfLiteInt32;
  bias->allocation_type = kTfLiteArenaRw;
  bias->dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
  bias->params = bias_quant;
  bias->quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(3);
  bias_params->scale->data[0] = std::ldexp(1.0f, -31);
  bias_params->scale->data[1] = std::ldexp(1.0f, -32);
  bias_params->scale->data[2] = std::ldexp(1.0f, -33);
  bias_params->zero_point = TfLiteIntArrayCreate(3);
  bias_params->zero_point->data[0] = 11;
  bias_params->zero_point->data[1] = 12;
  bias_params->zero_point->data[2] = 15;
  bias->quantization.params = reinterpret_cast<void*>(bias_params);
  // Output: int8, scale 1, zero point -128.
  auto output = BuildTfLiteTensor();
  output->type = kTfLiteInt8;
  output->allocation_type = kTfLiteArenaRw;
  output->dims = nullptr;
  TfLiteQuantizationParams output_quant = {1, -128};
  output->params = output_quant;
  output->quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output->quantization.params = reinterpret_cast<void*>(output_params);
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);
  EXPECT_EQ(kTfLiteOk,
            PopulateConvolutionQuantizationParams(
                &context_, input.get(), filter.get(), bias.get(), output.get(),
                kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
                &output_activation_max, per_channel_multiplier.data(),
                per_channel_shift.data(), 3));
  // The 2^-33 channel underflows to a zero multiplier and shift.
  EXPECT_THAT(per_channel_multiplier, ElementsAre(1073741824, 1073741824, 0));
  EXPECT_THAT(per_channel_shift, ElementsAre(-30, -31, 0));
}
#endif
// uint8 conv with filter scale 2^-31: verifies multiplier/shift derivation.
// Fixed: the 2^-31 float was produced by dereferencing a
// reinterpret_cast<float*> of an int32_t's address, which violates strict
// aliasing (undefined behavior). Use memcpy for the bit reinterpretation.
TEST_F(QuantizationParamsTest, CheckAndPopulateUint8) {
  // Input: uint8, scale 1, zero point 5.
  auto input = BuildTfLiteTensor();
  input->type = kTfLiteUInt8;
  input->allocation_type = kTfLiteArenaRw;
  input->dims = TfLiteIntArrayCreate(1);
  input->dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {1, 5};
  input->params = input_quant;
  input->quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 1;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input->quantization.params = reinterpret_cast<void*>(input_params);
  // Filter: uint8 with a single scale of exactly 2^-31.
  auto filter = BuildTfLiteTensor();
  filter->type = kTfLiteUInt8;
  filter->allocation_type = kTfLiteArenaRw;
  filter->dims = TfLiteIntArrayCreate(4);
  filter->dims->data[0] = 3;
  filter->dims->data[1] = 4;
  filter->dims->data[2] = 5;
  filter->dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
  filter->params = filter_quant;
  filter->quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(1);
  // 0x30000000 is the IEEE-754 single-precision bit pattern of 2^-31
  // (exponent 96 - 127 = -31, zero mantissa). Decode via memcpy instead of
  // reinterpret_cast to avoid strict-aliasing UB.
  const int32_t two_pow_neg_31_bits = 0x30000000;
  float two_pow_neg_31;
  memcpy(&two_pow_neg_31, &two_pow_neg_31_bits, sizeof(two_pow_neg_31));
  filter_params->scale->data[0] = two_pow_neg_31;
  filter_params->zero_point = TfLiteIntArrayCreate(1);
  filter_params->zero_point->data[0] = 0;
  filter_params->quantized_dimension = 0;
  filter->quantization.params = reinterpret_cast<void*>(filter_params);
  // Bias: int32 with scale ~2^-31.
  auto bias = BuildTfLiteTensor();
  bias->type = kTfLiteInt32;
  bias->allocation_type = kTfLiteArenaRw;
  bias->dims = TfLiteIntArrayCreate(4);
  TfLiteQuantizationParams bias_quant = {4.6566129e-10, 9};
  bias->params = bias_quant;
  bias->quantization.type = kTfLiteAffineQuantization;
  auto* bias_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  bias_params->scale = TfLiteFloatArrayCreate(1);
  bias_params->scale->data[0] = 4.6566129e-10;
  bias_params->zero_point = TfLiteIntArrayCreate(1);
  bias_params->zero_point->data[0] = 11;
  bias->quantization.params = reinterpret_cast<void*>(bias_params);
  // Output: uint8, scale 1, zero point -128.
  auto output = BuildTfLiteTensor();
  output->type = kTfLiteUInt8;
  output->allocation_type = kTfLiteArenaRw;
  output->dims = nullptr;
  TfLiteQuantizationParams output_quant = {1, -128};
  output->params = output_quant;
  output->quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output->quantization.params = reinterpret_cast<void*>(output_params);
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);
  EXPECT_EQ(kTfLiteOk,
            PopulateConvolutionQuantizationParams(
                &context_, input.get(), filter.get(), bias.get(), output.get(),
                kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
                &output_activation_max, per_channel_multiplier.data(),
                per_channel_shift.data(), 3));
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));
}
// Same as CheckAndPopulateUint8 but with a null bias tensor: params must
// still populate successfully. Fixed: replaced the strict-aliasing-violating
// reinterpret_cast int->float bit reinterpretation with memcpy.
TEST_F(QuantizationParamsTest, CheckAndPopulateWithoutBias) {
  // Input: uint8, scale 1, zero point 5.
  auto input = BuildTfLiteTensor();
  input->type = kTfLiteUInt8;
  input->allocation_type = kTfLiteArenaRw;
  input->dims = TfLiteIntArrayCreate(1);
  input->dims->data[0] = 2;
  TfLiteQuantizationParams input_quant = {1, 5};
  input->params = input_quant;
  input->quantization.type = kTfLiteAffineQuantization;
  auto* input_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  input_params->scale = TfLiteFloatArrayCreate(1);
  input_params->scale->data[0] = 1;
  input_params->zero_point = TfLiteIntArrayCreate(1);
  input_params->zero_point->data[0] = 5;
  input->quantization.params = reinterpret_cast<void*>(input_params);
  // Filter: uint8 with a single scale of exactly 2^-31.
  auto filter = BuildTfLiteTensor();
  filter->type = kTfLiteUInt8;
  filter->allocation_type = kTfLiteArenaRw;
  filter->dims = TfLiteIntArrayCreate(4);
  filter->dims->data[0] = 3;
  filter->dims->data[1] = 4;
  filter->dims->data[2] = 5;
  filter->dims->data[3] = 6;
  TfLiteQuantizationParams filter_quant = {4.6566129e-10, 0};
  filter->params = filter_quant;
  filter->quantization.type = kTfLiteAffineQuantization;
  auto* filter_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  filter_params->scale = TfLiteFloatArrayCreate(1);
  // 0x30000000 is the IEEE-754 single-precision bit pattern of 2^-31;
  // decode via memcpy to avoid strict-aliasing UB.
  const int32_t two_pow_neg_31_bits = 0x30000000;
  float two_pow_neg_31;
  memcpy(&two_pow_neg_31, &two_pow_neg_31_bits, sizeof(two_pow_neg_31));
  filter_params->scale->data[0] = two_pow_neg_31;
  filter_params->zero_point = TfLiteIntArrayCreate(1);
  filter_params->zero_point->data[0] = 0;
  filter_params->quantized_dimension = 0;
  filter->quantization.params = reinterpret_cast<void*>(filter_params);
  // Output: uint8, scale 1, zero point -128.
  auto output = BuildTfLiteTensor();
  output->type = kTfLiteUInt8;
  output->allocation_type = kTfLiteArenaRw;
  output->dims = nullptr;
  TfLiteQuantizationParams output_quant = {1, -128};
  output->params = output_quant;
  output->quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output->quantization.params = reinterpret_cast<void*>(output_params);
  int32_t multiplier;
  int shift;
  int32_t output_activation_min;
  int32_t output_activation_max;
  std::vector<int32_t> per_channel_multiplier(3);
  std::vector<int> per_channel_shift(3);
  // Bias is nullptr: population must still succeed.
  EXPECT_EQ(kTfLiteOk,
            PopulateConvolutionQuantizationParams(
                &context_, input.get(), filter.get(), nullptr, output.get(),
                kTfLiteActRelu, &multiplier, &shift, &output_activation_min,
                &output_activation_max, per_channel_multiplier.data(),
                per_channel_shift.data(), 3));
  EXPECT_THAT(per_channel_multiplier,
              ElementsAre(1073741824, 1073741824, 1073741824));
  EXPECT_THAT(per_channel_shift, ElementsAre(-30, -30, -30));
}
// With a tiny scale (1e-10), quantizing the Relu6/ReluN1To1 bounds would
// overflow int32: CalculateActivationRangeQuantized must fail with the
// overflow-check message, while plain Relu (bound 0) still succeeds.
TEST_F(QuantizationParamsTest, ActivationRangeQuantizedOverflow) {
  auto output = BuildTfLiteTensor();
  output->type = kTfLiteUInt8;
  output->allocation_type = kTfLiteArenaRw;
  output->dims = nullptr;
  TfLiteQuantizationParams output_quant = {1e-10, -128};
  output->params = output_quant;
  output->quantization.type = kTfLiteAffineQuantization;
  auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>(
      malloc(sizeof(TfLiteAffineQuantization)));
  output_params->scale = TfLiteFloatArrayCreate(1);
  output_params->scale->data[0] = 1;
  output_params->zero_point = TfLiteIntArrayCreate(1);
  output_params->zero_point->data[0] = -128;
  output->quantization.params = reinterpret_cast<void*>(output_params);
  int32_t act_min, act_max;
  ASSERT_EQ(kTfLiteOk,
            CalculateActivationRangeQuantized(
                &context_, kTfLiteActRelu, output.get(), &act_min, &act_max));
  ASSERT_NE(kTfLiteOk,
            CalculateActivationRangeQuantized(
                &context_, kTfLiteActRelu6, output.get(), &act_min, &act_max));
  EXPECT_TRUE(absl::StrContains(
      context_.error, "no_integer_overflow_from_quantization was not true"));
  ASSERT_NE(kTfLiteOk, CalculateActivationRangeQuantized(
                           &context_, kTfLiteActReluN1To1, output.get(),
                           &act_min, &act_max));
  EXPECT_TRUE(absl::StrContains(
      context_.error, "no_integer_overflow_from_quantization was not true"))
}
// IsMobilePlatform must agree with the build target's platform macros.
TEST_F(QuantizationParamsTest, IsMobilePlatform) {
#if defined(__ANDROID__)
  EXPECT_TRUE(IsMobilePlatform());
#elif defined(__linux__)
  EXPECT_FALSE(IsMobilePlatform());
#elif defined(_WIN32)
  EXPECT_FALSE(IsMobilePlatform());
#endif
}
TEST(HasUnspecifiedDimensions, ReturnsTrueIfADimIsMinusOne) {
auto tensor = BuildTfLiteTensor(kTfLiteInt32, {1, 1, 3}, kTfLiteDynamic);
tensor->dims_signature = ConvertVectorToTfLiteIntArray({1, -1, 3});
EXPECT_TRUE(HasUnspecifiedDimension(tensor.get()));
}
TEST(HasUnspecifiedDimensions, ReturnsFalseIfAllPostiveDims) {
auto tensor = BuildTfLiteTensor(kTfLiteInt32, {1, 1, 3}, kTfLiteDynamic);
tensor->dims_signature = ConvertVectorToTfLiteIntArray({1, 1, 3});
EXPECT_FALSE(HasUnspecifiedDimension(tensor.get()));
}
class SetTensorAllocationTypeTest : public testing::Test {
public:
SetTensorAllocationTypeTest() {
tensor_->type = kTfLiteInt32;
tensor_->allocation_type = kTfLiteDynamic;
}
protected:
Interpreter interpreter_;
TfLiteContext& context_ = *interpreter_.primary_subgraph().context();
IntArrayUniquePtr dims_ = BuildTfLiteArray({2, 3, 4});
TensorUniquePtr tensor_ = BuildTfLiteTensor();
};
TEST_F(SetTensorAllocationTypeTest,
SetUnallocatedDynamicTensorToDynamicIsANoop) {
tensor_->allocation_type = kTfLiteDynamic;
SetTensorToDynamic(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLiteDynamic);
}
TEST_F(SetTensorAllocationTypeTest, SetAllocatedDynamicTensorToDynamicIsANoop) {
tensor_->allocation_type = kTfLiteDynamic;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
const void* const original_data = tensor_->data.data;
SetTensorToDynamic(tensor_.get());
EXPECT_EQ(tensor_->data.data, original_data);
EXPECT_EQ(tensor_->allocation_type, kTfLiteDynamic);
}
TEST_F(SetTensorAllocationTypeTest,
SetAllocatedPersistentRoTensorToDynamicFreesExistingTensorData) {
tensor_->allocation_type = kTfLitePersistentRo;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
SetTensorToDynamic(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLiteDynamic);
}
TEST_F(SetTensorAllocationTypeTest,
SetUnallocatedPersistentRoTensorToPersistentRoIsANoop) {
tensor_->allocation_type = kTfLitePersistentRo;
SetTensorToPersistentRo(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLitePersistentRo);
}
TEST_F(SetTensorAllocationTypeTest,
SetAllocatedPersistentRoTensorToPersistentRoIsANoop) {
tensor_->allocation_type = kTfLitePersistentRo;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
const void* const original_data = tensor_->data.data;
SetTensorToPersistentRo(tensor_.get());
EXPECT_EQ(tensor_->data.data, original_data);
EXPECT_EQ(tensor_->allocation_type, kTfLitePersistentRo);
}
TEST_F(SetTensorAllocationTypeTest,
SetAllocatedDynamicTensorToPersistentRoFreesExistingTensorData) {
tensor_->allocation_type = kTfLiteDynamic;
ASSERT_EQ(context_.ResizeTensor(&context_, tensor_.get(), dims_.release()),
kTfLiteOk);
SetTensorToPersistentRo(tensor_.get());
EXPECT_EQ(tensor_->data.data, nullptr);
EXPECT_EQ(tensor_->allocation_type, kTfLitePersistentRo);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/kernel_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/kernel_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e400d552-8f2c-4626-b7d9-d0f1ab1d67fe | cpp | google/cel-cpp | trivial_legacy_type_info | eval/public/structs/trivial_legacy_type_info.h | eval/public/structs/trivial_legacy_type_info_test.cc | #ifndef THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_STRUCTS_TRIVIAL_LEGACY_TYPE_INFO_H_
#define THIRD_PARTY_CEL_CPP_EVAL_PUBLIC_STRUCTS_TRIVIAL_LEGACY_TYPE_INFO_H_
#include <string>
#include "absl/base/no_destructor.h"
#include "absl/strings/string_view.h"
#include "eval/public/message_wrapper.h"
#include "eval/public/structs/legacy_type_info_apis.h"
namespace google::api::expr::runtime {
class TrivialTypeInfo : public LegacyTypeInfoApis {
public:
absl::string_view GetTypename(const MessageWrapper& wrapper) const override {
return "opaque";
}
std::string DebugString(const MessageWrapper& wrapper) const override {
return "opaque";
}
const LegacyTypeAccessApis* GetAccessApis(
const MessageWrapper& wrapper) const override {
return nullptr;
}
static const TrivialTypeInfo* GetInstance() {
static absl::NoDestructor<TrivialTypeInfo> kInstance;
return &*kInstance;
}
};
}
#endif | #include "eval/public/structs/trivial_legacy_type_info.h"
#include "eval/public/message_wrapper.h"
#include "internal/testing.h"
namespace google::api::expr::runtime {
namespace {
TEST(TrivialTypeInfo, GetTypename) {
TrivialTypeInfo info;
MessageWrapper wrapper;
EXPECT_EQ(info.GetTypename(wrapper), "opaque");
EXPECT_EQ(TrivialTypeInfo::GetInstance()->GetTypename(wrapper), "opaque");
}
TEST(TrivialTypeInfo, DebugString) {
TrivialTypeInfo info;
MessageWrapper wrapper;
EXPECT_EQ(info.DebugString(wrapper), "opaque");
EXPECT_EQ(TrivialTypeInfo::GetInstance()->DebugString(wrapper), "opaque");
}
TEST(TrivialTypeInfo, GetAccessApis) {
TrivialTypeInfo info;
MessageWrapper wrapper;
EXPECT_EQ(info.GetAccessApis(wrapper), nullptr);
EXPECT_EQ(TrivialTypeInfo::GetInstance()->GetAccessApis(wrapper), nullptr);
}
TEST(TrivialTypeInfo, GetMutationApis) {
TrivialTypeInfo info;
MessageWrapper wrapper;
EXPECT_EQ(info.GetMutationApis(wrapper), nullptr);
EXPECT_EQ(TrivialTypeInfo::GetInstance()->GetMutationApis(wrapper), nullptr);
}
TEST(TrivialTypeInfo, FindFieldByName) {
TrivialTypeInfo info;
MessageWrapper wrapper;
EXPECT_EQ(info.FindFieldByName("foo"), absl::nullopt);
EXPECT_EQ(TrivialTypeInfo::GetInstance()->FindFieldByName("foo"),
absl::nullopt);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/trivial_legacy_type_info.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/structs/trivial_legacy_type_info_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
49d9476b-8f73-4dda-9115-ac8a5af6998f | cpp | tensorflow/tensorflow | hlo_slicer | third_party/xla/xla/tools/hlo_slicer.cc | third_party/xla/xla/tools/hlo_slicer_test.cc | #include "xla/tools/hlo_slicer.h"
#include <deque>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tools/hlo_extractor.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
void ReduceTupleParameterHelper(HloModule* hlo_module,
HloInstruction* tuple_parameter) {
for (HloInstruction* user_inst : tuple_parameter->users()) {
if (user_inst->opcode() != HloOpcode::kGetTupleElement) {
return;
}
}
VLOG(1) << "Parameter instruction to be reduced: "
<< tuple_parameter->ToString()
<< " shape size: " << tuple_parameter->shape().tuple_shapes_size()
<< " users size: " << tuple_parameter->users().size();
std::vector<Shape> used_shapes;
for (HloInstruction* user_inst : tuple_parameter->users()) {
used_shapes.push_back(user_inst->shape());
}
Shape new_tuple_shape =
ShapeUtil::MakeTupleShape(absl::MakeSpan(used_shapes));
tuple_parameter->mutable_shape()->mutable_tuple_shapes()->clear();
for (const auto& shape : used_shapes) {
tuple_parameter->mutable_shape()->mutable_tuple_shapes()->push_back(shape);
}
for (int i = 0; i < tuple_parameter->users().size(); ++i) {
tuple_parameter->users()[i]->set_tuple_index(i);
}
hlo_module->mutable_config().SetComputationLayoutIfExists(
hlo_module->entry_computation()->ComputeProgramShape());
}
void ReduceTupleParameter(HloModule* hlo_module) {
std::vector<HloInstruction*> tuple_parameters;
for (HloInstruction* parameter :
hlo_module->entry_computation()->parameter_instructions()) {
if (parameter->shape().IsTuple()) {
tuple_parameters.push_back(parameter);
}
}
for (HloInstruction* tuple_parameter : tuple_parameters) {
ReduceTupleParameterHelper(hlo_module, tuple_parameter);
}
}
HloInstruction* FindShardingInstruction(HloModule* hlo_module) {
for (HloComputation* computation : hlo_module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCustomCall &&
instruction->custom_call_target() == "Sharding") {
CHECK_EQ(instruction->operand_count(), 1);
return instruction;
}
}
}
return nullptr;
}
void RemoveSharding(HloModule* hlo_module) {
while (HloInstruction* custom_call_instruction =
FindShardingInstruction(hlo_module)) {
for (HloInstruction* user_instruction : custom_call_instruction->users()) {
CHECK_OK(custom_call_instruction->ReplaceUseWith(
user_instruction, custom_call_instruction->mutable_operand(0)));
}
custom_call_instruction->DetachFromOperandsAndUsers();
CHECK_OK(custom_call_instruction->parent()->RemoveInstruction(
custom_call_instruction));
VLOG(1) << "Removed sharding custom-call: "
<< custom_call_instruction->ToString();
HloVerifier verifier(false,
true);
TF_CHECK_OK(verifier.Run(hlo_module).status());
}
}
void IntraComputationSlicing(
const HloComputation* computation,
absl::flat_hash_set<const HloInstruction*>& sliced_instructions,
absl::flat_hash_set<const HloInstruction*>& frontier_instructions,
bool forward_slice, FrontierSelector frontier_selector,
bool ignore_control_dependency) {
std::deque<const HloInstruction*> worklist(sliced_instructions.begin(),
sliced_instructions.end());
while (!worklist.empty()) {
const HloInstruction* inst = worklist.back();
worklist.pop_back();
if (frontier_selector && !frontier_selector(inst)) {
frontier_instructions.insert(inst);
continue;
}
std::vector<HloInstruction*> instructions_to_propagate =
forward_slice ? std::vector<HloInstruction*>(inst->users().begin(),
inst->users().end())
: std::vector<HloInstruction*>(inst->operands().begin(),
inst->operands().end());
if (!ignore_control_dependency) {
if (forward_slice) {
instructions_to_propagate.insert(instructions_to_propagate.end(),
inst->control_successors().begin(),
inst->control_successors().end());
} else {
instructions_to_propagate.insert(instructions_to_propagate.end(),
inst->control_predecessors().begin(),
inst->control_predecessors().end());
}
}
for (auto next_inst : instructions_to_propagate) {
if (!sliced_instructions.contains(next_inst)) {
worklist.push_front(next_inst);
sliced_instructions.insert(next_inst);
}
}
}
}
SliceOutput SliceModuleHelper(
const HloModule* hlo_module,
absl::Span<const HloInstruction*> slice_starting_instructions,
FrontierSelector frontier_selector, bool ignore_control_dependency,
bool forward_slice, bool nearest_common_ancestor_as_root) {
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloInstruction*>>
sliced_computation_instructions_map;
for (auto inst : slice_starting_instructions) {
sliced_computation_instructions_map[inst->parent()].insert(inst);
}
absl::flat_hash_map<const HloComputation*,
absl::flat_hash_set<const HloInstruction*>>
frontier_computation_instructions_map;
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(hlo_module);
std::vector<HloComputation*> post_order_computations =
hlo_module->MakeComputationPostOrder();
std::vector<HloComputation*> computations_to_traverse =
forward_slice
? post_order_computations
: std::vector<HloComputation*>(post_order_computations.rbegin(),
post_order_computations.rend());
absl::flat_hash_set<const HloComputation*>
nearest_common_ancestor_computations;
if (nearest_common_ancestor_as_root) {
std::vector<const HloComputation*> starting_computations;
for (const auto& [computation, instructions] :
sliced_computation_instructions_map) {
starting_computations.push_back(computation);
}
nearest_common_ancestor_computations =
call_graph->NearestCommonAncestorComputations(starting_computations);
CHECK(!nearest_common_ancestor_computations.empty());
}
for (auto computation : computations_to_traverse) {
if (sliced_computation_instructions_map.contains(computation)) {
auto slicing_starting_instructions = std::vector<const HloInstruction*>(
sliced_computation_instructions_map[computation].begin(),
sliced_computation_instructions_map[computation].end());
IntraComputationSlicing(
computation, sliced_computation_instructions_map[computation],
frontier_computation_instructions_map[computation], forward_slice,
frontier_selector, ignore_control_dependency);
if (forward_slice) {
if (nearest_common_ancestor_as_root &&
nearest_common_ancestor_computations.contains(computation)) {
const HloInstruction* nearest_common_ancestor_instruction =
*(call_graph->NearestCommonAncestorInstructions(
slicing_starting_instructions))
.begin();
CHECK_NE(nearest_common_ancestor_instruction, nullptr);
return SliceOutput{sliced_computation_instructions_map,
frontier_computation_instructions_map,
nearest_common_ancestor_instruction};
}
if (!sliced_computation_instructions_map[computation].contains(
computation->root_instruction()) ||
frontier_computation_instructions_map[computation].contains(
computation->root_instruction())) {
continue;
}
for (auto caller_inst :
call_graph->GetComputationCallers(computation)) {
sliced_computation_instructions_map[caller_inst->parent()].insert(
caller_inst);
}
}
if (!forward_slice) {
for (const auto& callsite :
call_graph->GetNode(computation).callsites()) {
if (sliced_computation_instructions_map[computation].contains(
callsite.instruction())) {
for (auto callee : callsite.called_computations()) {
sliced_computation_instructions_map[callee].insert(
callee->root_instruction());
}
}
}
}
}
}
return SliceOutput{sliced_computation_instructions_map,
frontier_computation_instructions_map};
}
}
SliceOutput SliceModule(
const HloModule* hlo_module,
absl::Span<const HloInstruction*> slice_starting_instructions,
FrontierSelector frontier_selector, bool ignore_control_dependency,
bool forward_slice, bool nearest_common_ancestor_as_root) {
if (forward_slice) {
if (!nearest_common_ancestor_as_root) {
return SliceModuleHelper(hlo_module, slice_starting_instructions,
frontier_selector, ignore_control_dependency,
true,
false);
} else {
CHECK(forward_slice) << "Option `nearest_common_ancestor_as_root` can "
"only be enabled when "
"forward slicing";
CHECK((frontier_selector == nullptr))
<< "Option `nearest_common_ancestor_as_root` can not be specified "
"with `frontier_selector`";
SliceOutput forward_slice_output =
SliceModuleHelper(hlo_module, slice_starting_instructions,
nullptr,
ignore_control_dependency, true,
true);
std::vector<const HloInstruction*> nearest_common_ancestor(
{forward_slice_output.nearest_common_ancestor_root()});
CHECK_EQ(nearest_common_ancestor.size(), 1);
SliceOutput backward_slice_output =
SliceModuleHelper(hlo_module,
absl::MakeSpan(nearest_common_ancestor),
nullptr,
ignore_control_dependency, false,
false);
return SliceOutput{SliceOutput::IntersectSlicedInstructions(
forward_slice_output, backward_slice_output),
backward_slice_output.frontier_instructions(),
forward_slice_output.nearest_common_ancestor_root()};
}
} else {
return SliceModuleHelper(hlo_module, slice_starting_instructions,
frontier_selector, ignore_control_dependency,
false,
false);
}
}
std::vector<std::unique_ptr<HloModule>> SliceModuleAndExtract(
const HloModule* hlo_module,
absl::Span<const HloInstruction*> slice_starting_instructions,
const SlicingConfiguration& slicing_configuration) {
std::vector<std::unique_ptr<HloModule>> sliced_modules;
int slicing_group = slicing_configuration.slicing_group;
CHECK(slicing_group >= 1 || slicing_group == -1);
std::vector<absl::Span<const HloInstruction*>> grouped_instructions;
if (slicing_group == -1) {
grouped_instructions = {slice_starting_instructions};
} else {
for (int i = 0; i < slice_starting_instructions.size();
i += slicing_group) {
grouped_instructions.push_back(
slice_starting_instructions.subspan(i, slicing_group));
}
}
for (const auto& grouped_slice_starting_instructions : grouped_instructions) {
SliceOutput forward_slice_output;
if (slicing_configuration.forward_slicing ==
SlicingConfiguration::ForwardSlicingConfig::kRoot) {
forward_slice_output = SliceModule(
hlo_module, grouped_slice_starting_instructions,
nullptr,
false, true,
false);
} else if (slicing_configuration.forward_slicing ==
SlicingConfiguration::ForwardSlicingConfig::kNca) {
forward_slice_output = SliceModule(
hlo_module, grouped_slice_starting_instructions,
nullptr,
false, true,
true);
}
VLOG(1) << "[Num of forward sliced insts]: "
<< forward_slice_output.NumSlicedInstructions();
SliceOutput backward_slice_output;
if (slicing_configuration.backward_slicing) {
backward_slice_output = SliceModule(
hlo_module, grouped_slice_starting_instructions,
nullptr,
false, false);
} else {
backward_slice_output = SliceOutput();
}
auto sliced_result = SliceOutput(SliceOutput::UnionSlicedInstructions(
forward_slice_output, backward_slice_output));
const HloInstruction* extraction_root =
slicing_configuration.forward_slicing ==
SlicingConfiguration::ForwardSlicingConfig::kNca
? forward_slice_output.nearest_common_ancestor_root()
: hlo_module->entry_computation()->root_instruction();
VLOG(1) << "[Root instruction of the sliced module]: "
<< extraction_root->ToString();
auto extract_selector = [&sliced_result](const HloInstruction* hlo_inst) {
for (const auto& [computation, instructions] :
sliced_result.sliced_instructions()) {
if (instructions.contains(hlo_inst)) {
return true;
}
}
return false;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(extraction_root, -1,
extract_selector,
replace_type_selector,
true);
if (slicing_configuration.remove_sharding) {
RemoveSharding(extracted_module.get());
}
if (slicing_configuration.reduce_tuple_parameter) {
ReduceTupleParameter(extracted_module.get());
}
HloVerifier verifier(false,
true);
TF_CHECK_OK(verifier.Run(extracted_module.get()).status());
sliced_modules.emplace_back(std::move(extracted_module));
}
CHECK_EQ(sliced_modules.size(), grouped_instructions.size());
return sliced_modules;
}
} | #include "xla/tools/hlo_slicer.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = testing::opcode_matchers;
using HloSlicerTest = HloTestBase;
TEST_F(HloSlicerTest, SingleComputationForwardSlice) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] constant(1)
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto p2 = FindInstruction(hlo_module.get(), "p.2");
EXPECT_THAT(p2, op::Parameter());
auto p3 = FindInstruction(hlo_module.get(), "p.3");
EXPECT_THAT(p3, op::Parameter());
auto x = FindInstruction(hlo_module.get(), "x");
EXPECT_THAT(x, op::Subtract());
auto y = FindInstruction(hlo_module.get(), "y");
EXPECT_THAT(y, op::Multiply());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto add1 = FindInstruction(hlo_module.get(), "add.1");
EXPECT_THAT(add1, op::Add());
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
{
std::vector<const HloInstruction*> relevant_instructions({p2, x});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({add0, p3});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p3));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
}
TEST_F(HloSlicerTest, MultipleComputationForwardSlice) {
const std::string& hlo_string = R"(
HloModule test
calculate_alpha {
constant.5 = s32[] constant(2)
constant.6 = s32[] constant(3)
ROOT ret = s32[] subtract(constant.5, constant.6)
}
While.body {
loop_var.1 = (s32[], s32[3]{0}) parameter(0)
get_tuple_element.1 = s32[] get-tuple-element(loop_var.1), index=0
constant.1 = s32[] constant(23)
add.3 = s32[] add(get_tuple_element.1, constant.1)
get_tuple_element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
multiply = s32[3]{0} multiply(get_tuple_element.2, get_tuple_element.2)
ROOT tuple = (s32[], s32[3]{0}) tuple(add.3, multiply)
}
While.condition {
loop_var.2 = (s32[], s32[3]{0}) parameter(0)
get_tuple_element.3 = s32[] get-tuple-element(loop_var.2), index=0
constant.2 = s32[] constant(100)
ROOT less_than = pred[] compare(get_tuple_element.3, constant.2), direction=LT
}
ENTRY Test {
p.1 = s32[] parameter(0)
p.2 = s32[] parameter(1)
add.1 = s32[] add(p.1, p.2)
constant.3 = s32[] call(), to_apply=calculate_alpha
constant.4 = s32[3]{0} constant({0, 1, 2})
tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
while.1 = (s32[], s32[3]{0}) while(tuple.1), condition=While.condition, body=While.body
loop_count = s32[] get-tuple-element(while.1), index=0
ROOT add.2 = s32[] add(loop_count, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto add1 = FindInstruction(hlo_module.get(), "add.1");
EXPECT_THAT(add1, op::Add());
auto while1 = FindInstruction(hlo_module.get(), "while.1");
EXPECT_THAT(while1, op::While());
auto loop_count = FindInstruction(hlo_module.get(), "loop_count");
EXPECT_THAT(loop_count, op::GetTupleElement());
auto add2 = FindInstruction(hlo_module.get(), "add.2");
EXPECT_THAT(add2, op::Add());
auto gte1 = FindInstruction(hlo_module.get(), "get_tuple_element.1");
EXPECT_THAT(gte1, op::GetTupleElement());
auto gte2 = FindInstruction(hlo_module.get(), "get_tuple_element.2");
EXPECT_THAT(gte2, op::GetTupleElement());
auto constant5 = FindInstruction(hlo_module.get(), "constant.5");
EXPECT_THAT(constant5, op::Constant());
auto tuple1 = FindInstruction(hlo_module.get(), "tuple.1");
EXPECT_THAT(tuple1, op::Tuple());
auto entry_comp = FindComputation(hlo_module.get(), "Test");
EXPECT_NE(entry_comp, nullptr);
auto while_cond_comp = FindComputation(hlo_module.get(), "While.condition");
EXPECT_NE(while_cond_comp, nullptr);
auto while_body_comp = FindComputation(hlo_module.get(), "While.body");
EXPECT_NE(while_body_comp, nullptr);
auto calculate_alpha_comp =
FindComputation(hlo_module.get(), "calculate_alpha");
EXPECT_NE(calculate_alpha_comp, nullptr);
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return true;
};
{
std::vector<const HloInstruction*> relevant_instructions({add1, while1});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add2));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add1));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(while1));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(loop_count));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({constant5});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_FALSE(sliced_instructions[entry_comp].contains(add1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
{
std::vector<const HloInstruction*> relevant_instructions({gte2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_TRUE(sliced_instructions.contains(while_body_comp));
EXPECT_FALSE(sliced_instructions.contains(while_cond_comp));
EXPECT_FALSE(sliced_instructions[entry_comp].contains(tuple1));
EXPECT_FALSE(sliced_instructions[entry_comp].contains(add1));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add2));
EXPECT_FALSE(sliced_instructions[while_body_comp].contains(gte1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
}
}
TEST_F(HloSlicerTest, SingleComputationForwardFrontier) {
const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] constant(1)
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
p.4 = f32[10] parameter(4)
p.5 = f32[10] parameter(5)
sub.1 = f32[10] subtract(p.4, p.5)
add.2 = f32[10] add(p.3, sub.1)
ROOT add.1 = f32[10] add(x, add.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto broadcast = FindInstruction(hlo_module.get(), "broadcast");
EXPECT_THAT(broadcast, op::Broadcast());
auto x = FindInstruction(hlo_module.get(), "x");
EXPECT_THAT(x, op::Subtract());
auto y = FindInstruction(hlo_module.get(), "y");
EXPECT_THAT(y, op::Multiply());
auto add0 = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(add0, op::Add());
auto p5 = FindInstruction(hlo_module.get(), "p.5");
EXPECT_THAT(p5, op::Parameter());
auto sub1 = FindInstruction(hlo_module.get(), "sub.1");
EXPECT_THAT(sub1, op::Subtract());
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kSubtract;
};
std::vector<const HloInstruction*> relevant_instructions({broadcast, add0});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 4);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions[entry_comp].contains(x));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kSubtract;
};
std::vector<const HloInstruction*> relevant_instructions({add0, y, p5});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 5);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(x));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(p5));
EXPECT_TRUE(sliced_instructions[entry_comp].contains(sub1));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 2);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions[entry_comp].contains(x));
EXPECT_TRUE(frontier_instructions[entry_comp].contains(sub1));
}
}
TEST_F(HloSlicerTest, MultipleComputationForwardFrontier) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
c.2 = f32[] add(c.0, c.1)
c.3 = f32[] constant(4)
ROOT ret = f32[] multiply(c.2, c.3)
}
ENTRY axpy_computation {
p.0 = f32[] parameter(0)
alpha = f32[] call(), to_apply=calculate_alpha
ROOT add = f32[] add(p.0, alpha)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
EXPECT_NE(entry_comp, nullptr);
auto calculate_alpha_comp =
FindComputation(hlo_module.get(), "calculate_alpha");
EXPECT_NE(calculate_alpha_comp, nullptr);
auto ret = FindInstruction(hlo_module.get(), "ret");
EXPECT_THAT(ret, op::Multiply());
auto c2 = FindInstruction(hlo_module.get(), "c.2");
EXPECT_THAT(c2, op::Add());
auto c3 = FindInstruction(hlo_module.get(), "c.3");
EXPECT_THAT(c3, op::Constant());
auto alpha = FindInstruction(hlo_module.get(), "alpha");
EXPECT_THAT(alpha, op::Call());
{
auto hlo_selector = [&ret](const HloInstruction* hlo_inst) -> bool {
return hlo_inst != ret;
};
std::vector<const HloInstruction*> relevant_instructions({c2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_result.NumSlicedInstructions(), 2);
EXPECT_EQ(sliced_instructions.size(), 1);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 2);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions.contains(calculate_alpha_comp));
EXPECT_TRUE(frontier_instructions[calculate_alpha_comp].contains(ret));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kCall;
};
std::vector<const HloInstruction*> relevant_instructions({c2});
auto sliced_result = SliceModule(
hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector);
auto sliced_instructions = sliced_result.sliced_instructions();
EXPECT_EQ(sliced_instructions.size(), 2);
EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 2);
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
EXPECT_TRUE(sliced_instructions.contains(entry_comp));
EXPECT_EQ(sliced_instructions[entry_comp].size(), 1);
EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
auto frontier_instructions = sliced_result.frontier_instructions();
EXPECT_TRUE(frontier_instructions.contains(entry_comp));
EXPECT_TRUE(frontier_instructions[entry_comp].contains(alpha));
}
}
// Backward slicing within a single computation. Verifies (a) unbounded
// backward slices seeded from one and from two instructions (empty frontier),
// and (b) a slice whose traversal stops at kBroadcast, which must then be
// reported in the frontier.
TEST_F(HloSlicerTest, SingleComputationBackwardSliceAndFrontier) {
  const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] constant(1)
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Resolve every instruction referenced by the expectations below and sanity
  // check its opcode.
  auto alpha = FindInstruction(hlo_module.get(), "alpha");
  EXPECT_THAT(alpha, op::Constant());
  auto p0 = FindInstruction(hlo_module.get(), "p.0");
  EXPECT_THAT(p0, op::Parameter());
  auto p1 = FindInstruction(hlo_module.get(), "p.1");
  EXPECT_THAT(p1, op::Parameter());
  auto p2 = FindInstruction(hlo_module.get(), "p.2");
  EXPECT_THAT(p2, op::Parameter());
  auto p3 = FindInstruction(hlo_module.get(), "p.3");
  EXPECT_THAT(p3, op::Parameter());
  auto broadcast = FindInstruction(hlo_module.get(), "broadcast");
  EXPECT_THAT(broadcast, op::Broadcast());
  auto x = FindInstruction(hlo_module.get(), "x");
  EXPECT_THAT(x, op::Subtract());
  auto y = FindInstruction(hlo_module.get(), "y");
  EXPECT_THAT(y, op::Multiply());
  auto add0 = FindInstruction(hlo_module.get(), "add.0");
  EXPECT_THAT(add0, op::Add());
  auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
  EXPECT_NE(entry_comp, nullptr);
  // Selector that never stops the traversal (no frontier expected).
  auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
    return true;
  };
  {
    // Backward slice from `y` alone: its transitive operands are
    // {broadcast, p.2, alpha}.
    std::vector<const HloInstruction*> relevant_instructions({y});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_instructions.size(), 1);
    EXPECT_EQ(sliced_instructions[entry_comp].size(), 4);
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
  }
  {
    // Backward slice from two seeds {add.0, y}: union of both operand cones.
    std::vector<const HloInstruction*> relevant_instructions({add0, y});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_instructions.size(), 1);
    EXPECT_EQ(sliced_instructions[entry_comp].size(), 7);
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(add0));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(p0));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(p1));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
  }
  // Selector that halts the traversal at broadcast instructions.
  auto broadcast_slicer = [](const HloInstruction* hlo_inst) -> bool {
    return hlo_inst->opcode() != HloOpcode::kBroadcast;
  };
  {
    // Backward slice from `y`, cut at `broadcast`: `alpha` is excluded and
    // `broadcast` becomes the single frontier instruction.
    std::vector<const HloInstruction*> relevant_instructions({y});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions),
        broadcast_slicer,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_instructions.size(), 1);
    EXPECT_EQ(sliced_instructions[entry_comp].size(), 3);
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(y));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(p2));
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(broadcast));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
    auto frontier_instructions = sliced_result.frontier_instructions();
    EXPECT_TRUE(frontier_instructions[entry_comp].contains(broadcast));
  }
}
// Backward slicing across a call boundary. Verifies unbounded slices seeded
// inside the callee and at the call site, plus slices cut off at kAdd and
// kMultiply frontiers (the latter keeping the call site in the entry
// computation while truncating inside the callee).
TEST_F(HloSlicerTest, MultipleComputationBackwardSliceAndFrontier) {
  const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
c.2 = f32[] add(c.0, c.1)
c.3 = f32[] constant(4)
ROOT ret = f32[] multiply(c.2, c.3)
}
ENTRY axpy_computation {
p.0 = f32[] parameter(0)
alpha = f32[] call(), to_apply=calculate_alpha
ROOT add = f32[] add(p.0, alpha)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto entry_comp = FindComputation(hlo_module.get(), "axpy_computation");
  EXPECT_NE(entry_comp, nullptr);
  auto calculate_alpha_comp =
      FindComputation(hlo_module.get(), "calculate_alpha");
  EXPECT_NE(calculate_alpha_comp, nullptr);
  // Resolve the instructions used in the expectations below.
  auto ret = FindInstruction(hlo_module.get(), "ret");
  EXPECT_THAT(ret, op::Multiply());
  auto c0 = FindInstruction(hlo_module.get(), "c.0");
  EXPECT_THAT(c0, op::Constant());
  auto c1 = FindInstruction(hlo_module.get(), "c.1");
  EXPECT_THAT(c1, op::Constant());
  auto c2 = FindInstruction(hlo_module.get(), "c.2");
  EXPECT_THAT(c2, op::Add());
  auto c3 = FindInstruction(hlo_module.get(), "c.3");
  EXPECT_THAT(c3, op::Constant());
  auto alpha = FindInstruction(hlo_module.get(), "alpha");
  EXPECT_THAT(alpha, op::Call());
  {
    // Unbounded backward slice from `c.2` stays inside the callee:
    // {c.0, c.1, c.2}.
    auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
      return true;
    };
    std::vector<const HloInstruction*> relevant_instructions({c2});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_result.NumSlicedInstructions(), 3);
    EXPECT_EQ(sliced_instructions.size(), 1);
    EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp))
;
    EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 3);
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c0));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c1));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
  }
  {
    // Unbounded backward slice from the call site `alpha` descends into the
    // callee and covers all of it.
    auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
      return true;
    };
    std::vector<const HloInstruction*> relevant_instructions({alpha});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions), hlo_selector,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_instructions.size(), 2);
    EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
    EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 5);
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c0));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c1));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c3));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
    EXPECT_TRUE(sliced_instructions.contains(entry_comp));
    EXPECT_EQ(sliced_instructions[entry_comp].size(), 1);
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 0);
  }
  {
    // Slice from `ret` cut at kAdd: traversal stops at `c.2`, which becomes
    // the frontier.
    auto add_slicer = [](const HloInstruction* hlo_inst) -> bool {
      return hlo_inst->opcode() != HloOpcode::kAdd;
    };
    std::vector<const HloInstruction*> relevant_instructions({ret});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions), add_slicer,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_result.NumSlicedInstructions(), 3);
    EXPECT_EQ(sliced_instructions.size(), 1);
    EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
    EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 3);
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c3));
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(c2));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
    auto frontier_instructions = sliced_result.frontier_instructions();
    EXPECT_TRUE(frontier_instructions.contains(calculate_alpha_comp));
    EXPECT_TRUE(frontier_instructions[calculate_alpha_comp].contains(c2));
  }
  {
    // Slice from `alpha` cut at kMultiply: the callee's root `ret` halts the
    // traversal and is reported as the frontier.
    auto mul_slicer = [](const HloInstruction* hlo_inst) -> bool {
      return hlo_inst->opcode() != HloOpcode::kMultiply;
    };
    std::vector<const HloInstruction*> relevant_instructions({alpha});
    auto sliced_result = SliceModule(
        hlo_module.get(), absl::MakeSpan(relevant_instructions), mul_slicer,
        false, false);
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_result.NumSlicedInstructions(), 2);
    EXPECT_EQ(sliced_instructions.size(), 2);
    EXPECT_TRUE(sliced_instructions.contains(calculate_alpha_comp));
    EXPECT_EQ(sliced_instructions[calculate_alpha_comp].size(), 1);
    EXPECT_TRUE(sliced_instructions[calculate_alpha_comp].contains(ret));
    EXPECT_TRUE(sliced_instructions.contains(entry_comp));
    EXPECT_EQ(sliced_instructions[entry_comp].size(), 1);
    EXPECT_TRUE(sliced_instructions[entry_comp].contains(alpha));
    EXPECT_EQ(sliced_result.NumFrontierInstructions(), 1);
    auto frontier_instructions = sliced_result.frontier_instructions();
    EXPECT_TRUE(frontier_instructions.contains(calculate_alpha_comp));
    EXPECT_TRUE(frontier_instructions[calculate_alpha_comp].contains(ret));
  }
}
// Forward slicing in nearest-common-ancestor (NCA) mode within a single
// computation: the slice grows forward from the seeds until their NCA is
// reached, which is reported as nearest_common_ancestor_root().
TEST_F(HloSlicerTest, ForwardSlicingNearestCommonAncestor) {
  const std::string& hlo_string = R"(
HloModule module
ENTRY computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
p.2 = f32[10] parameter(2)
mul.0 = f32[10] multiply(p.1, p.2)
sub.0 = f32[10] subtract(add.0, mul.0)
add.1 = f32[10] add(add.0, p.2)
ROOT add.2 = f32[10] add(sub.0, add.1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto p0 = FindInstruction(hlo_module.get(), "p.0");
  auto p2 = FindInstruction(hlo_module.get(), "p.2");
  auto mul0 = FindInstruction(hlo_module.get(), "mul.0");
  auto add0 = FindInstruction(hlo_module.get(), "add.0");
  auto sub0 = FindInstruction(hlo_module.get(), "sub.0");
  auto add1 = FindInstruction(hlo_module.get(), "add.1");
  const HloComputation* computation = hlo_module->entry_computation();
  {
    // A single seed is its own nearest common ancestor.
    std::vector<const HloInstruction*> relevant_instructions({p0});
    auto sliced_result =
        SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
                    nullptr,
                    false, true,
                    true);
    EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
    EXPECT_EQ(sliced_result.nearest_common_ancestor_root(), p0);
    EXPECT_EQ(sliced_result.NumSlicedInstructions(), 1);
  }
  {
    // p.0 and p.2 join at either sub.0 (via add.0/mul.0) or add.1; either is
    // an acceptable NCA, and add.0 must be on the sliced path.
    std::vector<const HloInstruction*> relevant_instructions({p0, p2});
    auto sliced_result =
        SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
                    nullptr,
                    false, true,
                    true);
    EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
    EXPECT_TRUE(sliced_result.nearest_common_ancestor_root() == sub0 ||
                sliced_result.nearest_common_ancestor_root() == add1);
    EXPECT_TRUE(sliced_result.sliced_instructions().contains(computation));
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_TRUE(sliced_instructions[computation].contains(add0));
  }
  {
    // p.0 and mul.0 join uniquely at sub.0; the slice is exactly
    // {p.0, add.0, mul.0, sub.0}.
    std::vector<const HloInstruction*> relevant_instructions({p0, mul0});
    auto sliced_result =
        SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
                    nullptr,
                    false,
                    true,
                    true);
    EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
    EXPECT_EQ(sliced_result.nearest_common_ancestor_root(), sub0);
    EXPECT_EQ(sliced_result.NumSlicedInstructions(), 4);
    EXPECT_TRUE(sliced_result.sliced_instructions().contains(computation));
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_TRUE(sliced_instructions[computation].contains(p0));
    EXPECT_TRUE(sliced_instructions[computation].contains(add0));
    EXPECT_TRUE(sliced_instructions[computation].contains(mul0));
    EXPECT_TRUE(sliced_instructions[computation].contains(sub0));
  }
}
// Forward slicing in NCA mode with seeds inside two different callee
// computations: the NCA is found in the entry computation (add.0), and the
// slice contains the paths through both callees plus both call sites.
TEST_F(HloSlicerTest, MultipleComputationForwardSlicingNearestCommonAncestor) {
  const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
ROOT ret.0 = f32[] multiply(c.0, c.1)
}
calculate_y {
c.2 = f32[] constant(2)
c.3 = f32[] constant(3)
ROOT ret.1 = f32[] add(c.2, c.3)
}
ENTRY axpy_computation {
alpha = f32[] call(), to_apply=calculate_alpha
y = f32[] call(), to_apply=calculate_y
add.0 = f32[] add(alpha, y)
p.0 = f32[] parameter(0)
ROOT add.1 = f32[] add(add.0, p.0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto c0 = FindInstruction(hlo_module.get(), "c.0");
  auto ret0 = FindInstruction(hlo_module.get(), "ret.0");
  auto c2 = FindInstruction(hlo_module.get(), "c.2");
  auto ret1 = FindInstruction(hlo_module.get(), "ret.1");
  auto alpha = FindInstruction(hlo_module.get(), "alpha");
  auto y = FindInstruction(hlo_module.get(), "y");
  auto add0 = FindInstruction(hlo_module.get(), "add.0");
  const HloComputation* computation = hlo_module->entry_computation();
  const HloComputation* calculate_alpha =
      FindComputation(hlo_module.get(), "calculate_alpha");
  const HloComputation* calculate_y =
      FindComputation(hlo_module.get(), "calculate_y");
  {
    // Seeds c.0 (in calculate_alpha) and c.2 (in calculate_y) meet at add.0
    // in the entry computation; all three computations appear in the slice.
    std::vector<const HloInstruction*> relevant_instructions({c0, c2});
    auto sliced_result =
        SliceModule(hlo_module.get(), absl::MakeSpan(relevant_instructions),
                    nullptr,
                    false,
                    true,
                    true);
    EXPECT_NE(sliced_result.nearest_common_ancestor_root(), nullptr);
    EXPECT_EQ(sliced_result.nearest_common_ancestor_root(), add0);
    EXPECT_EQ(sliced_result.sliced_instructions().size(), 3);
    EXPECT_TRUE(sliced_result.sliced_instructions().contains(computation));
    EXPECT_TRUE(sliced_result.sliced_instructions().contains(calculate_alpha));
    EXPECT_TRUE(sliced_result.sliced_instructions().contains(calculate_y));
    auto sliced_instructions = sliced_result.sliced_instructions();
    EXPECT_EQ(sliced_result.NumSlicedInstructions(), 7);
    EXPECT_TRUE(sliced_instructions[calculate_alpha].contains(c0));
    EXPECT_TRUE(sliced_instructions[calculate_alpha].contains(ret0));
    EXPECT_TRUE(sliced_instructions[calculate_y].contains(c2));
    EXPECT_TRUE(sliced_instructions[calculate_y].contains(ret1));
    EXPECT_TRUE(sliced_instructions[computation].contains(alpha));
    EXPECT_TRUE(sliced_instructions[computation].contains(y));
    EXPECT_TRUE(sliced_instructions[computation].contains(add0));
  }
}
// End-to-end slice-and-extract: slices the module under several
// SlicingConfiguration settings (kNca vs kRoot forward slicing, with and
// without backward slicing) and checks the extracted module's root,
// computation count, and which callee constants survive extraction.
TEST_F(HloSlicerTest, TestSliceModuleAndExtract) {
  const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.0 = f32[] constant(1)
c.1 = f32[] constant(2)
ROOT ret.0 = f32[] multiply(c.0, c.1)
}
calculate_y {
c.2 = f32[] constant(2)
c.3 = f32[] constant(3)
ROOT ret.1 = f32[] add(c.2, c.3)
}
ENTRY axpy_computation {
alpha = f32[] call(), to_apply=calculate_alpha
y = f32[] call(), to_apply=calculate_y
add.0 = f32[] add(alpha, y)
p.0 = f32[] parameter(0)
ROOT add.1 = f32[] add(add.0, p.0)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  auto alpha = FindInstruction(hlo_module.get(), "alpha");
  auto y = FindInstruction(hlo_module.get(), "y");
  auto add0 = FindInstruction(hlo_module.get(), "add.0");
  {
    // kNca forward slicing + backward slicing from {alpha, y}: the extracted
    // root is their NCA add.0 and both callees (all four constants) survive.
    std::vector<const HloInstruction*> relevant_instructions({alpha, y});
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kNca,
        true};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    CHECK_EQ(sliced_modules.size(), 1);
    auto sliced_module = std::move(sliced_modules[0]);
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
              "add.0");
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
              HloOpcode::kAdd);
    EXPECT_EQ(sliced_module->computation_count(), 3);
    HloInstruction* c0 = FindInstruction(sliced_module.get(), "c.0");
    EXPECT_NE(c0, nullptr);
    HloInstruction* c1 = FindInstruction(sliced_module.get(), "c.1");
    EXPECT_NE(c1, nullptr);
    HloInstruction* c2 = FindInstruction(sliced_module.get(), "c.2");
    EXPECT_NE(c2, nullptr);
    HloInstruction* c3 = FindInstruction(sliced_module.get(), "c.3");
    EXPECT_NE(c3, nullptr);
  }
  {
    // kRoot forward slicing + backward slicing from {alpha, y}: the extracted
    // root is the original module root add.1; both callees survive.
    std::vector<const HloInstruction*> relevant_instructions({alpha, y});
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kRoot,
        true};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    CHECK_EQ(sliced_modules.size(), 1);
    auto sliced_module = std::move(sliced_modules[0]);
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
              "add.1");
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
              HloOpcode::kAdd);
    EXPECT_EQ(sliced_module->computation_count(), 3);
    HloInstruction* c0 = FindInstruction(sliced_module.get(), "c.0");
    EXPECT_NE(c0, nullptr);
    HloInstruction* c1 = FindInstruction(sliced_module.get(), "c.1");
    EXPECT_NE(c1, nullptr);
    HloInstruction* c2 = FindInstruction(sliced_module.get(), "c.2");
    EXPECT_NE(c2, nullptr);
    HloInstruction* c3 = FindInstruction(sliced_module.get(), "c.3");
    EXPECT_NE(c3, nullptr);
  }
  {
    // Seeding only {y}: calculate_alpha (c.0/c.1) is dropped from the
    // extracted module while calculate_y (c.2/c.3) is kept.
    std::vector<const HloInstruction*> relevant_instructions({y});
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kRoot,
        true};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    CHECK_EQ(sliced_modules.size(), 1);
    auto sliced_module = std::move(sliced_modules[0]);
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
              "add.1");
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
              HloOpcode::kAdd);
    EXPECT_EQ(sliced_module->computation_count(), 2);
    HloInstruction* c0 = FindInstruction(sliced_module.get(), "c.0");
    EXPECT_EQ(c0, nullptr);
    HloInstruction* c1 = FindInstruction(sliced_module.get(), "c.1");
    EXPECT_EQ(c1, nullptr);
    HloInstruction* c2 = FindInstruction(sliced_module.get(), "c.2");
    EXPECT_NE(c2, nullptr);
    HloInstruction* c3 = FindInstruction(sliced_module.get(), "c.3");
    EXPECT_NE(c3, nullptr);
  }
  {
    // No backward slicing: only the forward path from add.0 to the root is
    // kept, so the extracted module has a single computation.
    std::vector<const HloInstruction*> relevant_instructions({add0});
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kRoot,
        false};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    CHECK_EQ(sliced_modules.size(), 1);
    auto sliced_module = std::move(sliced_modules[0]);
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->name(),
              "add.1");
    EXPECT_EQ(sliced_module->entry_computation()->root_instruction()->opcode(),
              HloOpcode::kAdd);
    EXPECT_EQ(sliced_module->computation_count(), 1);
  }
}
// With remove_sharding enabled, Sharding custom-calls must be stripped from
// the extracted module and their users rewired to the custom-call's operand.
TEST_F(HloSlicerTest, TestSliceModuleAndExtractRemoveSharding) {
  const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation {
%constant.39733 = bf16[] constant(111)
%broadcast.39734 = bf16[8,1,12288]{2,1,0} broadcast(bf16[] %constant.39733), dimensions={}
%multiply.39766 = bf16[8,1,12288]{2,1,0} multiply(bf16[8,1,12288]{2,1,0} %broadcast.39734, bf16[8,1,12288]{2,1,0} %broadcast.39734)
%custom-call.39767 = bf16[8,1,12288]{2,1,0} custom-call(bf16[8,1,12288]{2,1,0} %multiply.39766), custom_call_target="Sharding", sharding={replicated}
ROOT %add.39786 = bf16[8,1,12288]{2,1,0} add(bf16[8,1,12288]{2,1,0} %custom-call.39767, bf16[8,1,12288]{2,1,0} %custom-call.39767)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* multiply_39766 =
      FindInstruction(hlo_module.get(), "multiply.39766");
  {
    std::vector<const HloInstruction*> relevant_instructions({multiply_39766});
    // forward=kRoot, backward=false, remove_sharding=true.
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kRoot,
        false, true};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    EXPECT_EQ(sliced_modules.size(), 1);
    auto sliced_module = std::move(sliced_modules[0]);
    // No custom-call may remain in the extracted entry computation.
    for (HloInstruction* instruction :
         sliced_module->entry_computation()->instructions()) {
      EXPECT_NE(instruction->opcode(), HloOpcode::kCustomCall);
    }
    // The root's operands must now be the multiply that fed the removed
    // sharding custom-call.
    for (HloInstruction* instruction :
         sliced_module->entry_computation()->root_instruction()->operands()) {
      EXPECT_EQ(instruction->name(), "multiply.39766");
    }
  }
}
// With reduce_tuple_parameter enabled, unused leaves of tuple-shaped entry
// parameters are pruned: each original 2-element tuple parameter is reduced
// to the single element actually read by the slice.
TEST_F(HloSlicerTest, TestSliceModuleAndExtractReduceTupleParameter) {
  const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation (p.0: (s32[], s32[3]{0}), p.1: (s32[3]{0}, s32[])) -> s32[] {
p.0 = (s32[], s32[3]{0}) parameter(0)
gte.0 = s32[] get-tuple-element(p.0), index=0
p.1 = (s32[3]{0}, s32[]) parameter(1)
gte.1 = s32[] get-tuple-element(p.1), index=1
ROOT add.0 = s32[] add(gte.0, gte.1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* add_0 = FindInstruction(hlo_module.get(), "add.0");
  CHECK_NE(add_0, nullptr);
  {
    std::vector<const HloInstruction*> relevant_instructions({add_0});
    // forward=kRoot, backward=true, remove_sharding=false,
    // reduce_tuple_parameter=true.
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kRoot,
        true, false,
        true};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    EXPECT_EQ(sliced_modules.size(), 1);
    auto sliced_module = std::move(sliced_modules[0]);
    // Each tuple parameter keeps only the one element the slice reads.
    HloInstruction* p_0 = FindInstruction(sliced_module.get(), "p.0");
    EXPECT_NE(p_0, nullptr);
    EXPECT_EQ(p_0->shape().tuple_shapes_size(), 1);
    HloInstruction* p_1 = FindInstruction(sliced_module.get(), "p.1");
    EXPECT_NE(p_1, nullptr);
    EXPECT_EQ(p_1->shape().tuple_shapes_size(), 1);
  }
}
// With slicing_group = 1, each seed instruction is sliced and extracted into
// its own module, so two seeds yield two extracted modules. Each extracted
// module contains the backward cone of one get-tuple-element (the gte plus
// its parameter).
TEST_F(HloSlicerTest, TestSliceModuleAndExtractSlicingGroup) {
  const std::string& hlo_string = R"(
HloModule axpy_module
ENTRY axpy_computation (p.0: (s32[], s32[3]{0}), p.1: (s32[3]{0}, s32[])) -> s32[] {
p.0 = (s32[], s32[3]{0}) parameter(0)
gte.0 = s32[] get-tuple-element(p.0), index=0
p.1 = (s32[3]{0}, s32[]) parameter(1)
gte.1 = s32[] get-tuple-element(p.1), index=1
ROOT add.0 = s32[] add(gte.0, gte.1)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* gte_0 = FindInstruction(hlo_module.get(), "gte.0");
  CHECK_NE(gte_0, nullptr);
  HloInstruction* gte_1 = FindInstruction(hlo_module.get(), "gte.1");
  CHECK_NE(gte_1, nullptr);
  {
    std::vector<const HloInstruction*> relevant_instructions({gte_0, gte_1});
    // forward=kNca, backward=true, remove_sharding=false,
    // reduce_tuple_parameter=false, slicing_group=1 (one module per seed).
    SlicingConfiguration slicing_config = {
        SlicingConfiguration::ForwardSlicingConfig::kNca,
        true, false,
        false, 1};
    std::vector<std::unique_ptr<HloModule>> sliced_modules =
        SliceModuleAndExtract(hlo_module.get(),
                              absl::MakeSpan(relevant_instructions),
                              slicing_config);
    EXPECT_EQ(sliced_modules.size(), 2);
    // First group: backward cone of gte.0 -> {p.0, gte.0}.
    auto sliced_module_0 = std::move(sliced_modules[0]);
    EXPECT_EQ(sliced_module_0->entry_computation()->instruction_count(), 2);
    HloInstruction* p_0 = FindInstruction(sliced_module_0.get(), "p.0");
    EXPECT_NE(p_0, nullptr);
    // Second group: backward cone of gte.1 -> {p.1, gte.1}.
    auto sliced_module_1 = std::move(sliced_modules[1]);
    // Bug fix: this previously re-checked sliced_module_0, leaving
    // sliced_module_1's instruction count untested.
    EXPECT_EQ(sliced_module_1->entry_computation()->instruction_count(), 2);
    HloInstruction* p_1 = FindInstruction(sliced_module_1.get(), "p.1");
    EXPECT_NE(p_1, nullptr);
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_slicer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_slicer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7ab1b71f-221f-4d1d-98bf-b8020a98c711 | cpp | tensorflow/tensorflow | arithmetic | third_party/xla/xla/hlo/builder/lib/arithmetic.cc | tensorflow/lite/delegates/hexagon/builders/tests/arithmetic_test.cc | #include "xla/hlo/builder/lib/arithmetic.h"
#include <cstdint>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/builder/lib/constants.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Builds a two-parameter scalar sub-computation of the given primitive type.
// The body is produced by applying `generator` to the two scalar parameters
// "lhs" and "rhs"; the result is the computation's root. Non-PRED
// computations get the type name appended to `name` (e.g. "add_F32"); PRED
// computations keep the bare name.
XlaComputation CreateScalarComputation(const std::string& name,
                                       PrimitiveType type, XlaBuilder* builder,
                                       XlaOpGenerator generator) {
  std::unique_ptr<XlaBuilder> b;
  if (type == PRED) {
    // PRED computations are not suffixed with the type name.
    b = builder->CreateSubBuilder(name);
  } else {
    b = builder->CreateSubBuilder(
        absl::StrCat(name, "_", PrimitiveType_Name(type)));
  }
  const Shape scalar = ShapeUtil::MakeShape(type, {});
  auto lhs = Parameter(b.get(), 0, scalar, "lhs");
  auto rhs = Parameter(b.get(), 1, scalar, "rhs");
  generator(lhs, rhs);
  return b->BuildAndNoteError();
}
// Returns a scalar reducer computation that computes lhs + rhs.
XlaComputation CreateScalarAddComputation(PrimitiveType type,
                                          XlaBuilder* builder) {
  auto add_op = [](XlaOp lhs, XlaOp rhs) { return Add(lhs, rhs); };
  return CreateScalarComputation("add", type, builder, add_op);
}
// Returns a scalar reducer computation that computes lhs * rhs.
XlaComputation CreateScalarMultiplyComputation(PrimitiveType type,
                                               XlaBuilder* builder) {
  auto mul_op = [](XlaOp lhs, XlaOp rhs) { return Mul(lhs, rhs); };
  return CreateScalarComputation("mul", type, builder, mul_op);
}
// Returns a scalar computation that computes lhs >= rhs.
XlaComputation CreateScalarGeComputation(PrimitiveType type,
                                         XlaBuilder* builder) {
  auto ge_op = [](XlaOp lhs, XlaOp rhs) { return Ge(lhs, rhs); };
  return CreateScalarComputation("ge", type, builder, ge_op);
}
// Returns a scalar reducer computation that computes max(lhs, rhs).
XlaComputation CreateScalarMaxComputation(PrimitiveType type,
                                          XlaBuilder* builder) {
  auto max_op = [](XlaOp lhs, XlaOp rhs) { return Max(lhs, rhs); };
  return CreateScalarComputation("max", type, builder, max_op);
}
// Returns a scalar reducer computation that computes min(lhs, rhs).
XlaComputation CreateScalarMinComputation(PrimitiveType type,
                                          XlaBuilder* builder) {
  auto min_op = [](XlaOp lhs, XlaOp rhs) { return Min(lhs, rhs); };
  return CreateScalarComputation("min", type, builder, min_op);
}
// Returns a scalar reducer computation that computes lhs AND rhs.
XlaComputation CreateScalarAndComputation(PrimitiveType type,
                                          XlaBuilder* builder) {
  auto and_op = [](XlaOp lhs, XlaOp rhs) { return And(lhs, rhs); };
  return CreateScalarComputation("and", type, builder, and_op);
}
// Returns a scalar reducer computation that computes lhs OR rhs.
XlaComputation CreateScalarOrComputation(PrimitiveType type,
                                         XlaBuilder* builder) {
  auto or_op = [](XlaOp lhs, XlaOp rhs) { return Or(lhs, rhs); };
  return CreateScalarComputation("or", type, builder, or_op);
}
// Returns a scalar reducer whose identity element is zero: bitwise/logical OR
// for integral and PRED types, addition otherwise.
XlaComputation CreateScalarIdentityWithZeroComputation(PrimitiveType type,
                                                       XlaBuilder* builder) {
  if (primitive_util::IsIntegralType(type) || type == PRED) {
    return CreateScalarOrComputation(type, builder);
  }
  return CreateScalarAddComputation(type, builder);
}
// Returns a scalar PRED that is true iff any element of `predicates` is
// true, implemented as a logical-or reduction over all dimensions.
XlaOp Any(XlaOp predicates) {
  XlaBuilder* builder = predicates.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    // `false` is the identity element of logical-or.
    auto f = ConstantR0<bool>(builder, false);
    XlaComputation logical_or = CreateScalarOrComputation(PRED, builder);
    TF_ASSIGN_OR_RETURN(const Shape& predicates_shape,
                        builder->GetShape(predicates));
    // Reduce over every dimension of the input: {0, 1, ..., rank-1}.
    std::vector<int64_t> all_dimensions(predicates_shape.rank());
    std::iota(all_dimensions.begin(), all_dimensions.end(), 0);
    return Reduce(predicates, f, logical_or, all_dimensions);
  });
}
// Builds the variadic reducer used by ArgMinMax: it takes (value, index)
// pairs and keeps the extremal value together with its index, breaking ties
// in favor of the smaller index.
static XlaComputation CreateMinMaxComputation(XlaBuilder* outer_builder,
                                              PrimitiveType value_type,
                                              PrimitiveType index_type,
                                              bool is_min) {
  auto sub_builder = outer_builder->CreateSubBuilder("minmax_func");
  XlaBuilder* b = sub_builder.get();
  XlaOp lhs_value =
      Parameter(b, 0, ShapeUtil::MakeShape(value_type, {}), "lhs_value");
  XlaOp lhs_index =
      Parameter(b, 1, ShapeUtil::MakeShape(index_type, {}), "lhs_index");
  XlaOp rhs_value =
      Parameter(b, 2, ShapeUtil::MakeShape(value_type, {}), "rhs_value");
  XlaOp rhs_index =
      Parameter(b, 3, ShapeUtil::MakeShape(index_type, {}), "rhs_index");
  // Pick the winning (value, index) pair according to the comparison.
  XlaOp cmp = is_min ? Le(lhs_value, rhs_value) : Ge(lhs_value, rhs_value);
  XlaOp max = Select(cmp, lhs_value, rhs_value);
  XlaOp arg_max = Select(cmp, lhs_index, rhs_index);
  // On exact ties, prefer the smaller index so the result is deterministic.
  XlaOp eq = Eq(lhs_value, rhs_value);
  XlaOp tie_id = Min(lhs_index, rhs_index);
  arg_max = Select(eq, tie_id, arg_max);
  Tuple(b, {max, arg_max});
  return b->BuildAndNoteError();
}
// Returns the index of the minimum (is_min=true) or maximum (is_min=false)
// element of `input` along `axis`, as `output_type`. Implemented as a
// variadic reduce over (value, iota-index) pairs with a tie-break toward the
// smallest index.
XlaOp ArgMinMax(XlaOp input, PrimitiveType output_type, int axis, bool is_min) {
  XlaBuilder* builder = input.builder();
  return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<XlaOp> {
    TF_ASSIGN_OR_RETURN(Shape input_shape, builder->GetShape(input));
    // The value accumulator starts at the neutral extreme so any real
    // element wins the first comparison.
    XlaOp value_init_value;
    if (is_min) {
      value_init_value = MaxValue(builder, input_shape.element_type());
    } else {
      value_init_value = MinValue(builder, input_shape.element_type());
    }
    // Use S32 indices when the reduced dimension fits, falling back to the
    // requested output type for larger dimensions.
    int64_t dimension_size = input_shape.dimensions(axis);
    auto index_type = dimension_size <= INT32_MAX ? S32 : output_type;
    XlaOp index_init_value = Zero(builder, index_type);
    auto iota_shape =
        ShapeUtil::MakeShape(index_type, input_shape.dimensions());
    XlaOp iota = Iota(builder, iota_shape, axis);
    XlaComputation reducer = CreateMinMaxComputation(
        builder, input_shape.element_type(), index_type, is_min);
    // Variadic reduce over (value, index) pairs along `axis`; element 1 of
    // the result tuple is the winning index.
    XlaOp max_argmax = Reduce(builder, {input, iota},
                              {value_init_value, index_init_value}, reducer,
                              {axis});
    XlaOp argmax = GetTupleElement(max_argmax, 1);
    if (index_type != output_type) {
      argmax = ConvertElementType(argmax, output_type);
    }
    return argmax;
  });
}
// Returns the index of the maximum element of `input` along `axis`.
XlaOp ArgMax(XlaOp input, PrimitiveType output_type, int axis) {
  constexpr bool kIsMin = false;
  return ArgMinMax(input, output_type, axis, kIsMin);
}
} | #include <initializer_list>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/hexagon/builders/tests/hexagon_delegate_op_model.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
using testing::ElementsAreArray;
// Base test model for binary arithmetic ops run through the Hexagon delegate:
// wires up two inputs and one output, with optional constant input data.
class ArithmeticOpBaseModel : public SingleOpModelWithHexagon {
 public:
  // Two runtime inputs.
  ArithmeticOpBaseModel(const TensorData& input1, const TensorData& input2,
                        const TensorData& output)
      : SingleOpModelWithHexagon() {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
  }
  // Either input may be made constant by passing non-empty quantized data;
  // an empty initializer list keeps that input as a runtime input.
  ArithmeticOpBaseModel(const TensorData& input1, const TensorData& input2,
                        const TensorData& output,
                        const std::initializer_list<uint8_t>& input1_data,
                        const std::initializer_list<uint8_t>& input2_data) {
    if (input1_data.size() > 0)
      input1_ = AddConstInput(input1, input1_data);
    else
      input1_ = AddInput(input1);
    if (input2_data.size() > 0)
      input2_ = AddConstInput(input2, input2_data);
    else
      input2_ = AddInput(input2);
    output_ = AddOutput(output);
  }
  // Builds the interpreter with the shapes of both inputs.
  void InitInterpreter() {
    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
  }
  // Quantizes `data` and writes it into the first input tensor.
  template <typename T>
  void SetInput1(const std::vector<float>& data) {
    QuantizeAndPopulate<T>(input1_, data);
  }
  // Quantizes `data` and writes it into the second input tensor.
  template <typename T>
  void SetInput2(const std::vector<float>& data) {
    QuantizeAndPopulate<T>(input2_, data);
  }
  // Reads the output tensor back as dequantized floats.
  template <typename T>
  std::vector<float> GetDequantizedOutput() {
    return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
                         GetZeroPoint(output_));
  }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
 protected:
  int input1_;   // tensor index of the first input
  int input2_;   // tensor index of the second input
  int output_;   // tensor index of the output
};
// Test model for the ADD builtin op, parameterized by fused activation.
class AddOpModel : public ArithmeticOpBaseModel {
 public:
  AddOpModel(const TensorData& input1, const TensorData& input2,
             const TensorData& output, ActivationFunctionType activation_func)
      : ArithmeticOpBaseModel(input1, input2, output),
        activation_func_(activation_func) {}
  // Variant allowing either input to be a constant tensor (see base class).
  AddOpModel(const TensorData& input1, const TensorData& input2,
             const TensorData& output,
             const std::initializer_list<uint8_t>& input1_data,
             const std::initializer_list<uint8_t>& input2_data,
             ActivationFunctionType activation_func)
      : ArithmeticOpBaseModel(input1, input2, output, input1_data, input2_data),
        activation_func_(activation_func) {}
  // Registers the ADD op with its fused activation, then builds the
  // interpreter via the base class.
  void InitInterpreter() {
    SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
                 CreateAddOptions(builder_, activation_func_).Union());
    ArithmeticOpBaseModel::InitInterpreter();
  }
 private:
  ActivationFunctionType activation_func_;  // fused activation for ADD
};
// Runs quantized ADD for each pair of test vectors, comparing the Hexagon
// delegate output against the reference TFLite output within quantization
// tolerance.
template <TensorType tensor_type, typename integer_dtype>
void QuantizedTestsNoActivation(ActivationFunctionType activation_func) {
  // Two quantization steps of slack for a [-1, 1] uint8 range.
  const float kQuantizedTolerance = 2.0 / 255.0;
  std::vector<std::vector<float>> inputs1 = {
      {0.1, 0.2, 0.3, 0.4}, {-0.8, 0.2, 0.4, 0.7}, {-0.8, 0.2, 0.7, 0.3}};
  std::vector<std::vector<float>> inputs2 = {
      {0.6, 0.4, 0.3, 0.1}, {0.6, 0.4, 0.5, -0.8}, {0.6, 0.4, -0.8, 0.5}};
  // Iterate over every declared test vector. (The bound was previously
  // hard-coded to 1, which left two of the three input pairs untested.)
  for (size_t i = 0; i < inputs1.size(); ++i) {
    AddOpModel m({tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                 {tensor_type, {1, 2, 2, 1}, -1.0, 1.0},
                 {tensor_type, {1, 2, 2, 1}, -1.0, 1.0}, activation_func);
    m.InitInterpreter();
    m.SetInput1<integer_dtype>(inputs1[i]);
    m.SetInput2<integer_dtype>(inputs2[i]);
    // First invoke on CPU to get the reference result, then re-run with the
    // delegate applied and compare.
    ASSERT_EQ(m.Invoke(), kTfLiteOk);
    auto reference_output = m.GetDequantizedOutput<integer_dtype>();
    m.ApplyDelegateAndInvoke();
    EXPECT_THAT(
        m.GetDequantizedOutput<integer_dtype>(),
        ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)))
        << "With test number " << i;
  }
}
// Fixture value-parameterized over the fused activation function.
class QuantizedAddOpModel
    : public testing::TestWithParam<ActivationFunctionType> {};

TEST_P(QuantizedAddOpModel, QuantizedTestsNoActivationUInt8) {
  QuantizedTestsNoActivation<TensorType_UINT8, uint8_t>(GetParam());
}

TEST_P(QuantizedAddOpModel, QuantizedTestsNoActivationInt8) {
  QuantizedTestsNoActivation<TensorType_INT8, int8_t>(GetParam());
}

// ADD with the first operand baked in as a constant uint8 tensor.
TEST(QuantizedAddOpModelNoActivation, TestUInt8_ConstInput_1) {
  const float kQuantizedTolerance = 2.0 / 255.0;
  AddOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
               {TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
               {TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
               {110, 142, 156, 171}, {}, ActivationFunctionType_NONE);
  m.InitInterpreter();
  m.SetInput1<uint8_t>({0.1, 0.2, 0.3, 0.4});
  // CPU reference first, then delegate.
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  auto reference_output = m.GetDequantizedOutput<uint8_t>();
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(
      m.GetDequantizedOutput<uint8_t>(),
      ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}

// Same as above, with the second operand constant.
TEST(QuantizedAddOpModelNoActivation, TestUInt8_ConstInput_2) {
  const float kQuantizedTolerance = 2.0 / 255.0;
  AddOpModel m({TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
               {TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0},
               {TensorType_UINT8, {1, 2, 2, 1}, -1.0, 1.0}, {},
               {110, 142, 156, 171}, ActivationFunctionType_NONE);
  m.InitInterpreter();
  m.SetInput2<uint8_t>({0.1, 0.2, 0.3, 0.4});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  auto reference_output = m.GetDequantizedOutput<uint8_t>();
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(
      m.GetDequantizedOutput<uint8_t>(),
      ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}

// int8 variant with a constant second operand.
TEST(QuantizedAddOpModelNoActivation, TestInt8_ConstInput) {
  const float kQuantizedTolerance = 2.0 / 255.0;
  AddOpModel m({TensorType_INT8, {1, 2, 2, 1}, -1.0, 1.0},
               {TensorType_INT8, {1, 2, 2, 1}, -1.0, 1.0},
               {TensorType_INT8, {1, 2, 2, 1}, -1.0, 1.0}, {},
               {110, 101, 105, 120}, ActivationFunctionType_NONE);
  m.InitInterpreter();
  m.SetInput2<int8_t>({0.1, 0.2, 0.3, 0.4});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  auto reference_output = m.GetDequantizedOutput<int8_t>();
  m.ApplyDelegateAndInvoke();
  EXPECT_THAT(
      m.GetDequantizedOutput<int8_t>(),
      ElementsAreArray(ArrayFloatNear(reference_output, kQuantizedTolerance)));
}

// Run the parameterized tests for every supported fused activation.
INSTANTIATE_TEST_SUITE_P(QuantizedAddOpModel, QuantizedAddOpModel,
                         testing::Values(ActivationFunctionType_NONE,
                                         ActivationFunctionType_RELU,
                                         ActivationFunctionType_RELU_N1_TO_1,
                                         ActivationFunctionType_RELU6));
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/builder/lib/arithmetic.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/tests/arithmetic_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee9ec021-ba6f-4611-b011-93d469b89593 | cpp | tensorflow/tensorflow | tokenize | tensorflow/lite/testing/tokenize.cc | tensorflow/lite/testing/tokenize_test.cc | #include "tensorflow/lite/testing/tokenize.h"
#include <istream>
#include <string>
#include "tensorflow/lite/string_type.h"
namespace tflite {
namespace testing {
// Splits `input` into tokens and feeds each one to `processor`.
//
// Tokenization rules (state machine over the raw character stream):
//   * '{', '}' and ':' are single-character tokens; they also terminate any
//     unquoted token currently being built.
//   * Double quotes delimit a token verbatim — delimiters and whitespace
//     inside quotes lose their special meaning, and the quotes themselves
//     are not part of the token. An empty quoted token ("") is still issued.
//   * Unquoted tokens are separated by whitespace.
// A trailing token (including an unterminated quoted one) is flushed at EOF.
void Tokenize(std::istream* input, TokenProcessor* processor) {
  enum State { kBuildQuotedToken, kBuildToken, kIdle };
  std::string current_token;
  State state = kIdle;
  // Begin an unquoted token with its first character.
  auto start_token = [&](char c) {
    state = kBuildToken;
    current_token.clear();
    current_token = c;
  };
  // Emit the accumulated token (quoted or unquoted) and return to idle.
  // The original had two byte-identical lambdas for the quoted and unquoted
  // cases; they are merged here.
  auto issue_token = [&]() {
    state = kIdle;
    processor->ConsumeToken(&current_token);
    current_token.clear();
  };
  // Begin a quoted token; contents start empty.
  auto start_quoted_token = [&]() {
    state = kBuildQuotedToken;
    current_token.clear();
  };
  // Emit a single-character delimiter as its own token.
  auto issue_delim = [&](char d) {
    current_token = string(1, d);
    processor->ConsumeToken(&current_token);
    current_token.clear();
  };
  auto is_delim = [](char c) { return c == '{' || c == '}' || c == ':'; };
  auto is_quote = [](char c) { return c == '"'; };
  for (auto it = std::istreambuf_iterator<char>(*input);
       it != std::istreambuf_iterator<char>(); ++it) {
    switch (state) {
      case kIdle:
        if (is_delim(*it)) {
          issue_delim(*it);
        } else if (is_quote(*it)) {
          start_quoted_token();
        } else if (!isspace(*it)) {
          start_token(*it);
        }
        break;
      case kBuildToken:
        if (is_delim(*it)) {
          issue_token();
          issue_delim(*it);
        } else if (is_quote(*it)) {
          issue_token();
          start_quoted_token();
        } else if (isspace(*it)) {
          issue_token();
        } else {
          current_token += *it;
        }
        break;
      case kBuildQuotedToken:
        if (is_quote(*it)) {
          issue_token();
        } else {
          current_token += *it;
        }
        break;
    }
  }
  // Flush any token still being built when the stream ends.
  if (state != kIdle) {
    issue_token();
  }
}
}
} | #include "tensorflow/lite/testing/tokenize.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite {
namespace testing {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
// TokenProcessor that simply records every token it receives.
class TokenCollector : public TokenProcessor {
 public:
  void ConsumeToken(std::string* token) override { tokens_.push_back(*token); }
  const std::vector<std::string>& Tokens() { return tokens_; }

 private:
  std::vector<std::string> tokens_;
};

// Convenience wrapper: tokenizes `s` and returns the collected tokens.
std::vector<std::string> TokenizeString(const std::string& s) {
  std::stringstream ss(s);
  TokenCollector collector;
  Tokenize(&ss, &collector);
  return collector.Tokens();
}

// Delimiters split tokens with or without surrounding whitespace; quotes
// start a new token.
TEST(TokenizeTest, TokenDetection) {
  EXPECT_THAT(TokenizeString("x :1"), ElementsAre("x", ":", "1"));
  EXPECT_THAT(TokenizeString("x:1"), ElementsAre("x", ":", "1"));
  EXPECT_THAT(TokenizeString("x {1"), ElementsAre("x", "{", "1"));
  EXPECT_THAT(TokenizeString("x{1"), ElementsAre("x", "{", "1"));
  EXPECT_THAT(TokenizeString("x }1"), ElementsAre("x", "}", "1"));
  EXPECT_THAT(TokenizeString("x}1"), ElementsAre("x", "}", "1"));
  EXPECT_THAT(TokenizeString("x \"1"), ElementsAre("x", "1"));
  EXPECT_THAT(TokenizeString("x\"1"), ElementsAre("x", "1"));
}

// Delimiters lose their meaning inside quotes.
TEST(TokenizeTest, QuotedTokenDetection) {
  EXPECT_THAT(TokenizeString("\"w:x{y}z\"1"), ElementsAre("w:x{y}z", "1"));
  EXPECT_THAT(TokenizeString("\"w:x{y}z\"\"1\""), ElementsAre("w:x{y}z", "1"));
}

// Each delimiter character is its own token.
TEST(TokenizeTest, Delimiters) {
  EXPECT_THAT(TokenizeString("}"), ElementsAre("}"));
  EXPECT_THAT(TokenizeString("}}"), ElementsAre("}", "}"));
  EXPECT_THAT(TokenizeString("{"), ElementsAre("{"));
  EXPECT_THAT(TokenizeString("{{"), ElementsAre("{", "{"));
  EXPECT_THAT(TokenizeString(":"), ElementsAre(":"));
  EXPECT_THAT(TokenizeString("::"), ElementsAre(":", ":"));
}

// Leading/trailing whitespace, empty quoted tokens, mixed forms.
TEST(TokenizeTest, CornerCases) {
  EXPECT_THAT(TokenizeString(" i { b:a } "),
              ElementsAre("i", "{", "b", ":", "a", "}"));
  EXPECT_THAT(TokenizeString(" }"), ElementsAre("}"));
  EXPECT_THAT(TokenizeString(" } "), ElementsAre("}"));
  EXPECT_THAT(TokenizeString(" {} "), ElementsAre("{", "}"));
  EXPECT_THAT(TokenizeString(" x{} y{} "),
              ElementsAre("x", "{", "}", "y", "{", "}"));
  EXPECT_THAT(TokenizeString("x:1 y:2 "),
              ElementsAre("x", ":", "1", "y", ":", "2"));
  EXPECT_THAT(TokenizeString("x:\"1\" y:2 "),
              ElementsAre("x", ":", "1", "y", ":", "2"));
  EXPECT_THAT(TokenizeString("x:\"1, 2\" y:\"\" "),
              ElementsAre("x", ":", "1, 2", "y", ":", ""));
}

// Newlines behave as ordinary whitespace separators.
TEST(TokenizeTest, NewLines) {
  EXPECT_THAT(TokenizeString("x:\n1,\n 2 \n y :\n3 \n"),
              ElementsAre("x", ":", "1,", "2", "y", ":", "3"));
}

// A longer nested proto-text-like input.
TEST(TokenizeTest, LongString) {
  EXPECT_THAT(
      TokenizeString("  i { b:a } input {"
                     "a: \"1e-1, 2,3\" b:\"1,2,3\"\n c{ "
                     "id:1 x{d{a:"
                     "1}}} f:2 "
                     "\n}\n t:1"),
      ElementsAreArray({"i", "{", "b", ":", "a", "}", "input", "{",
                        "a", ":", "1e-1, 2,3", "b", ":", "1,2,3", "c", "{",
                        "id", ":", "1", "x", "{", "d", "{", "a",
                        ":", "1", "}", "}", "}", "f", ":", "2",
                        "}", "t", ":", "1"}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tokenize.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/testing/tokenize_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1fbbc63d-427e-4986-9d90-d77ea560e790 | cpp | tensorflow/tensorflow | delegate_data | tensorflow/lite/delegates/flex/delegate_data.cc | tensorflow/lite/delegates/flex/delegate_data_test.cc | #include "tensorflow/lite/delegates/flex/delegate_data.h"
#include <functional>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace flex {
namespace {
// Builds into `fdef` a FunctionDef named `function_name` whose signature
// mirrors `subgraph`'s inputs ("args_i") and outputs ("res_i"), and whose
// body is a Const node holding the subgraph resource key followed by a
// TfLiteSubgraphExecute node that runs the subgraph.
void BuildFunctionDefProto(const std::string& function_name,
                           const Subgraph& subgraph,
                           tensorflow::FunctionDef& fdef) {
  // Map inputs/outputs to the function input/output arguments, using
  // the TF type names derived from the TFLite tensor types.
  std::vector<std::string> inputs, outputs;
  inputs.reserve(subgraph.inputs().size());
  outputs.reserve(subgraph.outputs().size());
  for (int i = 0; i < subgraph.inputs().size(); ++i) {
    inputs.push_back(absl::StrCat(
        "args_", i, ": ",
        TfLiteTypeToTfTypeName(subgraph.tensor(subgraph.inputs()[i])->type)));
  }
  for (int i = 0; i < subgraph.outputs().size(); ++i) {
    outputs.push_back(absl::StrCat(
        "res_", i, ": ",
        TfLiteTypeToTfTypeName(subgraph.tensor(subgraph.outputs()[i])->type)));
  }
  std::vector<tensorflow::FunctionDefHelper::Node> nodes;
  // Const node carrying the resource-manager key under which the subgraph is
  // registered; fed as the first input of the execute node below.
  nodes.push_back(tensorflow::FunctionDefHelper::Const<tensorflow::tstring>(
      "SubgraphResourceKey", function_name));
  // The node that actually invokes the TFLite subgraph.
  tensorflow::FunctionDefHelper::Node execute_node;
  execute_node.ret.push_back("InvokeTfLite");
  execute_node.op = "TfLiteSubgraphExecute";
  execute_node.arg.push_back("SubgraphResourceKey:output:0");
  for (int i = 0; i < subgraph.inputs().size(); ++i) {
    execute_node.arg.push_back(absl::StrCat("args_", i));
  }
  nodes.push_back(execute_node);
  // Route each function result to the corresponding execute-node output.
  std::vector<std::pair<std::string, std::string>> ret_def;
  ret_def.reserve(subgraph.outputs().size());
  for (int i = 0; i < subgraph.outputs().size(); ++i) {
    ret_def.emplace_back(absl::StrCat("res_", i),
                         absl::StrCat("InvokeTfLite:output:", i));
  }
  fdef = tensorflow::FunctionDefHelper::Create(function_name, inputs, outputs,
                                               {}, nodes, ret_def);
  // Attach the input/output dtype lists (Tin/Tout) to the execute node,
  // which is node_def(1) (node_def(0) is the Const key node).
  tensorflow::AttrValue tin_attrs, tout_attrs;
  for (int i = 0; i < subgraph.inputs().size(); ++i) {
    TF_DataType dtype = tflite::flex::GetTensorFlowDataType(
        subgraph.tensor(subgraph.inputs()[i])->type);
    tin_attrs.mutable_list()->add_type(tensorflow::DataType(dtype));
  }
  for (int i = 0; i < subgraph.outputs().size(); ++i) {
    TF_DataType dtype = tflite::flex::GetTensorFlowDataType(
        subgraph.tensor(subgraph.outputs()[i])->type);
    tout_attrs.mutable_list()->add_type(tensorflow::DataType(dtype));
  }
  fdef.mutable_node_def(1)->mutable_attr()->insert({"Tin", tin_attrs});
  fdef.mutable_node_def(1)->mutable_attr()->insert({"Tout", tout_attrs});
}
// Scans every node of every subgraph for flex custom ops, parses the NodeDef
// embedded in each op's flexbuffer custom data, and collects into `result`
// the names of all functions referenced by function-typed attributes.
// Returns an internal error if an embedded NodeDef fails to parse.
tensorflow::Status GetSubgraphNamesForFunctionExecution(
    const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
    std::set<std::string>* result) {
  tensorflow::NodeDef node_def;
  for (const auto& subgraph : subgraphs) {
    for (const auto& node_and_reg : subgraph->nodes_and_registration()) {
      // Only custom ops can be flex ops.
      if (node_and_reg.second.builtin_code != tflite::BuiltinOperator_CUSTOM) {
        continue;
      }
      // Flex ops are custom ops whose name carries the flex prefix.
      const std::string custom_name = node_and_reg.second.custom_name;
      if (custom_name.substr(0, strlen(tflite::kFlexCustomCodePrefix)) !=
          tflite::kFlexCustomCodePrefix) {
        continue;
      }
      // The op's custom data is a flexbuffer vector; element 1 is the
      // serialized NodeDef.
      const flexbuffers::Vector& v =
          flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(
                                   node_and_reg.first.custom_initial_data),
                               node_and_reg.first.custom_initial_data_size)
              .AsVector();
      if (!node_def.ParseFromString(v[1].AsString().str())) {
        return tensorflow::Status(absl::StatusCode::kInternal,
                                  "could not parse NodeDef");
      }
      // Record every function referenced by this node's attributes.
      for (const auto& attr : node_def.attr()) {
        if (attr.second.has_func()) {
          result->insert(attr.second.func().name());
        }
      }
    }
  }
  return absl::OkStatus();
}
}
// For each non-"main" subgraph selected by `select_subgraphs_to_register`,
// registers the subgraph as a TFLiteSubgraphResource in `resource_mgr`
// (container "flex", keyed by subgraph name) and adds a matching FunctionDef
// to `eager_context` so TF function calls can invoke the subgraph.
tensorflow::Status RegisterFunctionDefForSubgraphs(
    Subgraph& main_subgraph,
    const std::function<tensorflow::Status(
        const std::vector<std::unique_ptr<Subgraph>>&, std::set<std::string>*)>&
        select_subgraphs_to_register,
    tensorflow::ResourceMgr* resource_mgr,
    tensorflow::EagerContext* eager_context, TfLiteDelegate* flex_delegate) {
  std::vector<std::unique_ptr<Subgraph>>* subgraphs =
      main_subgraph.GetSubgraphs();
  // Nothing to do for a model with no secondary subgraphs.
  if (!subgraphs) {
    return absl::OkStatus();
  }
  std::set<std::string> function_subgraphs;
  TF_RETURN_IF_ERROR(
      select_subgraphs_to_register(*subgraphs, &function_subgraphs));
  for (int i = 0; i < subgraphs->size(); ++i) {
    if (subgraphs->at(i)->GetName() == "main") {
      continue;
    }
    const std::string subgraph_name = subgraphs->at(i)->GetName();
    // Skip subgraphs that were not selected for function execution.
    if (!function_subgraphs.count(subgraph_name)) {
      continue;
    }
    // Ownership of the resource is transferred to the resource manager.
    auto* subgraph_resource =
        new TFLiteSubgraphResource(*(subgraphs->at(i)), flex_delegate);
    TF_RETURN_IF_ERROR(resource_mgr->Create<TFLiteSubgraphResource>(
        "flex", subgraph_name, subgraph_resource));
    tensorflow::FunctionDef fdef;
    BuildFunctionDefProto(subgraph_name, *(subgraphs->at(i)), fdef);
    TF_RETURN_IF_ERROR(eager_context->AddFunctionDef(fdef));
  }
  return absl::OkStatus();
}
DelegateData::DelegateData() {}

// Releases the eager context (if Prepare() ever created one), clearing the
// host CPU's resource manager first so registered subgraph resources are
// destroyed before the context itself.
DelegateData::~DelegateData() {
  if (eager_context_) {
    eager_context_->HostCPU()->ClearResourceMgr();
    eager_context_->Unref();
  }
}
// Lazily creates the TF eager context used to execute flex ops, and (when a
// main subgraph is provided) registers FunctionDefs for its sub-subgraphs.
// Idempotent: returns OK immediately if a context already exists.
tensorflow::Status DelegateData::Prepare(
    const tensorflow::SessionOptions& session_options, Subgraph* main_subgraph,
    TfLiteDelegate* flex_delegate) {
  if (eager_context_) {
    return tensorflow::Status();
  }
  // Subgraph registration wraps the delegate, so it is mandatory here.
  if (flex_delegate == nullptr && main_subgraph != nullptr) {
    return tensorflow::Status(
        absl::StatusCode::kFailedPrecondition,
        "flex_delegate must be non-null when main_subgraph is provided.");
  }
  std::vector<std::unique_ptr<tensorflow::Device>> devices;
  TF_RETURN_IF_ERROR(tensorflow::DeviceFactory::AddDevices(
      session_options, "/job:localhost/replica:0/task:0", &devices));
  auto device_mgr =
      std::make_unique<tensorflow::StaticDeviceMgr>(std::move(devices));
  auto rendezvous = tsl::core::RefCountPtr<tensorflow::IntraProcessRendezvous>(
      new tensorflow::IntraProcessRendezvous(device_mgr.get()));
  // The eager context takes ownership of the released device manager.
  // NOTE(review): the bool/nullptr arguments map to EagerContext constructor
  // flags (async, ownership, distributed manager, ...) — confirm against the
  // EagerContext header when changing them.
  eager_context_ = new tensorflow::EagerContext(
      session_options,
      tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      false, device_mgr.release(), true,
      std::move(rendezvous), nullptr);
  if (main_subgraph) {
    TF_RETURN_IF_ERROR(RegisterFunctionDefForSubgraphs(
        *main_subgraph, GetSubgraphNamesForFunctionExecution,
        eager_context_->HostCPU()->resource_manager(), eager_context_,
        flex_delegate));
  }
  return tensorflow::Status();
}
}
} | #include "tensorflow/lite/delegates/flex/delegate_data.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/common_runtime/eager/context.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace flex {
namespace {
using ::tensorflow::protobuf::TextFormat;
using ::tensorflow::protobuf::util::MessageDifferencer;
// Prepare() creates an eager context, and buffer maps are per-TfLiteContext.
TEST(DelegateDataTest, Basic) {
  DelegateData data;
  tensorflow::SessionOptions session_options;
  session_options.config.set_intra_op_parallelism_threads(2);
  EXPECT_TRUE(data.Prepare(session_options).ok());
  TfLiteContext dummy_context1 = {};
  TfLiteContext dummy_context2 = {};
  ASSERT_NE(data.GetEagerContext(), nullptr);
  EXPECT_NE(data.GetBufferMap(&dummy_context1), nullptr);
  // Distinct contexts get distinct buffer maps.
  EXPECT_NE(data.GetBufferMap(&dummy_context1),
            data.GetBufferMap(&dummy_context2));
}

// RegisterFunctionDefForSubgraphs produces the expected FunctionDefs for the
// selected "add_subgraph" and "mul_subgraph" (and none for "main").
TEST(DelegateDataTest, CheckFunctionDef) {
  tensorflow::StaticDeviceMgr device_mgr(tensorflow::DeviceFactory::NewDevice(
      "CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
  // NOTE(review): positional bool/nullptr args follow the EagerContext
  // constructor — confirm against its header before reordering.
  tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
      tensorflow::SessionOptions(),
      tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      false, &device_mgr, false, nullptr,
      nullptr);
  // Select both secondary subgraphs for registration.
  auto select_subgraphs_to_register =
      [](const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
         std::set<std::string>* result) {
        result->insert("add_subgraph");
        result->insert("mul_subgraph");
        return absl::OkStatus();
      };
  subgraph_test_util::SubgraphBuilder builder;
  std::unique_ptr<ErrorReporter> error_reporter =
      std::make_unique<TestErrorReporter>();
  auto add_subgraph = std::make_unique<Subgraph>(
      error_reporter.get(), nullptr,
      nullptr, nullptr, nullptr,
      nullptr);
  add_subgraph->SetName("add_subgraph");
  auto mul_subgraph = std::make_unique<Subgraph>(
      error_reporter.get(), nullptr,
      nullptr, nullptr, nullptr,
      nullptr);
  mul_subgraph->SetName("mul_subgraph");
  builder.BuildAddSubgraph(add_subgraph.get());
  builder.BuildMulSubgraph(mul_subgraph.get());
  std::vector<std::unique_ptr<Subgraph>> subgraphs;
  subgraphs.push_back(std::move(add_subgraph));
  subgraphs.push_back(std::move(mul_subgraph));
  Subgraph main_subgraph(error_reporter.get(), nullptr, &subgraphs,
                         nullptr, nullptr,
                         nullptr);
  main_subgraph.SetName("main");
  TF_ASSERT_OK(RegisterFunctionDefForSubgraphs(
      main_subgraph, select_subgraphs_to_register,
      eager_context->HostCPU()->resource_manager(), eager_context,
      nullptr));
  // Expected FunctionDef for the add subgraph, in text proto form.
  const string add_fdef_txt = R"pb(
    signature {
      name: "add_subgraph"
      input_arg { name: "args_0" type: DT_INT32 }
      input_arg { name: "args_1" type: DT_INT32 }
      output_arg { name: "res_0" type: DT_INT32 }
      is_stateful: true
    }
    node_def {
      name: "SubgraphResourceKey"
      op: "Const"
      attr {
        key: "dtype"
        value { type: DT_STRING }
      }
      attr {
        key: "value"
        value {
          tensor {
            dtype: DT_STRING
            tensor_shape {}
            string_val: "add_subgraph"
          }
        }
      }
    }
    node_def {
      name: "InvokeTfLite"
      op: "TfLiteSubgraphExecute"
      input: "SubgraphResourceKey:output:0"
      input: "args_0"
      input: "args_1"
      attr {
        key: "Tin"
        value { list { type: DT_INT32 type: DT_INT32 } }
      }
      attr {
        key: "Tout"
        value { list { type: DT_INT32 } }
      }
    }
    ret { key: "res_0" value: "InvokeTfLite:output:0" })pb";
  // Expected FunctionDef for the mul subgraph.
  const string mul_fdef_txt = R"pb(
    signature {
      name: "mul_subgraph"
      input_arg { name: "args_0" type: DT_INT32 }
      input_arg { name: "args_1" type: DT_INT32 }
      output_arg { name: "res_0" type: DT_INT32 }
      is_stateful: true
    }
    node_def {
      name: "SubgraphResourceKey"
      op: "Const"
      attr {
        key: "dtype"
        value { type: DT_STRING }
      }
      attr {
        key: "value"
        value {
          tensor {
            dtype: DT_STRING
            tensor_shape {}
            string_val: "mul_subgraph"
          }
        }
      }
    }
    node_def {
      name: "InvokeTfLite"
      op: "TfLiteSubgraphExecute"
      input: "SubgraphResourceKey:output:0"
      input: "args_0"
      input: "args_1"
      attr {
        key: "Tin"
        value { list { type: DT_INT32 type: DT_INT32 } }
      }
      attr {
        key: "Tout"
        value { list { type: DT_INT32 } }
      }
    }
    ret { key: "res_0" value: "InvokeTfLite:output:0" })pb";
  tensorflow::FunctionDef add_fdef, mul_fdef;
  ASSERT_TRUE(TextFormat::ParseFromString(add_fdef_txt, &add_fdef));
  ASSERT_TRUE(TextFormat::ParseFromString(mul_fdef_txt, &mul_fdef));
  // "main" must not be registered; the two selected subgraphs must match the
  // expected protos exactly.
  EXPECT_EQ(eager_context->GetFunctionDef("main"), nullptr);
  ASSERT_NE(eager_context->GetFunctionDef("add_subgraph"), nullptr);
  ASSERT_NE(eager_context->GetFunctionDef("mul_subgraph"), nullptr);
  EXPECT_TRUE(MessageDifferencer::Equals(
      *(eager_context->GetFunctionDef("add_subgraph")), add_fdef));
  EXPECT_TRUE(MessageDifferencer::Equals(
      *(eager_context->GetFunctionDef("mul_subgraph")), mul_fdef));
  eager_context->Unref();
}

// With only a main subgraph (no secondary subgraphs), nothing is registered.
TEST(DelegateDataTest, CheckFunctionDefWithOnlyMainGraph) {
  tensorflow::StaticDeviceMgr device_mgr(tensorflow::DeviceFactory::NewDevice(
      "CPU", {}, "/job:localhost/replica:0/task:0/device:CPU:0"));
  tensorflow::EagerContext* eager_context = new tensorflow::EagerContext(
      tensorflow::SessionOptions(),
      tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
      false, &device_mgr, false, nullptr,
      nullptr);
  auto select_subgraphs_to_register =
      [](const std::vector<std::unique_ptr<Subgraph>>& subgraphs,
         std::set<std::string>* result) {
        result->insert("add_subgraph");
        result->insert("mul_subgraph");
        return absl::OkStatus();
      };
  subgraph_test_util::SubgraphBuilder builder;
  std::unique_ptr<ErrorReporter> error_reporter =
      std::make_unique<TestErrorReporter>();
  Subgraph main_subgraph(error_reporter.get(), nullptr,
                         nullptr, nullptr,
                         nullptr,
                         nullptr);
  main_subgraph.SetName("main");
  TF_ASSERT_OK(RegisterFunctionDefForSubgraphs(
      main_subgraph, select_subgraphs_to_register,
      eager_context->HostCPU()->resource_manager(), eager_context,
      nullptr));
  EXPECT_EQ(eager_context->GetFunctionDef("main"), nullptr);
  eager_context->Unref();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate_data.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/delegate_data_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee8e14f6-ef93-4bb8-9efa-87a35039a7f1 | cpp | tensorflow/tensorflow | denormal | third_party/xla/third_party/tsl/tsl/platform/denormal.cc | third_party/xla/third_party/tsl/tsl/platform/denormal_test.cc | #include "tsl/platform/denormal.h"
#include <cstdint>
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/platform.h"
#if !defined(__SSE3__) && !defined(__clang__) && \
(defined(__GNUC__) && (__GNUC__ < 4) || \
((__GNUC__ == 4) && (__GNUC_MINOR__ < 9)))
#define GCC_WITHOUT_INTRINSICS
#endif
#if defined(PLATFORM_IS_X86) && !defined(IS_MOBILE_PLATFORM) && \
!defined(GCC_WITHOUT_INTRINSICS)
#define X86_DENORM_USE_INTRINSICS
#endif
#ifdef X86_DENORM_USE_INTRINSICS
#include <pmmintrin.h>
#endif
#if defined(PLATFORM_IS_ARM) && defined(__ARM_FP) && (__ARM_FP > 0)
#define ARM_DENORM_AVAILABLE
#define ARM_FPCR_FZ (1 << 24)
#endif
namespace tsl {
namespace port {
// Two states compare equal iff both the flush-to-zero and the
// denormals-are-zero flags agree.
bool DenormalState::operator==(const DenormalState& other) const {
  const bool ftz_matches = (flush_to_zero() == other.flush_to_zero());
  const bool daz_matches = (denormals_are_zero() == other.denormals_are_zero());
  return ftz_matches && daz_matches;
}

// Defined as the negation of operator==.
bool DenormalState::operator!=(const DenormalState& other) const {
  return !(*this == other);
}
#ifdef ARM_DENORM_AVAILABLE
// Writes `fpcr` into the ARM floating-point control register (FPCR on
// AArch64, FPSCR on 32-bit ARM).
static inline void ArmSetFloatingPointControlRegister(uint32_t fpcr) {
#ifdef PLATFORM_IS_ARM64
  __asm__ __volatile__("msr fpcr, %[fpcr]"
                       :
                       : [fpcr] "r"(static_cast<uint64_t>(fpcr)));
#else
  __asm__ __volatile__("vmsr fpscr, %[fpcr]" : : [fpcr] "r"(fpcr));
#endif
}

// Reads the ARM floating-point control register (FPCR / FPSCR).
static inline uint32_t ArmGetFloatingPointControlRegister() {
  uint32_t fpcr;
#ifdef PLATFORM_IS_ARM64
  // The AArch64 register is 64-bit; only the low 32 bits are used here.
  uint64_t fpcr64;
  __asm__ __volatile__("mrs %[fpcr], fpcr" : [fpcr] "=r"(fpcr64));
  fpcr = static_cast<uint32_t>(fpcr64);
#else
  __asm__ __volatile__("vmrs %[fpcr], fpscr" : [fpcr] "=r"(fpcr));
#endif
  return fpcr;
}
#endif
// Applies `state` to the FPU. Returns true if the hardware supports the
// requested combination, false otherwise (state is then unchanged).
bool SetDenormalState(const DenormalState& state) {
#ifdef X86_DENORM_USE_INTRINSICS
  // x86 with SSE3 exposes independent flush-to-zero (FTZ) and
  // denormals-are-zero (DAZ) control bits.
  if (TestCPUFeature(SSE3)) {
    _MM_SET_FLUSH_ZERO_MODE(state.flush_to_zero() ? _MM_FLUSH_ZERO_ON
                                                  : _MM_FLUSH_ZERO_OFF);
    _MM_SET_DENORMALS_ZERO_MODE(state.denormals_are_zero()
                                    ? _MM_DENORMALS_ZERO_ON
                                    : _MM_DENORMALS_ZERO_OFF);
    return true;
  }
#endif
#ifdef ARM_DENORM_AVAILABLE
  // ARM has a single flush-to-zero bit (FZ) that controls both behaviors, so
  // only matching flag combinations can be honored.
  if (state.flush_to_zero() == state.denormals_are_zero()) {
    uint32_t fpcr = ArmGetFloatingPointControlRegister();
    if (state.flush_to_zero()) {
      fpcr |= ARM_FPCR_FZ;
    } else {
      fpcr &= ~ARM_FPCR_FZ;
    }
    ArmSetFloatingPointControlRegister(fpcr);
    return true;
  }
#endif
  // Setting denormal handling is not supported on this platform.
  return false;
}
// Reads the current FPU denormal-handling state. On unsupported platforms
// this reports (false, false), i.e. denormals fully honored.
DenormalState GetDenormalState() {
#ifdef X86_DENORM_USE_INTRINSICS
  if (TestCPUFeature(SSE3)) {
    bool flush_zero_mode = _MM_GET_FLUSH_ZERO_MODE() == _MM_FLUSH_ZERO_ON;
    bool denormals_zero_mode =
        _MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON;
    return DenormalState(flush_zero_mode, denormals_zero_mode);
  }
#endif
#ifdef ARM_DENORM_AVAILABLE
  // A set FZ bit implies both flush-to-zero and denormals-are-zero on ARM.
  uint32_t fpcr = ArmGetFloatingPointControlRegister();
  if ((fpcr & ARM_FPCR_FZ) != 0) {
    return DenormalState(true, true);
  }
#endif
  return DenormalState(false, false);
}
// RAII: captures the current denormal state and restores it on destruction.
ScopedRestoreFlushDenormalState::ScopedRestoreFlushDenormalState()
    : denormal_state_(GetDenormalState()) {}
ScopedRestoreFlushDenormalState::~ScopedRestoreFlushDenormalState() {
  SetDenormalState(denormal_state_);
}

// RAII: enables flush-to-zero / denormals-are-zero for the scope (the base
// class restores the previous state on destruction).
ScopedFlushDenormal::ScopedFlushDenormal() {
  SetDenormalState(
      DenormalState(true, true));
}

// RAII: disables flushing for the scope.
ScopedDontFlushDenormal::ScopedDontFlushDenormal() {
  SetDenormalState(
      DenormalState(false, false));
}
}
} | #include "tsl/platform/denormal.h"
#include <cstring>
#include <limits>
#include "tsl/platform/test.h"
namespace tsl {
namespace port {
// The DenormalState constructor stores both flags and the accessors return
// them unchanged, for all four combinations.
TEST(DenormalStateTest, ConstructorAndAccessorsWork) {
  const bool flush_to_zero[] = {true, true, false, false};
  const bool denormals_are_zero[] = {true, false, true, false};
  for (int i = 0; i < 4; ++i) {
    const DenormalState state =
        DenormalState(flush_to_zero[i], denormals_are_zero[i]);
    EXPECT_EQ(state.flush_to_zero(), flush_to_zero[i]);
    EXPECT_EQ(state.denormals_are_zero(), denormals_are_zero[i]);
  }
}
// Returns the raw IEEE-754 bit pattern of `x` as a uint32. Uses memcpy so
// the type pun is well-defined (no strict-aliasing violation).
uint32_t bits(float x) {
  uint32_t pattern = 0;
  memcpy(&pattern, &x, sizeof(float));
  return pattern;
}
// Verifies that arithmetic actually behaves according to `state`:
// a denormal *result* is zeroed iff flush_to_zero, and a denormal *input*
// is treated as zero iff denormals_are_zero. `volatile` keeps the compiler
// from constant-folding the operations.
void CheckDenormalHandling(const DenormalState& state) {
  // Multiplying the smallest normal float by 0.25 produces a denormal result.
  volatile float denormal_output = std::numeric_limits<float>::min();
  denormal_output *= 0.25f;
  if (state.flush_to_zero()) {
    EXPECT_EQ(bits(denormal_output), 0x0);
  } else {
    EXPECT_NE(bits(denormal_output), 0x0);
  }
  // A denormal input times max() is normal — unless the input was zeroed.
  volatile float normal_output = std::numeric_limits<float>::denorm_min();
  normal_output *= std::numeric_limits<float>::max();
  if (state.denormals_are_zero()) {
    EXPECT_EQ(bits(normal_output), 0x0);
  } else {
    EXPECT_NE(bits(normal_output), 0x0);
  }
}
// For every state the platform can set, Get must read it back and arithmetic
// must behave accordingly. (Unsupported combinations are silently skipped.)
TEST(DenormalTest, GetAndSetStateWorkWithCorrectFlushing) {
  const DenormalState states[] = {
      DenormalState(true, true),
      DenormalState(true, false),
      DenormalState(false, true),
      DenormalState(false, false)};
  for (const DenormalState& state : states) {
    if (SetDenormalState(state)) {
      EXPECT_EQ(GetDenormalState(), state);
      CheckDenormalHandling(state);
    }
  }
}

// ScopedRestoreFlushDenormalState restores whatever state was active when it
// was constructed, in both directions.
TEST(ScopedRestoreFlushDenormalStateTest, RestoresState) {
  const DenormalState flush_denormals(true,
                                      true);
  const DenormalState dont_flush_denormals(false,
                                           false);
  // Skip everything if the platform cannot toggle denormal handling.
  const bool can_set_denormal_state = SetDenormalState(flush_denormals) &&
                                      SetDenormalState(dont_flush_denormals);
  if (can_set_denormal_state) {
    SetDenormalState(flush_denormals);
    {
      ScopedRestoreFlushDenormalState restore_state;
      SetDenormalState(dont_flush_denormals);
      EXPECT_EQ(GetDenormalState(), dont_flush_denormals);
    }
    EXPECT_EQ(GetDenormalState(), flush_denormals);
    SetDenormalState(dont_flush_denormals);
    {
      ScopedRestoreFlushDenormalState restore_state;
      SetDenormalState(flush_denormals);
      EXPECT_EQ(GetDenormalState(), flush_denormals);
    }
    EXPECT_EQ(GetDenormalState(), dont_flush_denormals);
  }
}

// ScopedFlushDenormal enables flushing for its scope and restores on exit.
TEST(ScopedFlushDenormalTest, SetsFlushingAndRestoresState) {
  const DenormalState flush_denormals(true,
                                      true);
  const DenormalState dont_flush_denormals(false,
                                           false);
  const bool can_set_denormal_state = SetDenormalState(flush_denormals) &&
                                      SetDenormalState(dont_flush_denormals);
  if (can_set_denormal_state) {
    SetDenormalState(dont_flush_denormals);
    {
      ScopedFlushDenormal scoped_flush_denormal;
      EXPECT_EQ(GetDenormalState(), flush_denormals);
    }
    EXPECT_EQ(GetDenormalState(), dont_flush_denormals);
  }
}

// ScopedDontFlushDenormal disables flushing for its scope and restores.
TEST(ScopedDontFlushDenormalTest, SetsNoFlushingAndRestoresState) {
  const DenormalState flush_denormals(true,
                                      true);
  const DenormalState dont_flush_denormals(false,
                                           false);
  const bool can_set_denormal_state = SetDenormalState(flush_denormals) &&
                                      SetDenormalState(dont_flush_denormals);
  if (can_set_denormal_state) {
    SetDenormalState(flush_denormals);
    {
      ScopedDontFlushDenormal scoped_dont_flush_denormal;
      EXPECT_EQ(GetDenormalState(), dont_flush_denormals);
    }
    EXPECT_EQ(GetDenormalState(), flush_denormals);
  }
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/denormal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/denormal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2990a7a8-f107-4a38-9baa-a95ddb80a44f | cpp | tensorflow/tensorflow | uniform_dequantize_op | tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op.cc | tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/uniform_quant_ops/math_utils.h"
#include "tensorflow/core/kernels/uniform_quant_ops/tensor_utils.h"
namespace tensorflow {
namespace {
using tensorflow::errors::InvalidArgument;
// Dequantizes `input` into `output` using a single (scale, zero_point) pair
// applied to every element.
template <typename Tin, typename Tout>
void EvalPerTensorDequantize(const Tensor& input, float scale,
                             int32_t zero_point, Tensor& output) {
  DCHECK(input.IsSameSize(output));
  AffineDequantize(input.flat<Tin>(), scale, zero_point, output.flat<Tout>());
}
// Dequantizes `input` into `output` with one (scale, zero_point) pair per
// slice along `quantization_axis`.
template <typename Tin, typename Tout>
void EvalPerChannelDequantize(const Tensor& input, const Tensor& scales,
                              const Tensor& zero_points, int quantization_axis,
                              Tensor& output) {
  DCHECK(input.IsSameSize(output));
  const float* scales_data = scales.flat<float>().data();
  const int32_t* zero_points_data = zero_points.flat<int32_t>().data();
  // Collapse to rank 3 (outer, channel, inner) so each channel is a chip
  // along dimension 1.
  auto input_tensor =
      input.template flat_inner_outer_dims<Tin, 3>(quantization_axis - 1);
  auto output_tensor =
      output.template flat_inner_outer_dims<Tout, 3>(quantization_axis - 1);
  for (int i = 0; i < output.dim_size(quantization_axis); ++i) {
    AffineDequantize(input_tensor.template chip<1>(i), scales_data[i],
                     zero_points_data[i], output_tensor.template chip<1>(i));
  }
}
// Dispatches to per-channel dequantize when `quantization_axis` >= 0,
// otherwise per-tensor (scalar scale/zero_point).
template <typename Tin, typename Tout>
void EvalDequantize(const Tensor& input, const Tensor& scales,
                    const Tensor& zero_points, int quantization_axis,
                    Tensor& output) {
  if (quantization_axis >= 0) {
    EvalPerChannelDequantize<Tin, Tout>(input, scales, zero_points,
                                        quantization_axis, output);
  } else {
    EvalPerTensorDequantize<Tin, Tout>(input, scales.scalar<float>()(),
                                       zero_points.scalar<int32>()(), output);
  }
}
}
// Kernel for the UniformDequantize op: converts a quantized tensor (qint8 or
// qint32) to float using per-tensor or per-channel scales and zero points.
// Inputs: 0 = quantized tensor, 1 = scales, 2 = zero points.
template <typename Tin, typename Tout>
class UniformDequantizeOp : public OpKernel {
 public:
  explicit UniformDequantizeOp(OpKernelConstruction* context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context,
                   context->GetAttr("quantization_axis", &quantization_axis_));
    // Only the (qint8|qint32) -> float combinations are registered/supported.
    OP_REQUIRES(context,
                (std::is_same<Tin, qint8>() || std::is_same<Tin, qint32>()),
                InvalidArgument("Unsupported input type."));
    OP_REQUIRES(context, (std::is_same<Tout, float>()),
                InvalidArgument("Unsupported output type."));
  }

  void Compute(OpKernelContext* context) override {
    const Tensor& input = context->input(0);
    const Tensor& scales = context->input(1);
    const Tensor& zero_points = context->input(2);
    // Validate scale/zero-point shapes against the input and the axis.
    OP_REQUIRES_OK(context, QuantizationAxisAndShapeValid(
                                input.shape(), scales.shape(),
                                zero_points.shape(), quantization_axis_));
    OP_REQUIRES(context, AllElementsPositive<float>(scales),
                InvalidArgument("rhs scales elements must be all positive."));
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, input.shape(), &output));
    EvalDequantize<Tin, Tout>(input, scales, zero_points, quantization_axis_,
                              *output);
  }

 private:
  // -1 means per-tensor quantization; >= 0 selects the per-channel axis.
  int quantization_axis_;
};
// Kernel registrations: UniformDequantize is implemented on CPU for the two
// supported quantized input types (qint8, qint32), both producing float.
REGISTER_KERNEL_BUILDER(Name("UniformDequantize")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<qint8>("Tin")
                            .TypeConstraint<float>("Tout"),
                        UniformDequantizeOp<qint8, float>);
REGISTER_KERNEL_BUILDER(Name("UniformDequantize")
                            .Device(DEVICE_CPU)
                            .TypeConstraint<qint32>("Tin")
                            .TypeConstraint<float>("Tout"),
                        UniformDequantizeOp<qint32, float>);
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
class UniformDequantizeOpTest : public OpsTestBase {
protected:
};
TEST_F(UniformDequantizeOpTest, PerTensorDequantize) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformDequantize")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("quantization_axis", -1)
.Attr("quantization_min_val", -128)
.Attr("quantization_max_val", 127)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 3}), {-128, -100, -20, -16, 0, 20});
AddInputFromArray<float>(TensorShape({}), {0.25});
AddInputFromArray<int32>(TensorShape({}), {-20});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 3}));
test::FillValues<float>(&expected, {-27.0, -20.0, 0.0, 1.0, 5.0, 10.0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(UniformDequantizeOpTest, PerChannelDequantize) {
TF_ASSERT_OK(NodeDefBuilder("test", "UniformDequantize")
.Input(FakeInput(DT_QINT8))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("Tin", DT_QINT8)
.Attr("Tout", DT_FLOAT)
.Attr("quantization_axis", 1)
.Attr("quantization_min_val", -128)
.Attr("quantization_max_val", 127)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
AddInputFromArray<qint8>(TensorShape({2, 2, 3}),
{-128, -100, -20, -8, 0, 5, 10, 15, 20, 40, 50, 55});
AddInputFromArray<float>(TensorShape({2}), {0.25, 0.5});
AddInputFromArray<int32>(TensorShape({2}), {-20, -10});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({2, 2, 3}));
test::FillValues<float>(&expected, {-27.0, -20.0, 0.0, 1.0, 5.0, 7.5, 7.5,
8.75, 10.0, 25.0, 30.0, 32.5});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/uniform_quant_ops/uniform_dequantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ea181341-9294-41a6-ba91-a770ca43600d | cpp | tensorflow/tensorflow | compute_engine_zone_provider | third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider_test.cc | #include "tsl/platform/cloud/compute_engine_zone_provider.h"
#include <utility>
#include "tsl/platform/str_util.h"
namespace tsl {
namespace {
constexpr char kGceMetadataZonePath[] = "instance/zone";
}
ComputeEngineZoneProvider::ComputeEngineZoneProvider(
std::shared_ptr<ComputeEngineMetadataClient> google_metadata_client)
: google_metadata_client_(std::move(google_metadata_client)) {}
absl::Status ComputeEngineZoneProvider::GetZone(string* zone) {
if (!cached_zone.empty()) {
*zone = cached_zone;
return absl::OkStatus();
}
std::vector<char> response_buffer;
TF_RETURN_IF_ERROR(google_metadata_client_->GetMetadata(kGceMetadataZonePath,
&response_buffer));
absl::string_view location(&response_buffer[0], response_buffer.size());
std::vector<string> elems = str_util::Split(location, "/");
if (elems.size() == 4) {
cached_zone = elems.back();
*zone = cached_zone;
} else {
LOG(ERROR) << "Failed to parse the zone name from location: "
<< string(location);
}
return absl::OkStatus();
}
ComputeEngineZoneProvider::~ComputeEngineZoneProvider() {}
} | #include "tsl/platform/cloud/compute_engine_zone_provider.h"
#include "tsl/platform/cloud/http_request_fake.h"
#include "tsl/platform/test.h"
namespace tsl {
class ComputeEngineZoneProviderTest : public ::testing::Test {
protected:
void SetUp() override {}
void TearDown() override {}
};
TEST_F(ComputeEngineZoneProviderTest, GetZone) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"Header Metadata-Flavor: Google\n",
"projects/123456789/zones/us-west1-b")});
auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
httpRequestFactory, RetryConfig(0 ));
ComputeEngineZoneProvider provider(metadata_client);
string zone;
TF_EXPECT_OK(provider.GetZone(&zone));
EXPECT_EQ("us-west1-b", zone);
TF_EXPECT_OK(provider.GetZone(&zone));
}
TEST_F(ComputeEngineZoneProviderTest, InvalidZoneString) {
std::vector<HttpRequest*> requests({new FakeHttpRequest(
"Uri: http:
"Header Metadata-Flavor: Google\n",
"invalidresponse")});
auto httpRequestFactory = std::make_shared<FakeHttpRequestFactory>(&requests);
auto metadata_client = std::make_shared<ComputeEngineMetadataClient>(
httpRequestFactory, RetryConfig(0 ));
ComputeEngineZoneProvider provider(metadata_client);
string zone;
TF_EXPECT_OK(provider.GetZone(&zone));
EXPECT_EQ("", zone);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/compute_engine_zone_provider_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6bc26f15-8535-45cf-abc2-4451d8d04c30 | cpp | tensorflow/tensorflow | slice_util | tensorflow/dtensor/cc/slice_util.cc | tensorflow/dtensor/tests/slice_util_test.cc | #include "tensorflow/dtensor/cc/slice_util.h"
#include <optional>
#include <string>
#include <vector>
#include "mlir/IR/BuiltinTypes.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
namespace tensorflow {
namespace dtensor {
namespace slice_util {
namespace {
StatusOr<int64_t> GetEllipsisSize(int64_t input_rank,
const std::vector<Token>& tokens,
int64_t* output_rank) {
bool found = false;
int64_t regular_axis = 0;
int64_t new_axis = 0;
int64_t shrink_axis = 0;
for (const auto& token : tokens) {
switch (token.token_type) {
case Token::ELLIPSIS:
if (found) {
return absl::InvalidArgumentError(
"More than one ellipsis was found.");
}
found = true;
break;
case Token::NEW_AXIS:
++new_axis;
break;
case Token::SHRINK_AXIS:
++shrink_axis;
break;
case Token::REGULAR:
++regular_axis;
break;
}
}
int64_t ellipsis_size = input_rank - (regular_axis + shrink_axis);
if (found && ellipsis_size < 0) {
return absl::InvalidArgumentError(absl::StrCat(
"Ellipsis was found, but there is no remaining axis for it.",
" input_rank=", input_rank, " regular_axis=", regular_axis,
" shrink_axis=", shrink_axis));
}
*output_rank = regular_axis + ellipsis_size + new_axis;
return ellipsis_size;
}
}
Token Token::normalize(int64_t dim_size) const {
if (dynamic_mask) {
return *this;
}
int64_t new_begin = begin;
int dir = (stride > 0) ? 1 : -1;
if (begin_mask) {
if (dir > 0) {
new_begin = 0;
} else {
new_begin = dim_size - 1;
}
}
int64_t new_end = end;
if (end_mask) {
if (dir > 0) {
new_end = dim_size;
} else {
new_end = -1;
}
}
int64_t shift = (new_begin - new_begin % dim_size);
new_begin -= shift;
new_end -= shift;
int64_t n = dir * (new_end - new_begin + stride - dir) / (dir * stride);
if (n < 0) {
new_end = new_end + dir * dim_size;
}
n = dir * (new_end - new_begin + stride - dir) / (dir * stride);
new_end = new_begin + n * stride;
Token r = *this;
r.begin = new_begin;
r.end = new_end;
return r;
}
std::optional<Token> Token::GetLocalToken(int64_t dim_size,
int64_t num_shards) const {
Token token = normalize(dim_size);
VLOG(5) << "Compute: "
<< "dim_size=" << dim_size << " num_shards=" << num_shards
<< " token.begin=" << token.begin << " token.end=" << token.end
<< " token.stride=" << token.stride;
if (token.begin_mask && token.end_mask) return token;
if (token.dynamic_mask) return std::nullopt;
if (token.stride < 0) return std::nullopt;
int64_t shard_dim_size = dim_size / num_shards;
if (shard_dim_size % token.stride == 0) {
if (token.begin >= 0 && token.begin < token.stride &&
token.end >= dim_size && token.end < dim_size + token.stride) {
token.end = shard_dim_size + (token.end - dim_size);
return token;
}
}
return std::nullopt;
}
Status TokenProcessor::Run(const std::vector<Token>& tokens) {
int64_t input_rank = input_rank_;
int64_t output_rank;
TF_ASSIGN_OR_RETURN(int64_t ellipsis_size,
GetEllipsisSize(input_rank, tokens, &output_rank));
PrepareResults(tokens.size(), input_rank, output_rank);
bool out_of_bound = false;
int64_t input_index = 0;
int64_t output_index = 0;
for (const auto& token : tokens) {
switch (token.token_type) {
case Token::ELLIPSIS:
VisitEllipsisAxis(token);
out_of_bound = VisitLoop(input_rank, output_rank, ellipsis_size,
&input_index, &output_index);
ellipsis_size = 0;
break;
case Token::SHRINK_AXIS:
VisitShrinkAxis(token, input_index, output_index);
++input_index;
break;
case Token::NEW_AXIS:
VisitNewAxis(token, input_index, output_index);
++output_index;
break;
case Token::REGULAR:
if (input_index >= input_rank) {
out_of_bound = true;
break;
}
VisitRegularAxis(token, input_index, output_index);
++input_index;
++output_index;
break;
}
if (out_of_bound) {
break;
}
}
if (ellipsis_size > 0) {
out_of_bound = VisitLoop(input_rank, output_rank, ellipsis_size,
&input_index, &output_index);
}
if (out_of_bound) {
return absl::InvalidArgumentError(
"Reading axis beyond the input tensor's rank. "
"The slicing token is incorrect.");
}
return FinalizeResults(input_rank, output_rank);
}
bool TokenProcessor::VisitLoop(int64_t input_rank, int64_t output_rank,
int64_t ellipsis_size, int64_t* input_index,
int64_t* output_index) {
for (int64_t k = 0; k < ellipsis_size; ++k) {
if (*input_index >= input_rank) {
return true;
}
VisitImplicitAxis(*input_index, *output_index);
++*input_index;
++*output_index;
}
return false;
}
}
}
} | #include "tensorflow/dtensor/cc/slice_util.h"
#include <map>
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/platform/test.h"
#include "tensorflow/dtensor/cc/tensor_layout.h"
#include "tensorflow/dtensor/proto/layout.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace dtensor {
namespace slice_util {
namespace {
using ::testing::SizeIs;
using ::tsl::testing::IsOk;
TEST(TokenTest, NormalizeDynamic) {
auto spec = Token(Token::REGULAR, 0, 0, 1,
true,
true, true);
EXPECT_EQ(spec.normalize(4).begin, 0);
EXPECT_EQ(spec.normalize(4).end, 0);
EXPECT_EQ(spec.normalize(4).dynamic_mask, true);
EXPECT_EQ(spec.normalize(4).begin_mask, true);
EXPECT_EQ(spec.normalize(4).end_mask, true);
}
TEST(TokenTest, NormalizeFullPositiveStride) {
auto spec = Token(Token::REGULAR, 0, 4, 1);
EXPECT_EQ(spec.normalize(4).begin, 0);
EXPECT_EQ(spec.normalize(4).end, 4);
spec = Token(Token::REGULAR, 0, 4, 2);
EXPECT_EQ(spec.normalize(4).begin, 0);
EXPECT_EQ(spec.normalize(4).end, 4);
spec = Token(Token::REGULAR, 0, 4, 3);
EXPECT_EQ(spec.normalize(4).begin, 0);
EXPECT_EQ(spec.normalize(4).end, 6);
spec = Token(Token::REGULAR, 0, 4, 5);
EXPECT_EQ(spec.normalize(4).begin, 0);
EXPECT_EQ(spec.normalize(4).end, 5);
}
TEST(TokenTest, NormalizeFullNegativeStride) {
auto spec = Token(Token::REGULAR, 3, -1, -1);
EXPECT_EQ(spec.normalize(4).begin, 3);
EXPECT_EQ(spec.normalize(4).end, -1);
spec = Token(Token::REGULAR, 3, -1, -2);
EXPECT_EQ(spec.normalize(4).begin, 3);
EXPECT_EQ(spec.normalize(4).end, -1);
spec = Token(Token::REGULAR, 3, -1, -3);
EXPECT_EQ(spec.normalize(4).begin, 3);
EXPECT_EQ(spec.normalize(4).end, -3);
spec = Token(Token::REGULAR, 3, -1, -5);
EXPECT_EQ(spec.normalize(4).begin, 3);
EXPECT_EQ(spec.normalize(4).end, -2);
}
TEST(TokenTest, NormalizeZeroPositiveStride) {
auto spec = Token(Token::REGULAR, 3, 3, 1);
EXPECT_EQ(spec.normalize(7).begin, 3);
EXPECT_EQ(spec.normalize(7).end, 3);
spec = Token(Token::REGULAR, 0, 0, 1);
EXPECT_EQ(spec.normalize(7).begin, 0);
EXPECT_EQ(spec.normalize(7).end, 0);
}
TEST(TokenTest, NormalizeZeroNegativeStride) {
auto spec = Token(Token::REGULAR, 3, 3, -1);
EXPECT_EQ(spec.normalize(7).begin, 3);
EXPECT_EQ(spec.normalize(7).end, 3);
spec = Token(Token::REGULAR, 0, 0, -1);
EXPECT_EQ(spec.normalize(7).begin, 0);
EXPECT_EQ(spec.normalize(7).end, 0);
}
TEST(TokenTest, NormalizePartialPositiveStride) {
auto spec = Token(Token::REGULAR, 1, 5, 1);
EXPECT_EQ(spec.normalize(7).begin, 1);
EXPECT_EQ(spec.normalize(7).end, 5);
spec = Token(Token::REGULAR, 1, 5, 2);
EXPECT_EQ(spec.normalize(7).begin, 1);
EXPECT_EQ(spec.normalize(7).end, 5);
spec = Token(Token::REGULAR, 1, 5, 3);
EXPECT_EQ(spec.normalize(7).begin, 1);
EXPECT_EQ(spec.normalize(7).end, 7);
spec = Token(Token::REGULAR, 1, 5, 5);
EXPECT_EQ(spec.normalize(7).begin, 1);
EXPECT_EQ(spec.normalize(7).end, 6);
spec = Token(Token::REGULAR, 1, -1, 1);
EXPECT_EQ(spec.normalize(7).begin, 1);
EXPECT_EQ(spec.normalize(7).end, 6);
spec = Token(Token::REGULAR, 0, -1, 1);
EXPECT_EQ(spec.normalize(7).begin, 0);
EXPECT_EQ(spec.normalize(7).end, 6);
}
TEST(TokenTest, NormalizePartialNegativeStride) {
auto spec = Token(Token::REGULAR, 6, 2, -1);
EXPECT_EQ(spec.normalize(7).begin, 6);
EXPECT_EQ(spec.normalize(7).end, 2);
spec = Token(Token::REGULAR, 6, 2, -2);
EXPECT_EQ(spec.normalize(7).begin, 6);
EXPECT_EQ(spec.normalize(7).end, 2);
spec = Token(Token::REGULAR, 6, 2, -3);
EXPECT_EQ(spec.normalize(7).begin, 6);
EXPECT_EQ(spec.normalize(7).end, 0);
spec = Token(Token::REGULAR, 6, 2, -5);
EXPECT_EQ(spec.normalize(7).begin, 6);
EXPECT_EQ(spec.normalize(7).end, 1);
}
TEST(TokenTest, NormalizeFarFromCenter) {
auto spec = Token(Token::REGULAR, 100, 102, 1);
EXPECT_EQ(spec.normalize(9).begin, 1);
EXPECT_EQ(spec.normalize(9).end, 3);
}
TEST(TokenTest, NormalizeBeginMask) {
auto spec = Token(Token::REGULAR, 3, 2, 1);
spec.begin_mask = true;
EXPECT_EQ(spec.normalize(7).begin, 0);
spec = Token(Token::REGULAR, 3, 2, -1);
spec.begin_mask = true;
EXPECT_EQ(spec.normalize(7).begin, 6);
}
TEST(TokenTest, NormalizeEndMask) {
auto spec = Token(Token::REGULAR, 3, 2, 1);
spec.end_mask = true;
EXPECT_EQ(spec.normalize(7).end, 7);
spec = Token(Token::REGULAR, 3, 2, -1);
spec.end_mask = true;
EXPECT_EQ(spec.normalize(7).end, -1);
}
class InferenceTest : public ::testing::Test {
protected:
Mesh GetMesh() {
return Mesh::CreateMesh("MyMesh", {"x", "y"},
{2, 1},
{0, 1},
{"/job:localhost/task:0/device:CPU:0",
"/job:localhost/task:0/device:CPU:1"},
{0, 1},
{"/job:localhost/task:0/device:CPU:0",
"/job:localhost/task:0/device:CPU:1"},
false);
}
};
TEST_F(InferenceTest, FullyReplicatedInputs) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim},
GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim},
GetMesh());
const auto specs = std::vector<Token>{
Token(Token::REGULAR, 0, -1, 1,
false,
false,
false),
Token(Token::REGULAR, 0, 2, 2,
false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(
forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, -1);
EXPECT_EQ(forward->local_tokens()[1].end, 2);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(
backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(
backward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->local_tokens()[0].end, -1);
EXPECT_EQ(backward->local_tokens()[1].end, 2);
}
TEST_F(InferenceTest, NewAxisMask) {
const Layout input_layout =
*Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim,
"x", "y"},
GetMesh());
const auto specs = std::vector<Token>{
Token(Token::NEW_AXIS, 0, 0, 1,
false,
false,
false),
Token(Token::NEW_AXIS, 0, 0, 1,
false,
false,
false),
Token(Token::REGULAR, 0, 2, 1,
false,
false,
false),
Token(Token::REGULAR, 0, 4, 1,
false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({"x", "y"}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(4));
EXPECT_EQ(forward->local_tokens()[0].end, 0);
EXPECT_EQ(forward->local_tokens()[1].end, 0);
EXPECT_EQ(forward->local_tokens()[2].end, 1);
EXPECT_EQ(forward->local_tokens()[3].end, 4);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>(
{Layout::kUnshardedDim, Layout::kUnshardedDim, "x", "y"}));
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_THAT(backward->local_tokens(), SizeIs(4));
EXPECT_EQ(backward->local_tokens()[0].end, 0);
EXPECT_EQ(backward->local_tokens()[1].end, 0);
EXPECT_EQ(backward->local_tokens()[2].end, 1);
EXPECT_EQ(backward->local_tokens()[3].end, 4);
}
TEST_F(InferenceTest, ShrinkAxisMask) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim},
GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim}, GetMesh());
const auto specs = std::vector<Token>{
Token(Token::REGULAR, 0, -1, 1,
false,
false,
false),
Token(Token::SHRINK_AXIS, 0, 2,
1, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(
forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, -1);
EXPECT_EQ(forward->local_tokens()[1].end, 2);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_EQ(backward->local_tokens()[0].end, -1);
EXPECT_EQ(backward->local_tokens()[1].end, 2);
}
TEST_F(InferenceTest, EllipsisMask) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{"x", "y", Layout::kUnshardedDim}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{"x", "y", Layout::kUnshardedDim,
Layout::kUnshardedDim, Layout::kUnshardedDim},
GetMesh());
const auto specs =
std::vector<Token>{Token(Token::ELLIPSIS, 0, 0,
1, false,
false,
false),
Token(Token::NEW_AXIS, 0, 0,
1, false,
false,
false),
Token(Token::NEW_AXIS, 0, 0,
1, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4, 6});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({"x", "y", Layout::kUnshardedDim}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(3));
EXPECT_EQ(forward->local_tokens()[0].end, 0);
EXPECT_EQ(forward->local_tokens()[1].end, 0);
EXPECT_EQ(forward->local_tokens()[2].end, 0);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4, 6});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(
backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({"x", "y", Layout::kUnshardedDim,
Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_THAT(backward->local_tokens(), SizeIs(3));
EXPECT_EQ(backward->local_tokens()[0].end, 0);
EXPECT_EQ(backward->local_tokens()[1].end, 0);
EXPECT_EQ(backward->local_tokens()[2].end, 0);
}
TEST_F(InferenceTest, EllipsisNewAxisEndMask) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim},
GetMesh());
const auto specs = std::vector<Token>{
Token(Token::ELLIPSIS, 0, 0, 1,
false,
false,
false),
Token(Token::NEW_AXIS, 0, 0, 1,
false,
false,
false),
Token(Token::REGULAR, 0, 0, 1,
false,
true,
true),
};
auto forward = CreateAndRun<ForwardLayoutInference>(specs, input_layout,
std::vector<int64_t>{2});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(3));
EXPECT_EQ(forward->local_tokens()[0].end, 0);
EXPECT_EQ(forward->local_tokens()[1].end, 0);
EXPECT_EQ(forward->local_tokens()[2].end, 2);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(
backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(backward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim}));
EXPECT_THAT(backward->local_tokens(), SizeIs(3));
EXPECT_EQ(backward->local_tokens()[0].end, 0);
EXPECT_EQ(backward->local_tokens()[1].end, 0);
EXPECT_EQ(backward->local_tokens()[2].end, 2);
}
TEST_F(InferenceTest, AdditionalAxes) {
const Layout input_layout =
*Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh());
const Layout output_layout =
*Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh());
const auto specs =
std::vector<Token>{Token(Token::REGULAR, 0, 0,
1, false,
true,
true)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({"x", "y"}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(1));
EXPECT_EQ(forward->local_tokens()[0].begin_mask, true);
EXPECT_EQ(forward->local_tokens()[0].end_mask, true);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({"x", "y"}));
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_THAT(backward->local_tokens(), SizeIs(1));
EXPECT_EQ(forward->local_tokens()[0].begin_mask, true);
EXPECT_EQ(forward->local_tokens()[0].end_mask, true);
}
TEST_F(InferenceTest, ShardingOnNonSlicedDimension) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh());
const auto specs =
std::vector<Token>{Token(Token::REGULAR, 0, 2,
1, false,
false,
false),
Token(Token::REGULAR, 0, 2,
2, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({"x", Layout::kUnshardedDim}));
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, 1);
EXPECT_EQ(forward->local_tokens()[1].end, 2);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({"x", Layout::kUnshardedDim}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->local_tokens()[0].end, 1);
EXPECT_EQ(backward->local_tokens()[1].end, 2);
}
TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout1) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh());
const auto specs =
std::vector<Token>{Token(Token::REGULAR, 0, 2,
1, false,
false,
false),
Token(Token::REGULAR, 0, 4,
2, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, "x"}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, 2);
EXPECT_EQ(forward->local_tokens()[1].end, 2);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, "x"}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->local_tokens()[0].end, 2);
EXPECT_EQ(backward->local_tokens()[1].end, 2);
}
TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout2) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, "y"}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, "y"}, GetMesh());
const auto specs =
std::vector<Token>{Token(Token::REGULAR, 0, 2,
1, false,
false,
false),
Token(Token::REGULAR, 0, 4,
2, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, "y"}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, 2);
EXPECT_EQ(forward->local_tokens()[1].end, 4);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, "y"}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->local_tokens()[0].end, 2);
EXPECT_EQ(backward->local_tokens()[1].end, 4);
}
TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout3) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh());
const auto specs =
std::vector<Token>{Token(Token::REGULAR, 0, 2,
1, false,
false,
false),
Token(Token::REGULAR, 0, 3,
2, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, "x"}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, 2);
EXPECT_EQ(forward->local_tokens()[1].end, 2);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(backward->expander_input_layout(), input_layout);
EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, "x"}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->local_tokens()[0].end, 2);
EXPECT_EQ(backward->local_tokens()[1].end, 2);
}
TEST_F(InferenceTest, StrideOnShardedDimensionNeedRelayout) {
const Layout input_layout = *Layout::GetLayout(
std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh());
const Layout output_layout = *Layout::GetLayout(
std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim},
GetMesh());
const auto specs =
std::vector<Token>{Token(Token::REGULAR, 0, -1,
1, false,
false,
false),
Token(Token::REGULAR, 0, 4,
3, false,
false,
false)};
auto forward = CreateAndRun<ForwardLayoutInference>(
specs, input_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(forward, IsOk());
EXPECT_EQ(
forward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(forward->expander_value_layout(), output_layout);
EXPECT_THAT(forward->local_tokens(), SizeIs(2));
EXPECT_EQ(forward->local_tokens()[0].end, -1);
EXPECT_EQ(forward->local_tokens()[1].end, 4);
auto backward = CreateAndRun<BackwardLayoutInference>(
specs, output_layout, std::vector<int64_t>{2, 4});
ASSERT_THAT(backward, IsOk());
EXPECT_EQ(
backward->expander_input_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_EQ(
backward->expander_value_layout().sharding_spec_strs(),
std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim}));
EXPECT_THAT(backward->local_tokens(), SizeIs(2));
EXPECT_EQ(backward->local_tokens()[0].end, -1);
EXPECT_EQ(backward->local_tokens()[1].end, 4);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/cc/slice_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/dtensor/tests/slice_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a08e9eff-c4d9-4548-8237-e935523bf760 | cpp | google/quiche | quic_url | quiche/quic/tools/quic_url.cc | quiche/quic/tools/quic_url_test.cc | #include "quiche/quic/tools/quic_url.h"
#include <string>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace quic {
static constexpr size_t kMaxHostNameLength = 256;
// Parses |url| verbatim into the underlying GURL; no default scheme is
// applied, so scheme-less input simply yields an invalid URL.
QuicUrl::QuicUrl(absl::string_view url) : url_(static_cast<std::string>(url)) {}
// Parses |url|, prepending "|default_scheme|://" when |url| does not already
// carry a scheme of its own. An explicit scheme in |url| always wins.
QuicUrl::QuicUrl(absl::string_view url, absl::string_view default_scheme)
    : QuicUrl(url) {
  if (url_.has_scheme()) {
    return;
  }
  // NOTE(review): the original literal was truncated at the "//" inside the
  // string (comment-stripping artifact); restored to the upstream form.
  url_ = GURL(absl::StrCat(default_scheme, "://", url));
}
// Returns the canonical serialization of the URL, or "" when invalid.
std::string QuicUrl::ToString() const {
  return IsValid() ? url_.spec() : std::string();
}
// A usable URL must have parsed cleanly, carry a scheme, and (when a host is
// present) keep the hostname within the supported maximum length.
bool QuicUrl::IsValid() const {
  const bool parsed_ok = url_.is_valid() && url_.has_scheme();
  if (!parsed_ok) {
    return false;
  }
  const bool host_too_long =
      url_.has_host() && url_.host().length() > kMaxHostNameLength;
  return !host_too_long;
}
// Returns "host" or "host:port"; the port is included only when it was
// explicitly present in the URL. Invalid or host-less URLs yield "".
std::string QuicUrl::HostPort() const {
  if (!IsValid() || !url_.has_host()) {
    return "";
  }
  const std::string host = url_.host();
  const int explicit_port = url_.IntPort();
  return explicit_port == url::PORT_UNSPECIFIED
             ? host
             : absl::StrCat(host, ":", explicit_port);
}
// Returns the request target (path + params + query); "/" is the default
// when the URL is invalid or has no path.
std::string QuicUrl::PathParamsQuery() const {
  if (IsValid() && url_.has_path()) {
    return url_.PathForRequest();
  }
  return "/";
}
// Returns the URL scheme (e.g. "https"), or "" for an invalid URL.
std::string QuicUrl::scheme() const {
  return IsValid() ? url_.scheme() : std::string();
}
// Returns the hostname with IPv6 brackets stripped, or "" for an invalid URL.
std::string QuicUrl::host() const {
  return IsValid() ? url_.HostNoBrackets() : std::string();
}
// Returns the path component only (no params/query), or "" for an invalid URL.
std::string QuicUrl::path() const {
  return IsValid() ? url_.path() : std::string();
}
// Returns the effective port (scheme default applied when none is explicit),
// or 0 when the URL is invalid or the port cannot be determined.
uint16_t QuicUrl::port() const {
  if (!IsValid()) {
    return 0;
  }
  const int effective = url_.EffectiveIntPort();
  return effective == url::PORT_UNSPECIFIED ? 0
                                            : static_cast<uint16_t>(effective);
}
} | #include "quiche/quic/tools/quic_url.h"
#include <string>
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
class QuicUrlTest : public QuicTest {};
// Sanity-checks parsing of scheme-less, http, https and ftp URLs.
// NOTE(review): every literal containing "//" had been truncated (unterminated
// strings); restored from the upstream test file.
TEST_F(QuicUrlTest, Basic) {
  // No scheme at all is not a valid URL.
  std::string url_str = "www.example.com";
  QuicUrl url(url_str);
  EXPECT_FALSE(url.IsValid());

  // Scheme is HTTP.
  url_str = "http://www.example.com";
  url = QuicUrl(url_str);
  EXPECT_TRUE(url.IsValid());
  EXPECT_EQ("http://www.example.com/", url.ToString());
  EXPECT_EQ("http", url.scheme());
  EXPECT_EQ("www.example.com", url.HostPort());
  EXPECT_EQ("/", url.PathParamsQuery());
  EXPECT_EQ(80u, url.port());

  // Scheme is HTTPS, with explicit port, path and query.
  url_str = "https://www.example.com:12345/path/to/resource?a=1&campaign=2";
  url = QuicUrl(url_str);
  EXPECT_TRUE(url.IsValid());
  EXPECT_EQ("https://www.example.com:12345/path/to/resource?a=1&campaign=2",
            url.ToString());
  EXPECT_EQ("https", url.scheme());
  EXPECT_EQ("www.example.com:12345", url.HostPort());
  EXPECT_EQ("/path/to/resource?a=1&campaign=2", url.PathParamsQuery());
  EXPECT_EQ(12345u, url.port());

  // Scheme is FTP (default port 21).
  url_str = "ftp://www.example.com";
  url = QuicUrl(url_str);
  EXPECT_TRUE(url.IsValid());
  EXPECT_EQ("ftp://www.example.com/", url.ToString());
  EXPECT_EQ("ftp", url.scheme());
  EXPECT_EQ("www.example.com", url.HostPort());
  EXPECT_EQ("/", url.PathParamsQuery());
  EXPECT_EQ(21u, url.port());
}
// Verifies the default-scheme constructor: the default is applied only when
// the input has no scheme of its own.
// NOTE(review): restored URL literals truncated at "//".
TEST_F(QuicUrlTest, DefaultScheme) {
  std::string url_str = "www.example.com";
  QuicUrl url(url_str, "http");
  EXPECT_EQ("http://www.example.com/", url.ToString());
  EXPECT_EQ("http", url.scheme());

  // An explicit scheme in the input wins over the default.
  url_str = "http://www.example.com";
  url = QuicUrl(url_str, "https");
  EXPECT_EQ("http://www.example.com/", url.ToString());
  EXPECT_EQ("http", url.scheme());

  url_str = "www.example.com";
  url = QuicUrl(url_str, "ftp");
  EXPECT_EQ("ftp://www.example.com/", url.ToString());
  EXPECT_EQ("ftp", url.scheme());
}
// Exercises IsValid(): bad host characters, bad scheme characters, overlong
// hostnames and out-of-range ports must all be rejected.
// NOTE(review): restored URL literals truncated at "//".
TEST_F(QuicUrlTest, IsValid) {
  std::string url_str =
      "ftp://www.example.com:12345/path/to/resource?a=1&campaign=2";
  EXPECT_TRUE(QuicUrl(url_str).IsValid());

  // Invalid characters in the host name.
  url_str = "https://www%.example.com:12345/path/to/resource?a=1&campaign=2";
  EXPECT_FALSE(QuicUrl(url_str).IsValid());

  // Invalid characters in the scheme.
  url_str = "%http://www.example.com:12345/path/to/resource?a=1&campaign=2";
  EXPECT_FALSE(QuicUrl(url_str).IsValid());

  // Host name longer than kMaxHostNameLength.
  std::string host(1024, 'a');
  url_str = "https://" + host;
  EXPECT_FALSE(QuicUrl(url_str).IsValid());

  // Port number out of range.
  url_str = "https://www.example.com:123456";
  EXPECT_FALSE(QuicUrl(url_str).IsValid());
}
// Verifies HostPort()/host()/port() for default ports, explicit ports, IPv4
// literals, and bracketed IPv6 literals.
// NOTE(review): restored URL literals truncated at "//".
TEST_F(QuicUrlTest, HostPort) {
  std::string url_str = "http://www.example.com/";
  QuicUrl url(url_str);
  EXPECT_EQ("www.example.com", url.HostPort());
  EXPECT_EQ("www.example.com", url.host());
  EXPECT_EQ(80u, url.port());

  // Explicit default port is omitted from HostPort().
  url_str = "http://www.example.com:80/";
  url = QuicUrl(url_str);
  EXPECT_EQ("www.example.com", url.HostPort());
  EXPECT_EQ("www.example.com", url.host());
  EXPECT_EQ(80u, url.port());

  // Non-default port is kept.
  url_str = "http://www.example.com:81/";
  url = QuicUrl(url_str);
  EXPECT_EQ("www.example.com:81", url.HostPort());
  EXPECT_EQ("www.example.com", url.host());
  EXPECT_EQ(81u, url.port());

  // IPv4 literal.
  url_str = "https://192.168.1.1/";
  url = QuicUrl(url_str);
  EXPECT_EQ("192.168.1.1", url.HostPort());
  EXPECT_EQ("192.168.1.1", url.host());
  EXPECT_EQ(443u, url.port());

  // IPv6 literal: HostPort() keeps brackets, host() strips them.
  url_str = "http://[2001::1]/";
  url = QuicUrl(url_str);
  EXPECT_EQ("[2001::1]", url.HostPort());
  EXPECT_EQ("2001::1", url.host());
  EXPECT_EQ(80u, url.port());

  url_str = "http://[2001::1]:81/";
  url = QuicUrl(url_str);
  EXPECT_EQ("[2001::1]:81", url.HostPort());
  EXPECT_EQ("2001::1", url.host());
  EXPECT_EQ(81u, url.port());
}
// Verifies PathParamsQuery()/path() for URLs with a query, an empty query,
// and no path beyond "/".
// NOTE(review): restored URL literals truncated at "//".
TEST_F(QuicUrlTest, PathParamsQuery) {
  std::string url_str =
      "https://www.example.com:12345/path/to/resource?a=1&campaign=2";
  QuicUrl url(url_str);
  EXPECT_EQ("/path/to/resource?a=1&campaign=2", url.PathParamsQuery());
  EXPECT_EQ("/path/to/resource", url.path());

  url_str = "https://www.example.com/?";
  url = QuicUrl(url_str);
  EXPECT_EQ("/?", url.PathParamsQuery());
  EXPECT_EQ("/", url.path());

  url_str = "https://www.example.com/";
  url = QuicUrl(url_str);
  EXPECT_EQ("/", url.PathParamsQuery());
  EXPECT_EQ("/", url.path());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_url.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/tools/quic_url_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
6a99b84b-fcf5-4ca5-8e15-10a72bf74889 | cpp | abseil/abseil-cpp | escaping | absl/strings/internal/escaping.cc | absl/strings/escaping_test.cc | #include "absl/strings/internal/escaping.h"
#include <limits>
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace strings_internal {
// Standard base64 alphabet (RFC 4648 section 4): index i maps the 6-bit
// value i to its output character.
ABSL_CONST_INIT const char kBase64Chars[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
// URL- and filename-safe alphabet (RFC 4648 section 5): '+' -> '-', '/' -> '_'.
ABSL_CONST_INIT const char kWebSafeBase64Chars[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
// Returns the number of characters needed to base64-encode `input_len` bytes
// ('=' padding included when `do_padding` is true). CHECK-fails if the
// result would not fit in size_t.
size_t CalculateBase64EscapedLenInternal(size_t input_len, bool do_padding) {
  // Largest input whose escaped length (plus one spare byte) fits in size_t.
  constexpr size_t kMaxSize = (std::numeric_limits<size_t>::max() - 1) / 4 * 3;
  ABSL_INTERNAL_CHECK(input_len <= kMaxSize,
                      "CalculateBase64EscapedLenInternal() overflow");
  // Every whole 3-byte group encodes to 4 output characters.
  size_t len = (input_len / 3) * 4;
  const size_t remainder = input_len % 3;
  if (remainder == 1) {
    // One leftover byte -> 2 chars, plus "==" when padding.
    len += do_padding ? 4 : 2;
  } else if (remainder == 2) {
    // Two leftover bytes -> 3 chars, plus "=" when padding.
    len += do_padding ? 4 : 3;
  }
  return len;
}
// Encodes `szsrc` bytes from `src` into `dest` using the 64-character
// alphabet `base64` ('=' padding appended when `do_padding` is true).
// Returns the number of characters written, or 0 if `szdest` is too small.
size_t Base64EscapeInternal(const unsigned char* src, size_t szsrc, char* dest,
                            size_t szdest, const char* base64,
                            bool do_padding) {
  static const char kPad64 = '=';
  // Fast reject: encoding needs 4 output chars per 3 input bytes.
  if (szsrc * 4 > szdest * 3) return 0;
  char* cur_dest = dest;
  const unsigned char* cur_src = src;
  char* const limit_dest = dest + szdest;
  const unsigned char* const limit_src = src + szsrc;
  if (szsrc >= 3) {
    // Fast path: emit one full 4-character group per 3 input bytes. The loop
    // stops 3+ bytes early because each iteration does a 4-byte load.
    while (cur_src < limit_src - 3) {
      // Load the next 3 bytes into the low 24 bits of `in`.
      uint32_t in = absl::big_endian::Load32(cur_src) >> 8;
      cur_dest[0] = base64[in >> 18];
      in &= 0x3FFFF;
      cur_dest[1] = base64[in >> 12];
      in &= 0xFFF;
      cur_dest[2] = base64[in >> 6];
      in &= 0x3F;
      cur_dest[3] = base64[in];
      cur_dest += 4;
      cur_src += 3;
    }
  }
  // Recompute remaining output space and input bytes for the 0-3 byte tail.
  szdest = static_cast<size_t>(limit_dest - cur_dest);
  szsrc = static_cast<size_t>(limit_src - cur_src);
  switch (szsrc) {
    case 0:
      // Nothing left to encode.
      break;
    case 1: {
      // One final byte -> two output characters (+ "==" when padding).
      if (szdest < 2) return 0;
      uint32_t in = cur_src[0];
      cur_dest[0] = base64[in >> 2];
      in &= 0x3;
      cur_dest[1] = base64[in << 4];
      cur_dest += 2;
      szdest -= 2;
      if (do_padding) {
        if (szdest < 2) return 0;
        cur_dest[0] = kPad64;
        cur_dest[1] = kPad64;
        cur_dest += 2;
        szdest -= 2;
      }
      break;
    }
    case 2: {
      // Two final bytes -> three output characters (+ "=" when padding).
      if (szdest < 3) return 0;
      uint32_t in = absl::big_endian::Load16(cur_src);
      cur_dest[0] = base64[in >> 10];
      in &= 0x3FF;
      cur_dest[1] = base64[in >> 4];
      in &= 0x00F;
      cur_dest[2] = base64[in << 2];
      cur_dest += 3;
      szdest -= 3;
      if (do_padding) {
        if (szdest < 1) return 0;
        cur_dest[0] = kPad64;
        cur_dest += 1;
        szdest -= 1;
      }
      break;
    }
    case 3: {
      // Exactly three bytes remain (the fast path left them to avoid an
      // out-of-bounds 4-byte load) -> one full 4-character group.
      if (szdest < 4) return 0;
      uint32_t in =
          (uint32_t{cur_src[0]} << 16) + absl::big_endian::Load16(cur_src + 1);
      cur_dest[0] = base64[in >> 18];
      in &= 0x3FFFF;
      cur_dest[1] = base64[in >> 12];
      in &= 0xFFF;
      cur_dest[2] = base64[in >> 6];
      in &= 0x3F;
      cur_dest[3] = base64[in];
      cur_dest += 4;
      szdest -= 4;
      break;
    }
    default:
      // Unreachable: the tail length is always in [0, 3].
      ABSL_RAW_LOG(FATAL, "Logic problem? szsrc = %zu", szsrc);
      break;
  }
  return static_cast<size_t>(cur_dest - dest);
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/escaping.h"
#include <array>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/internal/escaping_test_common.h"
#include "absl/strings/string_view.h"
namespace {
struct epair {
std::string escaped;
std::string unescaped;
};
TEST(CEscape, EscapeAndUnescape) {
const std::string inputs[] = {
std::string("foo\nxx\r\b\0023"),
std::string(""),
std::string("abc"),
std::string("\1chad_rules"),
std::string("\1arnar_drools"),
std::string("xxxx\r\t'\"\\"),
std::string("\0xx\0", 4),
std::string("\x01\x31"),
std::string("abc\xb\x42\141bc"),
std::string("123\1\x31\x32\x33"),
std::string("\xc1\xca\x1b\x62\x19o\xcc\x04"),
std::string(
"\\\"\xe8\xb0\xb7\xe6\xad\x8c\\\" is Google\\\'s Chinese name"),
};
for (int kind = 0; kind < 4; kind++) {
for (const std::string& original : inputs) {
std::string escaped;
switch (kind) {
case 0:
escaped = absl::CEscape(original);
break;
case 1:
escaped = absl::CHexEscape(original);
break;
case 2:
escaped = absl::Utf8SafeCEscape(original);
break;
case 3:
escaped = absl::Utf8SafeCHexEscape(original);
break;
}
std::string unescaped_str;
EXPECT_TRUE(absl::CUnescape(escaped, &unescaped_str));
EXPECT_EQ(unescaped_str, original);
unescaped_str.erase();
std::string error;
EXPECT_TRUE(absl::CUnescape(escaped, &unescaped_str, &error));
EXPECT_EQ(error, "");
std::string s = escaped;
EXPECT_TRUE(absl::CUnescape(s, &s));
ASSERT_EQ(s, original);
}
}
for (int char0 = 0; char0 < 256; char0++) {
for (int char1 = 0; char1 < 256; char1++) {
char chars[2];
chars[0] = char0;
chars[1] = char1;
std::string s(chars, 2);
std::string escaped = absl::CHexEscape(s);
std::string unescaped;
EXPECT_TRUE(absl::CUnescape(escaped, &unescaped));
EXPECT_EQ(s, unescaped);
}
}
}
TEST(CEscape, BasicEscaping) {
epair oct_values[] = {
{"foo\\rbar\\nbaz\\t", "foo\rbar\nbaz\t"},
{"\\'full of \\\"sound\\\" and \\\"fury\\\"\\'",
"'full of \"sound\" and \"fury\"'"},
{"signi\\\\fying\\\\ nothing\\\\", "signi\\fying\\ nothing\\"},
{"\\010\\t\\n\\013\\014\\r", "\010\011\012\013\014\015"}
};
epair hex_values[] = {
{"ubik\\rubik\\nubik\\t", "ubik\rubik\nubik\t"},
{"I\\\'ve just seen a \\\"face\\\"",
"I've just seen a \"face\""},
{"hel\\\\ter\\\\skel\\\\ter\\\\", "hel\\ter\\skel\\ter\\"},
{"\\x08\\t\\n\\x0b\\x0c\\r", "\010\011\012\013\014\015"}
};
epair utf8_oct_values[] = {
{"\xe8\xb0\xb7\xe6\xad\x8c\\r\xe8\xb0\xb7\xe6\xad\x8c\\nbaz\\t",
"\xe8\xb0\xb7\xe6\xad\x8c\r\xe8\xb0\xb7\xe6\xad\x8c\nbaz\t"},
{"\\\"\xe8\xb0\xb7\xe6\xad\x8c\\\" is Google\\\'s Chinese name",
"\"\xe8\xb0\xb7\xe6\xad\x8c\" is Google\'s Chinese name"},
{"\xe3\x83\xa1\xe3\x83\xbc\xe3\x83\xab\\\\are\\\\Japanese\\\\chars\\\\",
"\xe3\x83\xa1\xe3\x83\xbc\xe3\x83\xab\\are\\Japanese\\chars\\"},
{"\xed\x81\xac\xeb\xa1\xac\\010\\t\\n\\013\\014\\r",
"\xed\x81\xac\xeb\xa1\xac\010\011\012\013\014\015"}
};
epair utf8_hex_values[] = {
{"\x20\xe4\xbd\xa0\\t\xe5\xa5\xbd,\\r!\\n",
"\x20\xe4\xbd\xa0\t\xe5\xa5\xbd,\r!\n"},
{"\xe8\xa9\xa6\xe9\xa8\x93\\\' means \\\"test\\\"",
"\xe8\xa9\xa6\xe9\xa8\x93\' means \"test\""},
{"\\\\\xe6\x88\x91\\\\:\\\\\xe6\x9d\xa8\xe6\xac\xa2\\\\",
"\\\xe6\x88\x91\\:\\\xe6\x9d\xa8\xe6\xac\xa2\\"},
{"\xed\x81\xac\xeb\xa1\xac\\x08\\t\\n\\x0b\\x0c\\r",
"\xed\x81\xac\xeb\xa1\xac\010\011\012\013\014\015"}
};
for (const epair& val : oct_values) {
std::string escaped = absl::CEscape(val.unescaped);
EXPECT_EQ(escaped, val.escaped);
}
for (const epair& val : hex_values) {
std::string escaped = absl::CHexEscape(val.unescaped);
EXPECT_EQ(escaped, val.escaped);
}
for (const epair& val : utf8_oct_values) {
std::string escaped = absl::Utf8SafeCEscape(val.unescaped);
EXPECT_EQ(escaped, val.escaped);
}
for (const epair& val : utf8_hex_values) {
std::string escaped = absl::Utf8SafeCHexEscape(val.unescaped);
EXPECT_EQ(escaped, val.escaped);
}
}
TEST(Unescape, BasicFunction) {
epair tests[] =
{{"", ""},
{"\\u0030", "0"},
{"\\u00A3", "\xC2\xA3"},
{"\\u22FD", "\xE2\x8B\xBD"},
{"\\U00010000", "\xF0\x90\x80\x80"},
{"\\U0010FFFD", "\xF4\x8F\xBF\xBD"}};
for (const epair& val : tests) {
std::string out;
EXPECT_TRUE(absl::CUnescape(val.escaped, &out));
EXPECT_EQ(out, val.unescaped);
}
std::string bad[] = {"\\u1",
"\\U1",
"\\Uffffff",
"\\U00110000",
"\\uD835",
"\\U0000DD04",
"\\777",
"\\xABCD"};
for (const std::string& e : bad) {
std::string error;
std::string out;
EXPECT_FALSE(absl::CUnescape(e, &out, &error));
EXPECT_FALSE(error.empty());
out.erase();
EXPECT_FALSE(absl::CUnescape(e, &out));
}
}
// Fixture providing canned escape-sequence inputs plus a scratch output
// string shared by the CUnescape TEST_F cases below.
class CUnescapeTest : public testing::Test {
 protected:
  static const char kStringWithMultipleOctalNulls[];
  static const char kStringWithMultipleHexNulls[];
  static const char kStringWithMultipleUnicodeNulls[];

  // Receives the unescaped output in each test.
  std::string result_string_;
};
// Mixes 1-, 2- and 3-digit octal null escapes with literal text.
const char CUnescapeTest::kStringWithMultipleOctalNulls[] =
    "\\0\\n"
    "0\\n"
    "\\00\\12"
    "\\000";

// Mixes 1-, 2- and 3-digit hex null escapes with literal text.
const char CUnescapeTest::kStringWithMultipleHexNulls[] =
    "\\x0\\n"
    "0\\n"
    "\\x00\\xa"
    "\\x000";

// Mixes 4-digit (\u) and 8-digit (\U) Unicode null escapes.
const char CUnescapeTest::kStringWithMultipleUnicodeNulls[] =
    "\\u0000\\n"
    "0\\n"
    "\\U00000000";
TEST_F(CUnescapeTest, Unescapes1CharOctalNull) {
std::string original_string = "\\0";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes2CharOctalNull) {
std::string original_string = "\\00";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes3CharOctalNull) {
std::string original_string = "\\000";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes1CharHexNull) {
std::string original_string = "\\x0";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes2CharHexNull) {
std::string original_string = "\\x00";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes3CharHexNull) {
std::string original_string = "\\x000";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes4CharUnicodeNull) {
std::string original_string = "\\u0000";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, Unescapes8CharUnicodeNull) {
std::string original_string = "\\U00000000";
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0", 1), result_string_);
}
TEST_F(CUnescapeTest, UnescapesMultipleOctalNulls) {
std::string original_string(kStringWithMultipleOctalNulls);
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0\n"
"0\n"
"\0\n"
"\0",
7),
result_string_);
}
TEST_F(CUnescapeTest, UnescapesMultipleHexNulls) {
std::string original_string(kStringWithMultipleHexNulls);
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0\n"
"0\n"
"\0\n"
"\0",
7),
result_string_);
}
TEST_F(CUnescapeTest, UnescapesMultipleUnicodeNulls) {
std::string original_string(kStringWithMultipleUnicodeNulls);
EXPECT_TRUE(absl::CUnescape(original_string, &result_string_));
EXPECT_EQ(std::string("\0\n"
"0\n"
"\0",
5),
result_string_);
}
static struct {
absl::string_view plaintext;
absl::string_view cyphertext;
} const base64_tests[] = {
{{"", 0}, {"", 0}},
{{nullptr, 0},
{"", 0}},
{{"\000", 1}, "AA=="},
{{"\001", 1}, "AQ=="},
{{"\002", 1}, "Ag=="},
{{"\004", 1}, "BA=="},
{{"\010", 1}, "CA=="},
{{"\020", 1}, "EA=="},
{{"\040", 1}, "IA=="},
{{"\100", 1}, "QA=="},
{{"\200", 1}, "gA=="},
{{"\377", 1}, "/w=="},
{{"\376", 1}, "/g=="},
{{"\375", 1}, "/Q=="},
{{"\373", 1}, "+w=="},
{{"\367", 1}, "9w=="},
{{"\357", 1}, "7w=="},
{{"\337", 1}, "3w=="},
{{"\277", 1}, "vw=="},
{{"\177", 1}, "fw=="},
{{"\000\000", 2}, "AAA="},
{{"\000\001", 2}, "AAE="},
{{"\000\002", 2}, "AAI="},
{{"\000\004", 2}, "AAQ="},
{{"\000\010", 2}, "AAg="},
{{"\000\020", 2}, "ABA="},
{{"\000\040", 2}, "ACA="},
{{"\000\100", 2}, "AEA="},
{{"\000\200", 2}, "AIA="},
{{"\001\000", 2}, "AQA="},
{{"\002\000", 2}, "AgA="},
{{"\004\000", 2}, "BAA="},
{{"\010\000", 2}, "CAA="},
{{"\020\000", 2}, "EAA="},
{{"\040\000", 2}, "IAA="},
{{"\100\000", 2}, "QAA="},
{{"\200\000", 2}, "gAA="},
{{"\377\377", 2}, "
{{"\377\376", 2}, "
{{"\377\375", 2}, "
{{"\377\373", 2}, "
{{"\377\367", 2}, "
{{"\377\357", 2}, "/+8="},
{{"\377\337", 2}, "/98="},
{{"\377\277", 2}, "/78="},
{{"\377\177", 2}, "/38="},
{{"\376\377", 2}, "/v8="},
{{"\375\377", 2}, "/f8="},
{{"\373\377", 2}, "+/8="},
{{"\367\377", 2}, "9/8="},
{{"\357\377", 2}, "7/8="},
{{"\337\377", 2}, "3/8="},
{{"\277\377", 2}, "v/8="},
{{"\177\377", 2}, "f/8="},
{{"\000\000\000", 3}, "AAAA"},
{{"\000\000\001", 3}, "AAAB"},
{{"\000\000\002", 3}, "AAAC"},
{{"\000\000\004", 3}, "AAAE"},
{{"\000\000\010", 3}, "AAAI"},
{{"\000\000\020", 3}, "AAAQ"},
{{"\000\000\040", 3}, "AAAg"},
{{"\000\000\100", 3}, "AABA"},
{{"\000\000\200", 3}, "AACA"},
{{"\000\001\000", 3}, "AAEA"},
{{"\000\002\000", 3}, "AAIA"},
{{"\000\004\000", 3}, "AAQA"},
{{"\000\010\000", 3}, "AAgA"},
{{"\000\020\000", 3}, "ABAA"},
{{"\000\040\000", 3}, "ACAA"},
{{"\000\100\000", 3}, "AEAA"},
{{"\000\200\000", 3}, "AIAA"},
{{"\001\000\000", 3}, "AQAA"},
{{"\002\000\000", 3}, "AgAA"},
{{"\004\000\000", 3}, "BAAA"},
{{"\010\000\000", 3}, "CAAA"},
{{"\020\000\000", 3}, "EAAA"},
{{"\040\000\000", 3}, "IAAA"},
{{"\100\000\000", 3}, "QAAA"},
{{"\200\000\000", 3}, "gAAA"},
{{"\377\377\377", 3}, "
{{"\377\377\376", 3}, "
{{"\377\377\375", 3}, "
{{"\377\377\373", 3}, "
{{"\377\377\367", 3}, "
{{"\377\377\357", 3}, "
{{"\377\377\337", 3}, "
{{"\377\377\277", 3}, "
{{"\377\377\177", 3}, "
{{"\377\376\377", 3}, "
{{"\377\375\377", 3}, "
{{"\377\373\377", 3}, "
{{"\377\367\377", 3}, "
{{"\377\357\377", 3}, "/+
{{"\377\337\377", 3}, "/9
{{"\377\277\377", 3}, "/7
{{"\377\177\377", 3}, "/3
{{"\376\377\377", 3}, "/v
{{"\375\377\377", 3}, "/f
{{"\373\377\377", 3}, "+
{{"\367\377\377", 3}, "9
{{"\357\377\377", 3}, "7
{{"\337\377\377", 3}, "3
{{"\277\377\377", 3}, "v
{{"\177\377\377", 3}, "f
{{"\243\361", 2}, "o/E="},
{{"\024\167", 2}, "FHc="},
{{"\313\252", 2}, "y6o="},
{{"\046\041", 2}, "JiE="},
{{"\145\236", 2}, "ZZ4="},
{{"\254\325", 2}, "rNU="},
{{"\061\330", 2}, "Mdg="},
{{"\245\032", 2}, "pRo="},
{{"\006\000", 2}, "BgA="},
{{"\375\131", 2}, "/Vk="},
{{"\303\210", 2}, "w4g="},
{{"\040\037", 2}, "IB8="},
{{"\261\372", 2}, "sfo="},
{{"\335\014", 2}, "3Qw="},
{{"\233\217", 2}, "m48="},
{{"\373\056", 2}, "+y4="},
{{"\247\232", 2}, "p5o="},
{{"\107\053", 2}, "Rys="},
{{"\204\077", 2}, "hD8="},
{{"\276\211", 2}, "vok="},
{{"\313\110", 2}, "y0g="},
{{"\363\376", 2}, "8/4="},
{{"\251\234", 2}, "qZw="},
{{"\103\262", 2}, "Q7I="},
{{"\142\312", 2}, "Yso="},
{{"\067\211", 2}, "N4k="},
{{"\220\001", 2}, "kAE="},
{{"\152\240", 2}, "aqA="},
{{"\367\061", 2}, "9zE="},
{{"\133\255", 2}, "W60="},
{{"\176\035", 2}, "fh0="},
{{"\032\231", 2}, "Gpk="},
{{"\013\007\144", 3}, "Cwdk"},
{{"\030\112\106", 3}, "GEpG"},
{{"\047\325\046", 3}, "J9Um"},
{{"\310\160\022", 3}, "yHAS"},
{{"\131\100\237", 3}, "WUCf"},
{{"\064\342\134", 3}, "NOJc"},
{{"\010\177\004", 3}, "CH8E"},
{{"\345\147\205", 3}, "5WeF"},
{{"\300\343\360", 3}, "wOPw"},
{{"\061\240\201", 3}, "MaCB"},
{{"\225\333\044", 3}, "ldsk"},
{{"\215\137\352", 3}, "jV/q"},
{{"\371\147\160", 3}, "+Wdw"},
{{"\030\320\051", 3}, "GNAp"},
{{"\044\174\241", 3}, "JHyh"},
{{"\260\127\037", 3}, "sFcf"},
{{"\111\045\033", 3}, "SSUb"},
{{"\202\114\107", 3}, "gkxH"},
{{"\057\371\042", 3}, "L/ki"},
{{"\223\247\244", 3}, "k6ek"},
{{"\047\216\144", 3}, "J45k"},
{{"\203\070\327", 3}, "gzjX"},
{{"\247\140\072", 3}, "p2A6"},
{{"\124\115\116", 3}, "VE1O"},
{{"\157\162\050", 3}, "b3Io"},
{{"\357\223\004", 3}, "75ME"},
{{"\052\117\156", 3}, "Kk9u"},
{{"\347\154\000", 3}, "52wA"},
{{"\303\012\142", 3}, "wwpi"},
{{"\060\035\362", 3}, "MB3y"},
{{"\130\226\361", 3}, "WJbx"},
{{"\173\013\071", 3}, "ews5"},
{{"\336\004\027", 3}, "3gQX"},
{{"\357\366\234", 3}, "7/ac"},
{{"\353\304\111", 3}, "68RJ"},
{{"\024\264\131", 3}, "FLRZ"},
{{"\075\114\251", 3}, "PUyp"},
{{"\315\031\225", 3}, "zRmV"},
{{"\154\201\276", 3}, "bIG+"},
{{"\200\066\072", 3}, "gDY6"},
{{"\142\350\267", 3}, "Yui3"},
{{"\033\000\166", 3}, "GwB2"},
{{"\210\055\077", 3}, "iC0/"},
{{"\341\037\124", 3}, "4R9U"},
{{"\161\103\152", 3}, "cUNq"},
{{"\270\142\131", 3}, "uGJZ"},
{{"\337\076\074", 3}, "3z48"},
{{"\375\106\362", 3}, "/Uby"},
{{"\227\301\127", 3}, "l8FX"},
{{"\340\002\234", 3}, "4AKc"},
{{"\121\064\033", 3}, "UTQb"},
{{"\157\134\143", 3}, "b1xj"},
{{"\247\055\327", 3}, "py3X"},
{{"\340\142\005", 3}, "4GIF"},
{{"\060\260\143", 3}, "MLBj"},
{{"\075\203\170", 3}, "PYN4"},
{{"\143\160\016", 3}, "Y3AO"},
{{"\313\013\063", 3}, "ywsz"},
{{"\174\236\135", 3}, "fJ5d"},
{{"\103\047\026", 3}, "QycW"},
{{"\365\005\343", 3}, "9QXj"},
{{"\271\160\223", 3}, "uXCT"},
{{"\362\255\172", 3}, "8q16"},
{{"\113\012\015", 3}, "SwoN"},
{{"", 0}, {"", 0}},
{"a", "YQ=="},
{"ab", "YWI="},
{"abc", "YWJj"},
{"abcd", "YWJjZA=="},
{"abcde", "YWJjZGU="},
{"abcdef", "YWJjZGVm"},
{"abcdefg", "YWJjZGVmZw=="},
{"abcdefgh", "YWJjZGVmZ2g="},
{"abcdefghi", "YWJjZGVmZ2hp"},
{"abcdefghij", "YWJjZGVmZ2hpag=="},
{"abcdefghijk", "YWJjZGVmZ2hpams="},
{"abcdefghijkl", "YWJjZGVmZ2hpamts"},
{"abcdefghijklm", "YWJjZGVmZ2hpamtsbQ=="},
{"abcdefghijklmn", "YWJjZGVmZ2hpamtsbW4="},
{"abcdefghijklmno", "YWJjZGVmZ2hpamtsbW5v"},
{"abcdefghijklmnop", "YWJjZGVmZ2hpamtsbW5vcA=="},
{"abcdefghijklmnopq", "YWJjZGVmZ2hpamtsbW5vcHE="},
{"abcdefghijklmnopqr", "YWJjZGVmZ2hpamtsbW5vcHFy"},
{"abcdefghijklmnopqrs", "YWJjZGVmZ2hpamtsbW5vcHFycw=="},
{"abcdefghijklmnopqrst", "YWJjZGVmZ2hpamtsbW5vcHFyc3Q="},
{"abcdefghijklmnopqrstu", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1"},
{"abcdefghijklmnopqrstuv", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dg=="},
{"abcdefghijklmnopqrstuvw", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnc="},
{"abcdefghijklmnopqrstuvwx", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4"},
{"abcdefghijklmnopqrstuvwxy", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eQ=="},
{"abcdefghijklmnopqrstuvwxyz", "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXo="},
};
template <typename StringType>
void TestEscapeAndUnescape() {
for (const auto& tc : base64_tests) {
StringType encoded("this junk should be ignored");
absl::Base64Escape(tc.plaintext, &encoded);
EXPECT_EQ(encoded, tc.cyphertext);
EXPECT_EQ(absl::Base64Escape(tc.plaintext), tc.cyphertext);
StringType decoded("this junk should be ignored");
EXPECT_TRUE(absl::Base64Unescape(encoded, &decoded));
EXPECT_EQ(decoded, tc.plaintext);
StringType websafe_with_padding(tc.cyphertext);
for (unsigned int c = 0; c < websafe_with_padding.size(); ++c) {
if ('+' == websafe_with_padding[c]) websafe_with_padding[c] = '-';
if ('/' == websafe_with_padding[c]) websafe_with_padding[c] = '_';
}
StringType websafe(websafe_with_padding);
for (unsigned int c = 0; c < websafe.size(); ++c) {
if ('=' == websafe[c]) {
websafe.resize(c);
break;
}
}
encoded = "this junk should be ignored";
absl::WebSafeBase64Escape(tc.plaintext, &encoded);
EXPECT_EQ(encoded, websafe);
EXPECT_EQ(absl::WebSafeBase64Escape(tc.plaintext), websafe);
decoded = "this junk should be ignored";
EXPECT_TRUE(absl::WebSafeBase64Unescape(websafe, &decoded));
EXPECT_EQ(decoded, tc.plaintext);
}
for (const auto& tc : absl::strings_internal::base64_strings()) {
StringType buffer;
absl::WebSafeBase64Escape(tc.plaintext, &buffer);
EXPECT_EQ(tc.cyphertext, buffer);
EXPECT_EQ(absl::WebSafeBase64Escape(tc.plaintext), tc.cyphertext);
}
{
absl::string_view data_set[] = {"ab-/", absl::string_view("\0bcd", 4),
absl::string_view("abc.\0", 5)};
for (absl::string_view bad_data : data_set) {
StringType buf;
EXPECT_FALSE(absl::Base64Unescape(bad_data, &buf));
EXPECT_FALSE(absl::WebSafeBase64Unescape(bad_data, &buf));
EXPECT_TRUE(buf.empty());
}
}
}
TEST(Base64, EscapeAndUnescape) {
TestEscapeAndUnescape<std::string>();
}
TEST(Base64, Padding) {
std::initializer_list<absl::string_view> good_padding = {
"YQ",
"YQ==",
"YQ=.",
"YQ.=",
"YQ..",
};
for (absl::string_view b64 : good_padding) {
std::string decoded;
EXPECT_TRUE(absl::Base64Unescape(b64, &decoded));
EXPECT_EQ(decoded, "a");
std::string websafe_decoded;
EXPECT_TRUE(absl::WebSafeBase64Unescape(b64, &websafe_decoded));
EXPECT_EQ(websafe_decoded, "a");
}
std::initializer_list<absl::string_view> bad_padding = {
"YQ=",
"YQ.",
"YQ===",
"YQ==.",
"YQ=.=",
"YQ=..",
"YQ.==",
"YQ.=.",
"YQ..=",
"YQ...",
"YQ====",
"YQ....",
"YQ=====",
"YQ.....",
};
for (absl::string_view b64 : bad_padding) {
std::string decoded;
EXPECT_FALSE(absl::Base64Unescape(b64, &decoded));
std::string websafe_decoded;
EXPECT_FALSE(absl::WebSafeBase64Unescape(b64, &websafe_decoded));
}
}
TEST(Base64, DISABLED_HugeData) {
const size_t kSize = size_t(3) * 1000 * 1000 * 1000;
static_assert(kSize % 3 == 0, "kSize must be divisible by 3");
const std::string huge(kSize, 'x');
std::string escaped;
absl::Base64Escape(huge, &escaped);
std::string expected_encoding;
expected_encoding.reserve(kSize / 3 * 4);
for (size_t i = 0; i < kSize / 3; ++i) {
expected_encoding.append("eHh4");
}
EXPECT_EQ(expected_encoding, escaped);
std::string unescaped;
EXPECT_TRUE(absl::Base64Unescape(escaped, &unescaped));
EXPECT_EQ(huge, unescaped);
}
TEST(Escaping, HexStringToBytesBackToHex) {
std::string bytes, hex;
constexpr absl::string_view kTestHexLower = "1c2f0032f40123456789abcdef";
constexpr absl::string_view kTestHexUpper = "1C2F0032F40123456789ABCDEF";
constexpr absl::string_view kTestBytes = absl::string_view(
"\x1c\x2f\x00\x32\xf4\x01\x23\x45\x67\x89\xab\xcd\xef", 13);
EXPECT_TRUE(absl::HexStringToBytes(kTestHexLower, &bytes));
EXPECT_EQ(bytes, kTestBytes);
EXPECT_TRUE(absl::HexStringToBytes(kTestHexUpper, &bytes));
EXPECT_EQ(bytes, kTestBytes);
hex = absl::BytesToHexString(kTestBytes);
EXPECT_EQ(hex, kTestHexLower);
bytes = std::string(kTestHexUpper);
(void)absl::HexStringToBytes(bytes, &bytes);
EXPECT_FALSE(absl::HexStringToBytes("1c2f003", &bytes));
EXPECT_FALSE(absl::HexStringToBytes("1c2f00ft", &bytes));
bytes = "abc";
EXPECT_TRUE(absl::HexStringToBytes("", &bytes));
EXPECT_EQ("", bytes);
}
TEST(HexAndBack, HexStringToBytes_and_BytesToHexString) {
std::string hex_mixed = "0123456789abcdefABCDEF";
std::string bytes_expected = "\x01\x23\x45\x67\x89\xab\xcd\xef\xAB\xCD\xEF";
std::string hex_only_lower = "0123456789abcdefabcdef";
std::string bytes_result = absl::HexStringToBytes(hex_mixed);
EXPECT_EQ(bytes_expected, bytes_result);
std::string prefix_valid = hex_mixed + "?";
std::string prefix_valid_result = absl::HexStringToBytes(
absl::string_view(prefix_valid.data(), prefix_valid.size() - 1));
EXPECT_EQ(bytes_expected, prefix_valid_result);
std::string infix_valid = "?" + hex_mixed + "???";
std::string infix_valid_result = absl::HexStringToBytes(
absl::string_view(infix_valid.data() + 1, hex_mixed.size()));
EXPECT_EQ(bytes_expected, infix_valid_result);
std::string hex_result = absl::BytesToHexString(bytes_expected);
EXPECT_EQ(hex_only_lower, hex_result);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/escaping.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/escaping_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
639f12ec-94cf-4ade-98fe-9223116f5cae | cpp | google/cel-cpp | constant | common/constant.cc | common/constant_test.cc | #include "common/constant.h"
#include <cmath>
#include <cstdint>
#include <string>
#include "absl/base/no_destructor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "internal/strings.h"
namespace cel {
// Returns the process-wide empty BytesConstant. NoDestructor keeps it alive
// (and safe to reference) through program shutdown.
const BytesConstant& BytesConstant::default_instance() {
  static const absl::NoDestructor<BytesConstant> instance;
  return *instance;
}
// Returns the process-wide empty StringConstant. NoDestructor keeps it alive
// (and safe to reference) through program shutdown.
const StringConstant& StringConstant::default_instance() {
  static const absl::NoDestructor<StringConstant> instance;
  return *instance;
}
// Returns the process-wide default (unset) Constant. NoDestructor keeps it
// alive (and safe to reference) through program shutdown.
const Constant& Constant::default_instance() {
  static const absl::NoDestructor<Constant> instance;
  return *instance;
}
// Renders the CEL null literal.
std::string FormatNullConstant() { return "null"; }
// Renders a boolean constant using CEL keyword spelling ("true"/"false").
std::string FormatBoolConstant(bool value) {
  if (value) {
    return std::string("true");
  }
  return std::string("false");
}
// Renders a signed integer constant in decimal (no suffix).
std::string FormatIntConstant(int64_t value) { return std::to_string(value); }
// Renders an unsigned integer constant in decimal with CEL's "u" suffix.
std::string FormatUintConstant(uint64_t value) {
  return std::to_string(value) + "u";
}
// Renders a double constant. Finite integral values gain a trailing ".0"
// (unless StrCat already emitted a decimal point); NaN and infinities use
// the spellings "nan", "+infinity" and "-infinity".
std::string FormatDoubleConstant(double value) {
  if (std::isnan(value)) {
    return "nan";
  }
  if (std::isinf(value)) {
    return std::signbit(value) ? "-infinity" : "+infinity";
  }
  std::string text = absl::StrCat(value);
  // Append ".0" only when the value is integral and StrCat produced no '.'.
  if (std::floor(value) == value && !absl::StrContains(text, '.')) {
    absl::StrAppend(&text, ".0");
  }
  return text;
}
// Renders a bytes constant as a CEL bytes literal (quoted, with escapes).
std::string FormatBytesConstant(absl::string_view value) {
  return internal::FormatBytesLiteral(value);
}
// Renders a string constant as a CEL string literal (quoted, with escapes).
std::string FormatStringConstant(absl::string_view value) {
  return internal::FormatStringLiteral(value);
}
// Renders a duration constant as a call expression, e.g. duration("1h2m3s").
std::string FormatDurationConstant(absl::Duration value) {
  return absl::StrCat("duration(\"", absl::FormatDuration(value), "\")");
}
// Renders a timestamp constant as a call expression with an RFC 3339-style
// UTC argument, e.g. timestamp("2021-01-01T00:00:00Z").
std::string FormatTimestampConstant(absl::Time value) {
  return absl::StrCat(
      "timestamp(\"",
      absl::FormatTime("%Y-%m-%d%ET%H:%M:%E*SZ", value, absl::UTCTimeZone()),
      "\")");
}
} | #include "common/constant.h"
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include "absl/strings/has_absl_stringify.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::IsEmpty;
using ::testing::IsFalse;
using ::testing::IsTrue;
// Setting the null kind flips has_null_value() and updates kind().index().
TEST(Constant, NullValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_null_value(), IsFalse());
  const_expr.set_null_value();
  EXPECT_THAT(const_expr.has_null_value(), IsTrue());
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<std::nullptr_t>());
}
// bool accessor returns the default (false) before and after an explicit set.
TEST(Constant, BoolValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_bool_value(), IsFalse());
  EXPECT_EQ(const_expr.bool_value(), false);
  const_expr.set_bool_value(false);
  EXPECT_THAT(const_expr.has_bool_value(), IsTrue());
  EXPECT_EQ(const_expr.bool_value(), false);
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<bool>());
}
// int accessor: default 0, explicit set marks the kind as int64_t.
TEST(Constant, IntValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_int_value(), IsFalse());
  EXPECT_EQ(const_expr.int_value(), 0);
  const_expr.set_int_value(0);
  EXPECT_THAT(const_expr.has_int_value(), IsTrue());
  EXPECT_EQ(const_expr.int_value(), 0);
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<int64_t>());
}
// uint accessor: default 0, explicit set marks the kind as uint64_t.
TEST(Constant, UintValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_uint_value(), IsFalse());
  EXPECT_EQ(const_expr.uint_value(), 0);
  const_expr.set_uint_value(0);
  EXPECT_THAT(const_expr.has_uint_value(), IsTrue());
  EXPECT_EQ(const_expr.uint_value(), 0);
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<uint64_t>());
}
// double accessor: default 0, explicit set marks the kind as double.
TEST(Constant, DoubleValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_double_value(), IsFalse());
  EXPECT_EQ(const_expr.double_value(), 0);
  const_expr.set_double_value(0);
  EXPECT_THAT(const_expr.has_double_value(), IsTrue());
  EXPECT_EQ(const_expr.double_value(), 0);
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<double>());
}
// bytes accessor: empty by default, stores the assigned payload.
TEST(Constant, BytesValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_bytes_value(), IsFalse());
  EXPECT_THAT(const_expr.bytes_value(), IsEmpty());
  const_expr.set_bytes_value("foo");
  EXPECT_THAT(const_expr.has_bytes_value(), IsTrue());
  EXPECT_EQ(const_expr.bytes_value(), "foo");
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<BytesConstant>());
}
// string accessor: empty by default, stores the assigned payload.
TEST(Constant, StringValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_string_value(), IsFalse());
  EXPECT_THAT(const_expr.string_value(), IsEmpty());
  const_expr.set_string_value("foo");
  EXPECT_THAT(const_expr.has_string_value(), IsTrue());
  EXPECT_EQ(const_expr.string_value(), "foo");
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<StringConstant>());
}
// duration accessor: zero by default, explicit set marks kind as Duration.
TEST(Constant, DurationValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_duration_value(), IsFalse());
  EXPECT_EQ(const_expr.duration_value(), absl::ZeroDuration());
  const_expr.set_duration_value(absl::ZeroDuration());
  EXPECT_THAT(const_expr.has_duration_value(), IsTrue());
  EXPECT_EQ(const_expr.duration_value(), absl::ZeroDuration());
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<absl::Duration>());
}
// timestamp accessor: Unix epoch by default, explicit set marks kind as Time.
TEST(Constant, TimestampValue) {
  Constant const_expr;
  EXPECT_THAT(const_expr.has_timestamp_value(), IsFalse());
  EXPECT_EQ(const_expr.timestamp_value(), absl::UnixEpoch());
  const_expr.set_timestamp_value(absl::UnixEpoch());
  EXPECT_THAT(const_expr.has_timestamp_value(), IsTrue());
  EXPECT_EQ(const_expr.timestamp_value(), absl::UnixEpoch());
  EXPECT_EQ(const_expr.kind().index(), ConstantKindIndexOf<absl::Time>());
}
// Verifies operator== / operator!= across every constant kind: constants
// compare equal iff they hold the same kind and value, and both relations
// are symmetric. The original test repeated the same ten-assertion pattern
// eight times verbatim; it is factored into a labeled helper so a failure
// still reports which kind broke (via SCOPED_TRACE).
TEST(Constant, Equality) {
  // Two default (unspecified) constants compare equal.
  EXPECT_EQ(Constant{}, Constant{});

  Constant lhs_const_expr;
  Constant rhs_const_expr;

  // The null kind is checked separately because the helper below uses
  // set_null_value() as the "mismatched" state for the right-hand side.
  lhs_const_expr.set_null_value();
  rhs_const_expr.set_null_value();
  EXPECT_EQ(lhs_const_expr, rhs_const_expr);
  EXPECT_EQ(rhs_const_expr, lhs_const_expr);
  EXPECT_NE(lhs_const_expr, Constant{});
  EXPECT_NE(Constant{}, rhs_const_expr);

  // Applies `set_value` to lhs, checks inequality against a null rhs (and
  // against a default constant), then applies `set_value` to rhs and checks
  // equality — all symmetrically.
  auto expect_equality_for = [&](const char* label, auto&& set_value) {
    SCOPED_TRACE(label);
    set_value(lhs_const_expr);
    rhs_const_expr.set_null_value();
    EXPECT_NE(lhs_const_expr, rhs_const_expr);
    EXPECT_NE(rhs_const_expr, lhs_const_expr);
    EXPECT_NE(lhs_const_expr, Constant{});
    EXPECT_NE(Constant{}, rhs_const_expr);
    set_value(rhs_const_expr);
    EXPECT_EQ(lhs_const_expr, rhs_const_expr);
    EXPECT_EQ(rhs_const_expr, lhs_const_expr);
    EXPECT_NE(lhs_const_expr, Constant{});
    EXPECT_NE(Constant{}, rhs_const_expr);
  };

  expect_equality_for("bool", [](Constant& c) { c.set_bool_value(false); });
  expect_equality_for("int", [](Constant& c) { c.set_int_value(0); });
  expect_equality_for("uint", [](Constant& c) { c.set_uint_value(0); });
  expect_equality_for("double", [](Constant& c) { c.set_double_value(0); });
  expect_equality_for("bytes", [](Constant& c) { c.set_bytes_value("foo"); });
  expect_equality_for("string",
                      [](Constant& c) { c.set_string_value("foo"); });
  expect_equality_for(
      "duration", [](Constant& c) { c.set_duration_value(absl::ZeroDuration()); });
  expect_equality_for(
      "timestamp", [](Constant& c) { c.set_timestamp_value(absl::UnixEpoch()); });
}
// Helper: formats a Constant via the %v / AbslStringify integration.
std::string Stringify(const Constant& constant) {
  return absl::StrFormat("%v", constant);
}
// Constant must opt into Abseil stringification.
TEST(Constant, HasAbslStringify) {
  EXPECT_TRUE(absl::HasAbslStringify<Constant>::value);
}
// Exercises the stringified form of every constant kind, including the
// non-finite double spellings and the default "<unspecified>" placeholder.
TEST(Constant, AbslStringify) {
  Constant constant;
  EXPECT_EQ(Stringify(constant), "<unspecified>");
  constant.set_null_value();
  EXPECT_EQ(Stringify(constant), "null");
  constant.set_bool_value(true);
  EXPECT_EQ(Stringify(constant), "true");
  constant.set_int_value(1);
  EXPECT_EQ(Stringify(constant), "1");
  constant.set_uint_value(1);
  EXPECT_EQ(Stringify(constant), "1u");
  // Integral doubles gain a ".0" suffix so they still read as doubles.
  constant.set_double_value(1);
  EXPECT_EQ(Stringify(constant), "1.0");
  constant.set_double_value(1.1);
  EXPECT_EQ(Stringify(constant), "1.1");
  constant.set_double_value(NAN);
  EXPECT_EQ(Stringify(constant), "nan");
  constant.set_double_value(INFINITY);
  EXPECT_EQ(Stringify(constant), "+infinity");
  constant.set_double_value(-INFINITY);
  EXPECT_EQ(Stringify(constant), "-infinity");
  constant.set_bytes_value("foo");
  EXPECT_EQ(Stringify(constant), "b\"foo\"");
  constant.set_string_value("foo");
  EXPECT_EQ(Stringify(constant), "\"foo\"");
  constant.set_duration_value(absl::Seconds(1));
  EXPECT_EQ(Stringify(constant), "duration(\"1s\")");
  constant.set_timestamp_value(absl::UnixEpoch() + absl::Seconds(1));
  EXPECT_EQ(Stringify(constant), "timestamp(\"1970-01-01T00:00:01Z\")");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/constant.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/constant_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
a2bab1db-6420-4fbc-87b6-1fc3ea4883f7 | cpp | google/tensorstore | future_collecting_receiver | tensorstore/util/execution/future_collecting_receiver.h | tensorstore/util/execution/future_collecting_receiver_test.cc | #ifndef TENSORSTORE_UTIL_EXECUTION_FUTURE_COLLECTING_RECEIVER_H_
#define TENSORSTORE_UTIL_EXECUTION_FUTURE_COLLECTING_RECEIVER_H_
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/future.h"
namespace tensorstore {
// Flow receiver that appends each emitted value to `container` and
// fulfills `promise` with the container once the stream completes.
// Cancellation is wired through the promise's "no longer needed" signal.
template <typename Container>
struct FutureCollectingReceiver {
  Promise<Container> promise;
  Container container;
  FutureCallbackRegistration cancel_registration;
  // Receives one element of the flow; constructs it in place.
  template <typename... V>
  void set_value(V&&... v) {
    container.emplace_back(std::forward<V>(v)...);
  }
  // Propagates a stream error as the future's result.
  void set_error(absl::Status status) { promise.SetResult(std::move(status)); }
  // Stream finished successfully: hand the collected values to the future.
  void set_done() { promise.SetResult(std::move(container)); }
  // Registers `cancel` to run if the consumer abandons the future.
  template <typename Cancel>
  void set_starting(Cancel cancel) {
    cancel_registration = promise.ExecuteWhenNotNeeded(std::move(cancel));
  }
  void set_stopping() { cancel_registration.Unregister(); }
};
// Drains `sender` into a `Container` and returns a Future resolving to the
// collected values (or the first error emitted by the flow).
template <typename Container, typename Sender>
Future<Container> CollectFlowSenderIntoFuture(Sender sender) {
  auto [promise, future] = PromiseFuturePair<Container>::Make();
  execution::submit(std::move(sender),
                    FutureCollectingReceiver<Container>{std::move(promise)});
  // std::move is required: `future` is a structured-binding member, which
  // is not eligible for implicit move on return.
  return std::move(future);
}
}
#endif | #include "tensorstore/util/execution/future_collecting_receiver.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/util/execution/execution.h"
#include "tensorstore/util/execution/sender_testutil.h"
#include "tensorstore/util/execution/sender_util.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::CollectFlowSenderIntoFuture;
using ::tensorstore::MatchesStatus;
// A range-backed flow sender yields all its elements, in order, as the
// future's value.
TEST(CollectingSenderTest, Success) {
  std::vector<int> input{1, 2, 3, 4};
  EXPECT_THAT(CollectFlowSenderIntoFuture<std::vector<int>>(
                  tensorstore::RangeFlowSender<tensorstore::span<int>>{input})
                  .result(),
              ::testing::Optional(::testing::ElementsAreArray(input)));
}
// An error emitted by the flow becomes the future's (failed) result.
TEST(CollectingSenderTest, Error) {
  EXPECT_THAT(
      CollectFlowSenderIntoFuture<std::vector<int>>(
          tensorstore::FlowSingleSender<tensorstore::ErrorSender<absl::Status>>{
              absl::UnknownError("abc")})
          .result(),
      MatchesStatus(absl::StatusCode::kUnknown, "abc"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_collecting_receiver.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/execution/future_collecting_receiver_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
f229d361-0fa4-493a-b68e-5e828b20bda1 | cpp | tensorflow/tensorflow | array_grad | tensorflow/c/experimental/gradients/array_grad.cc | tensorflow/c/experimental/gradients/array_grad_test.cc | #include "tensorflow/c/experimental/gradients/array_grad.h"
#include "tensorflow/c/eager/abstract_context.h"
namespace tensorflow {
namespace gradients {
namespace {
// Gradient function for IdentityN: the gradient of each input is exactly
// the incoming gradient of the corresponding output, passed through
// unchanged (with a reference taken on each non-null handle).
class IdentityNGradientFunction : public GradientFunction {
 public:
  Status Compute(AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle* const> grad_outputs,
                 absl::Span<AbstractTensorHandle*> grad_inputs) override {
    // size_t avoids the signed/unsigned comparison the original `int i`
    // loop had against Span::size().
    for (size_t i = 0; i < grad_outputs.size(); ++i) {
      AbstractTensorHandle* grad_input = grad_outputs[i];
      // Incoming gradients may be null (no gradient flows for that
      // output); only Ref() real handles.
      if (grad_input) {
        grad_input->Ref();
      }
      grad_inputs[i] = grad_input;
    }
    return absl::OkStatus();
  }
  ~IdentityNGradientFunction() override = default;
};
}
// Factory registered with the gradient registry; ownership of the returned
// GradientFunction passes to the caller (registry contract).
GradientFunction* IdentityNRegisterer(const ForwardOperation& op) {
  return new IdentityNGradientFunction;
}
}
} | #include "tensorflow/c/experimental/gradients/array_grad.h"
#include "tensorflow/c/eager/c_api_test_util.h"
#include "tensorflow/c/eager/c_api_unified_experimental_internal.h"
#include "tensorflow/c/eager/unified_api_testutil.h"
#include "tensorflow/c/experimental/gradients/grad_test_helper.h"
#include "tensorflow/c/experimental/gradients/tape/tape_context.h"
#include "tensorflow/c/experimental/ops/array_ops.h"
#include "tensorflow/c/tf_status_helper.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace gradients {
namespace internal {
namespace {
using tensorflow::TF_StatusPtr;
// Forward model under test: runs IdentityN on two inputs but exposes only
// the SECOND output, so the gradient w.r.t. the first input is expected to
// be null in the test below.
Status IdentityNModel(AbstractContext* ctx,
                      absl::Span<AbstractTensorHandle* const> inputs,
                      absl::Span<AbstractTensorHandle*> outputs) {
  std::vector<AbstractTensorHandle*> temp_outputs(2);
  TF_RETURN_IF_ERROR(
      ops::IdentityN(ctx, inputs, absl::MakeSpan(temp_outputs), "IdentityN"));
  outputs[0] = temp_outputs[1];
  // Drop the unused first output's reference to avoid leaking the handle.
  temp_outputs[0]->Unref();
  return absl::OkStatus();
}
// Parameterized fixture: the tuple is (tracing implementation name,
// use-TFRT flag, wrap-in-function flag).
class CppGradients
    : public ::testing::TestWithParam<std::tuple<const char*, bool, bool>> {
 protected:
  void SetUp() override {
    TF_StatusPtr status(TF_NewStatus());
    TF_SetTracingImplementation(std::get<0>(GetParam()), status.get());
    status_ = StatusFromTF_Status(status.get());
    ASSERT_EQ(errors::OK, status_.code()) << status_.message();
    {
      AbstractContext* ctx_raw = nullptr;
      status_ =
          BuildImmediateExecutionContext(std::get<1>(GetParam()), &ctx_raw);
      ASSERT_EQ(errors::OK, status_.code()) << status_.message();
      immediate_execution_ctx_.reset(ctx_raw);
    }
    // Disable TF32 so float comparisons in CheckTensorValue are exact.
    enable_tensor_float_32_execution(false);
  }
  AbstractContextPtr immediate_execution_ctx_;
  GradientRegistry registry_;
  Status status_;
 public:
  bool UseMlir() const { return strcmp(std::get<0>(GetParam()), "mlir") == 0; }
  bool UseFunction() const { return std::get<2>(GetParam()); }
};
// Because IdentityNModel only exposes the second IdentityN output, the
// gradient w.r.t. the first input must be null and the gradient w.r.t.
// the second input must be 1.0.
TEST_P(CppGradients, TestIdentityNGrad) {
  AbstractTensorHandlePtr x1;
  {
    AbstractTensorHandle* x1_raw = nullptr;
    status_ = TestScalarTensorHandle<float, TF_FLOAT>(
        immediate_execution_ctx_.get(), 1.0f, &x1_raw);
    ASSERT_EQ(errors::OK, status_.code()) << status_.message();
    x1.reset(x1_raw);
  }
  AbstractTensorHandlePtr x2;
  {
    AbstractTensorHandle* x2_raw = nullptr;
    status_ = TestScalarTensorHandle<float, TF_FLOAT>(
        immediate_execution_ctx_.get(), 1.0f, &x2_raw);
    ASSERT_EQ(errors::OK, status_.code()) << status_.message();
    x2.reset(x2_raw);
  }
  status_ = registry_.Register("IdentityN", IdentityNRegisterer);
  ASSERT_EQ(errors::OK, status_.code()) << status_.message();
  auto IdentityNGradModel = BuildGradModel(IdentityNModel, registry_);
  std::vector<AbstractTensorHandle*> outputs(2);
  status_ =
      RunModel(IdentityNGradModel, immediate_execution_ctx_.get(),
               {x1.get(), x2.get()}, absl::MakeSpan(outputs), UseFunction());
  ASSERT_EQ(errors::OK, status_.code()) << status_.message();
  // No gradient should flow to the first input (its output was dropped).
  EXPECT_EQ(outputs[0], nullptr);
  ASSERT_NO_FATAL_FAILURE(CheckTensorValue(outputs[1], {1.0f}, {},
                                           0));
  outputs[1]->Unref();
}
// The PLATFORM_GOOGLE and open-source branches of the previous #ifdef were
// byte-identical, so the conditional is collapsed into a single
// instantiation: {graphdef, mlir} x {no TFRT} x {function, eager}.
INSTANTIATE_TEST_SUITE_P(
    UnifiedCAPI, CppGradients,
    ::testing::Combine(::testing::Values("graphdef", "mlir"),
                       ::testing::Values(false),
                       ::testing::Values(true, false)));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/array_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/c/experimental/gradients/array_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d8ed00a3-7513-4797-8a12-6dc6dfb3fefe | cpp | tensorflow/tensorflow | enable_tf2_utils | tensorflow/core/platform/enable_tf2_utils.cc | tensorflow/core/platform/enable_tf2_utils_test.cc | #include "tensorflow/core/platform/enable_tf2_utils.h"
#include <atomic>
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
// Tri-state so we can distinguish "never set" from an explicit false.
enum Enablement : uint8 { kFalse = 0, kTrue = 1, undefined = 2 };
// Process-wide flag; atomic because readers/writers may be on any thread.
static std::atomic<Enablement> tf2_enabled{undefined};
// Explicitly records whether TF2 execution is enabled, overriding any
// value derived from the TF2_BEHAVIOR environment variable.
void set_tf2_execution(bool enabled) {
  tf2_enabled = (enabled) ? Enablement::kTrue : Enablement::kFalse;
}
// Returns whether TF2 execution is enabled. On first call (if never set
// explicitly) the value is derived once from the TF2_BEHAVIOR env var;
// the function-local static ensures the env var is read only once.
bool tf2_execution_enabled() {
  if (tf2_enabled == Enablement::undefined) {
    static bool tf2_behavior_env_enabled = [] {
      string tf2_env;
      TF_CHECK_OK(ReadStringFromEnvVar("TF2_BEHAVIOR", "0", &tf2_env));
      return tf2_env != "0";
    }();
    tf2_enabled =
        (tf2_behavior_env_enabled) ? Enablement::kTrue : Enablement::kFalse;
  }
  // kTrue (1) converts to true; kFalse (0) to false. `undefined` cannot
  // reach here because the branch above always resolves it.
  return tf2_enabled;
}
} | #include "tensorflow/core/platform/enable_tf2_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
// The initial value mirrors the TF2_BEHAVIOR env var; explicit setters
// then override it in both directions.
TEST(TF2EnabledTest, enabled_behavior) {
  string tf2_env;
  TF_CHECK_OK(ReadStringFromEnvVar("TF2_BEHAVIOR", "0", &tf2_env));
  bool expected = (tf2_env != "0");
  EXPECT_EQ(tensorflow::tf2_execution_enabled(), expected);
  tensorflow::set_tf2_execution(true);
  EXPECT_TRUE(tensorflow::tf2_execution_enabled());
  tensorflow::set_tf2_execution(false);
  EXPECT_FALSE(tensorflow::tf2_execution_enabled());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/enable_tf2_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/enable_tf2_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a5da3cd0-9c6c-4362-832d-b44b5d5becba | cpp | google/arolla | unit | arolla/util/unit.cc | arolla/util/unit_test.cc | #include "arolla/util/unit.h"
#include "absl/strings/string_view.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
namespace arolla {
// All Unit values render identically as "unit".
ReprToken ReprTraits<Unit>::operator()(const Unit&) const {
  return ReprToken{"unit"};
}
// All Unit values hash identically: the fingerprint mixes in the fixed
// tag "unit" and ignores the (stateless) value.
void FingerprintHasherTraits<Unit>::operator()(FingerprintHasher* hasher,
                                               const Unit& value) const {
  hasher->Combine(absl::string_view("unit"));
}
} | #include "arolla/util/unit.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla {
namespace {
using ::arolla::testing::ReprTokenEq;
TEST(UnitTest, Repr) { EXPECT_THAT(GenReprToken(kUnit), ReprTokenEq("unit")); }
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/unit.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/unit_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
97fd9238-bf9e-4cb6-a0bf-0dc0fcda564c | cpp | tensorflow/tensorflow | xla_op_utils | third_party/xla/xla/tsl/profiler/convert/xla_op_utils.h | third_party/xla/xla/tsl/profiler/convert/xla_op_utils_test.cc | #ifndef XLA_TSL_PROFILER_CONVERT_XLA_OP_UTILS_H_
#define XLA_TSL_PROFILER_CONVERT_XLA_OP_UTILS_H_
#include <string>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace tsl {
namespace profiler {
// Returns true if the op category string denotes a fusion op, i.e. it
// ends with the " fusion" suffix.
inline bool IsFusion(absl::string_view category) {
  constexpr absl::string_view kFusionSuffix = " fusion";
  return category.size() >= kFusionSuffix.size() &&
         category.substr(category.size() - kFusionSuffix.size()) ==
             kFusionSuffix;
}

// Builds the "<module_name>(<program_id>)" display string for an HLO
// module.
inline std::string HloModuleNameWithProgramId(absl::string_view hlo_module_name,
                                              uint64_t program_id) {
  std::string result(hlo_module_name);
  result += '(';
  result += std::to_string(program_id);
  result += ')';
  return result;
}
}
}
#endif | #include "xla/tsl/profiler/convert/xla_op_utils.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
// Module name and program id are concatenated as "name(id)".
TEST(XlaOpUtilsTest, HloModuleNameWithProgramId) {
  EXPECT_EQ("module(123)", HloModuleNameWithProgramId("module", 123));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xla_op_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/convert/xla_op_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7ea580bf-fc22-4f02-84c7-1813a00b67ee | cpp | google/tensorstore | rational | tensorstore/internal/json_binding/rational.h | tensorstore/internal/json_binding/rational_test.cc | #ifndef TENSORSTORE_INTERNAL_JSON_BINDING_RATIONAL_H_
#define TENSORSTORE_INTERNAL_JSON_BINDING_RATIONAL_H_
#include <stddef.h>
#include <string>
#include <string_view>
#include <type_traits>
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/util/rational.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
namespace tensorstore {
namespace internal_json_binding {
namespace rational_binder {
// JSON binder for Rational<T>. Accepted input forms when loading:
//   - a two-element array [numerator, denominator],
//   - a string "a/b" or a plain integer string "a" (denominator 1),
//   - a bare JSON number (denominator 1).
// When saving, an integral rational is emitted as a plain number;
// otherwise the "a/b" string form is used.
struct RationalBinder {
  // Loading overload (selected by std::true_type).
  template <typename Options, typename T>
  absl::Status operator()(std::true_type is_loading, const Options& options,
                          Rational<T>* obj, ::nlohmann::json* j) const {
    if (j->is_array()) {
      // [numerator, denominator] — delegate element parsing/validation
      // (length check, element type) to the fixed-size array binder.
      T values[2];
      span<T, 2> values_span(values);
      TENSORSTORE_RETURN_IF_ERROR(
          FixedSizeArray()(is_loading, options, &values_span, j));
      *obj = Rational<T>(values[0], values[1]);
      return absl::OkStatus();
    } else if (auto* s = j->get_ptr<const std::string*>()) {
      std::string_view sv = *s;
      size_t slash_index = sv.find('/');
      T numerator;
      T denominator;
      if (slash_index == std::string_view::npos) {
        // No slash: a plain integer string with an implicit denominator 1.
        denominator = 1;
        if (!absl::SimpleAtoi(sv, &numerator)) {
          return internal_json::ExpectedError(
              *j, "number or rational number `a/b`");
        }
      } else {
        // "a/b": parse each side independently; either failing is an error.
        if (!absl::SimpleAtoi(sv.substr(0, slash_index), &numerator) ||
            !absl::SimpleAtoi(sv.substr(slash_index + 1), &denominator)) {
          return internal_json::ExpectedError(*j, "rational number `a/b`");
        }
      }
      *obj = Rational<T>(numerator, denominator);
      return absl::OkStatus();
    }
    // Fall back to a bare JSON number (denominator 1).
    T value;
    TENSORSTORE_RETURN_IF_ERROR(
        DefaultBinder<>(is_loading, options, &value, j));
    *obj = value;
    return absl::OkStatus();
  }
  // Saving overload (selected by std::false_type).
  template <typename Options, typename T>
  absl::Status operator()(std::false_type is_loading, const Options& options,
                          const Rational<T>* obj, ::nlohmann::json* j) const {
    if (obj->denominator() == static_cast<T>(1)) {
      // Integral value: emit as a plain number for readability.
      T num = obj->numerator();
      return DefaultBinder<>(is_loading, options, &num, j);
    }
    *j = absl::StrFormat("%d/%d", obj->numerator(), obj->denominator());
    return absl::OkStatus();
  }
};
}
// Makes RationalBinder the default JSON binder for all Rational<T>.
template <typename T>
constexpr inline auto DefaultBinder<Rational<T>> =
    rational_binder::RationalBinder{};
}
}
#endif | #include "tensorstore/internal/json_binding/rational.h"
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json_fwd.hpp>
#include "tensorstore/index.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/rational.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::Index;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Rational;
namespace {
// Round-trips and error cases for the Rational<Index> JSON binder:
// integral values round-trip as plain numbers, non-integral as "a/b"
// strings, and malformed strings/arrays produce InvalidArgument.
TEST(JsonBindingTest, Simple) {
  tensorstore::TestJsonBinderRoundTrip<Rational<Index>>({
      {{2, 3}, "2/3"},
      {2, 2},
      {1, 1},
      {0, 0},
  });
  // Zero denominators survive a JSON->value->JSON round trip.
  tensorstore::TestJsonBinderRoundTripJsonOnly<Rational<Index>>({
      "2/0",
      "3/0",
      "0/0",
  });
  tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<Rational<Index>>({
      {{2, 3}, "2/3"},
  });
  tensorstore::TestJsonBinderFromJson<Rational<Index>>({
      {"abc",
       MatchesStatus(
           absl::StatusCode::kInvalidArgument,
           "Expected number or rational number `a/b`, but received: \"abc\"")},
      {"12a",
       MatchesStatus(
           absl::StatusCode::kInvalidArgument,
           "Expected number or rational number `a/b`, but received: \"12a\"")},
      {"12/a",
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Expected rational number `a/b`, but received: \"12/a\"")},
      {{1},
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Array has length 1 but should have length 2")},
      {{1, "a"},
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Error parsing value at position 1: "
                     "Expected 64-bit signed integer, but received: \"a\"")},
      {{1, 2, 3},
       MatchesStatus(absl::StatusCode::kInvalidArgument,
                     "Array has length 3 but should have length 2")},
  });
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/rational.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/rational_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3ca819db-f581-4313-a4d9-73d5fb37ee7c | cpp | google/quiche | quic_send_control_stream | quiche/quic/core/http/quic_send_control_stream.cc | quiche/quic/core/http/quic_send_control_stream_test.cc | #include "quiche/quic/core/http/quic_send_control_stream.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
namespace {
}
// The send control stream is a static, write-only unidirectional stream;
// `settings` is captured by value and sent lazily (see
// MaybeSendSettingsFrame).
QuicSendControlStream::QuicSendControlStream(QuicStreamId id,
                                             QuicSpdySession* spdy_session,
                                             const SettingsFrame& settings)
    : QuicStream(id, spdy_session, true, WRITE_UNIDIRECTIONAL),
      settings_sent_(false),
      origin_frame_sent_(false),
      settings_(settings),
      spdy_session_(spdy_session) {}
// A write-only stream can never receive RESET_STREAM; hitting this is a
// program bug, not a peer error.
void QuicSendControlStream::OnStreamReset(const QuicRstStreamFrame& /*frame*/) {
  QUIC_BUG(quic_bug_10382_1)
      << "OnStreamReset() called for write unidirectional stream.";
}
// The control stream is critical: a peer STOP_SENDING closes the whole
// connection. Returns false to reject the STOP_SENDING.
bool QuicSendControlStream::OnStopSending(QuicResetStreamError /*code*/) {
  stream_delegate()->OnStreamError(
      QUIC_HTTP_CLOSED_CRITICAL_STREAM,
      "STOP_SENDING received for send control stream");
  return false;
}
// Sends (at most once) the stream-type byte, the SETTINGS frame, and a
// greasing frame on the control stream. Wire order matters: the stream
// type must precede any frame, and SETTINGS must be the first frame.
void QuicSendControlStream::MaybeSendSettingsFrame() {
  if (settings_sent_) {
    return;
  }
  QuicConnection::ScopedPacketFlusher flusher(session()->connection());
  // First byte on the stream identifies it as the control stream.
  char data[sizeof(kControlStream)];
  QuicDataWriter writer(ABSL_ARRAYSIZE(data), data);
  writer.WriteVarInt62(kControlStream);
  WriteOrBufferData(absl::string_view(writer.data(), writer.length()), false,
                    nullptr);
  SettingsFrame settings = settings_;
  // Add one reserved ("grease") setting. With randomness disabled (tests),
  // a fixed id/value pair is used for determinism; otherwise the id is
  // drawn from the reserved space of the form 0x1f * N + 0x21.
  if (!GetQuicFlag(quic_enable_http3_grease_randomness)) {
    settings.values[0x40] = 20;
  } else {
    uint32_t result;
    QuicRandom::GetInstance()->RandBytes(&result, sizeof(result));
    uint64_t setting_id = 0x1fULL * static_cast<uint64_t>(result) + 0x21ULL;
    QuicRandom::GetInstance()->RandBytes(&result, sizeof(result));
    settings.values[setting_id] = result;
  }
  std::string settings_frame = HttpEncoder::SerializeSettingsFrame(settings);
  QUIC_DVLOG(1) << "Control stream " << id() << " is writing settings frame "
                << settings;
  if (spdy_session_->debug_visitor()) {
    spdy_session_->debug_visitor()->OnSettingsFrameSent(settings);
  }
  WriteOrBufferData(settings_frame, false, nullptr);
  settings_sent_ = true;
  // Follow up with a greasing (reserved-type) frame.
  WriteOrBufferData(HttpEncoder::SerializeGreasingFrame(), false,
                    nullptr);
}
void QuicSendControlStream::MaybeSendOriginFrame(
std::vector<std::string> origins) {
if (origins.empty() || origin_frame_sent_) {
return;
}
OriginFrame frame;
frame.origins = std::move(origins);
QUIC_DVLOG(1) << "Control stream " << id() << " is writing origin frame "
<< frame;
WriteOrBufferData(HttpEncoder::SerializeOriginFrame(frame), false,
nullptr);
origin_frame_sent_ = true;
}
// Sends a PRIORITY_UPDATE frame for `stream_id`, ensuring SETTINGS goes
// out first (it must be the first frame on the control stream).
void QuicSendControlStream::WritePriorityUpdate(QuicStreamId stream_id,
                                                HttpStreamPriority priority) {
  QuicConnection::ScopedPacketFlusher flusher(session()->connection());
  MaybeSendSettingsFrame();
  const std::string priority_field_value =
      SerializePriorityFieldValue(priority);
  PriorityUpdateFrame priority_update_frame{stream_id, priority_field_value};
  if (spdy_session_->debug_visitor()) {
    spdy_session_->debug_visitor()->OnPriorityUpdateFrameSent(
        priority_update_frame);
  }
  std::string frame =
      HttpEncoder::SerializePriorityUpdateFrame(priority_update_frame);
  QUIC_DVLOG(1) << "Control Stream " << id() << " is writing "
                << priority_update_frame;
  WriteOrBufferData(frame, false, nullptr);
}
// Sends a GOAWAY frame carrying `id`, ensuring SETTINGS goes out first.
void QuicSendControlStream::SendGoAway(QuicStreamId id) {
  QuicConnection::ScopedPacketFlusher flusher(session()->connection());
  MaybeSendSettingsFrame();
  GoAwayFrame frame;
  frame.id = id;
  if (spdy_session_->debug_visitor()) {
    spdy_session_->debug_visitor()->OnGoAwayFrameSent(id);
  }
  WriteOrBufferData(HttpEncoder::SerializeGoAwayFrame(frame), false, nullptr);
}
} | #include "quiche/quic/core/http/quic_send_control_stream.h"
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/null_encrypter.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic {
namespace test {
namespace {
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Invoke;
using ::testing::StrictMock;
// One (QUIC version, endpoint perspective) combination under test.
struct TestParams {
  TestParams(const ParsedQuicVersion& version, Perspective perspective)
      : version(version), perspective(perspective) {
    QUIC_LOG(INFO) << "TestParams: " << *this;
  }
  TestParams(const TestParams& other)
      : version(other.version), perspective(other.perspective) {}
  friend std::ostream& operator<<(std::ostream& os, const TestParams& tp) {
    os << "{ version: " << ParsedQuicVersionToString(tp.version)
       << ", perspective: "
       << (tp.perspective == Perspective::IS_CLIENT ? "client" : "server")
       << "}";
    return os;
  }
  ParsedQuicVersion version;
  Perspective perspective;
};
// Produces the (identifier-safe) test-name suffix used by gtest's
// PrintToStringParamName: "<version>_<client|server>".
std::string PrintToString(const TestParams& tp) {
  return absl::StrCat(
      ParsedQuicVersionToString(tp.version), "_",
      (tp.perspective == Perspective::IS_CLIENT ? "client" : "server"));
}
std::vector<TestParams> GetTestParams() {
std::vector<TestParams> params;
ParsedQuicVersionVector all_supported_versions = AllSupportedVersions();
for (const auto& version : AllSupportedVersions()) {
if (!VersionUsesHttp3(version.transport_version)) {
continue;
}
for (Perspective p : {Perspective::IS_SERVER, Perspective::IS_CLIENT}) {
params.emplace_back(version, p);
}
}
return params;
}
// Test fixture owning a mock connection/session pair; Initialize() wires up
// encryption, config, and a handle to the session's send control stream.
class QuicSendControlStreamTest : public QuicTestWithParam<TestParams> {
 public:
  QuicSendControlStreamTest()
      : connection_(new StrictMock<MockQuicConnection>(
            &helper_, &alarm_factory_, perspective(),
            SupportedVersions(GetParam().version))),
        session_(connection_) {
    // By default, writes are consumed by the mock session (no-op transport).
    ON_CALL(session_, WritevData(_, _, _, _, _, _))
        .WillByDefault(Invoke(&session_, &MockQuicSpdySession::ConsumeData));
  }
  void Initialize() {
    EXPECT_CALL(session_, OnCongestionWindowChange(_)).Times(AnyNumber());
    session_.Initialize();
    connection_->SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<NullEncrypter>(connection_->perspective()));
    send_control_stream_ = QuicSpdySessionPeer::GetSendControlStream(&session_);
    // Simulate negotiated flow-control windows and a small stream limit so
    // the control stream can actually send.
    QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
        session_.config(), kMinimumFlowControlSendWindow);
    QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(
        session_.config(), kMinimumFlowControlSendWindow);
    QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(session_.config(), 3);
    session_.OnConfigNegotiated();
  }
  Perspective perspective() const { return GetParam().perspective; }
  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  StrictMock<MockQuicConnection>* connection_;  // owned by session_
  StrictMock<MockQuicSpdySession> session_;
  QuicSendControlStream* send_control_stream_;  // owned by session_
};
// Instantiate every test for all HTTP/3 versions x {client, server}.
INSTANTIATE_TEST_SUITE_P(Tests, QuicSendControlStreamTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// Verifies the exact SETTINGS wire bytes the control stream emits, including
// the stream-type byte and the trailing grease frame (randomness disabled so
// the bytes are deterministic). The expected payload differs per perspective
// and HTTP Datagram support, hence the cascade of candidate encodings below.
TEST_P(QuicSendControlStreamTest, WriteSettings) {
  SetQuicFlag(quic_enable_http3_grease_randomness, false);
  session_.set_qpack_maximum_dynamic_table_capacity(255);
  session_.set_qpack_maximum_blocked_streams(16);
  session_.set_max_inbound_header_list_size(1024);
  Initialize();
  testing::InSequence s;
  std::string expected_write_data;
  // Base encoding: no datagram-related settings.
  // NOTE(review): hex groups appear to be stream type, frame type/length,
  // then setting id/value pairs — confirm against the HTTP/3 wire format.
  ASSERT_TRUE(
      absl::HexStringToBytes("00"
                             "04"
                             "0b"
                             "01"
                             "40ff"
                             "06"
                             "4400"
                             "07"
                             "10"
                             "4040"
                             "14"
                             "4040"
                             "01"
                             "61",
                             &expected_write_data));
  if (perspective() == Perspective::IS_CLIENT &&
      QuicSpdySessionPeer::LocalHttpDatagramSupport(&session_) !=
          HttpDatagramSupport::kNone) {
    ASSERT_TRUE(
        absl::HexStringToBytes("00"
                               "04"
                               "0d"
                               "01"
                               "40ff"
                               "06"
                               "4400"
                               "07"
                               "10"
                               "33"
                               "01"
                               "4040"
                               "14"
                               "4040"
                               "01"
                               "61",
                               &expected_write_data));
  }
  if (perspective() == Perspective::IS_SERVER &&
      QuicSpdySessionPeer::LocalHttpDatagramSupport(&session_) ==
          HttpDatagramSupport::kNone) {
    ASSERT_TRUE(
        absl::HexStringToBytes("00"
                               "04"
                               "0d"
                               "01"
                               "40ff"
                               "06"
                               "4400"
                               "07"
                               "10"
                               "08"
                               "01"
                               "4040"
                               "14"
                               "4040"
                               "01"
                               "61",
                               &expected_write_data));
  }
  if (perspective() == Perspective::IS_SERVER &&
      QuicSpdySessionPeer::LocalHttpDatagramSupport(&session_) !=
          HttpDatagramSupport::kNone) {
    ASSERT_TRUE(
        absl::HexStringToBytes("00"
                               "04"
                               "0f"
                               "01"
                               "40ff"
                               "06"
                               "4400"
                               "07"
                               "10"
                               "08"
                               "01"
                               "33"
                               "01"
                               "4040"
                               "14"
                               "4040"
                               "01"
                               "61",
                               &expected_write_data));
  }
  char buffer[1000] = {};
  QuicDataWriter writer(sizeof(buffer), buffer);
  ASSERT_GE(sizeof(buffer), expected_write_data.size());
  // Capture everything the stream writes into `writer` instead of sending it.
  auto save_write_data =
      [&writer, this](QuicStreamId , size_t write_length,
                      QuicStreamOffset offset, StreamSendingState ,
                      TransmissionType ,
                      std::optional<EncryptionLevel> ) {
        send_control_stream_->WriteStreamData(offset, write_length, &writer);
        return QuicConsumedData( write_length,
                                 false);
      };
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), _, _, _, _, _))
      .WillRepeatedly(Invoke(save_write_data));
  send_control_stream_->MaybeSendSettingsFrame();
  quiche::test::CompareCharArraysWithHexError(
      "settings", writer.data(), writer.length(), expected_write_data.data(),
      expected_write_data.length());
}
// A second MaybeSendSettingsFrame() must be a no-op: the StrictMock session
// only expects the writes produced by the first call.
TEST_P(QuicSendControlStreamTest, WriteSettingsOnlyOnce) {
  Initialize();
  testing::InSequence s;
  // First write is the 1-byte stream type, then SETTINGS plus grease.
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), 1, _, _, _, _));
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), _, _, _, _, _))
      .Times(2);
  send_control_stream_->MaybeSendSettingsFrame();
  send_control_stream_->MaybeSendSettingsFrame();
}
// The ORIGIN frame must be sent at most once; the second call is a no-op.
TEST_P(QuicSendControlStreamTest, SendOriginFrameOnce) {
  Initialize();
  std::vector<std::string> origins = {"a", "b", "c"};
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), _, _, _, _, _))
      .Times(1);
  send_control_stream_->MaybeSendOriginFrame(origins);
  send_control_stream_->MaybeSendOriginFrame(origins);
}
// Writing a PRIORITY_UPDATE before SETTINGS have gone out must first flush
// the stream type + SETTINGS (+ grease), then the priority frame (4 writes);
// a subsequent priority update is a single write.
TEST_P(QuicSendControlStreamTest, WritePriorityBeforeSettings) {
  Initialize();
  testing::InSequence s;
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), _, _, _, _, _))
      .Times(4);
  send_control_stream_->WritePriorityUpdate(
      0,
      HttpStreamPriority{ 3, false});
  EXPECT_TRUE(testing::Mock::VerifyAndClearExpectations(&session_));
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), _, _, _, _, _));
  send_control_stream_->WritePriorityUpdate(
      0,
      HttpStreamPriority{ 3, false});
}
// A peer attempting to STOP_SENDING the critical control stream must close
// the whole connection.
TEST_P(QuicSendControlStreamTest, CloseControlStream) {
  Initialize();
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM, _, _));
  send_control_stream_->OnStopSending(
      QuicResetStreamError::FromInternal(QUIC_STREAM_CANCELLED));
}
// Incoming data on the write-only control stream is a protocol violation and
// must close the connection.
TEST_P(QuicSendControlStreamTest, ReceiveDataOnSendControlStream) {
  Initialize();
  QuicStreamFrame frame(send_control_stream_->id(), false, 0, "test");
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_DATA_RECEIVED_ON_WRITE_UNIDIRECTIONAL_STREAM, _, _));
  send_control_stream_->OnStreamFrame(frame);
}
// SendGoAway() implicitly flushes SETTINGS first; the debug visitor must see
// OnSettingsFrameSent before OnGoAwayFrameSent.
TEST_P(QuicSendControlStreamTest, SendGoAway) {
  Initialize();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_.set_debug_visitor(&debug_visitor);
  QuicStreamId stream_id = 4;
  EXPECT_CALL(session_, WritevData(send_control_stream_->id(), _, _, _, _, _))
      .Times(AnyNumber());
  EXPECT_CALL(debug_visitor, OnSettingsFrameSent(_));
  EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(stream_id));
  send_control_stream_->SendGoAway(stream_id);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_send_control_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_send_control_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
45c6fd12-caf7-45a0-a1d7-30b319f493de | cpp | abseil/abseil-cpp | cordz_functions | absl/strings/internal/cordz_functions.cc | absl/strings/internal/cordz_functions_test.cc | #include "absl/strings/internal/cordz_functions.h"
#include <atomic>
#include <cmath>
#include <limits>
#include <random>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/profiling/internal/exponential_biased.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
// Mean number of Cords between two sampled Cords; values <= 0 disable
// sampling. Accessed with acquire/release via the accessors below.
std::atomic<int> g_cordz_mean_interval(50000);
}
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
// Sentinel meaning "this thread's sampling state is not yet initialized".
static constexpr int64_t kInitCordzNextSample = -1;
ABSL_CONST_INIT thread_local SamplingState cordz_next_sample = {
    kInitCordzNextSample, 1};
// Countdown used while sampling is disabled: stay on the fast path for 2^16
// calls before re-reading the configured mean interval.
constexpr int64_t kIntervalIfDisabled = 1 << 16;
// Slow path of cordz_should_profile(): refreshes the per-thread sampling
// state from the configured mean interval. Returns the sample stride (> 0)
// when the current Cord should be sampled, 0 otherwise.
ABSL_ATTRIBUTE_NOINLINE int64_t
cordz_should_profile_slow(SamplingState& state) {
  thread_local absl::profiling_internal::ExponentialBiased
      exponential_biased_generator;
  int32_t mean_interval = get_cordz_mean_interval();
  // Sampling disabled: park the countdown far in the future.
  if (mean_interval <= 0) {
    state = {kIntervalIfDisabled, kIntervalIfDisabled};
    return 0;
  }
  // Interval of 1 means "sample every Cord".
  if (mean_interval == 1) {
    state = {1, 1};
    return 1;
  }
  // NOTE(review): this reads the thread_local `cordz_next_sample` while
  // mutating `state`; presumably the caller always passes `cordz_next_sample`
  // as `state`, making the two aliases — confirm at the call site.
  if (cordz_next_sample.next_sample <= 0) {
    const bool initialized =
        cordz_next_sample.next_sample != kInitCordzNextSample;
    auto old_stride = state.sample_stride;
    auto stride = exponential_biased_generator.GetStride(mean_interval);
    state = {stride, stride};
    // On first use, defer to a fresh cordz_should_profile() call so the very
    // first Cord created on a thread is not deterministically sampled.
    bool should_sample = initialized || cordz_should_profile() > 0;
    return should_sample ? old_stride : 0;
  }
  --state.next_sample;
  return 0;
}
// Test-only hook: force both the countdown and the recorded stride to
// `next_sample` so tests can deterministically trigger (or skip) sampling.
void cordz_set_next_sample_for_testing(int64_t next_sample) {
  cordz_next_sample.next_sample = next_sample;
  cordz_next_sample.sample_stride = next_sample;
}
#endif
// Returns the configured mean sampling interval (acquire load).
int32_t get_cordz_mean_interval() {
  return g_cordz_mean_interval.load(std::memory_order_acquire);
}
// Sets the mean sampling interval; <= 0 disables sampling (release store).
void set_cordz_mean_interval(int32_t mean_interval) {
  g_cordz_mean_interval.store(mean_interval, std::memory_order_release);
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cordz_functions.h"
#include <thread>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::Eq;
using ::testing::Ge;
using ::testing::Le;
// The mean-interval setter and getter round-trip; restores the original
// process-wide value afterwards.
TEST(CordzFunctionsTest, SampleRate) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  int32_t expected_sample_rate = 123;
  set_cordz_mean_interval(expected_sample_rate);
  EXPECT_THAT(get_cordz_mean_interval(), Eq(expected_sample_rate));
  set_cordz_mean_interval(orig_sample_rate);
}
#ifdef ABSL_INTERNAL_CORDZ_ENABLED
// With the interval set to 0 (disabled), no sample is taken and the
// countdown is parked at the 2^16 "disabled" stride.
TEST(CordzFunctionsTest, ShouldProfileDisable) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(0);
  cordz_set_next_sample_for_testing(0);
  EXPECT_EQ(cordz_should_profile(), 0);
  EXPECT_THAT(cordz_next_sample.next_sample, Eq(1 << 16));
  set_cordz_mean_interval(orig_sample_rate);
}
// With the interval set to 1, every Cord is sampled.
TEST(CordzFunctionsTest, ShouldProfileAlways) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(1);
  cordz_set_next_sample_for_testing(1);
  EXPECT_GT(cordz_should_profile(), 0);
  EXPECT_THAT(cordz_next_sample.next_sample, Le(1));
  set_cordz_mean_interval(orig_sample_rate);
}
// The very first profiling decision on a fresh thread must not be
// deterministically "sample": spin up new threads until one declines.
TEST(CordzFunctionsTest, DoesNotAlwaysSampleFirstCord) {
  set_cordz_mean_interval(10000);
  int tries = 0;
  bool sampled = false;
  do {
    ++tries;
    ASSERT_THAT(tries, Le(1000));
    std::thread thread([&sampled] { sampled = cordz_should_profile() > 0; });
    thread.join();
  } while (sampled);
}
// Statistical check: over many forced slow-path calls the generated strides
// should average close to the configured mean interval (bounds are wide
// enough to make flakes vanishingly unlikely).
TEST(CordzFunctionsTest, ShouldProfileRate) {
  static constexpr int kDesiredMeanInterval = 1000;
  static constexpr int kSamples = 10000;
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(kDesiredMeanInterval);
  int64_t sum_of_intervals = 0;
  for (int i = 0; i < kSamples; i++) {
    // Force the slow path on every iteration.
    cordz_set_next_sample_for_testing(0);
    cordz_should_profile();
    sum_of_intervals += cordz_next_sample.next_sample;
  }
  EXPECT_THAT(sum_of_intervals, Ge(9396115));
  EXPECT_THAT(sum_of_intervals, Le(10618100));
  set_cordz_mean_interval(orig_sample_rate);
}
#else
// When cordz is compiled out, cordz_should_profile() is always false.
TEST(CordzFunctionsTest, ShouldProfileDisabled) {
  int32_t orig_sample_rate = get_cordz_mean_interval();
  set_cordz_mean_interval(1);
  cordz_set_next_sample_for_testing(0);
  EXPECT_FALSE(cordz_should_profile());
  set_cordz_mean_interval(orig_sample_rate);
}
#endif
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_functions.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cordz_functions_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
defc3c9f-2423-4e72-b81a-20362e9dce10 | cpp | tensorflow/tensorflow | stablehlo_scatter | tensorflow/lite/kernels/stablehlo_scatter.cc | tensorflow/lite/kernels/stablehlo_scatter_test.cc | #include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <utility>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/tensor_slice_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace stablehlo_scatter {
namespace {
constexpr int kInputsTensor = 0;
constexpr int kScatterIndicesTensor = 1;
constexpr int kUpdatesTensor = 2;
constexpr int kOutputTensor = 0;
// Reduction applied when an update lands on an existing element. Derived
// from the single op found inside the scatter's update-computation subgraph.
enum class ComputationType {
  kUpdate,
  kAdd,
  kMultiply,
  kMaximum,
  kMinimum,
  kOther
};
// Per-node state stored behind TfLiteNode::user_data.
struct OpData {
  ComputationType computation_type;
};
using DimVector = std::vector<int64_t>;
// Returns the dimensions of `updates` that are *not* listed in
// `update_window_dims` — the "update scatter" dimensions in StableHLO terms.
static DimVector GetUpdateScatterDims(int64_t updates_rank,
                                      const int64_t* update_window_dims,
                                      int num_update_window_dims) {
  DimVector scatter_dims;
  for (int64_t d = 0; d < updates_rank; ++d) {
    const bool is_window_dim =
        ArrayContains(update_window_dims, num_update_window_dims, d);
    if (!is_window_dim) {
      scatter_dims.push_back(d);
    }
  }
  return scatter_dims;
}
// Projects `index` onto the dimensions listed in `dims`, preserving the
// order of `dims` (e.g. index=[a,b,c], dims=[2,0] -> [c,a]).
template <typename IndexType>
static Index<IndexType> GatherIndex(const Index<IndexType>& index,
                                    const DimVector& dims) {
  Index<IndexType> gathered;
  for (size_t i = 0; i < dims.size(); ++i) {
    gathered.push_back(index[dims[i]]);
  }
  return gathered;
}
// Returns true iff `index` has the same rank as `shape` and every coordinate
// addresses a valid position. Out-of-bounds updates are skipped by the
// caller, matching StableHLO scatter semantics.
template <typename IndexType>
static bool IsInBounds(Index<IndexType> index, RuntimeShape shape) {
  if (index.size() != shape.DimensionsCount()) {
    return false;
  }
  for (int dim = 0; dim < shape.DimensionsCount(); ++dim) {
    // Reject negative coordinates too: scatter indices are signed, and a
    // negative coordinate would previously pass this check and produce a
    // negative flat offset (out-of-bounds write) in the caller.
    if (index[dim] < 0 || index[dim] >= shape.Dims(dim)) {
      return false;
    }
  }
  return true;
}
// Maps a TFLite builtin op code (the body of the update-computation
// subgraph) to its scatter reduction; kOther for unsupported ops.
static ComputationType OpCodeToComputationType(int op_code) {
  switch (op_code) {
    case kTfLiteBuiltinStablehloAdd:
      return ComputationType::kAdd;
    case kTfLiteBuiltinStablehloMultiply:
      return ComputationType::kMultiply;
    case kTfLiteBuiltinStablehloMaximum:
      return ComputationType::kMaximum;
    case kTfLiteBuiltinStablehloMinimum:
      return ComputationType::kMinimum;
    default:
      return ComputationType::kOther;
  }
}
// Inspects the scatter's update-computation subgraph and classifies it: an
// empty subgraph means plain "update" (overwrite); exactly one supported
// kernel maps via OpCodeToComputationType; anything else is an error.
static TfLiteStatus GetComputationType(const Subgraph* computation_subgraph,
                                       ComputationType* computation_type,
                                       TfLiteContext* context) {
  if (computation_subgraph->execution_plan().empty()) {
    *computation_type = ComputationType::kUpdate;
    return kTfLiteOk;
  }
  if (computation_subgraph->execution_plan().size() > 1) {
    TF_LITE_KERNEL_LOG(context,
                       "Only one kernel allowed withing the stablehlo region. "
                       "(%zu) kernels found.\n",
                       computation_subgraph->execution_plan().size());
    return kTfLiteError;
  }
  const TfLiteRegistration* kernel =
      &(computation_subgraph
            ->node_and_registration(computation_subgraph->execution_plan()[0])
            ->second);
  *computation_type = OpCodeToComputationType(kernel->builtin_code);
  if (*computation_type == ComputationType::kOther) {
    TF_LITE_KERNEL_LOG(
        context,
        "Only update, Add, Multiply, Maximum and Minimum operations are "
        "currently supported for stablehlo.scatter.");
    return kTfLiteError;
  }
  return kTfLiteOk;
}
// Combines `input_value` and `update_value` according to `computation_type`
// and stores the result into `tensor` at the multi-dimensional `index`.
// Returns kTfLiteError for unrecognized computation types.
template <typename DataType, typename IndexType>
static TfLiteStatus ApplyComputation(TfLiteTensor* tensor,
                                     Index<IndexType> index,
                                     DataType input_value,
                                     DataType update_value,
                                     ComputationType computation_type,
                                     TfLiteContext* context) {
  DataType merged;
  switch (computation_type) {
    case ComputationType::kUpdate:
      merged = update_value;
      break;
    case ComputationType::kAdd:
      merged = input_value + update_value;
      break;
    case ComputationType::kMultiply:
      merged = input_value * update_value;
      break;
    case ComputationType::kMaximum:
      merged = std::max(input_value, update_value);
      break;
    case ComputationType::kMinimum:
      merged = std::min(input_value, update_value);
      break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Provided kernel in the stablehlo scatter region is not "
                         "yet supported.");
      return kTfLiteError;
  }
  DataType* tensor_data = GetTensorData<DataType>(tensor);
  tensor_data[TensorIndexToFlat(index.data(), index.size(),
                                GetTensorShape(tensor))] = merged;
  return kTfLiteOk;
}
// Core scatter loop, instantiated per (index type, data type) pair. Copies
// `input` into `output`, then for every index into `updates` computes the
// destination coordinate in the operand and merges the update value in via
// ApplyComputation. Out-of-bounds destinations are skipped.
template <typename IndexType, typename DataType>
TfLiteStatus EvalWithTypes(TfLiteContext* context, TfLiteNode* node) {
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteStablehloScatterParams* data =
      reinterpret_cast<TfLiteStablehloScatterParams*>(node->builtin_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputsTensor, &input));
  const TfLiteTensor* scatter_indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kScatterIndicesTensor,
                                          &scatter_indices));
  const TfLiteTensor* updates;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kUpdatesTensor, &updates));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Scatter semantics: output starts as a copy of the operand.
  memcpy(output->data.data, input->data.data, input->bytes);
  RuntimeShape input_shape = GetTensorShape(input);
  int input_rank = input_shape.DimensionsCount();
  const DataType* output_data = GetTensorData<DataType>(output);
  RuntimeShape scatter_indices_shape = GetTensorShape(scatter_indices);
  RuntimeShape updates_shape = GetTensorShape(updates);
  int64_t updates_rank = updates_shape.DimensionsCount();
  // Multi-dimensional cursor into `updates`, advanced by NextIndex below.
  Index<IndexType> update_index = Index<IndexType>(updates_rank, 0);
  const DataType* updates_data = GetTensorData<DataType>(updates);
  DimVector update_scatter_dims = GetUpdateScatterDims(
      updates_rank, data->update_window_dims, data->num_update_window_dims);
  std::vector<int64_t> update_window_dims_vec(
      data->update_window_dims,
      data->update_window_dims + data->num_update_window_dims);
  do {
    // Split the update index into its scatter part (selects a row of
    // scatter_indices) and its window part (offset inside the window).
    Index<IndexType> update_scatter_index =
        GatherIndex(update_index, update_scatter_dims);
    Index<IndexType> start_index =
        ReadIndexVector(scatter_indices, scatter_indices_shape,
                        update_scatter_index, data->index_vector_dim);
    Index<IndexType> full_start_index;
    TF_LITE_ENSURE_STATUS(ScatterIndex(
        start_index, data->scatter_dims_to_operand_dims,
        data->num_scatter_dims_to_operand_dims, input_rank, &full_start_index));
    Index<IndexType> window_index =
        GatherIndex(update_index, update_window_dims_vec);
    Index<IndexType> full_window_index;
    TF_LITE_ENSURE_STATUS(ExpandDims(window_index, data->inserted_window_dims,
                                     data->num_inserted_window_dims,
                                     &full_window_index));
    // Destination coordinate in the operand = start + window offset.
    Index<IndexType> result_index =
        AddIndices(full_start_index, full_window_index);
    // StableHLO scatter ignores updates that fall outside the operand.
    if (!IsInBounds(result_index, input_shape)) {
      continue;
    }
    DataType input_value = output_data[TensorIndexToFlat(
        result_index.data(), input_rank, input_shape)];
    DataType update_value = updates_data[TensorIndexToFlat(
        update_index.data(), updates_rank, updates_shape)];
    TF_LITE_ENSURE_STATUS(ApplyComputation(output, result_index, input_value,
                                           update_value,
                                           op_data->computation_type, context));
  } while (
      NextIndex(updates_rank, updates_shape.DimsData(), update_index.data()));
  return TfLiteStatus::kTfLiteOk;
}
// Dispatches EvalWithTypes on the operand's element type (index type already
// fixed by the caller). Unsupported element types report an error.
template <typename IndexType>
TfLiteStatus EvalWithIndexType(TfLiteContext* context, TfLiteNode* node,
                               TfLiteType index_type, TfLiteType data_type) {
  switch (data_type) {
    case kTfLiteFloat16:
      return EvalWithTypes<IndexType, Eigen::half>(context, node);
    case kTfLiteFloat32:
      return EvalWithTypes<IndexType, float>(context, node);
    case kTfLiteFloat64:
      return EvalWithTypes<IndexType, double>(context, node);
    case kTfLiteInt8:
      return EvalWithTypes<IndexType, int8_t>(context, node);
    case kTfLiteInt16:
      return EvalWithTypes<IndexType, int16_t>(context, node);
    case kTfLiteInt32:
      return EvalWithTypes<IndexType, int32_t>(context, node);
    case kTfLiteInt64:
      return EvalWithTypes<IndexType, int64_t>(context, node);
    case kTfLiteUInt8:
      return EvalWithTypes<IndexType, uint8_t>(context, node);
    case kTfLiteUInt16:
      return EvalWithTypes<IndexType, uint16_t>(context, node);
    case kTfLiteUInt32:
      return EvalWithTypes<IndexType, uint32_t>(context, node);
    case kTfLiteUInt64:
      return EvalWithTypes<IndexType, uint64_t>(context, node);
    default:
      TF_LITE_KERNEL_LOG(
          context, "(Index Type: %s, Data Type: %s) currently not supported.\n",
          TfLiteTypeGetName(index_type), TfLiteTypeGetName(data_type));
      return TfLiteStatus::kTfLiteError;
  }
}
// Allocates the per-node state. Prepare() and EvalWithTypes() reinterpret
// node->user_data as OpData*, so allocate an OpData here. (Previously a bare
// ComputationType was allocated and later accessed through OpData*, which
// only worked because OpData's sole member is a ComputationType.)
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return new OpData{ComputationType::kOther};
}
// Releases the state allocated by Init, matching its actual type.
void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}
// Validates I/O arity, sizes the output like the operand, locates the
// update-computation subgraph, and caches its classification in OpData.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 3);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputsTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Scatter output always has the operand's shape.
  TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims);
  TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size));
  const TfLiteStablehloScatterParams* data =
      reinterpret_cast<TfLiteStablehloScatterParams*>(node->builtin_data);
  Subgraph* this_subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  auto* subgraphs = this_subgraph->GetSubgraphs();
  // NOTE(review): signed/unsigned comparison — a negative subgraph index
  // would wrap here; presumably indices are validated upstream. Confirm.
  if (data->update_computation_subgraph_index >= subgraphs->size()) {
    TF_LITE_KERNEL_LOG(context,
                       "Computation subgraph not found for stablehlo.scatter.");
    return TfLiteStatus::kTfLiteError;
  }
  Subgraph* computation_subgraph =
      (*subgraphs)[data->update_computation_subgraph_index].get();
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  TF_LITE_ENSURE_STATUS(GetComputationType(
      computation_subgraph, &op_data->computation_type, context));
  return TfLiteStatus::kTfLiteOk;
}
}
// Entry point: dispatches on the scatter-indices element type (int32/int64),
// which in turn dispatches on the operand element type.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputsTensor, &input));
  const TfLiteTensor* scatter_indices;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kScatterIndicesTensor,
                                          &scatter_indices));
  TfLiteType index_type = scatter_indices->type;
  TfLiteType data_type = input->type;
  if (index_type == kTfLiteInt32) {
    return EvalWithIndexType<int32_t>(context, node, index_type, data_type);
  } else if (index_type == kTfLiteInt64) {
    return EvalWithIndexType<int64_t>(context, node, index_type, data_type);
  } else {
    TF_LITE_KERNEL_LOG(context, "(Index Type: %s) currently not supported.\n",
                       TfLiteTypeGetName(index_type));
    return TfLiteStatus::kTfLiteError;
  }
}
}
// Returns the kernel registration (lifecycle callbacks) for STABLEHLO_SCATTER.
TfLiteRegistration* Register_STABLEHLO_SCATTER() {
  static TfLiteRegistration r = {
      stablehlo_scatter::Init, stablehlo_scatter::Free,
      stablehlo_scatter::Prepare, stablehlo_scatter::Eval};
  return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
// Selects which reduction subgraph to attach as the scatter's update
// computation.
enum class StablehloScatterOpType { kAdd, kMul, kMax, kMin, kUpdate };
// Single-op test harness for STABLEHLO_SCATTER: builds the op with the given
// scatter attributes and attaches a one-op reduction subgraph (subgraph 1)
// chosen by `op_type` as the update computation.
class StablehloScatterOpModel : public SingleOpModel {
 public:
  StablehloScatterOpModel(const TensorData& input, const TensorData& indices,
                          const TensorData& updates,
                          const TfLiteStablehloScatterParams& params,
                          StablehloScatterOpType op_type) {
    input_ = AddInput(input);
    indices_ = AddInput(indices);
    updates_ = AddInput(updates);
    output_ = AddOutput(input.type);
    // The trailing `1` is the update-computation subgraph index.
    SetBuiltinOp(
        BuiltinOperator_STABLEHLO_SCATTER,
        BuiltinOptions2_StablehloScatterOptions,
        CreateStablehloScatterOptions(
            builder_, params.indices_are_sorted,
            builder_.CreateVector(std::vector(
                params.update_window_dims,
                params.update_window_dims + params.num_update_window_dims)),
            builder_.CreateVector(std::vector(
                params.inserted_window_dims,
                params.inserted_window_dims + params.num_inserted_window_dims)),
            builder_.CreateVector(
                std::vector(params.scatter_dims_to_operand_dims,
                            params.scatter_dims_to_operand_dims +
                                params.num_scatter_dims_to_operand_dims)),
            params.index_vector_dim, params.unique_indices, 1)
            .Union());
    // Defer allocation until after the computation subgraph is attached.
    BuildInterpreter({GetShape(input_), GetShape(indices_), GetShape(updates_)},
                     -1, false,
                     false, false,
                     false);
    int* dummy = nullptr;
    AddSubgraphs(1, dummy);
    if (op_type == StablehloScatterOpType::kAdd) {
      subgraph_builder_.BuildStablehloAddSubgraph(interpreter_->subgraph(1));
    } else if (op_type == StablehloScatterOpType::kMul) {
      subgraph_builder_.BuildStablehloMulSubgraph(interpreter_->subgraph(1));
    } else if (op_type == StablehloScatterOpType::kMax) {
      subgraph_builder_.BuildStablehloMaximumSubgraph(
          interpreter_->subgraph(1));
    } else if (op_type == StablehloScatterOpType::kMin) {
      subgraph_builder_.BuildStablehloMinimumSubgraph(
          interpreter_->subgraph(1));
    } else if (op_type == StablehloScatterOpType::kUpdate) {
      // Empty/identity-style subgraph: plain overwrite semantics.
      subgraph_builder_.BuildOutputIsSecondInputSubgraph(
          interpreter_->subgraph(1));
    }
    AllocateAndDelegate(true);
  }
  template <typename T>
  void SetInput(std::initializer_list<T> data) {
    PopulateTensor<T>(input_, data);
  }
  template <typename T>
  void SetIndices(std::initializer_list<T> data) {
    PopulateTensor<T>(indices_, data);
  }
  template <typename T>
  void SetUpdates(std::initializer_list<T> data) {
    PopulateTensor<T>(updates_, data);
  }
  template <typename T>
  std::vector<T> GetOutput() {
    return ExtractVector<T>(output_);
  }
 protected:
  Subgraph* subgraph_;
  int input_;
  int indices_;
  int updates_;
  int output_;
  subgraph_test_util::SubgraphBuilder subgraph_builder_;
};
// Scatter with an Add reduction: in-bounds updates are added onto the
// operand; the index row ending in 9 is out of bounds and is ignored.
TEST(StablehloScatterOpTest, PerformsAddition) {
  StablehloScatterOpType op_type = StablehloScatterOpType::kAdd;
  // Fields in TfLiteStablehloScatterParams declaration order — presumably
  // indices_are_sorted, update_window_dims(+count), inserted_window_dims
  // (+count), scatter_dims_to_operand_dims(+count), index_vector_dim,
  // unique_indices, subgraph index. Confirm against builtin_op_data.h.
  TfLiteStablehloScatterParams params = {
      false,
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      false,
      1
  };
  StablehloScatterOpModel model(
      {TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
      {TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
  model.SetUpdates<float>(
      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 7, 8, 9, 10, 7, 8,
                                        11, 12, 13, 14, 15, 16, 17, 18,
                                        19, 20, 21, 22, 21, 22, 23, 24};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// Same fixture as PerformsAddition but with a Multiply reduction: touched
// elements are doubled; out-of-bounds rows are ignored.
TEST(StablehloScatterOpTest, PerformsMultiplication) {
  StablehloScatterOpType op_type = StablehloScatterOpType::kMul;
  TfLiteStablehloScatterParams params = {
      false,
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      false,
      1
  };
  StablehloScatterOpModel model(
      {TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
      {TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
  model.SetUpdates<float>(
      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 12, 16, 20, 24, 7, 8,
                                        18, 20, 22, 24, 26, 28, 30, 32,
                                        34, 36, 38, 40, 21, 22, 23, 24};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// Maximum reduction: all updates (value 2) are <= the existing elements, so
// the operand is unchanged.
TEST(StablehloScatterOpTest, PerformsMaximum) {
  StablehloScatterOpType op_type = StablehloScatterOpType::kMax;
  TfLiteStablehloScatterParams params = {
      false,
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      false,
      1
  };
  StablehloScatterOpModel model(
      {TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
      {TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
  model.SetUpdates<float>(
      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 3, 4, 5, 6, 7, 8,
                                        9, 10, 11, 12, 13, 14, 15, 16,
                                        17, 18, 19, 20, 21, 22, 23, 24};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// Minimum reduction: touched elements (all > 2) are replaced by the update
// value 2; untouched and out-of-bounds positions keep their values.
TEST(StablehloScatterOpTest, PerformsMinimum) {
  StablehloScatterOpType op_type = StablehloScatterOpType::kMin;
  TfLiteStablehloScatterParams params = {
      false,
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      false,
      1
  };
  StablehloScatterOpModel model(
      {TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
      {TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
  model.SetUpdates<float>(
      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 2, 2, 2, 2, 7, 8, 2, 2, 2, 2,
                                        2, 2, 2, 2, 2, 2, 2, 2, 21, 22, 23, 24};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
// Plain update (overwrite) semantics via an empty computation subgraph:
// touched elements become 2; matches the Minimum case for this data since
// every update value is below every operand value.
TEST(StablehloScatterOpTest, PerformsUpdate) {
  StablehloScatterOpType op_type = StablehloScatterOpType::kUpdate;
  TfLiteStablehloScatterParams params = {
      false,
      {2, 3},
      2,
      {0},
      1,
      {1, 0},
      2,
      2,
      false,
      1
  };
  StablehloScatterOpModel model(
      {TensorType_FLOAT32, {3, 4, 2}}, {TensorType_INT64, {2, 3, 2}},
      {TensorType_FLOAT32, {2, 3, 2, 2}}, params, op_type);
  model.SetInput<float>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
  model.SetIndices<int64_t>({0, 2, 1, 0, 2, 1, 0, 1, 1, 0, 0, 9});
  model.SetUpdates<float>(
      {2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  std::vector<float> expected_values = {1, 2, 2, 2, 2, 2, 7, 8, 2, 2, 2, 2,
                                        2, 2, 2, 2, 2, 2, 2, 2, 21, 22, 23, 24};
  EXPECT_THAT(model.GetOutput<float>(), ElementsAreArray(expected_values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_scatter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_scatter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.