ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
44
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 16
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 127
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 96
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
7a5d48b6-b23a-4473-9c78-e527da0dec6b | cpp | tensorflow/tensorflow | tensor_handle_data | tensorflow/core/common_runtime/eager/tensor_handle_data.cc | tensorflow/core/common_runtime/eager/tensor_handle_data_test.cc | #include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include <utility>
#include <variant>
#include "tensorflow/core/common_runtime/eager/eager_executor.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/profiler/lib/traceme.h"
namespace tensorflow {
// Returns a pointer to the wrapped tensor, blocking until the handle's data
// is ready. Returns the poison status instead if the handle was poisoned.
Status LocalTensorHandleData::Tensor(const tensorflow::Tensor** t) const {
TF_RETURN_IF_ERROR(WaitReady("Tensor"));
*t = &tensor_;
return absl::OkStatus();
}
// Produces a mutable TensorValue view of the wrapped tensor, blocking until
// the handle's data is ready (or returning the poison status).
Status LocalTensorHandleData::TensorValue(tensorflow::TensorValue* t) {
  TF_RETURN_IF_ERROR(WaitReady("TensorValue"));
  *t = tensorflow::TensorValue(&tensor_);
  return absl::OkStatus();
}
// Copies the tensor's shape into *shape, blocking until the data is ready.
Status LocalTensorHandleData::Shape(TensorShape* shape) const {
TF_RETURN_IF_ERROR(WaitReady("Shape"));
*shape = tensor_.shape();
return absl::OkStatus();
}
// Reports the tensor's rank, blocking until the data is ready.
Status LocalTensorHandleData::NumDims(int* num_dims) const {
TF_RETURN_IF_ERROR(WaitReady("NumDims"));
*num_dims = tensor_.dims();
return absl::OkStatus();
}
// Reports the size of dimension `dim_index`, blocking until ready.
// NOTE(review): no bounds check on dim_index here — presumably
// Tensor::dim_size validates it; confirm before relying on error behavior.
Status LocalTensorHandleData::Dim(int dim_index, int64_t* dim) const {
TF_RETURN_IF_ERROR(WaitReady("Dim"));
*dim = tensor_.dim_size(dim_index);
return absl::OkStatus();
}
// Reports the total element count, blocking until the data is ready.
Status LocalTensorHandleData::NumElements(int64_t* num_elements) const {
TF_RETURN_IF_ERROR(WaitReady("NumElements"));
*num_elements = tensor_.NumElements();
return absl::OkStatus();
}
// Releases the forwarding-protection reference so the underlying buffer may
// be forwarded. Only legal once the handle's tensor has been set.
Status LocalTensorHandleData::Unprotect() {
  if (IsReady()) {
    // Replace the extra reference with an empty tensor, dropping it.
    forwarding_protection_tensor_ = tensorflow::Tensor();
    return absl::OkStatus();
  }
  return errors::Internal("Cannot unprotect a non-ready tensor");
}
// Installs the tensor payload on a non-ready handle and wakes any waiters
// blocked in WaitReady().
Status LocalTensorHandleData::SetTensor(tensorflow::Tensor&& t) {
  DCHECK(!IsReady()) << "SetTensor is only called on non-ready handles.";
  tensor_ = std::move(t);
  // Hold an extra reference so the buffer cannot be forwarded while callers
  // may still read it (released via Unprotect()).
  forwarding_protection_tensor_ = tensor_;
  std::get<BlockingControl>(ctrl_).SetReady();
  return absl::OkStatus();
}
// Describes the handle: the tensor's device-safe summary when ready, or a
// placeholder name for a handle whose data has not arrived yet.
string LocalTensorHandleData::DebugString() const {
  return IsReady() ? tensor_.DeviceSafeDebugString()
                   : string("LocalTensorHandleData");
}
// Marks the handle ready under the lock; wakes WaitReady() waiters via the
// mutex's condition mechanism.
void LocalTensorHandleData::BlockingControl::SetReady() {
mutex_lock l(mu_);
is_ready_ = true;
}
// Blocks the calling thread until the handle is marked ready (by SetTensor
// or Poison). Returns OK on normal readiness, or the poison status.
Status LocalTensorHandleData::BlockingControl::WaitReady(
const char* caller) const {
tf_shared_lock l(mu_);
if (!is_ready_) {
// Emit a trace span so profilers can attribute blocked time to `caller`.
tsl::profiler::TraceMe activity(
[caller] { return absl::StrCat(caller, " WaitReady"); },
tsl::profiler::TraceMeLevel::kInfo);
DVLOG(3) << "WaitReady: " << caller << " " << this;
// Await releases the lock while sleeping and re-acquires before return.
mu_.Await(Condition(&is_ready_));
}
return is_poisoned_;
}
// Marks a non-ready handle as failed: records `status` for WaitReady()
// callers and releases anyone currently blocked. Poisoning an already-ready
// handle is a programming error and is only logged.
void LocalTensorHandleData::BlockingControl::Poison(Status status) {
  mutex_lock l(mu_);
  if (!is_ready_) {
    is_poisoned_ = status;
    is_ready_ = true;
    return;
  }
  LOG(ERROR) << "Poison can only be called on non-ready handle: " << this;
}
} | #include "tensorflow/core/common_runtime/eager/tensor_handle_data.h"
#include <utility>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Tensor() on a ready handle exposes the wrapped tensor's dtype and rank.
TEST(TensorHandleData, TensorAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
LocalTensorHandleData handle_data(std::move(t));
const tensorflow::Tensor* ret_tensor;
TF_EXPECT_OK(handle_data.Tensor(&ret_tensor));
EXPECT_EQ(ret_tensor->dtype(), DT_UINT16);
EXPECT_EQ(ret_tensor->dims(), 2);
}
// TensorValue() yields a view whose dtype matches the wrapped tensor.
TEST(TensorHandleData, TensorValueAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
LocalTensorHandleData handle_data(std::move(t));
tensorflow::TensorValue tensor_value;
TF_EXPECT_OK(handle_data.TensorValue(&tensor_value));
EXPECT_EQ(tensor_value.dtype(), DT_UINT16);
}
// Shape() round-trips the shape the tensor was constructed with.
TEST(TensorHandleData, TensorShapeAttribute) {
TensorShape shape({2, 2});
Tensor t(DT_UINT16, shape);
LocalTensorHandleData handle_data(std::move(t));
tensorflow::TensorShape tensor_shape;
TF_EXPECT_OK(handle_data.Shape(&tensor_shape));
EXPECT_EQ(tensor_shape, shape);
}
// NumDims() reports the rank of the wrapped tensor.
TEST(TensorHandleData, NumDimsAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 2}));
LocalTensorHandleData handle_data(std::move(t));
int num_dims;
TF_EXPECT_OK(handle_data.NumDims(&num_dims));
EXPECT_EQ(num_dims, 2);
}
// Dim(i) reports the size of the i-th dimension.
TEST(TensorHandleData, DimAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
int64_t dim;
TF_EXPECT_OK(handle_data.Dim(1, &dim));
EXPECT_EQ(dim, 3);
}
// NumElements() reports the product of all dimension sizes.
TEST(TensorHandleData, NumElementsAttribute) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
int64_t num_elements;
TF_EXPECT_OK(handle_data.NumElements(&num_elements));
EXPECT_EQ(num_elements, 6);
}
// Unprotect() succeeds on a handle constructed ready (tensor provided).
TEST(TensorHandleData, UnprotectReady) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
EXPECT_TRUE(handle_data.IsReady());
TF_EXPECT_OK(handle_data.Unprotect());
}
// Unprotect() on a default-constructed (non-ready) handle fails INTERNAL.
TEST(TensorHandleData, UnprotectNotReady) {
LocalTensorHandleData handle_data;
EXPECT_FALSE(handle_data.IsReady());
EXPECT_THAT(handle_data.Unprotect(),
tensorflow::testing::StatusIs(tensorflow::error::INTERNAL));
}
// DebugString() on a ready handle includes the tensor type/shape summary.
TEST(TensorHandleData, DebugString) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
EXPECT_THAT(handle_data.DebugString(),
::testing::HasSubstr("Tensor<type: uint16 shape: [2,3]>"));
}
// Poisoning a ready (non-blocking-control) handle is a no-op: IsPoisoned()
// stays OK before and after.
TEST(TensorHandleData, NonBlockingControlPoisonHandle) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data(std::move(t));
TF_EXPECT_OK(handle_data.IsPoisoned());
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
handle_data.Poison(fake_failure_status);
TF_EXPECT_OK(handle_data.IsPoisoned());
}
// Poisoning a non-ready handle makes IsPoisoned() report the given status.
TEST(TensorHandleData, BlockingControlPoisonHandle) {
LocalTensorHandleData handle_data;
TF_EXPECT_OK(handle_data.IsPoisoned());
tensorflow::Status fake_failure_status(absl::StatusCode::kAborted,
"Fake failure.");
handle_data.Poison(fake_failure_status);
EXPECT_THAT(handle_data.IsPoisoned(),
tensorflow::testing::StatusIs(
fake_failure_status.code(),
std::string(fake_failure_status.message())));
}
// SetTensor() on a non-ready handle makes its data readable afterwards.
TEST(TensorHandleData, BlockingControlSetTensor) {
Tensor t(DT_UINT16, TensorShape({2, 3}));
LocalTensorHandleData handle_data;
TF_EXPECT_OK(handle_data.SetTensor(std::move(t)));
int64_t num_elements;
TF_EXPECT_OK(handle_data.NumElements(&num_elements));
EXPECT_EQ(num_elements, 6);
}
// DebugString() on a non-ready handle falls back to the placeholder name.
TEST(TensorHandleData, BlockingControlNotReadyDebugString) {
LocalTensorHandleData handle_data;
EXPECT_THAT(handle_data.DebugString(),
::testing::HasSubstr("LocalTensorHandleData"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle_data.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/eager/tensor_handle_data_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2a390141-a179-474f-84ec-21165faf17ec | cpp | tensorflow/tensorflow | request_id | tensorflow/core/distributed_runtime/request_id.cc | tensorflow/core/distributed_runtime/request_id_test.cc | #include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
// Draws a random, nonzero request id. Zero is reserved as the "no request
// id" sentinel, so we redraw until a nonzero value comes up.
int64_t GetUniqueRequestId() {
  int64_t request_id;
  do {
    request_id = tsl::random::ThreadLocalNew64();
  } while (request_id == 0);
  return request_id;
}
} | #include "tensorflow/core/distributed_runtime/request_id.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// GetUniqueRequestId never returns the reserved zero value, even over many
// draws.
TEST(GetUniqueRequestId, Basic) {
for (int i = 0; i < 1000000; ++i) {
EXPECT_NE(GetUniqueRequestId(), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/request_id.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/request_id_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e510e08c-cc79-4ffc-bf7b-dcf71c6c014e | cpp | google/tsl | curl_http_request | tsl/platform/cloud/curl_http_request.cc | tsl/platform/cloud/curl_http_request_test.cc | #include "tsl/platform/cloud/curl_http_request.h"
#include <algorithm>
#include "xla/tsl/lib/gtl/map_util.h"
#include "xla/tsl/util/env_var.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/scanner.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/types.h"
#define CHECK_CURL_OK(expr) CHECK_EQ(expr, CURLE_OK)
namespace tsl {
namespace {
constexpr uint64 kVerboseOutput = 0;
// LibCurl implementation that forwards every call to the real libcurl.
// Load() performs process-wide curl_global_init exactly once; the proxy is
// intentionally leaked (never torn down) for the process lifetime.
class LibCurlProxy : public LibCurl {
public:
static LibCurlProxy* Load() {
// Function-local static guarantees thread-safe one-time initialization.
static LibCurlProxy* libcurl = []() -> LibCurlProxy* {
curl_global_init(CURL_GLOBAL_ALL);
return new LibCurlProxy;
}();
return libcurl;
}
CURL* curl_easy_init() override { return ::curl_easy_init(); }
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
uint64 param) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
const char* param) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
void* param) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(void*, size_t, size_t,
FILE*)) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(const void*, size_t, size_t,
void*)) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
int (*param)(void* clientp, curl_off_t dltotal,
curl_off_t dlnow, curl_off_t ultotal,
curl_off_t ulnow)) override {
return ::curl_easy_setopt(curl, option, param);
}
CURLcode curl_easy_perform(CURL* curl) override {
return ::curl_easy_perform(curl);
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
uint64* value) override {
return ::curl_easy_getinfo(curl, info, value);
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
double* value) override {
return ::curl_easy_getinfo(curl, info, value);
}
void curl_easy_cleanup(CURL* curl) override {
return ::curl_easy_cleanup(curl);
}
char* curl_easy_escape(CURL* curl, const char* str, int length) override {
return ::curl_easy_escape(curl, str, length);
}
curl_slist* curl_slist_append(curl_slist* list, const char* str) override {
return ::curl_slist_append(list, str);
}
void curl_slist_free_all(curl_slist* list) override {
return ::curl_slist_free_all(list);
}
void curl_free(void* p) override { ::curl_free(p); }
};
}
// Default constructor: uses the real libcurl via the process-wide proxy.
CurlHttpRequest::CurlHttpRequest() : CurlHttpRequest(LibCurlProxy::Load()) {}
// Initializes a curl easy handle with session defaults: optional CA bundle
// from $CURL_CA_BUNDLE, HTTP/1.1, no signals (thread safety), and the
// progress callback used for inactivity timeouts. Responses go into
// default_response_buffer_ unless a caller overrides via SetResultBuffer*.
CurlHttpRequest::CurlHttpRequest(LibCurl* libcurl, Env* env)
: libcurl_(libcurl), env_(env) {
default_response_buffer_.reserve(CURL_MAX_WRITE_SIZE);
curl_ = libcurl_->curl_easy_init();
CHECK(curl_ != nullptr) << "Couldn't initialize a curl session.";
std::string value = "";
TF_CHECK_OK(ReadStringFromEnvVar("CURL_CA_BUNDLE", "", &value));
if (!value.empty()) {
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_CAINFO, value.c_str()));
}
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_VERBOSE, kVerboseOutput));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_USERAGENT, "TSL"));
// NOSIGNAL avoids SIGALRM-based timeouts, required in multi-threaded use.
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_NOSIGNAL, 1L));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTP_VERSION,
CURL_HTTP_VERSION_1_1));
// NOPROGRESS=0 enables the transfer-info (progress) callback below.
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_NOPROGRESS, uint64{0}));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFODATA, this));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_XFERINFOFUNCTION,
&CurlHttpRequest::ProgressCallback));
SetResultBuffer(&default_response_buffer_);
}
// Releases all curl-owned resources: header and resolve lists, an open PUT
// body file (if any), and the easy handle itself.
CurlHttpRequest::~CurlHttpRequest() {
if (curl_headers_) {
libcurl_->curl_slist_free_all(curl_headers_);
}
if (resolve_list_) {
libcurl_->curl_slist_free_all(resolve_list_);
}
if (put_body_) {
// Best effort: a failed close is logged, not propagated — destructors
// must not fail.
if (fclose(put_body_) != 0) {
LOG(ERROR) << "fclose() failed: " << strerror(errno);
}
}
if (curl_) {
libcurl_->curl_easy_cleanup(curl_);
}
}
// URL-encodes `str` using libcurl. Passing length 0 tells libcurl to use
// strlen on the input; the returned C buffer is freed before returning.
string CurlHttpRequest::EscapeString(const string& str) {
  char* escaped = libcurl_->curl_easy_escape(curl_, str.c_str(), 0);
  string result(escaped);
  libcurl_->curl_free(escaped);
  return result;
}
// Sets the request URI; must be called before Send() and at most once per
// request lifecycle (CheckNotSent enforces the former).
void CurlHttpRequest::SetUri(const string& uri) {
CheckNotSent();
is_uri_set_ = true;
uri_ = uri;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_URL, uri.c_str()));
}
// Requests only the byte range [start, end] (inclusive, per HTTP Range).
void CurlHttpRequest::SetRange(uint64 start, uint64 end) {
CheckNotSent();
CHECK_CURL_OK(libcurl_->curl_easy_setopt(
curl_, CURLOPT_RANGE, strings::StrCat(start, "-", end).c_str()));
}
// Appends a "name: value" header to the outgoing request.
void CurlHttpRequest::AddHeader(const string& name, const string& value) {
CheckNotSent();
curl_headers_ = libcurl_->curl_slist_append(
curl_headers_, strings::StrCat(name, ": ", value).c_str());
}
// Pins hostname:port to a fixed IP address, bypassing DNS for this request
// (curl's CURLOPT_RESOLVE "host:port:address" format).
void CurlHttpRequest::AddResolveOverride(const string& hostname, int64_t port,
const string& ip_addr) {
CheckNotSent();
resolve_list_ = libcurl_->curl_slist_append(
resolve_list_,
strings::StrCat(hostname, ":", port, ":", ip_addr).c_str());
}
// Adds an OAuth-style "Authorization: Bearer <token>" header. An empty
// token means unauthenticated access, so no header is added.
void CurlHttpRequest::AddAuthBearerHeader(const string& auth_token) {
  CheckNotSent();
  if (auth_token.empty()) {
    return;
  }
  AddHeader("Authorization", strings::StrCat("Bearer ", auth_token));
}
// Attaches a stats collector notified at request start and completion.
// May be set at most once per request.
void CurlHttpRequest::SetRequestStats(RequestStats* stats) {
CheckNotSent();
CHECK(stats_ == nullptr) << "SetRequestStats already called";
stats_ = stats;
}
// Configures the request to use the HTTP DELETE method.
void CurlHttpRequest::SetDeleteRequest() {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kDelete;
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_CUSTOMREQUEST, "DELETE"));
}
// Configures a PUT whose body is read from `body_filepath` starting at byte
// `offset`. Any previously opened PUT body file is closed first; the file
// stays open until the request is destroyed.
absl::Status CurlHttpRequest::SetPutFromFile(const string& body_filepath,
size_t offset) {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPut;
if (put_body_) {
if (fclose(put_body_) != 0) {
LOG(ERROR) << "fclose() failed: " << strerror(errno);
}
}
put_body_ = fopen(body_filepath.c_str(), "r");
if (!put_body_) {
return errors::InvalidArgument("Couldn't open the specified file: " +
body_filepath);
}
// NOTE(review): fseek/ftell results are unchecked; an offset past EOF
// would make `size` underflow (unsigned) — confirm callers guarantee
// offset <= file size.
fseek(put_body_, 0, SEEK_END);
const auto size = ftell(put_body_) - offset;
fseek(put_body_, offset, SEEK_SET);
curl_headers_ = libcurl_->curl_slist_append(
curl_headers_, strings::StrCat("Content-Length: ", size).c_str());
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1));
// libcurl fread()s the request body directly from this FILE*.
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(put_body_)));
return absl::OkStatus();
}
// Configures a PUT with an empty body; ReadCallback serves zero bytes from
// the (unset) post_body_buffer_.
void CurlHttpRequest::SetPutEmptyBody() {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPut;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_PUT, 1));
AddHeader("Content-Length", "0");
AddHeader("Transfer-Encoding", "identity");
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
&CurlHttpRequest::ReadCallback));
}
// Configures a POST whose body is the caller-owned [buffer, buffer+size)
// region; the buffer must outlive Send(). ReadCallback streams it to curl.
void CurlHttpRequest::SetPostFromBuffer(const char* buffer, size_t size) {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPost;
curl_headers_ = libcurl_->curl_slist_append(
curl_headers_, strings::StrCat("Content-Length: ", size).c_str());
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
&CurlHttpRequest::ReadCallback));
post_body_buffer_ = absl::string_view(buffer, size);
}
// Configures a POST with an empty body (Content-Length: 0).
void CurlHttpRequest::SetPostEmptyBody() {
CheckNotSent();
CheckMethodNotSet();
is_method_set_ = true;
method_ = RequestMethod::kPost;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_POST, 1));
AddHeader("Content-Length", "0");
AddHeader("Transfer-Encoding", "identity");
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_READFUNCTION,
&CurlHttpRequest::ReadCallback));
}
// Directs the response body into the caller-owned vector, which is cleared
// first and grows as data arrives (via WriteCallback).
void CurlHttpRequest::SetResultBuffer(std::vector<char>* out_buffer) {
CheckNotSent();
CHECK(out_buffer != nullptr);
out_buffer->clear();
response_buffer_ = out_buffer;
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEFUNCTION,
&CurlHttpRequest::WriteCallback));
}
// Directs the response body into a fixed-size caller buffer; bytes beyond
// `size` are dropped by WriteCallbackDirect (and counted for diagnostics).
void CurlHttpRequest::SetResultBufferDirect(char* buffer, size_t size) {
CHECK(buffer != nullptr);
CheckNotSent();
direct_response_ = DirectResponseState{buffer, size, 0, 0};
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_WRITEDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(
curl_, CURLOPT_WRITEFUNCTION, &CurlHttpRequest::WriteCallbackDirect));
}
// True iff SetResultBufferDirect() was used for this request.
bool CurlHttpRequest::IsDirectResponse() const {
return direct_response_.buffer_ != nullptr;
}
// curl write callback for direct-buffer responses. Copies as much as fits;
// returning fewer bytes than received makes curl report CURLE_WRITE_ERROR,
// which CURLcodeToStatus converts into an overflow diagnostic.
size_t CurlHttpRequest::WriteCallbackDirect(const void* ptr, size_t size,
size_t nmemb, void* userdata) {
CHECK(ptr != nullptr);
auto that = reinterpret_cast<CurlHttpRequest*>(userdata);
DirectResponseState* state = &that->direct_response_;
CHECK(state->buffer_ != nullptr);
CHECK(state->bytes_transferred_ <= state->buffer_size_);
size_t curl_bytes_received = size * nmemb;
size_t user_buffer_bytes_available =
state->buffer_size_ - state->bytes_transferred_;
size_t bytes_to_copy =
std::min<size_t>(curl_bytes_received, user_buffer_bytes_available);
memcpy(&state->buffer_[state->bytes_transferred_], ptr, bytes_to_copy);
state->bytes_transferred_ += bytes_to_copy;
// Track total received (including dropped bytes) for overflow reporting.
state->bytes_received_ += curl_bytes_received;
return bytes_to_copy;
}
// Number of bytes actually copied into the direct buffer (excludes bytes
// dropped on overflow). Only valid after SetResultBufferDirect().
size_t CurlHttpRequest::GetResultBufferDirectBytesTransferred() {
CHECK(direct_response_.buffer_ != nullptr);
return direct_response_.bytes_transferred_;
}
// Sets connection, inactivity, and total request timeouts (seconds). The
// inactivity timeout is enforced by ProgressCallback, the others by curl.
void CurlHttpRequest::SetTimeouts(uint32 connection, uint32 inactivity,
uint32 total) {
CheckNotSent();
connect_timeout_secs_ = connection;
inactivity_timeout_secs_ = inactivity;
request_timeout_secs_ = total;
}
// curl write callback for vector-backed responses: appends the chunk to
// response_buffer_ and accepts all bytes.
size_t CurlHttpRequest::WriteCallback(const void* ptr, size_t size,
size_t nmemb, void* this_object) {
CHECK(ptr);
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
CHECK(that->response_buffer_);
const size_t bytes_to_copy = size * nmemb;
that->response_buffer_->insert(
that->response_buffer_->end(), reinterpret_cast<const char*>(ptr),
reinterpret_cast<const char*>(ptr) + bytes_to_copy);
return bytes_to_copy;
}
// curl read callback for buffer-backed request bodies: streams the next
// slice of post_body_buffer_; returns 0 at end-of-body.
size_t CurlHttpRequest::ReadCallback(void* ptr, size_t size, size_t nmemb,
FILE* this_object) {
CHECK(ptr);
// The FILE* parameter type is dictated by the curl prototype; the actual
// value registered via CURLOPT_READDATA is this CurlHttpRequest.
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
CHECK(that->post_body_read_ <= that->post_body_buffer_.size());
const size_t bytes_to_copy = std::min(
size * nmemb, that->post_body_buffer_.size() - that->post_body_read_);
memcpy(ptr, that->post_body_buffer_.data() + that->post_body_read_,
bytes_to_copy);
that->post_body_read_ += bytes_to_copy;
return bytes_to_copy;
}
// curl header callback: parses "Name: value" lines into response_headers_,
// trimming trailing whitespace from the value. Non-matching lines (e.g. the
// status line) are ignored.
size_t CurlHttpRequest::HeaderCallback(const void* ptr, size_t size,
size_t nmemb, void* this_object) {
CHECK(ptr);
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
absl::string_view header(reinterpret_cast<const char*>(ptr), size * nmemb);
absl::string_view name, value;
if (strings::Scanner(header)
.ScanEscapedUntil(':')
.StopCapture()
.OneLiteral(": ")
.GetResult(&value, &name)) {
string str_value(value);
absl::StripTrailingAsciiWhitespace(&str_value);
that->response_headers_[string(name)] = str_value;
}
return size * nmemb;
}
// Performs the configured request synchronously, then maps both transport
// failures (CURLcode) and HTTP status codes onto absl::Status. A request
// can be sent at most once.
absl::Status CurlHttpRequest::Send() {
CheckNotSent();
CHECK(is_uri_set_) << "URI has not been set.";
is_sent_ = true;
if (curl_headers_) {
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_HTTPHEADER, curl_headers_));
}
if (resolve_list_) {
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_RESOLVE, resolve_list_));
}
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERDATA,
reinterpret_cast<void*>(this)));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_HEADERFUNCTION,
&CurlHttpRequest::HeaderCallback));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_TIMEOUT,
request_timeout_secs_));
CHECK_CURL_OK(libcurl_->curl_easy_setopt(curl_, CURLOPT_CONNECTTIMEOUT,
connect_timeout_secs_));
// libcurl writes a human-readable failure description here.
char error_buffer[CURL_ERROR_SIZE] = {0};
CHECK_CURL_OK(
libcurl_->curl_easy_setopt(curl_, CURLOPT_ERRORBUFFER, error_buffer));
if (stats_ != nullptr) {
stats_->RecordRequest(this, uri_, method_);
}
const CURLcode curl_result = libcurl_->curl_easy_perform(curl_);
TF_RETURN_IF_ERROR(CURLcodeToStatus(curl_result, error_buffer));
double written_size = 0;
CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_SIZE_DOWNLOAD,
&written_size));
CHECK_CURL_OK(libcurl_->curl_easy_getinfo(curl_, CURLINFO_RESPONSE_CODE,
&response_code_));
// Builds an error string including a size-limited snippet of the body.
auto get_error_message = [this]() -> string {
string error_message = strings::StrCat(
"Error executing an HTTP request: HTTP response code ", response_code_);
absl::string_view body = GetResponse();
if (!body.empty()) {
return strings::StrCat(
error_message, " with body '",
body.substr(0, std::min(body.size(), response_to_error_limit_)), "'");
}
return error_message;
};
absl::Status result;
switch (response_code_) {
case 200:
case 201:
case 204:
case 206:
result = absl::OkStatus();
break;
case 416:
// Range not satisfiable: surfaced as a successful, empty response.
response_buffer_->clear();
if (IsDirectResponse()) {
direct_response_.bytes_transferred_ = 0;
}
result = absl::OkStatus();
break;
case 400:
case 406:
case 411:
case 414:
result = errors::InvalidArgument(get_error_message());
break;
case 401:
case 403:
case 407:
result = errors::PermissionDenied(get_error_message());
break;
case 404:
case 410:
result = errors::NotFound(get_error_message());
break;
case 302:
case 303:
case 304:
case 307:
case 412:
case 413:
result = errors::FailedPrecondition(get_error_message());
break;
case 308:
case 409:
case 429:
case 500:
case 502:
case 503:
default:
// Server-side / retryable conditions map to Unavailable.
result = errors::Unavailable(get_error_message());
break;
}
if (!result.ok()) {
response_buffer_->clear();
}
if (stats_ != nullptr) {
stats_->RecordResponse(this, uri_, method_, result);
}
return result;
}
// Crashes if an HTTP method was already chosen for this request.
void CurlHttpRequest::CheckMethodNotSet() const {
CHECK(!is_method_set_) << "HTTP method has been already set.";
}
// Crashes if the request was already sent; requests are single-use.
void CurlHttpRequest::CheckNotSent() const {
CHECK(!is_sent_) << "The request has already been sent.";
}
// Returns a view of the response body, whichever buffer received it. The
// view borrows caller- or request-owned storage; it does not own the bytes.
absl::string_view CurlHttpRequest::GetResponse() const {
  if (IsDirectResponse()) {
    return absl::string_view(direct_response_.buffer_,
                             direct_response_.bytes_transferred_);
  }
  return absl::string_view(response_buffer_->data(),
                           response_buffer_->size());
}
// Looks up a response header by name; absent headers yield an empty string
// rather than an error.
string CurlHttpRequest::GetResponseHeader(const string& name) const {
  auto it = response_headers_.find(name);
  if (it == response_headers_.end()) {
    return "";
  }
  return it->second;
}
// HTTP status code of the last Send(); only meaningful after Send().
uint64 CurlHttpRequest::GetResponseCode() const { return response_code_; }
// curl transfer-info callback. Returning nonzero aborts the transfer; this
// is how the inactivity timeout is enforced on stalled connections.
int CurlHttpRequest::ProgressCallback(void* this_object, curl_off_t dltotal,
curl_off_t dlnow, curl_off_t ultotal,
curl_off_t ulnow) {
auto that = reinterpret_cast<CurlHttpRequest*>(this_object);
const auto now = that->env_->NowSeconds();
const auto current_progress = dlnow + ulnow;
if (that->last_progress_timestamp_ == 0 ||
current_progress > that->last_progress_bytes_) {
// First tick, or bytes have moved since the last tick: reset the clock.
that->last_progress_timestamp_ = now;
that->last_progress_bytes_ = current_progress;
return 0;
}
if (now - that->last_progress_timestamp_ > that->inactivity_timeout_secs_) {
// Transfer is stuck: collect curl timing diagnostics, log, and abort.
double lookup_time = -1;
const auto lookup_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_NAMELOOKUP_TIME, &lookup_time);
double connect_time = -1;
const auto connect_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_CONNECT_TIME, &connect_time);
double pretransfer_time = -1;
const auto pretransfer_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_PRETRANSFER_TIME, &pretransfer_time);
double starttransfer_time = -1;
const auto starttransfer_time_status = that->libcurl_->curl_easy_getinfo(
that->curl_, CURLINFO_STARTTRANSFER_TIME, &starttransfer_time);
LOG(ERROR) << "The transmission of request " << this_object
<< " (URI: " << that->uri_ << ") has been stuck at "
<< current_progress << " of " << dltotal + ultotal
<< " bytes for " << now - that->last_progress_timestamp_
<< " seconds and will be aborted. CURL timing information: "
<< "lookup time: " << lookup_time << " ("
<< curl_easy_strerror(lookup_time_status)
<< "), connect time: " << connect_time << " ("
<< curl_easy_strerror(connect_time_status)
<< "), pre-transfer time: " << pretransfer_time << " ("
<< curl_easy_strerror(pretransfer_time_status)
<< "), start-transfer time: " << starttransfer_time << " ("
<< curl_easy_strerror(starttransfer_time_status) << ")";
return 1;
}
return 0;
}
// Maps a libcurl result code to absl::Status, special-casing direct-buffer
// overflow (write error with more bytes received than buffer space) and
// host-resolution / CA-bundle failures.
absl::Status CurlHttpRequest::CURLcodeToStatus(CURLcode code,
const char* error_buffer) {
if (code == CURLE_OK) {
return absl::OkStatus();
}
string error_message = strings::StrCat(
"Error executing an HTTP request: libcurl code ", code, " meaning '",
curl_easy_strerror(code), "', error details: ");
if (code == CURLE_WRITE_ERROR && IsDirectResponse() &&
direct_response_.bytes_received_ > direct_response_.buffer_size_) {
// WriteCallbackDirect truncates on overflow, which makes curl report a
// write error; report the overflow explicitly instead.
string overflow_message = strings::StrCat(
"Received ", direct_response_.bytes_received_, " response bytes ",
"for a ", direct_response_.buffer_size_, "-byte buffer");
uint64 response_code = 0;
const CURLcode get_response_result = libcurl_->curl_easy_getinfo(
curl_, CURLINFO_RESPONSE_CODE, &response_code);
// 416 (range not satisfiable) is treated as success, matching Send().
if (get_response_result == CURLE_OK && response_code == 416) {
return absl::OkStatus();
}
return errors::FailedPrecondition(
strings::StrCat(error_message, overflow_message));
}
if (code == CURLE_COULDNT_RESOLVE_HOST || code == CURLE_SSL_CACERT_BADFILE) {
return errors::FailedPrecondition(
strings::StrCat(error_message, error_buffer));
}
return errors::Unavailable(
strings::StrCat(error_message, *error_buffer ? error_buffer : "(none)"));
}
} | #include "tsl/platform/cloud/curl_http_request.h"
#include <fstream>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/mem.h"
#include "tsl/platform/path.h"
#include "tsl/platform/platform.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
const string kTestContent = "random original scratch content";
// Env wrapper whose clock tests can set directly, used to simulate elapsed
// time for the inactivity-timeout progress callback.
class FakeEnv : public EnvWrapper {
public:
FakeEnv() : EnvWrapper(Env::Default()) {}
uint64 NowSeconds() const override { return now_; }
// Fake current time, settable by tests.
uint64 now_ = 10000;
};
class FakeLibCurl : public LibCurl {
public:
FakeLibCurl(const string& response_content, uint64 response_code)
: response_content_(response_content), response_code_(response_code) {}
FakeLibCurl(const string& response_content, uint64 response_code,
std::vector<std::tuple<uint64, curl_off_t>> progress_ticks,
FakeEnv* env)
: response_content_(response_content),
response_code_(response_code),
progress_ticks_(std::move(progress_ticks)),
env_(env) {}
FakeLibCurl(const string& response_content, uint64 response_code,
const std::vector<string>& response_headers)
: response_content_(response_content),
response_code_(response_code),
response_headers_(response_headers) {}
CURL* curl_easy_init() override {
is_initialized_ = true;
return reinterpret_cast<CURL*>(this);
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
uint64 param) override {
switch (option) {
case CURLOPT_POST:
is_post_ = param;
break;
case CURLOPT_PUT:
is_put_ = param;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
const char* param) override {
return curl_easy_setopt(curl, option,
reinterpret_cast<void*>(const_cast<char*>(param)));
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
void* param) override {
switch (option) {
case CURLOPT_URL:
url_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_RANGE:
range_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_CUSTOMREQUEST:
custom_request_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_HTTPHEADER:
headers_ = reinterpret_cast<std::vector<string>*>(param);
break;
case CURLOPT_ERRORBUFFER:
error_buffer_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_CAINFO:
ca_info_ = reinterpret_cast<char*>(param);
break;
case CURLOPT_WRITEDATA:
write_data_ = reinterpret_cast<FILE*>(param);
break;
case CURLOPT_HEADERDATA:
header_data_ = reinterpret_cast<FILE*>(param);
break;
case CURLOPT_READDATA:
read_data_ = reinterpret_cast<FILE*>(param);
break;
case CURLOPT_XFERINFODATA:
progress_data_ = param;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(void*, size_t, size_t,
FILE*)) override {
read_callback_ = param;
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
size_t (*param)(const void*, size_t, size_t,
void*)) override {
switch (option) {
case CURLOPT_WRITEFUNCTION:
write_callback_ = param;
break;
case CURLOPT_HEADERFUNCTION:
header_callback_ = param;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_setopt(CURL* curl, CURLoption option,
int (*param)(void* clientp, curl_off_t dltotal,
curl_off_t dlnow, curl_off_t ultotal,
curl_off_t ulnow)) override {
progress_callback_ = param;
return CURLE_OK;
}
CURLcode curl_easy_perform(CURL* curl) override {
if (is_post_ || is_put_) {
char buffer[3];
int bytes_read;
posted_content_ = "";
do {
bytes_read = read_callback_(buffer, 1, sizeof(buffer), read_data_);
posted_content_ = strings::StrCat(
posted_content_, absl::string_view(buffer, bytes_read));
} while (bytes_read > 0);
}
if (write_data_ || write_callback_) {
size_t bytes_handled = write_callback_(
response_content_.c_str(), 1, response_content_.size(), write_data_);
if (bytes_handled != response_content_.size()) {
curl_easy_perform_result_ = CURLE_WRITE_ERROR;
}
}
for (const auto& header : response_headers_) {
header_callback_(header.c_str(), 1, header.size(), header_data_);
}
if (error_buffer_) {
strncpy(error_buffer_, curl_easy_perform_error_message_.c_str(),
curl_easy_perform_error_message_.size() + 1);
}
for (const auto& tick : progress_ticks_) {
env_->now_ = std::get<0>(tick);
if (progress_callback_(progress_data_, 0, std::get<1>(tick), 0, 0)) {
return CURLE_ABORTED_BY_CALLBACK;
}
}
return curl_easy_perform_result_;
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
uint64* value) override {
switch (info) {
case CURLINFO_RESPONSE_CODE:
*value = response_code_;
break;
default:
break;
}
return CURLE_OK;
}
CURLcode curl_easy_getinfo(CURL* curl, CURLINFO info,
double* value) override {
switch (info) {
case CURLINFO_SIZE_DOWNLOAD:
*value = response_content_.size();
break;
default:
break;
}
return CURLE_OK;
}
void curl_easy_cleanup(CURL* curl) override { is_cleaned_up_ = true; }
curl_slist* curl_slist_append(curl_slist* list, const char* str) override {
std::vector<string>* v = list ? reinterpret_cast<std::vector<string>*>(list)
: new std::vector<string>();
v->push_back(str);
return reinterpret_cast<curl_slist*>(v);
}
char* curl_easy_escape(CURL* curl, const char* str, int length) override {
const string victim = "/";
const string encoded = "%2F";
string temp_str = str;
std::string::size_type n = 0;
while ((n = temp_str.find(victim, n)) != std::string::npos) {
temp_str.replace(n, victim.size(), encoded);
n += encoded.size();
}
char* out_char_str = reinterpret_cast<char*>(
port::Malloc(sizeof(char) * temp_str.size() + 1));
std::copy(temp_str.begin(), temp_str.end(), out_char_str);
out_char_str[temp_str.size()] = '\0';
return out_char_str;
}
  // Frees a fake header list built by curl_slist_append (which actually
  // allocated a std::vector<string> behind the opaque pointer).
  void curl_slist_free_all(curl_slist* list) override {
    delete reinterpret_cast<std::vector<string>*>(list);
  }
  // Releases a buffer returned by curl_easy_escape (allocated via port::Malloc).
  void curl_free(void* p) override { port::Free(p); }
  // Canned response to serve and a record of every option the code under
  // test set on the handle; all public so tests can assert on them directly.
  string response_content_;
  uint64 response_code_;
  std::vector<string> response_headers_;
  string url_;
  string range_;
  string custom_request_;
  string ca_info_;
  char* error_buffer_ = nullptr;
  bool is_initialized_ = false;
  bool is_cleaned_up_ = false;
  // Header list built via curl_slist_append; released by curl_slist_free_all.
  std::vector<string>* headers_ = nullptr;
  bool is_post_ = false;
  bool is_put_ = false;
  // Callbacks registered through curl_easy_setopt and invoked by the fake
  // curl_easy_perform above.
  void* write_data_ = nullptr;
  size_t (*write_callback_)(const void* ptr, size_t size, size_t nmemb,
                            void* userdata) = nullptr;
  void* header_data_ = nullptr;
  size_t (*header_callback_)(const void* ptr, size_t size, size_t nmemb,
                             void* userdata) = nullptr;
  FILE* read_data_ = nullptr;
  size_t (*read_callback_)(void* ptr, size_t size, size_t nmemb,
                           FILE* userdata) = &fread;
  int (*progress_callback_)(void* clientp, curl_off_t dltotal, curl_off_t dlnow,
                            curl_off_t ultotal, curl_off_t ulnow) = nullptr;
  void* progress_data_ = nullptr;
  // Request body accumulated from the read callback during POST/PUT.
  string posted_content_;
  // Result and error message the fake curl_easy_perform will report.
  CURLcode curl_easy_perform_result_ = CURLE_OK;
  string curl_easy_perform_error_message_;
  // (timestamp, bytes-downloaded) pairs replayed through the progress
  // callback; each timestamp is written to env_->now_ before the tick.
  std::vector<std::tuple<uint64, curl_off_t>> progress_ticks_;
  FakeEnv* env_ = nullptr;
};
// NOTE(review): every string literal below ending at `"http:` is truncated —
// the rest of the URL (beginning with "//") appears to have been stripped as
// if it were a line comment. The full literals (e.g. "http://www.testuri.com")
// must be restored for this file to compile.

// Plain ranged GET: body lands in the scratch buffer and all options
// (URL, range, auth header) reach libcurl.
TEST(CurlHttpRequestTest, GetRequest) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  // Pre-populate scratch to prove SetResultBuffer discards prior contents.
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ("", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// Same GET, but into a caller-owned fixed buffer (SetResultBufferDirect),
// checking the transferred-bytes accounting.
TEST(CurlHttpRequestTest, GetRequest_Direct) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch(100, 0);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBufferDirect(scratch.data(), scratch.capacity());
  TF_EXPECT_OK(http_request.Send());
  string expected_response = "get response";
  size_t response_bytes_transferred =
      http_request.GetResultBufferDirectBytesTransferred();
  EXPECT_EQ(expected_response.size(), response_bytes_transferred);
  EXPECT_EQ(
      "get response",
      string(scratch.begin(), scratch.begin() + response_bytes_transferred));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ("", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// GET honoring the CURL_CA_BUNDLE environment variable for the CA path.
// NOTE(review): putenv mutates process state and is not undone, so this
// test can affect later tests in the same process.
TEST(CurlHttpRequestTest, GetRequest_CustomCaInfoFlag) {
  static char set_var[] = "CURL_CA_BUNDLE=test";
  putenv(set_var);
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ("test", libcurl.ca_info_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// NOTE(review): `"http:` literals below are truncated (see note above this
// test group) and must be restored.

// A response larger than the direct buffer must fail with CURLE_WRITE_ERROR
// mapped to FAILED_PRECONDITION, keeping the first buffer-ful of bytes.
TEST(CurlHttpRequestTest, GetRequest_Direct_ResponseTooLarge) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch(5, 0);
  http_request.SetUri("http:
  http_request.SetResultBufferDirect(scratch.data(), scratch.size());
  const absl::Status& status = http_request.Send();
  EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 23 meaning "
      "'Failed writing received data to disk/application', error details: "
      "Received 12 response bytes for a 5-byte buffer",
      status.message());
  EXPECT_EQ(5, http_request.GetResultBufferDirectBytesTransferred());
  EXPECT_EQ("get r", string(scratch.begin(), scratch.begin() + 5));
}
// HTTP 416 (range not satisfiable) with a direct buffer is not an error:
// zero bytes are reported transferred even though the fake wrote the body.
TEST(CurlHttpRequestTest, GetRequest_Direct_RangeOutOfBound) {
  FakeLibCurl libcurl("get response", 416);
  CurlHttpRequest http_request(&libcurl);
  const string initialScratch = "abcde";
  std::vector<char> scratch;
  scratch.insert(scratch.end(), initialScratch.begin(), initialScratch.end());
  http_request.SetUri("http:
  http_request.SetRange(0, 4);
  http_request.SetResultBufferDirect(scratch.data(), scratch.size());
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ(416, http_request.GetResponseCode());
  EXPECT_EQ(0, http_request.GetResultBufferDirectBytesTransferred());
  EXPECT_EQ("get r", string(scratch.begin(), scratch.end()));
}
// NOTE(review): `"http:` literals below are truncated and must be restored.

// An empty 200 response leaves the scratch buffer empty and still succeeds.
TEST(CurlHttpRequestTest, GetRequest_Empty) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.resize(0);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(scratch.empty());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("100-199", libcurl.range_);
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
  EXPECT_EQ(200, http_request.GetResponseCode());
}
// HTTP 416 with a growable result buffer: success, but the buffer is
// cleared rather than filled with the (irrelevant) response body.
TEST(CurlHttpRequestTest, GetRequest_RangeOutOfBound) {
  FakeLibCurl libcurl("get response", 416);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(scratch.empty());
  EXPECT_EQ(416, http_request.GetResponseCode());
}
// NOTE(review): `"http:` literals below are truncated and must be restored.

// HTTP 503 maps to UNAVAILABLE with the response body in the message.
TEST(CurlHttpRequestTest, GetRequest_503) {
  FakeLibCurl libcurl("get response", 503);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  http_request.SetResultBuffer(&scratch);
  const auto& status = http_request.Send();
  EXPECT_EQ(error::UNAVAILABLE, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: HTTP response code 503 with body "
      "'get response'",
      status.message());
}
// A libcurl timeout (no HTTP status at all) maps to UNAVAILABLE and keeps
// the libcurl error details in the message.
TEST(CurlHttpRequestTest, GetRequest_HttpCode0) {
  FakeLibCurl libcurl("get response", 0);
  libcurl.curl_easy_perform_result_ = CURLE_OPERATION_TIMEDOUT;
  libcurl.curl_easy_perform_error_message_ = "Operation timed out";
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  const auto& status = http_request.Send();
  EXPECT_EQ(error::UNAVAILABLE, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 28 meaning "
      "'Timeout was reached', error details: Operation timed out",
      status.message());
  EXPECT_EQ(0, http_request.GetResponseCode());
}
// DNS resolution failure maps to FAILED_PRECONDITION; the expected libcurl
// strerror text differs between open-source and internal builds.
TEST(CurlHttpRequestTest, GetRequest_CouldntResolveHost) {
  FakeLibCurl libcurl("get response", 0);
  libcurl.curl_easy_perform_result_ = CURLE_COULDNT_RESOLVE_HOST;
  libcurl.curl_easy_perform_error_message_ =
      "Could not resolve host 'metadata'";
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  const auto& status = http_request.Send();
  EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
  EXPECT_EQ(
      absl::StrCat(
          "Error executing an HTTP request: libcurl code 6 meaning ",
          (kIsOpenSource ? "'Couldn't resolve host name', error details: "
                         : "'Could not resolve hostname', error details: "),
          "Could not resolve host ", "'metadata'"),
      status.message());
  EXPECT_EQ(0, http_request.GetResponseCode());
}
// A bad CA-certificate file maps to FAILED_PRECONDITION.
TEST(CurlHttpRequestTest, GetRequest_SslBadCertfile) {
  FakeLibCurl libcurl("get response", 0);
  libcurl.curl_easy_perform_result_ = CURLE_SSL_CACERT_BADFILE;
  libcurl.curl_easy_perform_error_message_ =
      "error setting certificate verify locations:";
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.end(), kTestContent.begin(), kTestContent.end());
  http_request.SetUri("http:
  const auto& status = http_request.Send();
  EXPECT_EQ(error::FAILED_PRECONDITION, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 77 meaning "
      "'Problem with the SSL CA cert (path? access rights?)', error details: "
      "error setting certificate verify locations:",
      status.message());
  EXPECT_EQ(0, http_request.GetResponseCode());
}
// Parseable response headers are exposed via GetResponseHeader; unknown
// names return ""; unparsable header lines are silently skipped.
// NOTE(review): the `"http:` literal below is truncated and must be restored.
TEST(CurlHttpRequestTest, ResponseHeaders) {
  FakeLibCurl libcurl(
      "get response", 200,
      {"Location: abcd", "Content-Type: text", "unparsable header"});
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("abcd", http_request.GetResponseHeader("Location"));
  EXPECT_EQ("text", http_request.GetResponseHeader("Content-Type"));
  EXPECT_EQ("", http_request.GetResponseHeader("Not-Seen-Header"));
}
// NOTE(review): `"http:` literals below are truncated and must be restored.

// PUT streaming a whole file: Content-Length header is derived from the
// file size and the full file contents are posted.
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  auto content_filename = io::JoinPath(testing::TmpDir(), "content");
  std::ofstream content(content_filename, std::ofstream::binary);
  content << "post body content";
  content.close();
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 0));
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(2, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
  EXPECT_TRUE(libcurl.is_put_);
  EXPECT_EQ("post body content", libcurl.posted_content_);
  std::remove(content_filename.c_str());
}
// PUT from a file with a non-zero start offset only uploads the tail.
TEST(CurlHttpRequestTest, PutRequest_WithBody_FromFile_NonZeroOffset) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  auto content_filename = io::JoinPath(testing::TmpDir(), "content");
  std::ofstream content(content_filename, std::ofstream::binary);
  content << "post body content";
  content.close();
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  TF_EXPECT_OK(http_request.SetPutFromFile(content_filename, 7));
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("dy content", libcurl.posted_content_);
  std::remove(content_filename.c_str());
}
// Empty-body PUT must send Content-Length: 0 and an identity
// Transfer-Encoding so servers don't wait for a chunked body.
TEST(CurlHttpRequestTest, PutRequest_WithoutBody) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPutEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(3, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
  EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
  EXPECT_TRUE(libcurl.is_put_);
  EXPECT_EQ("", libcurl.posted_content_);
}
// NOTE(review): `"http:` literals below are truncated and must be restored.

// POST from an in-memory buffer: Content-Length matches the buffer size and
// the body reaches libcurl verbatim.
TEST(CurlHttpRequestTest, PostRequest_WithBody_FromMemory) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  string content = "post body content";
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPostFromBuffer(content.c_str(), content.size());
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(2, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 17", (*libcurl.headers_)[1]);
  EXPECT_TRUE(libcurl.is_post_);
  EXPECT_EQ("post body content", libcurl.posted_content_);
}
// Empty-body POST: Content-Length 0 plus identity Transfer-Encoding.
TEST(CurlHttpRequestTest, PostRequest_WithoutBody) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPostEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("", libcurl.custom_request_);
  EXPECT_EQ(3, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_EQ("Content-Length: 0", (*libcurl.headers_)[1]);
  EXPECT_EQ("Transfer-Encoding: identity", (*libcurl.headers_)[2]);
  EXPECT_TRUE(libcurl.is_post_);
  EXPECT_EQ("", libcurl.posted_content_);
}
// DELETE is implemented via CURLOPT_CUSTOMREQUEST ("DELETE"), not a POST.
// NOTE(review): `"http:` literals below are truncated and must be restored.
TEST(CurlHttpRequestTest, DeleteRequest) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetDeleteRequest();
  TF_EXPECT_OK(http_request.Send());
  EXPECT_TRUE(libcurl.is_initialized_);
  EXPECT_EQ("http:
  EXPECT_EQ("DELETE", libcurl.custom_request_);
  EXPECT_EQ(1, libcurl.headers_->size());
  EXPECT_EQ("Authorization: Bearer fake-bearer", (*libcurl.headers_)[0]);
  EXPECT_FALSE(libcurl.is_post_);
}
// NOTE(review): `"http:` literals below are truncated and must be restored.

// The following death tests pin the CHECK-failure contract of the builder
// API: a URI is mandatory, a request object is single-use, and the HTTP
// method can only be chosen once.
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_NoUri) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  ASSERT_DEATH((void)http_request.Send(), "URI has not been set");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_TwoSends) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
  ASSERT_DEATH((void)http_request.Send(), "The request has already been sent");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_ReusingAfterSend) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
  ASSERT_DEATH(http_request.SetUri("http:
               "The request has already been sent");
}
TEST(CurlHttpRequestTest, WrongSequenceOfCalls_SettingMethodTwice) {
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetDeleteRequest();
  ASSERT_DEATH(http_request.SetPostEmptyBody(),
               "HTTP method has been already set");
}
// EscapeString delegates to curl_easy_escape; the fake only encodes '/'.
TEST(CurlHttpRequestTest, EscapeString) {
  FakeLibCurl libcurl("get response", 200);
  CurlHttpRequest http_request(&libcurl);
  const string test_string = "a/b/c";
  EXPECT_EQ("a%2Fb%2Fc", http_request.EscapeString(test_string));
}
// On a failed request (HTTP 500) the result buffer must be left empty, not
// filled with the error body.
// NOTE(review): the `"http:` literal below is truncated and must be restored.
TEST(CurlHttpRequestTest, ErrorReturnsNoResponse) {
  FakeLibCurl libcurl("get response", 500);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  EXPECT_EQ(error::UNAVAILABLE, http_request.Send().code());
  EXPECT_EQ("", string(scratch.begin(), scratch.end()));
}
// NOTE(review): `"http:` literals below are truncated, and the trailing
// annotations after each make_tuple (likely "// at time T, N bytes" comments)
// appear to have been stripped as well.

// Progress ticks where the download byte count eventually advances must not
// trip the stuck-transfer detector.
TEST(CurlHttpRequestTest, ProgressIsOk) {
  FakeEnv env;
  FakeLibCurl libcurl(
      "test", 200,
      {
          std::make_tuple(100, 0) ,
          std::make_tuple(110, 0) ,
          std::make_tuple(200, 100)
      },
      &env);
  CurlHttpRequest http_request(&libcurl, &env);
  http_request.SetUri("http:
  TF_EXPECT_OK(http_request.Send());
}
// Progress ticks with no byte movement over a long interval must abort the
// transfer via the progress callback (libcurl code 42 -> UNAVAILABLE).
TEST(CurlHttpRequestTest, ProgressIsStuck) {
  FakeEnv env;
  FakeLibCurl libcurl(
      "test", 200,
      {
          std::make_tuple(100, 10) ,
          std::make_tuple(130, 10) ,
          std::make_tuple(170, 10)
      },
      &env);
  CurlHttpRequest http_request(&libcurl, &env);
  http_request.SetUri("http:
  auto status = http_request.Send();
  EXPECT_EQ(error::UNAVAILABLE, status.code());
  EXPECT_EQ(
      "Error executing an HTTP request: libcurl code 42 meaning 'Operation "
      "was aborted by an application callback', error details: (none)",
      status.message());
}
// RequestStats spy: records the arguments of each RecordRequest /
// RecordResponse callback so tests can verify the stats hooks fire with the
// right request, URI, method, and status.
// NOTE(review): the `"http:` member initializers below are truncated — the
// remainder of each URL was stripped as if it were a comment.
class TestStats : public HttpRequest::RequestStats {
 public:
  ~TestStats() override = default;
  void RecordRequest(const HttpRequest* request, const string& uri,
                     HttpRequest::RequestMethod method) override {
    has_recorded_request_ = true;
    record_request_request_ = request;
    record_request_uri_ = uri;
    record_request_method_ = method;
  }
  void RecordResponse(const HttpRequest* request, const string& uri,
                      HttpRequest::RequestMethod method,
                      const absl::Status& result) override {
    has_recorded_response_ = true;
    record_response_request_ = request;
    record_response_uri_ = uri;
    record_response_method_ = method;
    record_response_result_ = result;
  }
  // Captured RecordRequest arguments.
  const HttpRequest* record_request_request_ = nullptr;
  string record_request_uri_ = "http:
  HttpRequest::RequestMethod record_request_method_ =
      HttpRequest::RequestMethod::kGet;
  // Captured RecordResponse arguments.
  const HttpRequest* record_response_request_ = nullptr;
  string record_response_uri_ = "http:
  HttpRequest::RequestMethod record_response_method_ =
      HttpRequest::RequestMethod::kGet;
  absl::Status record_response_result_;
  bool has_recorded_request_ = false;
  bool has_recorded_response_ = false;
};
// A FakeLibCurl that additionally checks the ordering contract between the
// RequestStats callbacks and the actual transfer: at the moment
// curl_easy_perform runs, RecordRequest must already have fired and
// RecordResponse must not have. Snapshots are taken here and asserted by
// the tests below.
// Fix: removed the stray ';' that followed the curl_easy_perform body
// (an empty member declaration flagged by -Wextra-semi / clang-tidy).
class StatsTestFakeLibCurl : public FakeLibCurl {
 public:
  StatsTestFakeLibCurl(TestStats* stats, const string& response_content,
                       uint64 response_code)
      : FakeLibCurl(response_content, response_code), stats_(stats) {}
  CURLcode curl_easy_perform(CURL* curl) override {
    CHECK(!performed_request_);  // each fake handle performs at most once
    performed_request_ = true;
    stats_had_recorded_request_ = stats_->has_recorded_request_;
    stats_had_recorded_response_ = stats_->has_recorded_response_;
    return FakeLibCurl::curl_easy_perform(curl);
  }
  TestStats* stats_;
  bool performed_request_ = false;
  // State of the stats object observed at perform time.
  bool stats_had_recorded_request_;
  bool stats_had_recorded_response_;
};
// NOTE(review): `"http:` literals below are truncated and must be restored.

// Successful GET: both stats callbacks fire with the right arguments, and
// the perform-time snapshots prove RecordRequest preceded the transfer
// while RecordResponse followed it.
TEST(CurlHttpRequestTest, StatsGetSuccessful) {
  TestStats stats;
  StatsTestFakeLibCurl libcurl(&stats, "get response", 200);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetRequestStats(&stats);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  TF_EXPECT_OK(http_request.Send());
  EXPECT_EQ("get response", string(scratch.begin(), scratch.end()));
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_);
  TF_EXPECT_OK(stats.record_response_result_);
  EXPECT_TRUE(libcurl.performed_request_);
  EXPECT_TRUE(libcurl.stats_had_recorded_request_);
  EXPECT_FALSE(libcurl.stats_had_recorded_response_);
}
// Failed GET (404): RecordResponse must carry the NotFound status that
// Send() itself returned.
TEST(CurlHttpRequestTest, StatsGetNotFound) {
  TestStats stats;
  StatsTestFakeLibCurl libcurl(&stats, "get other response", 404);
  CurlHttpRequest http_request(&libcurl);
  std::vector<char> scratch;
  scratch.insert(scratch.begin(), kTestContent.begin(), kTestContent.end());
  scratch.reserve(100);
  http_request.SetRequestStats(&stats);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetRange(100, 199);
  http_request.SetResultBuffer(&scratch);
  absl::Status s = http_request.Send();
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kGet, stats.record_response_method_);
  EXPECT_TRUE(absl::IsNotFound(stats.record_response_result_));
  EXPECT_EQ(s, stats.record_response_result_);
  EXPECT_TRUE(libcurl.performed_request_);
  EXPECT_TRUE(libcurl.stats_had_recorded_request_);
  EXPECT_FALSE(libcurl.stats_had_recorded_response_);
}
// NOTE(review): `"http:` literals below are truncated and must be restored.

// The stats callbacks must report the correct RequestMethod for each verb.
TEST(CurlHttpRequestTest, StatsPost) {
  TestStats stats;
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetRequestStats(&stats);
  string content = "post body content";
  http_request.SetUri("http:
  http_request.SetPostFromBuffer(content.c_str(), content.size());
  TF_EXPECT_OK(http_request.Send());
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kPost, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kPost, stats.record_response_method_);
  TF_EXPECT_OK(stats.record_response_result_);
}
TEST(CurlHttpRequestTest, StatsDelete) {
  TestStats stats;
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetRequestStats(&stats);
  http_request.SetUri("http:
  http_request.SetDeleteRequest();
  TF_EXPECT_OK(http_request.Send());
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kDelete, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kDelete, stats.record_response_method_);
  TF_EXPECT_OK(stats.record_response_result_);
}
TEST(CurlHttpRequestTest, StatsPut) {
  TestStats stats;
  FakeLibCurl libcurl("", 200);
  CurlHttpRequest http_request(&libcurl);
  http_request.SetRequestStats(&stats);
  http_request.SetUri("http:
  http_request.AddAuthBearerHeader("fake-bearer");
  http_request.SetPutEmptyBody();
  TF_EXPECT_OK(http_request.Send());
  ASSERT_TRUE(stats.has_recorded_request_);
  EXPECT_EQ(&http_request, stats.record_request_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kPut, stats.record_request_method_);
  ASSERT_TRUE(stats.has_recorded_response_);
  EXPECT_EQ(&http_request, stats.record_response_request_);
  EXPECT_EQ("http:
  EXPECT_EQ(HttpRequest::RequestMethod::kPut, stats.record_response_method_);
  TF_EXPECT_OK(stats.record_response_result_);
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/curl_http_request.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/cloud/curl_http_request_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
00b7dd90-cd98-4777-86c0-fa8ab725430b | cpp | tensorflow/tensorflow | xplane_to_step_events | tensorflow/core/profiler/convert/xplane_to_step_events.cc | tensorflow/core/profiler/convert/xplane_to_step_events_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include <cstdint>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/match.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "xla/tsl/profiler/utils/tpu_xplane_utils.h"
#include "xla/tsl/profiler/utils/xplane_schema.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/steps_db.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/op_metrics_db_utils.h"
#include "tensorflow/core/profiler/utils/trace_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
namespace {
// Returns true iff `event_name` explicitly marks a host step boundary:
// it starts with "train", "test", or "TraceContext" AND is a top-level
// name (contains no '/' scope separator).
inline bool IsExplicitHostStepMarker(absl::string_view event_name) {
  const bool has_marker_prefix = absl::StartsWith(event_name, "train") ||
                                 absl::StartsWith(event_name, "test") ||
                                 absl::StartsWith(event_name, "TraceContext");
  return has_marker_prefix && !absl::StrContains(event_name, "/");
}
// Returns true iff `event_name` denotes genuine CPU compute work, i.e. it
// is neither a runtime dispatch wrapper (EagerExecute / EagerLocalExecute /
// EagerKernelExecute / FunctionRun) nor an explicit host step marker.
inline bool IsRealCpuCompute(absl::string_view event_name) {
  if (IsExplicitHostStepMarker(event_name)) return false;
  if (absl::StartsWith(event_name, "EagerExecute")) return false;
  if (absl::StartsWith(event_name, "EagerLocalExecute")) return false;
  if (absl::StartsWith(event_name, "EagerKernelExecute")) return false;
  if (absl::StartsWith(event_name, "FunctionRun")) return false;
  return true;
}
// Extracts the value of the "num_bytes" field from a memcpy detail string,
// which encodes alternating key/value tokens separated by ':' or '\n'
// (e.g. "num_bytes:4096\n..."). Returns 0 if the field is absent or its
// value is not a parsable integer.
// Fix: guard the value index so a trailing key with no value (odd token
// count) can no longer read past the end of `params`.
uint64 ParseNumBytesFromMemcpyDetail(absl::string_view memcpy_detail) {
  const std::vector<absl::string_view> params =
      absl::StrSplit(memcpy_detail, absl::ByAnyChar(":\n"));
  for (uint32 ii = 0; ii + 1 < params.size(); ii += 2) {
    if (params[ii] != "num_bytes") continue;
    uint64 value = 0;
    if (absl::SimpleAtoi(params[ii + 1], &value)) return value;
    break;
  }
  return 0ULL;
}
// Classifies a GPU kernel as fp16 vs fp32 compute. Tensor shape info, when
// present, is authoritative; otherwise the kernel name is scanned for
// half-precision hints ("half" / "fp16").
EventType ClassifyGpuCompute(absl::string_view event_name,
                             absl::string_view tensor_shapes) {
  if (!tensor_shapes.empty()) {
    return absl::StrContains(tensor_shapes, "half") ? DEVICE_COMPUTE_16
                                                    : DEVICE_COMPUTE_32;
  }
  const bool looks_like_fp16 = absl::StrContains(event_name, "half") ||
                               absl::StrContains(event_name, "fp16");
  return looks_like_fp16 ? DEVICE_COMPUTE_16 : DEVICE_COMPUTE_32;
}
// Maps a GPU event to its EventType: memcpy ops become transfer events,
// NCCL kernels become device collectives, and everything else is compute
// (classified fp16/fp32 by ClassifyGpuCompute).
EventType ClassifyGpuEvent(absl::string_view event_name,
                           absl::string_view tensor_shapes) {
  tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(event_name);
  if (tsl::profiler::IsMemcpyHToDOp(tf_op)) return HOST_TO_DEVICE;
  if (tsl::profiler::IsMemcpyDToHOp(tf_op)) return DEVICE_TO_HOST;
  if (tsl::profiler::IsMemcpyDToDOp(tf_op)) return DEVICE_TO_DEVICE;
  if (absl::StartsWithIgnoreCase(event_name, "nccl")) {
    return DEVICE_COLLECTIVES;
  }
  return ClassifyGpuCompute(event_name, tensor_shapes);
}
// Maps a host (CPU) event to its EventType. Infeed-enqueue and H2D memcpy
// ops are host-to-device transfers; when a device is present, correlated
// events (and ExecutorState::Process) count as kernel-launch preparation;
// IteratorGetNext is input-pipeline waiting; the rest is host compute.
EventType ClassifyCpuEvent(absl::string_view event_name, bool has_device,
                           bool has_correlation_id) {
  tsl::profiler::TfOp tf_op = tsl::profiler::ParseTfOpFullname(event_name);
  if (tsl::profiler::IsInfeedEnqueueOp(tf_op) ||
      tsl::profiler::IsMemcpyHToDOp(tf_op)) {
    return HOST_TO_DEVICE;
  }
  if (tsl::profiler::IsMemcpyHToHOp(tf_op)) {
    return HOST_TO_HOST;
  }
  if (has_device) {
    const bool is_launch_prep =
        has_correlation_id ||
        absl::StartsWithIgnoreCase(event_name, "ExecutorState::Process");
    if (is_launch_prep) return HOST_PREPARE;
  }
  if (absl::StartsWithIgnoreCase(event_name, "IteratorGetNext")) {
    return HOST_WAIT_INPUT;
  }
  return HOST_COMPUTE;
}
}
// Converts one host thread's XLine into StepEvents keyed by group id.
// If `device_step_events` is non-null, only groups that also have device
// activity are kept. Per grouped event:
//   - explicit markers (train*/test*/TraceContext*) -> kExplicitHostStepMarker;
//   - events carrying a step_name stat -> kImplicitHostStepMarker, and the
//     step's name is set from that stat;
//   - remaining "real" CPU work -> an EventTypeSpan via ClassifyCpuEvent.
StepEvents ConvertHostThreadsXLineToStepEvents(
    const XLineVisitor& line, const StepEvents* device_step_events) {
  StepEvents result;
  line.ForEachEvent([&](const XEventVisitor& event) {
    int64_t correlation_id = -1;
    int64_t group_id = -1;
    absl::string_view step_name;
    // Collect the relevant stats in a single pass over the event's stats.
    event.ForEachStat([&](const XStatVisitor& stat) {
      if (!stat.Type().has_value()) return;
      switch (stat.Type().value()) {
        case StatType::kCorrelationId:
          correlation_id = stat.IntValue();
          break;
        case StatType::kGroupId:
          group_id = stat.IntValue();
          break;
        case StatType::kStepName:
          step_name = stat.StrOrRefValue();
          break;
      }
    });
    // Events without a group id cannot be attributed to any step.
    if (group_id < 0) return;
    bool has_device = (device_step_events != nullptr);
    // When device steps exist, drop host groups with no device counterpart.
    if (has_device && !device_step_events->contains(group_id)) return;
    if (IsExplicitHostStepMarker(event.Name())) {
      result[group_id].AddMarker(
          StepMarker(StepMarkerType::kExplicitHostStepMarker, event.Name(),
                     event.GetTimespan()));
    } else if (!step_name.empty()) {
      // A grouped event with a step name serves as an implicit step marker.
      result[group_id].AddMarker(
          StepMarker(StepMarkerType::kImplicitHostStepMarker, event.Name(),
                     event.GetTimespan()))
    } else if (IsRealCpuCompute(event.Name())) {
      result[group_id].AddEvent(EventTypeSpan(
          ClassifyCpuEvent(event.Name(), has_device, correlation_id >= 0),
          event.GetTimespan()));
    }
    if (!step_name.empty()) {
      result[group_id].SetStepName(std::string(step_name));
    }
  });
  return result;
}
// Builds host StepEvents for an entire host XPlane by converting each
// thread's line independently and union-merging the per-thread results.
StepEvents ConvertHostThreadsXPlaneToStepEvents(
    const XPlane& host_trace, const StepEvents* device_step_events) {
  StepEvents combined_events;
  XPlaneVisitor visitor = tsl::profiler::CreateTfXPlaneVisitor(&host_trace);
  visitor.ForEachLine([&](const XLineVisitor& line) {
    UnionCombineStepEvents(
        ConvertHostThreadsXLineToStepEvents(line, device_step_events),
        &combined_events);
  });
  return combined_events;
}
// Extracts per-step device markers from a step-info line: every event that
// carries a group-id stat becomes a kDeviceStepMarker for that group.
StepEvents ConvertDeviceStepInfoToStepMarkers(const XLineVisitor& line) {
  StepEvents step_markers;
  line.ForEachEvent([&](const XEventVisitor& event) {
    std::optional<XStatVisitor> group_id_stat =
        event.GetStat(StatType::kGroupId);
    if (!group_id_stat.has_value()) return;
    step_markers[group_id_stat->IntValue()].AddMarker(
        StepMarker(StepMarkerType::kDeviceStepMarker, event.Name(),
                   event.GetTimespan()));
  });
  return step_markers;
}
// Converts one GPU stream's XLine into StepEvents keyed by group id.
// Only events carrying both a correlation id and a group id are kept.
// Each kept event is classified via ClassifyGpuEvent; collectives record
// an AllReduceInfo and memcpy events record a transfer with its byte count.
StepEvents ConvertDeviceTraceXLineToStepEvents(const uint64 device_id,
                                               const XLineVisitor& line) {
  StepEvents result;
  line.ForEachEvent([&](const XEventVisitor& event) {
    int64_t correlation_id = -1;
    int64_t group_id = -1;
    absl::string_view tensor_shapes;
    absl::string_view memcpy_details;
    // Collect the relevant stats in a single pass.
    event.ForEachStat([&](const XStatVisitor& stat) {
      if (!stat.Type().has_value()) return;
      switch (stat.Type().value()) {
        case StatType::kCorrelationId:
          correlation_id = stat.IntValue();
          break;
        case StatType::kGroupId:
          group_id = stat.IntValue();
          break;
        case StatType::kTensorShapes:
          tensor_shapes = stat.StrOrRefValue();
          break;
        case StatType::kMemcpyDetails:
          memcpy_details = stat.StrOrRefValue();
          break;
      }
    });
    // Only correlated (launched from host) and grouped kernels are
    // attributable to a step.
    if (correlation_id >= 0 && group_id >= 0) {
      EventType event_type = ClassifyGpuEvent(event.Name(), tensor_shapes);
      EventTypeSpan event_type_span(event_type, event.GetTimespan());
      result[group_id].AddEvent(event_type_span);
      switch (event_type) {
        case DEVICE_COLLECTIVES: {
          AllReduceInfo collective_ops;
          collective_ops.set_start_time_ps(event.TimestampPs());
          // NOTE(review): start uses TimestampPs() but end uses
          // EndOffsetPs() — confirm both are meant to be in the same
          // (absolute vs line-relative) time base.
          collective_ops.set_end_time_ps(event.EndOffsetPs());
          result[group_id].AddCollectiveOpEvent(device_id, collective_ops);
          break;
        }
        case HOST_TO_DEVICE:
        case DEVICE_TO_DEVICE:
        case DEVICE_TO_HOST: {
          // Memcpy events additionally contribute byte-transfer statistics.
          uint64 bytes_transferred =
              ParseNumBytesFromMemcpyDetail(memcpy_details);
          result[group_id].AddDeviceMemoryTransferEvent(
              event_type, event.GetTimespan(), bytes_transferred);
          break;
        }
        default:
          return;
      }
    }
  });
  return result;
}
// Converts one TPU core's XLine into StepEvents keyed by group id by
// accumulating an OpMetricsDb per group and attaching the finalized DB to
// that group's step under `device_id`.
StepEvents ConvertTpuDeviceTraceXLineToStepEvents(const uint64 device_id,
                                                  const XLineVisitor& line) {
  StepEvents result;
  // One op-metrics builder per group id (i.e. per step).
  absl::flat_hash_map<int64_t , XEventsOpMetricsDbBuilder>
      op_metrics_builder;
  line.ForEachEvent([&](const XEventVisitor& event) {
    auto group_id = event.GetStat(StatType::kGroupId);
    if (!group_id.has_value()) return;
    op_metrics_builder[group_id->IntOrUintValue()].AddOpMetric(event);
  });
  for (auto& [group_id, builder] : op_metrics_builder) {
    // NOTE(review): upstream comments suggest total-time stats here may need
    // adjustment before use in DutyCycle computations; confirm consumers.
    result[group_id].SetPerCoreOpMetricsDb(builder.Finalize(), device_id);
  }
  return result;
}
// Converts a whole device XPlane to StepEvents, dispatching per line:
//   - step-info / TPU step / SparseCore step lines contribute step markers;
//   - derived lines are skipped;
//   - TPU and SparseCore data lines are converted per-core and INTERSECTED
//     into the result (a step must appear on every stream to survive);
//   - GPU stream lines are converted and UNIONED into the result.
StepEvents ConvertDeviceTraceXPlaneToStepEvents(const XPlane& device_trace) {
  StepEvents device_step_events;
  XPlaneVisitor plane = tsl::profiler::CreateTfXPlaneVisitor(&device_trace);
  std::optional<int> tpu_core_id = tsl::profiler::GetTensorCoreId(plane.Name());
  std::optional<int> sc_core_id = tsl::profiler::GetSparseCoreId(plane.Name());
  plane.ForEachLine([&](const XLineVisitor& line) {
    int64_t line_id = line.Id();
    if (line_id == kThreadIdStepInfo ||
        (tpu_core_id.has_value() &&
         line.Name() == tsl::profiler::kStepLineName) ||
        (sc_core_id.has_value() &&
         line.Name() == tsl::profiler::kSparseCoreStepLineName)) {
      StepEvents step_marker_events = ConvertDeviceStepInfoToStepMarkers(line);
      UnionCombineStepEvents(step_marker_events, &device_step_events);
    } else if (IsDerivedThreadId(line_id)) {
      // Derived lines duplicate information from the raw streams.
      return;
    } else {
      StepEvents stream_step_events;
      if (tpu_core_id.has_value()) {
        stream_step_events =
            ConvertTpuDeviceTraceXLineToStepEvents(*tpu_core_id, line);
        IntersectCombineStepEvents(stream_step_events, &device_step_events);
      } else if (sc_core_id.has_value()) {
        // SparseCore ids are offset to avoid colliding with TensorCore ids.
        stream_step_events = ConvertTpuDeviceTraceXLineToStepEvents(
            kSparseCoreIndexStart + *sc_core_id, line);
        IntersectCombineStepEvents(stream_step_events, &device_step_events);
      } else {
        stream_step_events =
            ConvertDeviceTraceXLineToStepEvents(plane.Id(), line);
        UnionCombineStepEvents(stream_step_events, &device_step_events);
      }
    }
  });
  return device_step_events;
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_step_events.h"
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/event_span.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
TEST(ConvertXPlaneToOpStats, CpuOnlyStepDbTest) {
constexpr int64_t kFirstStepNum = 123;
constexpr int64_t kSecondStepNum = 456;
constexpr int64_t kFirstStepId = 0;
constexpr int64_t kSecondStepId = 1;
constexpr int64_t kFirstCorrelationId = 100;
constexpr int64_t kSecondCorrelationId = 200;
XSpace space;
XPlane* host_plane = GetOrCreateHostXPlane(&space);
XPlaneBuilder host_plane_builder(host_plane);
host_plane_builder.ReserveLines(2);
auto main_thread = host_plane_builder.GetOrCreateLine(0);
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
0, 100, {{StatType::kStepNum, kFirstStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
10, 90,
{{StatType::kStepId, kFirstStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kTraceContext,
300, 100, {{StatType::kStepNum, kSecondStepNum}});
CreateXEvent(&host_plane_builder, &main_thread, HostEventType::kFunctionRun,
310, 90,
{{StatType::kStepId, kSecondStepId},
{StatType::kProducerType, int64_t{1}},
{StatType::kProducerId, kSecondStepId}});
auto tf_executor_thread = host_plane_builder.GetOrCreateLine(1);
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 20, 20,
{{StatType::kStepId, kFirstStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kFirstStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 30, 10,
{{StatType::kCorrelationId, kFirstCorrelationId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread,
HostEventType::kExecutorStateProcess, 320, 20,
{{StatType::kStepId, kSecondStepId},
{StatType::kConsumerType, int64_t{1}},
{StatType::kConsumerId, kSecondStepId}});
CreateXEvent(&host_plane_builder, &tf_executor_thread, "matmul", 330, 10,
{{StatType::kCorrelationId, kSecondCorrelationId}});
XPlane* device_plane = space.add_planes();
XPlaneBuilder device_plane_builder(device_plane);
device_plane_builder.ReserveLines(1);
auto stream = device_plane_builder.GetOrCreateLine(0);
CreateXEvent(&device_plane_builder, &stream, "matmul", 50, 40,
{{StatType::kCorrelationId, kFirstCorrelationId}});
tsl::profiler::GroupTfEvents(&space);
StepEvents device_step_events =
ConvertDeviceTraceXPlaneToStepEvents(*device_plane);
EXPECT_EQ(device_step_events.size(), 1);
EXPECT_EQ(device_step_events[0].Events().size(), 1);
StepEvents host_step_events =
ConvertHostThreadsXPlaneToStepEvents(*host_plane, &device_step_events);
EXPECT_EQ(host_step_events.size(), 1);
EXPECT_EQ(host_step_events[0].Markers().size(), 1);
EXPECT_EQ(host_step_events[0].Events().size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_step_events.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_step_events_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bb58629f-8bc9-481c-a8f4-361cf88721bc | cpp | tensorflow/tensorflow | parallel_task_assignment | third_party/xla/xla/service/cpu/parallel_task_assignment.cc | third_party/xla/xla/service/cpu/parallel_task_assignment_test.cc | #include "xla/service/cpu/parallel_task_assignment.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/service/cpu/shape_partition.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
namespace cpu {
class SimpleCostModel : public ParallelCostModel {
public:
SimpleCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size)
: max_parallelism_(max_parallelism), shape_size_(shape_size) {}
~SimpleCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
const int64_t instruction_cost = shape_size_(instruction->shape());
const int64_t min_cost_per_thread = 256LL << 10;
return std::min(
max_parallelism_,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
};
class DefaultCostModel : public ParallelCostModel {
public:
DefaultCostModel(const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size,
std::unique_ptr<HloCostAnalysis> cost_analysis)
: max_parallelism_(max_parallelism),
shape_size_(shape_size),
cost_analysis_(std::move(cost_analysis)) {}
~DefaultCostModel() override {}
int64_t GetParallelTaskCount(HloInstruction* instruction) override {
int64_t instruction_cost;
int64_t min_cost_per_thread;
int64_t max_parallelism;
const int64_t bytes_accessed =
std::max(int64_t{1}, cost_analysis_->bytes_accessed(*instruction));
const float flops_to_bytes_ratio =
cost_analysis_->flop_count(*instruction) /
static_cast<float>(bytes_accessed);
if (flops_to_bytes_ratio <= 1.0) {
max_parallelism = std::min<int64_t>(
max_parallelism_, std::ceil(std::sqrt(tsl::port::MaxParallelism())));
instruction_cost = bytes_accessed;
min_cost_per_thread = 256LL << 10;
} else {
max_parallelism = max_parallelism_;
instruction_cost =
1 * cost_analysis_->flop_count(*instruction) +
2 * cost_analysis_->transcendental_count(*instruction) +
10 * cost_analysis_->bytes_accessed(*instruction);
min_cost_per_thread = 100000;
}
return std::min(
max_parallelism,
std::max(int64_t{1}, instruction_cost / min_cost_per_thread));
}
private:
const int64_t max_parallelism_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
const std::unique_ptr<HloCostAnalysis> cost_analysis_;
};
ParallelTaskAssignment::ParallelTaskAssignment(
const int64_t max_parallelism,
const HloCostAnalysis::ShapeSizeFunction& shape_size, HloModule* module,
const TargetMachineFeatures* target_machine_features)
: target_machine_features_(*target_machine_features) {
VLOG(1) << "ParallelTaskAssignment max_parallelism: " << max_parallelism;
auto cost_analysis = std::make_unique<HloCostAnalysis>(shape_size);
HloComputation* computation = module->entry_computation();
absl::Status status =
computation->root_instruction()->Accept(cost_analysis.get());
if (status.ok()) {
cost_model_ = std::make_unique<DefaultCostModel>(
max_parallelism, shape_size, std::move(cost_analysis));
} else {
cost_model_ =
std::make_unique<SimpleCostModel>(max_parallelism, shape_size);
}
}
int64_t ParallelTaskAssignment::GetTargetParallelTaskCount(
HloInstruction* instruction) {
auto opcode = instruction->opcode();
if (llvm_ir::MayBeImplementedAsInPlaceDynamicUpdateSlice(instruction) ||
instruction->shape().IsTuple() || opcode == HloOpcode::kRng ||
opcode == HloOpcode::kConstant) {
return 1;
}
if (instruction->IsElementwise() || instruction->IsLoopFusion() ||
opcode == HloOpcode::kBroadcast || opcode == HloOpcode::kConcatenate ||
opcode == HloOpcode::kDynamicSlice ||
opcode == HloOpcode::kDynamicUpdateSlice ||
opcode == HloOpcode::kGather || opcode == HloOpcode::kIota ||
opcode == HloOpcode::kPad || opcode == HloOpcode::kReduce ||
opcode == HloOpcode::kReduceWindow || opcode == HloOpcode::kReshape ||
opcode == HloOpcode::kReverse || opcode == HloOpcode::kSlice ||
opcode == HloOpcode::kTranspose ||
(opcode == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*instruction,
target_machine_features_))) {
return cost_model_->GetParallelTaskCount(instruction);
}
return 1;
}
absl::StatusOr<bool> ParallelTaskAssigner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "ParallelTaskAssigner ENTRY");
XLA_VLOG_LINES(3, module->ToString());
HloToParallelTasks hlo_to_parallel_tasks;
ComputeTargetParallelTasks(module, &hlo_to_parallel_tasks);
bool changed = AssignParallelTasks(module, hlo_to_parallel_tasks);
XLA_VLOG_LINES(2, "ParallelTaskAssigner EXIT");
XLA_VLOG_LINES(3, module->ToString());
return changed;
}
bool ParallelTaskAssigner::AssignParallelTasks(
HloModule* module, const HloToParallelTasks& hlo_to_parallel_tasks) {
return AssignParallelTasksHelper(module, module->entry_computation(),
hlo_to_parallel_tasks);
}
bool ParallelTaskAssigner::AssignParallelTasksHelper(
HloModule* module, HloComputation* computation,
const HloToParallelTasks& hlo_to_parallel_tasks) {
bool changed = false;
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
for (auto* instruction : instructions) {
if (instruction->opcode() == HloOpcode::kWhile) {
changed |= AssignParallelTasksHelper(module, instruction->while_body(),
hlo_to_parallel_tasks);
continue;
} else if (instruction->opcode() == HloOpcode::kCall) {
changed |= AssignParallelTasksHelper(module, instruction->to_apply(),
hlo_to_parallel_tasks);
continue;
}
auto it = hlo_to_parallel_tasks.find(instruction);
if (it == hlo_to_parallel_tasks.end()) {
continue;
}
const int64_t target_parallel_task_count = (*it).second;
auto dim_partition_counts = ShapePartitionAssigner(instruction->shape())
.Run(target_parallel_task_count);
const int64_t total_partition_count =
ShapePartitionAssigner::GetTotalPartitionCount(dim_partition_counts);
if (total_partition_count <= 1) {
continue;
}
auto* call = module->OutlineExpressionFromComputation(
{instruction}, absl::StrCat("parallel_", instruction->name()),
computation);
auto* new_root = call->to_apply()->root_instruction();
BackendConfig backend_config;
absl::c_copy(dim_partition_counts,
tsl::protobuf::RepeatedFieldBackInserter(
backend_config.mutable_outer_dimension_partitions()));
TF_CHECK_OK(new_root->set_backend_config(backend_config));
VLOG(2) << "Assigned parallel task count: " << total_partition_count
<< " to instruction: " << new_root->name()
<< " parent: " << new_root->parent()->name();
changed = true;
}
return changed;
}
void ParallelTaskAssigner::ComputeTargetParallelTasks(
HloModule* module, HloToParallelTasks* hlo_to_parallel_tasks) {
ParallelTaskAssignment parallel_task_assignment(max_parallelism_,
shape_size_function_, module,
&target_machine_features_);
for (auto* computation : module->MakeNonfusionComputations()) {
for (auto* instruction : computation->instructions()) {
const int64_t target_parallel_task_count =
parallel_task_assignment.GetTargetParallelTaskCount(instruction);
if (target_parallel_task_count > 1) {
hlo_to_parallel_tasks->insert(
{instruction, target_parallel_task_count});
}
}
}
}
}
} | #include "xla/service/cpu/parallel_task_assignment.h"
#include <cstdint>
#include <memory>
#include <string>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/cpu_executable.h"
#include "xla/service/cpu/target_machine_features.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class ParallelTaskAssignmentTest : public HloTestBase {
protected:
const int max_parallelism_ = 10;
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features_;
ParallelTaskAssignmentTest()
: HloTestBase(), target_machine_features_([](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
}) {}
absl::StatusOr<bool> RunParallelTaskAssigner(HloModule* module) {
return cpu::ParallelTaskAssigner(max_parallelism_, shape_size_func_,
&target_machine_features_)
.Run(module);
}
const HloCostAnalysis::ShapeSizeFunction shape_size_func_ =
cpu::CpuExecutable::ShapeSizeBytes;
};
TEST_F(ParallelTaskAssignmentTest, ReduceWindowParallelized) {
constexpr char hlo_string[] = R"(
HloModule m
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY e {
p0 = f32[512,256] parameter(0)
p1 = f32[] parameter(1)
ROOT reduce-window = f32[16,256] reduce-window(p0, p1),
window={size=32x1 stride=32x1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_TRUE(changed);
auto* reduce_window = FindInstruction(m.get(), HloOpcode::kReduceWindow);
TF_ASSERT_OK_AND_ASSIGN(auto backend_config,
reduce_window->backend_config<cpu::BackendConfig>());
EXPECT_EQ(backend_config.outer_dimension_partitions_size(), 1);
EXPECT_EQ(backend_config.outer_dimension_partitions(0), 2);
}
TEST_F(ParallelTaskAssignmentTest, DotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_Dot
ENTRY Dot {
dot_lhs = f32[196614,2]{1,0} parameter(0)
dot_rhs = f32[2,1]{1,0} parameter(1)
ROOT dot = f32[196614,1]{1,0} dot(dot_lhs, dot_rhs),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest,
FusedComputationWithDotOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_DotNestedInFusedComp
fused_computation.0 {
parameter.0 = f32[196614,2]{1,0} parameter(0)
parameter.0.1 = f32[2,1]{1,0} parameter(1)
parameter.0.2 = f32[196614,1]{1,0} parameter(2)
dot.0 = f32[196614,1]{1,0} dot(parameter.0, parameter.0.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT add.0 = f32[196614,1]{1,0} add(dot.0, parameter.0.2)
}
ENTRY DotNestedInFusedComp {
parameter = f32[196614,2]{1,0} parameter(0)
parameter.1 = f32[2,1]{1,0} parameter(1)
parameter.2 = f32[196614,1]{1,0} parameter(2)
ROOT fusion = f32[196614,1]{1,0} fusion(parameter, parameter.1,
parameter.2), kind=kOutput, calls=fused_computation.0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, RngOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_rng
ENTRY Rng {
src0 = f32[] parameter(0)
src1 = f32[] parameter(1)
ROOT rng0 = f32[1234567,2]{1,0} rng(f32[] src0, f32[] src1),
distribution=rng_uniform
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InfeedOutfeedOperationNotParallelized) {
const std::string hlo_string = R"(
HloModule TestTaskParallel_infeed_outfeed
ENTRY InfeedOutfeed {
token0 = token[] after-all()
infeed0 = (u32[12345678,2]{1,0}, token[]) infeed(token0)
infeed0.data = u32[12345678,2]{1,0} get-tuple-element((u32[12345678,2]{1,0}, token[]) infeed0), index=0
ROOT outfeed0 = token[] outfeed(infeed0.data, token0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, InPlaceDynamicUpdateSliceNotParallelized) {
const std::string hlo_string = R"(
HloModule test
body {
zero = s32[] constant(0)
one = s32[] constant(1)
ten = s32[] constant(10)
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
i = s32[] get-tuple-element(loop_carry), index=0
i_plus_ten = s32[] add(i, ten)
update = u32[1,100] get-tuple-element(loop_carry), index=1
data = u32[10000,100] get-tuple-element(loop_carry), index=2
new_data = u32[10000,100] dynamic-update-slice(data, update, i_plus_ten, zero)
new_i = s32[] add(i, one)
ROOT tuple = (s32[], u32[1,100], u32[10000,100]) tuple(new_i, update, new_data)
}
cond {
loop_carry = (s32[], u32[1,100], u32[10000,100]) parameter(0)
two = s32[] constant(2)
i = s32[] get-tuple-element(loop_carry), index=0
ROOT less-than = pred[] compare(i, two), direction=LT
}
ENTRY test {
zero = s32[] constant(0)
initial_i = s32[] parameter(0)
update = u32[1,100] parameter(1)
data = u32[10000,100] parameter(2)
tuple = (s32[], u32[1,100], u32[10000,100]) tuple(initial_i, update, data)
ROOT while = (s32[], u32[1,100], u32[10000,100]) while(tuple), condition=cond, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, AllReduceNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_allreduce
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY CRS {
input = f32[1234567] parameter(0)
ROOT crs = f32[1234567] all-reduce(input), replica_groups={}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
TEST_F(ParallelTaskAssignmentTest, ConstantNotParallelized) {
constexpr char hlo_string[] = R"(
HloModule TestTaskParallel_constant
ENTRY const {
ROOT constant = f32[1234567] constant({...})
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> m,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(bool changed, RunParallelTaskAssigner(m.get()));
EXPECT_FALSE(changed);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/parallel_task_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/parallel_task_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241962ce-9e32-45be-80a8-91426c83ee4c | cpp | tensorflow/tensorflow | device_id_manager | third_party/xla/xla/tsl/framework/device_id_manager.cc | tensorflow/core/common_runtime/device/device_id_manager_test.cc | #include "xla/tsl/framework/device_id_manager.h"
#include <unordered_map>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/tsl/framework/device_id.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tsl {
namespace {
class TfToPlatformDeviceIdMap {
public:
static TfToPlatformDeviceIdMap* singleton() {
static auto* id_map = new TfToPlatformDeviceIdMap;
return id_map;
}
absl::Status Insert(const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId platform_device_id)
TF_LOCKS_EXCLUDED(mu_) {
std::pair<IdMapType::iterator, bool> result;
{
mutex_lock lock(mu_);
TypeIdMapType::iterator device_id_map_iter =
id_map_.insert({type.type_string(), IdMapType()}).first;
result = device_id_map_iter->second.insert(
{tf_device_id.value(), platform_device_id.value()});
}
if (!result.second && platform_device_id.value() != result.first->second) {
return errors::AlreadyExists(
"TensorFlow device (", type, ":", tf_device_id.value(),
") is being mapped to multiple devices (", platform_device_id.value(),
" now, and ", result.first->second,
" previously), which is not supported. "
"This may be the result of providing different ",
type, " configurations (ConfigProto.gpu_options, for example ",
"different visible_device_list) when creating multiple Sessions in ",
"the same process. This is not currently supported, see ",
"https:
}
return absl::OkStatus();
}
bool Find(const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId* platform_device_id) const TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
auto type_id_map_iter = id_map_.find(type.type_string());
if (type_id_map_iter == id_map_.end()) return false;
auto id_map_iter = type_id_map_iter->second.find(tf_device_id.value());
if (id_map_iter == type_id_map_iter->second.end()) return false;
*platform_device_id = id_map_iter->second;
return true;
}
absl::StatusOr<std::vector<TfDeviceId>> GetTfDevicesOnPlatform(
const DeviceType& type, PlatformDeviceId platform_device_id) const
TF_LOCKS_EXCLUDED(mu_) {
tf_shared_lock lock(mu_);
auto type_id_map_iter = id_map_.find(type.type_string());
if (type_id_map_iter == id_map_.end()) {
return absl::NotFoundError(
absl::StrCat("TensorFlow device type: ", type.type_string(),
" was not registered"));
}
std::vector<TfDeviceId> tf_device_ids;
for (const auto& [tf_device, platform_device] : type_id_map_iter->second) {
if (platform_device == platform_device_id.value()) {
tf_device_ids.push_back(TfDeviceId(tf_device));
}
}
return tf_device_ids;
}
private:
TfToPlatformDeviceIdMap() = default;
void TestOnlyReset() TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
id_map_.clear();
}
using IdMapType = std::unordered_map<int32, int32>;
using TypeIdMapType = std::unordered_map<std::string, IdMapType>;
mutable mutex mu_;
TypeIdMapType id_map_ TF_GUARDED_BY(mu_);
friend class ::tsl::DeviceIdManager;
TfToPlatformDeviceIdMap(const TfToPlatformDeviceIdMap&) = delete;
void operator=(const TfToPlatformDeviceIdMap&) = delete;
};
}
absl::Status DeviceIdManager::InsertTfPlatformDeviceIdPair(
const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId platform_device_id) {
return TfToPlatformDeviceIdMap::singleton()->Insert(type, tf_device_id,
platform_device_id);
}
absl::Status DeviceIdManager::TfToPlatformDeviceId(
const DeviceType& type, TfDeviceId tf_device_id,
PlatformDeviceId* platform_device_id) {
if (TfToPlatformDeviceIdMap::singleton()->Find(type, tf_device_id,
platform_device_id)) {
return absl::OkStatus();
}
return errors::NotFound("TensorFlow device ", type, ":", tf_device_id.value(),
" was not registered");
}
absl::StatusOr<std::vector<TfDeviceId>> DeviceIdManager::GetTfDevicesOnPlatform(
const DeviceType& type, PlatformDeviceId platform_device_id) {
return TfToPlatformDeviceIdMap::singleton()->GetTfDevicesOnPlatform(
type, platform_device_id);
}
void DeviceIdManager::TestOnlyReset() {
TfToPlatformDeviceIdMap::singleton()->TestOnlyReset();
}
} | #include "tensorflow/core/common_runtime/device/device_id_manager.h"
#include <vector>
#include <gmock/gmock.h>
#include "tensorflow/core/common_runtime/device/device_id.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
PlatformDeviceId TfToPlatformDeviceId(const DeviceType& type, TfDeviceId tf) {
PlatformDeviceId platform_device_id;
TF_CHECK_OK(
DeviceIdManager::TfToPlatformDeviceId(type, tf, &platform_device_id));
return platform_device_id;
}
TEST(DeviceIdManagerTest, Basics) {
DeviceType device_type("GPU");
TfDeviceId key_0(0);
PlatformDeviceId value_0(0);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_0,
value_0));
EXPECT_EQ(value_0, TfToPlatformDeviceId(device_type, key_0));
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_0,
value_0));
EXPECT_EQ(value_0, TfToPlatformDeviceId(device_type, key_0));
TfDeviceId key_1(3);
PlatformDeviceId value_1(2);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_1,
value_1));
EXPECT_EQ(value_1, TfToPlatformDeviceId(device_type, key_1));
TfDeviceId key_2(10);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_2,
value_1));
EXPECT_EQ(value_1, TfToPlatformDeviceId(device_type, key_2));
ASSERT_FALSE(
DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type, key_2, value_0)
.ok());
ASSERT_FALSE(DeviceIdManager::TfToPlatformDeviceId(device_type,
TfDeviceId(100), &value_0)
.ok());
}
TEST(DeviceIdManagerTest, TwoDevices) {
DeviceType device_type0("GPU");
TfDeviceId key_0(0);
PlatformDeviceId value_0(0);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type0,
key_0, value_0));
DeviceType device_type1("XPU");
TfDeviceId key_1(2);
PlatformDeviceId value_1(3);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(device_type1,
key_1, value_1));
EXPECT_EQ(value_0, TfToPlatformDeviceId(device_type0, key_0));
EXPECT_EQ(value_1, TfToPlatformDeviceId(device_type1, key_1));
ASSERT_FALSE(
DeviceIdManager::TfToPlatformDeviceId(device_type0, key_1, &value_0)
.ok());
ASSERT_FALSE(
DeviceIdManager::TfToPlatformDeviceId(device_type1, key_0, &value_1)
.ok());
ASSERT_FALSE(
DeviceIdManager::TfToPlatformDeviceId("FOO", key_0, &value_0).ok());
}
TEST(DeviceIdManagerTest, GetTfDevicesOnSamePlatform) {
DeviceType device_gpu("GPU");
TfDeviceId tf_device_0(0);
PlatformDeviceId platform_0(0);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
device_gpu, tf_device_0, platform_0));
TfDeviceId tf_device_1(1);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
device_gpu, tf_device_1, platform_0));
DeviceType device_xpu("XPU");
TfDeviceId tf_device_2(2);
PlatformDeviceId platform_1(3);
TF_ASSERT_OK(DeviceIdManager::InsertTfPlatformDeviceIdPair(
device_xpu, tf_device_2, platform_1));
TF_ASSERT_OK_AND_ASSIGN(
std::vector<TfDeviceId> tf_device_ids_gpu,
DeviceIdManager::GetTfDevicesOnPlatform(device_gpu, platform_0));
EXPECT_THAT(tf_device_ids_gpu,
UnorderedElementsAre(tf_device_0, tf_device_1));
TF_ASSERT_OK_AND_ASSIGN(
tf_device_ids_gpu,
DeviceIdManager::GetTfDevicesOnPlatform(device_gpu, platform_1));
EXPECT_THAT(tf_device_ids_gpu, IsEmpty());
TF_ASSERT_OK_AND_ASSIGN(
std::vector<TfDeviceId> tf_device_ids_xpu,
DeviceIdManager::GetTfDevicesOnPlatform(device_xpu, platform_1));
EXPECT_THAT(tf_device_ids_xpu, UnorderedElementsAre(tf_device_2));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/device_id_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/device/device_id_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8065cdc5-9bf1-4411-a088-8e357458b8c8 | cpp | tensorflow/tensorflow | custom_call_program_serdes | third_party/xla/xla/python/ifrt/custom_call_program_serdes.cc | third_party/xla/xla/python/ifrt/custom_call_program_serdes_test.cc | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ExtensibleRTTI.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/array_spec.pb.h"
#include "xla/python/ifrt/custom_call_program.h"
#include "xla/python/ifrt/custom_call_program.pb.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/sharding.pb.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace ifrt {
namespace {
class CustomCallProgramSerDes
: public llvm::RTTIExtends<CustomCallProgramSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::CustomCallProgram";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
const CustomCallProgram& program =
llvm::cast<CustomCallProgram>(serializable);
CustomCallProgramProto proto;
proto.set_type(program.type);
proto.set_name(program.name);
absl::CopyCordToString(program.serialized_program_text,
proto.mutable_serialized_program_text());
*proto.mutable_devices() = program.devices->ToProto();
for (const ArraySpec& spec : program.input_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_input_specs(), spec.ToProto());
}
for (const ArraySpec& spec : program.output_specs) {
TF_ASSIGN_OR_RETURN(*proto.add_output_specs(), spec.ToProto());
}
return proto.SerializeAsString();
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
const auto* deserialize_program_options =
llvm::cast<DeserializeProgramOptions>(options.get());
CustomCallProgramProto proto;
if (!proto.ParseFromString(serialized)) {
return absl::InvalidArgumentError(
"Failed to parse serialized CustomCallProgramProto");
}
TF_ASSIGN_OR_RETURN(
tsl::RCReference<DeviceList> devices,
DeviceList::FromProto(deserialize_program_options->lookup_device,
proto.devices()));
std::vector<ArraySpec> input_specs;
input_specs.reserve(proto.input_specs_size());
for (const ArraySpecProto& spec_proto : proto.input_specs()) {
TF_ASSIGN_OR_RETURN(
ArraySpec spec,
ArraySpec::FromProto(deserialize_program_options->lookup_device,
spec_proto));
input_specs.push_back(std::move(spec));
}
std::vector<ArraySpec> output_specs;
output_specs.reserve(proto.output_specs_size());
for (const ArraySpecProto& spec_proto : proto.output_specs()) {
TF_ASSIGN_OR_RETURN(
ArraySpec spec,
ArraySpec::FromProto(deserialize_program_options->lookup_device,
spec_proto));
output_specs.push_back(std::move(spec));
}
return std::make_unique<CustomCallProgram>(
proto.type(), proto.name(),
absl::Cord(std::move(*proto.mutable_serialized_program_text())),
std::move(devices),
std::move(input_specs),
std::move(output_specs));
}
static char ID;
};
class CustomCallCompileOptionsSerDes
: public llvm::RTTIExtends<CustomCallCompileOptionsSerDes, SerDes> {
public:
absl::string_view type_name() const override {
return "xla::ifrt::CustomCallCompileOptions";
}
absl::StatusOr<std::string> Serialize(Serializable& serializable) override {
return "";
}
absl::StatusOr<std::unique_ptr<Serializable>> Deserialize(
const std::string& serialized,
std::unique_ptr<DeserializeOptions> options) override {
if (!serialized.empty()) {
return absl::InvalidArgumentError(
"Invalid serialized CustomCallCompileOptions; a serialized "
"CustomCallCompileOptions is expected to be an empty string");
}
return std::make_unique<CustomCallCompileOptions>();
}
static char ID;
};
[[maybe_unused]] char CustomCallProgramSerDes::ID = 0;
[[maybe_unused]] char CustomCallCompileOptionsSerDes::ID = 0;
bool register_custom_call_program_serdes = ([]{
RegisterSerDes<CustomCallProgram>(
std::make_unique<CustomCallProgramSerDes>());
}(), true);
bool register_custom_call_compile_options_serdes = ([]{
RegisterSerDes<CustomCallCompileOptions>(
std::make_unique<CustomCallCompileOptionsSerDes>());
}(), true);
}
}
} | #include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/bind_front.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "llvm/Support/Casting.h"
#include "xla/python/ifrt/array_spec.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/custom_call_program.h"
#include "xla/python/ifrt/device_list.h"
#include "xla/python/ifrt/device_test_util.h"
#include "xla/python/ifrt/dtype.h"
#include "xla/python/ifrt/memory.h"
#include "xla/python/ifrt/program_serdes.h"
#include "xla/python/ifrt/serdes.h"
#include "xla/python/ifrt/shape.h"
#include "xla/python/ifrt/sharding.h"
#include "xla/tsl/concurrency/ref_count.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
using ::testing::MatchesRegex;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
class CustomCallProgramSerDesTest : public test_util::DeviceTest {};
TEST_P(CustomCallProgramSerDesTest, RoundTrip) {
Shape shape0({10, 20});
Shape shard_shape0({5, 20});
tsl::RCReference<DeviceList> devices = GetDevices({0, 1});
std::shared_ptr<const Sharding> sharding0 =
ConcreteEvenSharding::Create(devices, MemoryKind(),
shape0,
shard_shape0);
Shape shape1({});
Shape shard_shape1({});
std::shared_ptr<const Sharding> sharding1 =
ConcreteEvenSharding::Create(devices, MemoryKind(),
shape1,
shard_shape1);
CustomCallProgram orig(
"test type",
"test name",
absl::Cord("test\0program\0text\0"),
std::move(devices),
{
ArraySpec{DType(DType::kF32), shape0,
sharding0},
},
{
ArraySpec{DType(DType::kF32), shape1,
sharding1},
});
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<CustomCallProgram> deserialized_program,
Deserialize<CustomCallProgram>(
serialized, std::make_unique<DeserializeProgramOptions>(
absl::bind_front(&Client::LookupDevice, client()))));
EXPECT_EQ(deserialized_program->type, "test type");
EXPECT_EQ(deserialized_program->name, "test name");
EXPECT_EQ(deserialized_program->serialized_program_text,
absl::Cord("test\0program\0text\0").Flatten());
EXPECT_EQ(*deserialized_program->devices, *orig.devices);
ASSERT_THAT(deserialized_program->input_specs, SizeIs(1));
EXPECT_EQ(deserialized_program->input_specs.front().dtype,
DType(DType::kF32));
EXPECT_EQ(deserialized_program->input_specs.front().shape, shape0);
const auto* deserialized_sharding0 = llvm::dyn_cast<ConcreteEvenSharding>(
deserialized_program->input_specs.front().sharding.get());
ASSERT_NE(deserialized_sharding0, nullptr);
EXPECT_EQ(*deserialized_sharding0->devices(), *sharding0->devices());
EXPECT_EQ(deserialized_sharding0->shape(), shape0);
EXPECT_EQ(deserialized_sharding0->shard_shape(), shard_shape0);
ASSERT_THAT(deserialized_program->output_specs, SizeIs(1));
EXPECT_EQ(deserialized_program->output_specs.front().dtype,
DType(DType::kF32));
EXPECT_EQ(deserialized_program->output_specs.front().shape, shape1);
const auto* deserialized_sharding1 = llvm::dyn_cast<ConcreteEvenSharding>(
deserialized_program->output_specs.front().sharding.get());
ASSERT_NE(deserialized_sharding1, nullptr);
EXPECT_EQ(*deserialized_sharding1->devices(), *sharding1->devices());
EXPECT_EQ(deserialized_sharding1->shape(), shape1);
EXPECT_EQ(deserialized_sharding1->shard_shape(), shard_shape1);
}
INSTANTIATE_TEST_SUITE_P(NumDevices, CustomCallProgramSerDesTest,
testing::Values(test_util::DeviceTestParam{
2,
2}));
TEST(CustomCallCompileOptionsSerDesTest, RoundTrip) {
CustomCallCompileOptions orig;
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
TF_EXPECT_OK(
Deserialize<CustomCallCompileOptions>(serialized, nullptr)
.status());
}
TEST(CustomCallCompileOptionsSerDesTest, InvalidSerialized) {
CustomCallCompileOptions orig;
TF_ASSERT_OK_AND_ASSIGN(Serialized serialized, Serialize(orig));
serialized.set_data("abc");
EXPECT_THAT(
Deserialize<CustomCallCompileOptions>(serialized, nullptr),
StatusIs(absl::StatusCode::kInvalidArgument,
MatchesRegex("Invalid serialized CustomCallCompileOptions.*")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/custom_call_program_serdes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/custom_call_program_serdes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66841324-c4b6-443c-b0ba-e0af50224d8b | cpp | tensorflow/tensorflow | time_util | third_party/xla/third_party/tsl/tsl/platform/cloud/time_util.cc | third_party/xla/third_party/tsl/tsl/platform/cloud/time_util_test.cc | #include "tsl/platform/cloud/time_util.h"
#include <time.h>
#include <cmath>
#include <cstdio>
#include <ctime>
#ifdef _WIN32
#define timegm _mkgmtime
#endif
#include "tsl/platform/errors.h"
namespace tsl {
namespace {
constexpr int64_t kNanosecondsPerSecond = 1000 * 1000 * 1000;
}
absl::Status ParseRfc3339Time(const string& time, int64_t* mtime_nsec) {
tm parsed{0};
float seconds;
if (sscanf(time.c_str(), "%4d-%2d-%2dT%2d:%2d:%fZ", &(parsed.tm_year),
&(parsed.tm_mon), &(parsed.tm_mday), &(parsed.tm_hour),
&(parsed.tm_min), &seconds) != 6) {
return errors::Internal(
strings::StrCat("Unrecognized RFC 3339 time format: ", time));
}
const int int_seconds = std::floor(seconds);
parsed.tm_year -= 1900;
parsed.tm_mon -= 1;
parsed.tm_sec = int_seconds;
*mtime_nsec = timegm(&parsed) * kNanosecondsPerSecond +
static_cast<int64_t>(std::floor((seconds - int_seconds) *
kNanosecondsPerSecond));
return absl::OkStatus();
}
} | #include "tsl/platform/cloud/time_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace tsl {
TEST(TimeUtil, ParseRfc3339Time) {
int64_t mtime_nsec;
TF_EXPECT_OK(ParseRfc3339Time("2016-04-29T23:15:24.896Z", &mtime_nsec));
EXPECT_NEAR(1461971724896, mtime_nsec / 1000 / 1000, 1);
}
TEST(TimeUtil, ParseRfc3339Time_ParseError) {
int64_t mtime_nsec;
EXPECT_EQ("Unrecognized RFC 3339 time format: 2016-04-29",
ParseRfc3339Time("2016-04-29", &mtime_nsec).message());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/time_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/cloud/time_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1838a903-3723-4d83-9f01-335ffe87693a | cpp | tensorflow/tensorflow | hlo_module_config | third_party/xla/xla/service/hlo_module_config.cc | third_party/xla/xla/service/hlo_module_config_test.cc | #include "xla/service/hlo_module_config.h"
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape_layout.h"
#include "xla/xla.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrAppend;
HloModuleConfig::HloModuleConfig(const ProgramShape& program_shape,
bool ignore_layouts)
: entry_computation_layout_(
ComputationLayout(program_shape, ignore_layouts)) {}
HloModuleConfig::HloModuleConfig(ComputationLayout entry_computation_layout)
: entry_computation_layout_(std::move(entry_computation_layout)) {}
void HloModuleConfig::SetDefaultComputationLayout(
const ProgramShape& program_shape) {
entry_computation_layout_ = ComputationLayout(program_shape);
}
void HloModuleConfig::SetComputationLayoutIfExists(
const ProgramShape& program_shape) {
entry_computation_layout_ = ComputationLayout(program_shape,
false);
}
std::string HloModuleConfig::compilation_cache_key() const {
std::string key = absl::StrCat("profiling=", hlo_profiling_enabled());
StrAppend(&key, "::(");
std::vector<std::string> params;
if (entry_computation_layout_.has_value()) {
for (const ShapeLayout& param_layout :
entry_computation_layout_->parameter_layouts()) {
params.push_back(param_layout.shape().DebugString());
}
StrAppend(&key, absl::StrJoin(params, ", "), ") => ",
entry_computation_layout_->result_shape().SerializeAsString());
}
if (seed() != 0) {
static std::atomic<int> counter{0};
StrAppend(&key, "forcing recompile ", counter++);
}
if (replica_count() != 1) {
StrAppend(&key, "::replica_count=", replica_count());
}
StrAppend(&key, debug_options_.DebugString());
if (intra_op_parallelism_threads() > 0) {
StrAppend(&key, "::intra_op_parallelism_threads=",
intra_op_parallelism_threads());
}
if (!device_type().empty()) {
StrAppend(&key, device_type());
}
StrAppend(&key, "::alias_passthrough_params=", alias_passthrough_params_);
StrAppend(&key, "::allow_spmd_sharding_propagation_to_parameters={",
absl::StrJoin(allow_spmd_sharding_propagation_to_parameters_, ","),
"}");
StrAppend(&key, "::allow_spmd_sharding_propagation_to_output={",
absl::StrJoin(allow_spmd_sharding_propagation_to_output_, ","),
"}");
if (!fdo_profile().empty()) {
StrAppend(&key, "::fdo_profile=", absl::BytesToHexString(fdo_profile()));
}
if (device_memory_size() != 0) {
StrAppend(&key, "::device_memory_size=", device_memory_size());
}
StrAppend(&key, "::use_shardy_partitioner=", use_shardy_partitioner());
return key;
}
void HloModuleConfig::AssignProtoShardableValueUpdatePairs(
tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>*
proto_update_pairs,
const std::vector<HloModuleConfig::ShardableValueUpdatePair>&
update_pairs) {
using ProtoShard = std::decay_t<decltype(proto_update_pairs->at(0))>;
proto_update_pairs->Reserve(update_pairs.size());
for (const auto& pair : update_pairs) {
ProtoShard shard;
shard.set_input_parameter_number(pair.input_parameter_number);
for (int64_t val : pair.parameter_shape_index) {
shard.add_parameter_shape_index(val);
}
for (int64_t val : pair.output_shape_index) {
shard.add_output_shape_index(val);
}
proto_update_pairs->Add(std::move(shard));
}
}
static HloModuleConfigProto::BoolList BoolVectorToProto(
const std::vector<bool>& vals) {
HloModuleConfigProto::BoolList list;
for (int i = 0; i < vals.size(); ++i) {
list.add_vals(vals[i]);
}
return list;
}
static void AssignProtoFusionConfig(
HloModuleConfigProto& proto,
const std::vector<std::vector<bool>>& fusion_config) {
auto* proto_config = proto.mutable_fusion_config();
proto_config->Reserve(fusion_config.size());
for (const auto& vals : fusion_config) {
proto_config->Add(BoolVectorToProto(vals));
}
}
static void AssignProtoDotConfig(
HloModuleConfigProto& proto,
const absl::flat_hash_map<std::string, std::vector<int64_t>>& dot_config) {
std::map<std::string, std::vector<int64_t>> sorted_dot_config;
sorted_dot_config.insert(dot_config.begin(), dot_config.end());
for (const auto& [key, list_vector] : sorted_dot_config) {
HloModuleConfigProto::Int64List list;
for (int64_t val : list_vector) {
list.add_vals(val);
}
proto.mutable_dot_config()->try_emplace(key, std::move(list));
}
}
static void AssignProtoLayoutConfig(
HloModuleConfigProto& proto,
const std::vector<std::vector<std::vector<int64_t>>>& layout_config) {
auto* proto_layout_config = proto.mutable_layout_config();
proto_layout_config->Reserve(layout_config.size());
for (const auto& config_row : layout_config) {
HloModuleConfigProto::Int64ListList proto_list_list;
proto_list_list.mutable_lists()->Reserve(config_row.size());
for (const auto& cell : config_row) {
HloModuleConfigProto::Int64List list;
for (int64_t val : cell) {
list.add_vals(val);
}
*proto_list_list.add_lists() = std::move(list);
}
proto_layout_config->Add(std::move(proto_list_list));
}
}
static void AssignProtoPhaseOrderingConfig(
HloModuleConfigProto& proto,
const std::vector<std::vector<bool>>& phase_config) {
auto* proto_config = proto.mutable_phase_ordering_config();
proto_config->Reserve(phase_config.size());
for (const auto& vals : phase_config) {
proto_config->Add(BoolVectorToProto(vals));
}
}
void HloModuleConfig::AssignStructShardableValueUpdatePairs(
HloModuleConfig& config,
const tsl::protobuf::RepeatedPtrField<ShardableValueUpdatePairProto>&
pairs) {
std::vector<HloModuleConfig::ShardableValueUpdatePair> cfg_pairs;
cfg_pairs.reserve(pairs.size());
for (const auto& proto_pair : pairs) {
HloModuleConfig::ShardableValueUpdatePair pair;
pair.input_parameter_number = proto_pair.input_parameter_number();
const auto param_idx = proto_pair.parameter_shape_index();
pair.parameter_shape_index.assign(param_idx.begin(), param_idx.end());
const auto output_idx = proto_pair.output_shape_index();
pair.output_shape_index.assign(output_idx.begin(), output_idx.end());
cfg_pairs.push_back(pair);
}
config.set_shardable_value_update_pairs(std::move(cfg_pairs));
}
static void AssignStructFusionConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
std::vector<std::vector<bool>> module_config;
auto& proto_config = proto.fusion_config();
module_config.reserve(proto_config.size());
for (auto& list : proto_config) {
std::vector<bool> temp;
for (bool val : list.vals()) {
temp.push_back(val);
}
module_config.push_back(std::move(temp));
}
*config.mutable_fusion_config() = std::move(module_config);
}
static void AssignStructDotConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
auto& proto_config = proto.dot_config();
for (auto& [key, int_list] : proto_config) {
std::vector<int64_t> value{int_list.vals().begin(), int_list.vals().end()};
config.mutable_dot_config()->insert(std::pair{key, value});
}
}
static void AssignStructLayoutConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
std::vector<std::vector<std::vector<int64_t>>> module_config;
auto proto_config = proto.layout_config();
module_config.reserve(proto_config.size());
for (const auto& proto_row_wrapper : proto_config) {
const auto& proto_row = proto_row_wrapper.lists();
std::vector<std::vector<int64_t>> module_row;
module_row.reserve(proto_row.size());
for (const auto& proto_cell : proto_row) {
const auto& cell = proto_cell.vals();
module_row.push_back(std::vector<int64_t>(cell.begin(), cell.end()));
}
module_config.push_back(std::move(module_row));
}
*config.mutable_layout_config() = std::move(module_config);
}
static void AssignStructPhaseOrderingConfig(HloModuleConfig& config,
const HloModuleConfigProto& proto) {
std::vector<std::vector<bool>> module_config;
auto& proto_config = proto.phase_ordering_config();
module_config.reserve(proto_config.size());
for (auto& list : proto_config) {
std::vector<bool> temp;
for (bool val : list.vals()) {
temp.push_back(val);
}
module_config.push_back(std::move(temp));
}
*config.mutable_phase_ordering_config() = std::move(module_config);
}
HloModuleConfigProto HloModuleConfig::ToProto() const {
HloModuleConfigProto proto;
if (has_entry_computation_layout()) {
*proto.mutable_entry_computation_layout() =
entry_computation_layout().ComputeProgramShape().ToProto();
}
proto.set_seed(seed_);
proto.set_launch_id(launch_id_);
proto.set_replica_count(replica_count_);
proto.set_num_partitions(num_partitions_);
for (bool requirement : param_requires_broadcast_via_collectives_) {
proto.add_param_requires_broadcast_via_collectives(requirement);
}
proto.set_use_spmd_partitioning(use_spmd_partitioning_);
proto.set_use_auto_spmd_partitioning(use_auto_spmd_partitioning_);
for (int64_t partitioning_shape : auto_spmd_partitioning_mesh_shape_) {
proto.add_auto_spmd_partitioning_mesh_shape(partitioning_shape);
}
for (int64_t partitioning_id : auto_spmd_partitioning_mesh_ids_) {
proto.add_auto_spmd_partitioning_mesh_ids(partitioning_id);
}
proto.set_deduplicate_hlo(deduplicate_hlo_);
proto.set_intra_op_parallelism_threads(intra_op_parallelism_threads_);
proto.set_device_type(device_type_);
*proto.mutable_debug_options() = debug_options_;
if (has_static_device_assignment()) {
auto proto_assignment = proto.mutable_static_device_assignment();
static_device_assignment_->Serialize(proto_assignment);
}
AssignProtoShardableValueUpdatePairs(
proto.mutable_shardable_value_update_pairs(),
shardable_value_update_pairs_);
proto.set_alias_passthrough_params(alias_passthrough_params_);
proto.set_content_aware_computation_sorting(
content_aware_computation_sorting_);
proto.set_fusion_config_collection(
static_cast<HloModuleConfigProto::FusionConfigCollection>(
fusion_config_collection_));
AssignProtoFusionConfig(proto, fusion_config_);
AssignProtoDotConfig(proto, dot_config_);
AssignProtoLayoutConfig(proto, layout_config_);
for (uint64_t cfg : memory_space_assignment_config_) {
proto.add_memory_space_assignment_config(cfg);
}
AssignProtoPhaseOrderingConfig(proto, phase_ordering_config_);
proto.set_phase_index(phase_index_);
for (bool value : allow_spmd_sharding_propagation_to_parameters_) {
proto.add_allow_spmd_sharding_propagation_to_parameters(value);
}
for (bool value : allow_spmd_sharding_propagation_to_output_) {
proto.add_allow_spmd_sharding_propagation_to_output(value);
}
auto proto_analysis_map = proto.mutable_analysis_allowance_map();
for (const auto& [key, value] : analysis_allowance_map_) {
proto_analysis_map->insert({std::string(key), value});
}
proto.set_matrix_unit_operand_precision(matrix_unit_operand_precision_);
proto.set_allow_separate_sharding_programs(allow_separate_sharding_programs_);
proto.set_fdo_profile(fdo_profile_);
proto.set_device_memory_size(device_memory_size_);
proto.set_use_shardy_partitioner(use_shardy_partitioner_);
return proto;
}
absl::StatusOr<std::unique_ptr<HloModuleConfig>>
HloModuleConfig::CreateFromProto(const HloModuleConfigProto& proto) {
auto config = std::make_unique<HloModuleConfig>();
if (proto.has_entry_computation_layout()) {
auto comp_layout = ProgramShape{proto.entry_computation_layout()};
config->SetComputationLayoutIfExists(comp_layout);
} else {
config->clear_entry_computation_layout();
}
config->seed_ = proto.seed();
config->launch_id_ = proto.launch_id();
config->replica_count_ = proto.replica_count();
config->num_partitions_ = proto.num_partitions();
config->param_requires_broadcast_via_collectives_.assign(
proto.param_requires_broadcast_via_collectives().begin(),
proto.param_requires_broadcast_via_collectives().end());
config->use_spmd_partitioning_ = proto.use_spmd_partitioning();
config->use_auto_spmd_partitioning_ = proto.use_auto_spmd_partitioning();
config->auto_spmd_partitioning_mesh_shape_.assign(
proto.auto_spmd_partitioning_mesh_shape().begin(),
proto.auto_spmd_partitioning_mesh_shape().end());
config->auto_spmd_partitioning_mesh_ids_.assign(
proto.auto_spmd_partitioning_mesh_ids().begin(),
proto.auto_spmd_partitioning_mesh_ids().end());
config->deduplicate_hlo_ = proto.deduplicate_hlo();
config->intra_op_parallelism_threads_ = proto.intra_op_parallelism_threads();
config->device_type_ = proto.device_type();
if (proto.has_debug_options()) {
config->debug_options_ = proto.debug_options();
}
if (proto.has_static_device_assignment()) {
TF_ASSIGN_OR_RETURN(
std::unique_ptr<DeviceAssignment> device_assignment,
DeviceAssignment::Deserialize(proto.static_device_assignment()));
config->static_device_assignment_ = std::move(*device_assignment);
}
AssignStructShardableValueUpdatePairs(*config,
proto.shardable_value_update_pairs());
config->alias_passthrough_params_ = proto.alias_passthrough_params();
config->content_aware_computation_sorting_ =
proto.content_aware_computation_sorting();
config->fusion_config_collection_ =
static_cast<FusionConfigCollection>(proto.fusion_config_collection());
AssignStructFusionConfig(*config, proto);
AssignStructDotConfig(*config, proto);
AssignStructLayoutConfig(*config, proto);
config->memory_space_assignment_config_.assign(
proto.memory_space_assignment_config().begin(),
proto.memory_space_assignment_config().end());
AssignStructPhaseOrderingConfig(*config, proto);
config->phase_index_ = proto.phase_index();
config->allow_spmd_sharding_propagation_to_parameters_.assign(
proto.allow_spmd_sharding_propagation_to_parameters().begin(),
proto.allow_spmd_sharding_propagation_to_parameters().end());
config->allow_spmd_sharding_propagation_to_output_.assign(
proto.allow_spmd_sharding_propagation_to_output().begin(),
proto.allow_spmd_sharding_propagation_to_output().end());
config->analysis_allowance_map_.insert(proto.analysis_allowance_map().begin(),
proto.analysis_allowance_map().end());
config->matrix_unit_operand_precision_ =
proto.matrix_unit_operand_precision();
config->allow_separate_sharding_programs_ =
proto.allow_separate_sharding_programs();
config->fdo_profile_ = proto.fdo_profile();
config->device_memory_size_ = proto.device_memory_size();
config->use_shardy_partitioner_ = proto.use_shardy_partitioner();
return std::move(config);
}
} | #include "xla/service/hlo_module_config.h"
#include <string>
#include "xla/tests/test_utils.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
TEST(HloModuleConfigTest, ShardableValueUpdatePairProtoRoundTrip) {
const std::string text_proto = R"(
shardable_value_update_pairs {
input_parameter_number: 2
parameter_shape_index: 0
parameter_shape_index: 1
output_shape_index: 1
output_shape_index: 0
}
shardable_value_update_pairs {
input_parameter_number: 1
parameter_shape_index: 2
output_shape_index: 3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto input_proto,
ParseTextProto<HloModuleConfigProto>(text_proto));
HloModuleConfig config;
HloModuleConfig::AssignStructShardableValueUpdatePairs(
config, input_proto.shardable_value_update_pairs());
EXPECT_EQ(config.shardable_value_update_pairs().size(), 2);
HloModuleConfigProto output_proto;
HloModuleConfig::AssignProtoShardableValueUpdatePairs(
output_proto.mutable_shardable_value_update_pairs(),
config.shardable_value_update_pairs());
EXPECT_EQ(input_proto.SerializeAsString(), output_proto.SerializeAsString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_module_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04ba64e2-bf21-4b89-ab30-0f9608f75f9e | cpp | tensorflow/tensorflow | convert_operand_folding | third_party/xla/xla/service/convert_operand_folding.cc | third_party/xla/xla/service/convert_operand_folding_test.cc | #include "xla/service/convert_operand_folding.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
bool IsUpcastConvert(const HloInstruction* hlo) {
if (!hlo->shape().IsArray()) {
return false;
}
switch (hlo->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose: {
return IsUpcastConvert(hlo->operand(0));
}
case HloOpcode::kReduce: {
if (ShapeUtil::ElementsIn(hlo->shape()) ==
ShapeUtil::ElementsIn(hlo->operand(0)->shape())) {
return IsUpcastConvert(hlo->operand(0));
}
return false;
}
case HloOpcode::kConvert:
return primitive_util::CastPreservesValues(
hlo->operand(0)->shape().element_type(), hlo->shape().element_type());
default:
return false;
}
}
HloInstruction* EffectiveOperand(HloInstruction* hlo) {
switch (hlo->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kDynamicSlice:
case HloOpcode::kGather:
case HloOpcode::kReshape:
case HloOpcode::kSlice:
case HloOpcode::kTranspose: {
HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
HloInstruction* clone = hlo->AddInstruction(hlo->Clone());
*(clone->mutable_shape()) = ShapeUtil::ChangeElementType(
clone->shape(), operand->shape().element_type());
clone->ReplaceOperandWithDifferentShape(0, operand).IgnoreError();
return clone;
}
case HloOpcode::kReduce: {
HloInstruction* operand = EffectiveOperand(hlo->mutable_operand(0));
return hlo->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::ChangeElementType(hlo->shape(),
operand->shape().element_type()),
operand));
}
case HloOpcode::kConvert:
return hlo->mutable_operand(0);
default:
return nullptr;
}
}
}
bool ConvertOperandFolding::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kDot &&
instruction->opcode() != HloOpcode::kConvolution) {
return false;
}
for (auto* operand : instruction->operands()) {
if (IsUpcastConvert(operand)) {
return true;
}
}
return false;
}
absl::StatusOr<HloInstruction*> ConvertOperandFolding::ExpandInstruction(
HloInstruction* instruction) {
for (int i = 0; i < instruction->operand_count(); ++i) {
auto* operand = instruction->mutable_operand(i);
if (IsUpcastConvert(operand)) {
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
i, EffectiveOperand(operand)));
}
}
return nullptr;
}
} | #include "xla/service/convert_operand_folding.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ConvertOperandFoldingTest = HloTestBase;
TEST_F(ConvertOperandFoldingTest, IntegralUpcastConvertFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s16[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
op::Shape("s16[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, FloatingUpcastConvertFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f16[2,3]{1,0} parameter(0)
p1 = bf16[3,2]{0,1} parameter(1)
c0 = f32[2,3]{1,0} convert(p0)
c1 = f32[3,2]{0,1} convert(p1)
ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, IntegralToFloatingConvertFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = f16[2,3]{1,0} convert(p0)
c1 = f32[3,2]{0,1} convert(p1)
ROOT dot = f32[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), op::Parameter(1)),
op::Shape("f32[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, DowncastConvertNotFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s32[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s8[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_FALSE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(
op::Dot(
AllOf(op::Convert(op::Parameter(0)), op::Shape("s16[2,3]{1,0}")),
AllOf(op::Convert(op::Parameter(1)), op::Shape("s8[3,2]{0,1}"))),
op::Shape("s16[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, OneOperandFolded) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s16[3,2]{0,1} parameter(1)
c0 = s16[2,3]{1,0} convert(p0)
c1 = s8[3,2]{0,1} convert(p1)
ROOT dot = s16[2,2]{1,0} dot(c0, c1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), AllOf(op::Convert(op::Parameter(1)),
op::Shape("s8[3,2]{0,1}"))),
op::Shape("s16[2,2]{1,0}")));
}
TEST_F(ConvertOperandFoldingTest, FoldedWithFormatting) {
absl::string_view module_string = R"(
HloModule module
sum {
a = s16[] parameter(0)
b = s16[] parameter(1)
ROOT r = add(a,b)
}
ENTRY main {
p0 = s8[3,10] parameter(0)
c0 = s16[3,10] convert(p0)
r0 = s16[3,2,5] reshape(c0)
t0 = s16[2,5,3] transpose(r0), dimensions={1,2,0}
s0 = s16[2,1,3] slice(t0), slice={[0:2], [2:3], [0:3]}
rs0 = s16[2,3] reshape(s0)
p1 = s8[3,1,2] parameter(1)
c1 = s16[3,1,2] convert(p1)
r1 = s16[1,3,2] transpose(c1), dimensions={1,0,2}
z = s16[] constant(0)
rr1 = s16[3,2] reduce(r1,z), dimensions={0}, to_apply=sum
ROOT dot = s16[2,2] dot(rs0, rr1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Dot(
op::Reshape(op::Slice(op::Transpose(op::Reshape(op::Parameter(0))))),
op::Reshape(op::Transpose(op::Parameter(1)))));
}
TEST_F(ConvertOperandFoldingTest, FoldedWithDSAndGather) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[100,3] parameter(0)
c0 = s16[100,3] convert(p0)
ids = s32[20] parameter(2)
g = s16[20,3] gather(c0, ids), offset_dims={1}, collapsed_slice_dims={0}, start_index_map={0}, index_vector_dim=1, slice_sizes={1,3}
t = s16[3,20] transpose(g), dimensions={1,0}
p1 = s8[25,3] parameter(1)
c1 = s16[25,3] convert(p1)
z = s32[] constant(0)
s = s32[] parameter(3)
ds = s16[20,3] dynamic-slice(c1, s, z), dynamic_slice_sizes={20,3}
ROOT dot = s16[3,3] dot(t, ds), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool folded,
ConvertOperandFolding().Run(module.get()));
EXPECT_TRUE(folded);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Dot(op::Transpose(op::Gather(op::Parameter(0), op::Parameter(2))),
op::DynamicSlice(op::Parameter(1), op::Parameter(3),
op::Constant())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_operand_folding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/convert_operand_folding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5ac5caa6-167b-4fcd-a4de-786282f38404 | cpp | tensorflow/tensorflow | i4 | tensorflow/lite/experimental/shlo/i4.h | tensorflow/lite/experimental/shlo/i4_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_I4_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_I4_H_
#include <cstdint>
#include <limits>
#include <ostream>
#include <type_traits>
namespace shlo_ref {
// A 4-bit signed integer emulation type.
//
// The value lives in an int8_t payload. All arithmetic is carried out in the
// underlying built-in integer types and is NOT clamped or wrapped to the
// 4-bit range [-8, 7]; callers are responsible for keeping values in range.
// std::numeric_limits<I4> (specialized below in the header) advertises the
// 4-bit range.
struct I4 {
  int8_t data = 0;

  constexpr I4() = default;
  constexpr I4(const I4&) = default;
  constexpr I4& operator=(const I4&) = default;

  // Implicit construction from any type convertible to int8_t
  // (intentionally non-explicit so I4 behaves like a built-in integer).
  template <class T>
  constexpr I4(T v) : data(v) {}

  // Implicit conversion to any target type (unconstrained template).
  template <class T>
  constexpr operator T() const {
    return static_cast<T>(data);
  }

  // -- Increment / decrement ------------------------------------------------
  friend I4& operator++(I4& lhs) {
    ++lhs.data;
    return lhs;
  }
  friend I4& operator--(I4& lhs) {
    --lhs.data;
    return lhs;
  }
  friend I4 operator++(I4& lhs, int) {
    I4 ret = lhs;
    ++lhs.data;
    return ret;
  }
  friend I4 operator--(I4& lhs, int) {
    I4 ret = lhs;
    --lhs.data;
    return ret;
  }

  // -- Compound assignment --------------------------------------------------
  // Each operator has an I4/I4 overload plus a mixed overload taking any
  // arithmetic type; the mixed form promotes the right-hand side to
  // std::common_type_t<T, int> before applying the operation.
  // NOTE(review): the compound bitwise/shift/modulo overloads below are
  // constrained with is_arithmetic rather than is_integral; a floating-point
  // argument would still fail to compile inside the body, so the looser
  // constraint is latent rather than harmful.
  friend I4& operator+=(I4& lhs, I4 rhs) {
    lhs.data += rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator+=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data += static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator-=(I4& lhs, I4 rhs) {
    lhs.data -= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator-=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data -= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator*=(I4& lhs, I4 rhs) {
    lhs.data *= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator*=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data *= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator/=(I4& lhs, I4 rhs) {
    lhs.data /= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator/=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data /= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator%=(I4& lhs, I4 rhs) {
    lhs.data %= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator%=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data %= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator&=(I4& lhs, I4 rhs) {
    lhs.data &= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator&=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data &= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator|=(I4& lhs, I4 rhs) {
    lhs.data |= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator|=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data |= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator^=(I4& lhs, I4 rhs) {
    lhs.data ^= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator^=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data ^= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator<<=(I4& lhs, I4 rhs) {
    lhs.data <<= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator<<=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data <<= static_cast<C>(rhs);
    return lhs;
  }
  friend I4& operator>>=(I4& lhs, I4 rhs) {
    lhs.data >>= rhs.data;
    return lhs;
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend I4& operator>>=(I4& lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    lhs.data >>= static_cast<C>(rhs);
    return lhs;
  }

  // -- Arithmetic -----------------------------------------------------------
  // Binary operators deliberately return the promoted built-in result type
  // (via `auto`), not I4, mirroring integer promotion of built-in types.
  friend auto operator+(I4 lhs) { return +lhs.data; }
  friend auto operator-(I4 lhs) { return -lhs.data; }
  friend auto operator+(I4 lhs, I4 rhs) { return lhs.data + rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator+(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data + static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator+(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) + rhs.data;
  }
  friend auto operator-(I4 lhs, I4 rhs) { return lhs.data - rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator-(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data - static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator-(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) - rhs.data;
  }
  friend auto operator*(I4 lhs, I4 rhs) { return lhs.data * rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator*(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data * static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator*(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) * rhs.data;
  }
  friend auto operator/(I4 lhs, I4 rhs) { return lhs.data / rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator/(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data / static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator/(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) / rhs.data;
  }
  // Modulo and the bitwise/shift operators below restrict the mixed-type
  // overloads to integral operands.
  friend auto operator%(I4 lhs, I4 rhs) { return lhs.data % rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator%(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data % static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator%(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) % rhs.data;
  }

  // -- Bitwise / shifts -----------------------------------------------------
  friend auto operator~(I4 lhs) { return ~lhs.data; }
  friend auto operator&(I4 lhs, I4 rhs) { return lhs.data & rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator&(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data & static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator&(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) & rhs.data;
  }
  friend auto operator|(I4 lhs, I4 rhs) { return lhs.data | rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator|(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data | static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator|(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) | rhs.data;
  }
  friend auto operator^(I4 lhs, I4 rhs) { return lhs.data ^ rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator^(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data ^ static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator^(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) ^ rhs.data;
  }
  friend auto operator<<(I4 lhs, I4 rhs) { return lhs.data << rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator<<(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data << static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator<<(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) << rhs.data;
  }
  friend auto operator>>(I4 lhs, I4 rhs) { return lhs.data >> rhs.data; }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator>>(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data >> static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_integral_v<T>>>
  friend auto operator>>(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) >> rhs.data;
  }

  // -- Logical --------------------------------------------------------------
  friend bool operator!(I4 v) { return !v.data; }
  friend auto operator&&(I4 lhs, I4 rhs) { return lhs.data && rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator&&(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data && static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator&&(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) && rhs.data;
  }
  friend auto operator||(I4 lhs, I4 rhs) { return lhs.data || rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator||(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data || static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend auto operator||(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) || rhs.data;
  }

  // -- Comparison -----------------------------------------------------------
  friend bool operator==(I4 lhs, I4 rhs) { return lhs.data == rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator==(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data == static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator==(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) == rhs.data;
  }
  friend bool operator!=(I4 lhs, I4 rhs) { return lhs.data != rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator!=(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data != static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator!=(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) != rhs.data;
  }
  friend bool operator<(I4 lhs, I4 rhs) { return lhs.data < rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data < static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) < rhs.data;
  }
  friend bool operator>(I4 lhs, I4 rhs) { return lhs.data > rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data > static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) > rhs.data;
  }
  friend bool operator<=(I4 lhs, I4 rhs) { return lhs.data <= rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<=(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data <= static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator<=(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) <= rhs.data;
  }
  friend bool operator>=(I4 lhs, I4 rhs) { return lhs.data >= rhs.data; }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>=(I4 lhs, T rhs) {
    using C = std::common_type_t<T, int>;
    return lhs.data >= static_cast<C>(rhs);
  }
  template <class T, class = std::enable_if_t<std::is_arithmetic_v<T>>>
  friend bool operator>=(T lhs, I4 rhs) {
    using C = std::common_type_t<T, int>;
    return static_cast<C>(lhs) >= rhs.data;
  }

  // Streams the value as an int (`+v` promotes) so it prints numerically
  // rather than as a char.
  friend std::ostream& operator<<(std::ostream& os, I4 v) { return os << +v; }
};
}
namespace std {
// numeric_limits for the 4-bit signed integer wrapper. Most traits
// (is_signed, is_integer, is_specialized, ...) are inherited from int8_t,
// but the traits that encode the value range must be overridden for a
// 4-bit payload.
template <>
struct numeric_limits<shlo_ref::I4> : std::numeric_limits<int8_t> {
  // A signed 4-bit integer has 3 non-sign bits and a range of [-8, 7];
  // the inherited int8_t values (digits = 7, digits10 = 2) were wrong for I4.
  static constexpr int digits = 3;
  // Largest N with every N-digit decimal representable: 0, since 10 > 7.
  static constexpr int digits10 = 0;
  static constexpr shlo_ref::I4 min() noexcept { return shlo_ref::I4(-8); }
  static constexpr shlo_ref::I4 lowest() noexcept { return min(); }
  static constexpr shlo_ref::I4 max() noexcept { return shlo_ref::I4(7); }
};
}  // namespace std
#endif | #include "tensorflow/lite/experimental/shlo/i4.h"
#include <cstdint>
#include <gtest/gtest.h>
namespace shlo_ref {
namespace {
// I4 must be constructible from every built-in arithmetic type, storing the
// value in its int8_t payload.
TEST(I4Test, ConstructFromArithmeticType) {
  const I4 from_int8(static_cast<int8_t>(1));
  EXPECT_EQ(from_int8.data, 1);
  const I4 from_int16(static_cast<int16_t>(1));
  EXPECT_EQ(from_int16.data, 1);
  const I4 from_int32(static_cast<int32_t>(1));
  EXPECT_EQ(from_int32.data, 1);
  const I4 from_int64(static_cast<int64_t>(1));
  EXPECT_EQ(from_int64.data, 1);
  const I4 from_float(static_cast<float>(1));
  EXPECT_EQ(from_float.data, 1);
  const I4 from_double(static_cast<double>(1));
  EXPECT_EQ(from_double.data, 1);
}
// Identity helper: passing an I4 here forces an implicit conversion to the
// explicitly chosen T at the call site, exercising I4's templated
// conversion operator.
template <class T>
T ImplicitConversion(T value) {
  return value;
}
// I4 must implicitly convert to every built-in arithmetic type, preserving
// the (signed) value.
TEST(I4Test, ConvertToArithmeticType) {
  const I4 ref(-1);
  EXPECT_EQ(ImplicitConversion<int8_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<int16_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<int32_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<int64_t>(ref), -1);
  EXPECT_EQ(ImplicitConversion<float>(ref), -1);
  EXPECT_EQ(ImplicitConversion<double>(ref), -1);
}
// Exercises the homogeneous (I4 op I4) operators: comparisons over the full
// 4-bit range, then increment/decrement, compound assignment, bitwise,
// shift, unary, and logical operators against int reference results.
TEST(I4Test, Arithmetic) {
  // Comparisons must agree with plain int comparisons over [-8, 8).
  for (int i = -8; i < 8; ++i) {
    for (int j = -8; j < 8; ++j) {
      EXPECT_EQ(I4(i) == I4(j), i == j);
      EXPECT_EQ(I4(i) != I4(j), i != j);
      EXPECT_EQ(I4(i) > I4(j), i > j);
      EXPECT_EQ(I4(i) >= I4(j), i >= j);
      EXPECT_EQ(I4(i) < I4(j), i < j);
      EXPECT_EQ(I4(i) <= I4(j), i <= j);
    }
  }
  I4 val(0);
  EXPECT_EQ(++val, 1);
  EXPECT_EQ(val++, 1);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val--, 2);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(--val, 0);
  EXPECT_EQ(val += I4(1), 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val *= I4(2), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val /= I4(2), 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val -= I4(4), -3);
  EXPECT_EQ(val, -3);
  EXPECT_EQ(val %= I4(2), -1);
  EXPECT_EQ(val, -1);
  EXPECT_EQ(val = I4(7), 7);
  EXPECT_EQ(val, 7);
  EXPECT_EQ(val &= I4(2), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val |= I4(1), 3);
  EXPECT_EQ(val, 3);
  EXPECT_EQ(val ^= I4(7), 4);
  EXPECT_EQ(val, 4);
  // NOTE(review): the following >>=/<<= round-trip is written twice —
  // the second pair is a verbatim duplicate of the first.
  EXPECT_EQ(val >>= I4(1), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val <<= I4(1), 4);
  EXPECT_EQ(val, 4);
  EXPECT_EQ(val >>= I4(1), 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val <<= I4(1), 4);
  EXPECT_EQ(val, 4);
  // Unary and logical operators.
  EXPECT_EQ(+val, 4);
  EXPECT_EQ(-val, -4);
  EXPECT_EQ(!val, false);
  EXPECT_EQ(~val, ~4);
  EXPECT_EQ(val && I4(2), true);
  EXPECT_EQ(val && I4(0), false);
  EXPECT_EQ(val || I4(0), true);
  EXPECT_EQ(I4(0) || I4(0), false);
}
// Integral types that the mixed-type integral tests run against.
using IntegralTypeList = ::testing::Types<int8_t, int16_t, int32_t, int64_t>;
// Full set of arithmetic (integral + floating-point) operand types.
using ArithmeticTypeList =
    ::testing::Types<int8_t, int16_t, int32_t, int64_t, float, double>;
// Typed fixture for I4-vs-arithmetic-type interoperability tests.
template <class T>
struct ArithmeticTypeI4Test : testing::Test {};
TYPED_TEST_SUITE(ArithmeticTypeI4Test, ArithmeticTypeList);
// Mixed-type arithmetic: I4 on one side, a built-in arithmetic TypeParam on
// the other, in both operand orders, must behave like plain TypeParam math.
TYPED_TEST(ArithmeticTypeI4Test, Arithmetic) {
  for (TypeParam i = -8; i < 8; ++i) {
    for (TypeParam j = -8; j < 8; ++j) {
      EXPECT_EQ(I4(i) == j, i == j);
      EXPECT_EQ(i == I4(j), i == j);
      EXPECT_EQ(I4(i) != j, i != j);
      EXPECT_EQ(i != I4(j), i != j);
      EXPECT_EQ(I4(i) > j, i > j);
      EXPECT_EQ(i > I4(j), i > j);
      EXPECT_EQ(I4(i) >= j, i >= j);
      EXPECT_EQ(i >= I4(j), i >= j);
      EXPECT_EQ(I4(i) < j, i < j);
      EXPECT_EQ(i < I4(j), i < j);
      EXPECT_EQ(I4(i) <= j, i <= j);
      EXPECT_EQ(i <= I4(j), i <= j);
    }
  }
  I4 val(0);
  const TypeParam one = TypeParam(1);
  const TypeParam two = TypeParam(2);
  const TypeParam three = TypeParam(3);
  const TypeParam four = TypeParam(4);
  // Compound assignment with a TypeParam right-hand side.
  EXPECT_EQ(val += one, 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val *= two, 2);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val /= two, 1);
  EXPECT_EQ(val, 1);
  EXPECT_EQ(val -= four, -3);
  EXPECT_EQ(val, -3);
  // Binary operators with a TypeParam right-hand side.
  const I4 i4_three(3);
  EXPECT_EQ(i4_three + one, four);
  EXPECT_EQ(i4_three - one, two);
  EXPECT_EQ(i4_three * two, three * two);
  EXPECT_EQ(i4_three / two, three / two);
}
// Typed fixture for operators whose mixed overloads require an integral
// operand (%, bitwise, shifts).
template <class T>
struct IntegralTypeI4Test : testing::Test {};
TYPED_TEST_SUITE(IntegralTypeI4Test, IntegralTypeList);
// Mixed-type integral-only operators: modulo, bitwise, and shifts with a
// built-in integral TypeParam operand, in binary and compound forms.
TYPED_TEST(IntegralTypeI4Test, Arithmetic) {
  const TypeParam minus_one = TypeParam(-1);
  const TypeParam one = TypeParam(1);
  const TypeParam two = TypeParam(2);
  const TypeParam three = TypeParam(3);
  const TypeParam four = TypeParam(4);
  const TypeParam six = TypeParam(6);
  const TypeParam seven = TypeParam(7);
  const I4 i4_three(3);
  // Binary forms.
  EXPECT_EQ(i4_three % two, one);
  EXPECT_EQ(i4_three & two, two);
  EXPECT_EQ(i4_three | four, seven);
  EXPECT_EQ(i4_three ^ four, seven);
  EXPECT_EQ(i4_three << one, six);
  EXPECT_EQ(i4_three >> one, one);
  // Compound forms; C++ truncated division gives -3 % 2 == -1.
  I4 val(-3);
  EXPECT_EQ(val %= two, minus_one);
  EXPECT_EQ(val, -1);
  EXPECT_EQ(val = I4(7), seven);
  EXPECT_EQ(val, 7);
  EXPECT_EQ(val &= two, two);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val |= one, three);
  EXPECT_EQ(val, 3);
  EXPECT_EQ(val ^= seven, four);
  EXPECT_EQ(val, 4);
  EXPECT_EQ(val >>= one, two);
  EXPECT_EQ(val, 2);
  EXPECT_EQ(val <<= one, four);
  EXPECT_EQ(val, 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/i4.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/i4_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
233cc73f-0e99-48c3-84bf-b56c721034a9 | cpp | tensorflow/tensorflow | sharding_utils | tensorflow/core/tpu/kernels/sharding_utils.cc | tensorflow/core/tpu/kernels/sharding_utils_test.cc | #include "tensorflow/core/tpu/kernels/sharding_utils.h"
#include <cstdint>
#include <functional>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/macros.h"
namespace tensorflow {
namespace sharding_internal {
// Validates that `input` can be sliced with the given per-dimension split
// counts and paddings:
//   - rank must be in (0, 8],
//   - `num_splits` and `paddings` must each have one entry per dimension,
//   - every padded dimension extent must be evenly divisible by its split
//     count.
// Returns the first violation found as InvalidArgument (with `input_name`
// in the message), or OkStatus.
absl::Status ValidateShapesForSlice(absl::string_view input_name,
                                    const Tensor* input,
                                    const std::vector<int32_t>& num_splits,
                                    const std::vector<int32_t>& paddings) {
  const auto& ishape = input->shape();
  Status s;
  const int rank = ishape.dims();
  const auto& input_shape = ishape.dim_sizes();
  if (rank <= 0 || rank > 8) {
    s = absl::InvalidArgumentError(absl::StrCat(
        input_name, " must have rank in range (0, 8], but got ", rank, "."));
  } else if (rank != static_cast<int>(num_splits.size())) {
    s = absl::InvalidArgumentError(absl::StrCat(
        input_name, " rank must be the same as 'num_splits' length ",
        num_splits.size(), ", but got rank ", rank, "."));
  } else if (rank != static_cast<int>(paddings.size())) {
    // Previously unchecked: `paddings[dim]` below read out of bounds when
    // the caller supplied fewer padding entries than dimensions.
    s = absl::InvalidArgumentError(absl::StrCat(
        input_name, " rank must be the same as 'paddings' length ",
        paddings.size(), ", but got rank ", rank, "."));
  } else {
    for (int dim = 0; dim < rank; ++dim) {
      const auto input_shape_dim = input_shape[dim];
      const auto paddings_dim = paddings[dim];
      const auto num_splits_dim = num_splits[dim];
      if ((input_shape_dim + paddings_dim) % num_splits_dim != 0) {
        s = absl::InvalidArgumentError(absl::StrCat(
            input_name, " shape dimension ", dim, " (", input_shape_dim,
            ") with padding ", paddings_dim,
            " must be evenly divisible by 'num_splits' ", num_splits_dim, "."));
        break;
      }
    }
  }
  return s;
}
}
// Rank-1 specialization: the start offset is just the slice index scaled by
// the slice extent; `num_partitions` is not needed.
template <>
Eigen::DSizes<Eigen::DenseIndex, 1> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 1>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 1> start;
  start[0] = index * slice_shape[0];
  return start;
}
// Rank-2 specialization: decode `index` as a mixed-radix number whose digits
// (minor to major) are the per-dimension partition counts, then scale each
// digit by that dimension's slice extent to get the slice's start subscript.
template <>
Eigen::DSizes<Eigen::DenseIndex, 2> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 2>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 2> subscript;
  int remaining = index;
  for (int dim = 1; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
// Rank-3 specialization: mixed-radix decode of `index` over the partition
// counts, minor dimension first, each digit scaled by the slice extent.
template <>
Eigen::DSizes<Eigen::DenseIndex, 3> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 3>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 3> subscript;
  int remaining = index;
  for (int dim = 2; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
// Rank-4 specialization: mixed-radix decode of `index` over the partition
// counts, minor dimension first, each digit scaled by the slice extent.
template <>
Eigen::DSizes<Eigen::DenseIndex, 4> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 4>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 4> subscript;
  int remaining = index;
  for (int dim = 3; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
// Rank-5 specialization: mixed-radix decode of `index` over the partition
// counts, minor dimension first, each digit scaled by the slice extent.
template <>
Eigen::DSizes<Eigen::DenseIndex, 5> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 5>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 5> subscript;
  int remaining = index;
  for (int dim = 4; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
// Rank-6 specialization: mixed-radix decode of `index` over the partition
// counts, minor dimension first, each digit scaled by the slice extent.
template <>
Eigen::DSizes<Eigen::DenseIndex, 6> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 6>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 6> subscript;
  int remaining = index;
  for (int dim = 5; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
// Rank-7 specialization: mixed-radix decode of `index` over the partition
// counts, minor dimension first, each digit scaled by the slice extent.
template <>
Eigen::DSizes<Eigen::DenseIndex, 7> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 7>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 7> subscript;
  int remaining = index;
  for (int dim = 6; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
// Rank-8 specialization: mixed-radix decode of `index` over the partition
// counts, minor dimension first, each digit scaled by the slice extent.
template <>
Eigen::DSizes<Eigen::DenseIndex, 8> GetSliceIndices(
    absl::Span<const int32_t> num_partitions,
    const Eigen::DSizes<Eigen::DenseIndex, 8>& slice_shape, const int index) {
  Eigen::DSizes<Eigen::DenseIndex, 8> subscript;
  int remaining = index;
  for (int dim = 7; dim > 0; --dim) {
    subscript[dim] = (remaining % num_partitions[dim]) * slice_shape[dim];
    remaining /= num_partitions[dim];
  }
  subscript[0] = remaining * slice_shape[0];
  return subscript;
}
} | #define EIGEN_USE_THREADS
#include "tensorflow/core/tpu/kernels/sharding_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/env.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tensorflow {
namespace {
// Returns an Eigen device backed by a process-lifetime thread pool.
//
// The pool is a deliberately leaked function-local static: an
// Eigen::ThreadPoolDevice only *borrows* the ThreadPoolInterface pointer, so
// the pool must outlive every device handed out. The previous implementation
// owned the pool through a local unique_ptr that was destroyed on return,
// leaving the returned device with a dangling pointer (use-after-free on the
// first parallel dispatch).
Eigen::ThreadPoolDevice CreateThreadPoolDevice() {
  constexpr int kMaxParallelism = 16;
  static tsl::thread::ThreadPool* const thread_pool =
      new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
                                  "Resharding", kMaxParallelism);
  return Eigen::ThreadPoolDevice(thread_pool->AsEigenThreadPool(),
                                 kMaxParallelism);
}
// With a single split per dimension and no padding, the splitter should pass
// the input through unchanged via the assign_or_copy path.
TEST(XlaNDSplitterTest, NoSplits) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 2, 2});
  const std::vector<int32_t> num_splits = {1, 1, 1};
  const std::vector<int> paddings(num_splits.size(), 0);
  const int num_outputs = 1;
  auto input_tensor =
      test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7}, input_shape);
  std::vector<Tensor> output_tensors;
  output_tensors.resize(num_outputs);
  // Allocates storage for the i-th output slice, bounds-checking the index.
  auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
                                Tensor** tensor) {
    if (i < 0 || i >= output_tensors.size()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Index ", i, " out of range [0, ", output_tensors.size(), "]"));
    }
    output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
    *tensor = &output_tensors[i];
    return absl::OkStatus();
  };
  // No-split fast path: forwards the input tensor as the single output.
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors[0] = input;
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
                         num_splits, num_outputs, paddings,
                         false)));
  TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
                              allocate_output_fn, device));
  ASSERT_EQ(output_tensors.size(), 1);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
                                                 TensorShape({2, 2, 2})));
}
// A [2,1,1] input is padded by one element on each of the two minor
// dimensions; with a single split the sole output is the zero-padded
// [2,2,2] tensor.
TEST(XlaNDSplitterTest, NoSplitsWithPadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 1, 1});
  const std::vector<int32_t> num_splits = {1, 1, 1};
  const std::vector<int> paddings = {0, 1, 1};
  const int num_outputs = 1;
  auto input_tensor = test::AsTensor<int32_t>({0, 1}, input_shape);
  std::vector<Tensor> output_tensors;
  output_tensors.resize(num_outputs);
  // Allocates storage for the i-th output slice, bounds-checking the index.
  auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
                                Tensor** tensor) {
    if (i < 0 || i >= output_tensors.size()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Index ", i, " out of range [0, ", output_tensors.size(), "]"));
    }
    output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
    *tensor = &output_tensors[i];
    return absl::OkStatus();
  };
  // No-split fast path: forwards the input tensor as the single output.
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors[0] = input;
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
                         num_splits, num_outputs, paddings,
                         true)));
  TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
                              allocate_output_fn, device));
  ASSERT_EQ(output_tensors.size(), 1);
  // (Removed a leftover unused `expected_values` vector that was sized for a
  // 3x3x3 shape this test never uses.)
  // Original elements land at [0,0,0] and [1,0,0]; padding is zero-filled.
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0], test::AsTensor<int32_t>({0, 0, 0, 0, 1, 0, 0, 0},
                                                 TensorShape({2, 2, 2})));
}
// A [4,4] input split 2x2 with no padding yields four [2,2] quadrants in
// row-major slice order.
TEST(XlaNDSplitterTest, SplitNoPadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({4, 4});
  const std::vector<int32_t> num_splits = {2, 2};
  const std::vector<int32_t> paddings(num_splits.size(), 0);
  const int num_outputs = 4;
  auto input_tensor = test::AsTensor<int32_t>(
      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, input_shape);
  std::vector<Tensor> output_tensors;
  output_tensors.resize(num_outputs);
  // Allocates storage for the i-th output slice, bounds-checking the index.
  auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
                                Tensor** tensor) {
    if (i < 0 || i >= output_tensors.size()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Index ", i, " out of range [0, ", output_tensors.size(), "]"));
    }
    output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
    *tensor = &output_tensors[i];
    return absl::OkStatus();
  };
  // Only used on the no-split path; not expected to be hit in this test.
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors[0] = input;
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
                         num_splits, num_outputs, paddings,
                         true)));
  TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
                              allocate_output_fn, device));
  ASSERT_EQ(output_tensors.size(), num_outputs);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0],
      test::AsTensor<int32_t>({0, 1, 4, 5}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[1],
      test::AsTensor<int32_t>({2, 3, 6, 7}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[2],
      test::AsTensor<int32_t>({8, 9, 12, 13}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[3],
      test::AsTensor<int32_t>({10, 11, 14, 15}, TensorShape({2, 2})));
}
// A [3,3] input split 2x2 with one element of padding per dimension: slices
// on the high edges are zero-filled where they extend past the input.
TEST(XlaNDSplitterTest, SplitPartialPadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({3, 3});
  const std::vector<int32_t> num_splits = {2, 2};
  const std::vector<int32_t> paddings = {1, 1};
  const int num_outputs = 4;
  auto input_tensor =
      test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8}, input_shape);
  std::vector<Tensor> output_tensors;
  output_tensors.resize(num_outputs);
  // Allocates storage for the i-th output slice, bounds-checking the index.
  auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
                                Tensor** tensor) {
    if (i < 0 || i >= output_tensors.size()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Index ", i, " out of range [0, ", output_tensors.size(), "]"));
    }
    output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
    *tensor = &output_tensors[i];
    return absl::OkStatus();
  };
  // Only used on the no-split path; not expected to be hit in this test.
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors[0] = input;
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
                         num_splits, num_outputs, paddings,
                         true)));
  TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
                              allocate_output_fn, device));
  ASSERT_EQ(output_tensors.size(), num_outputs);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0],
      test::AsTensor<int32_t>({0, 1, 3, 4}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[1],
      test::AsTensor<int32_t>({2, 0, 5, 0}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[2],
      test::AsTensor<int32_t>({6, 7, 0, 0}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[3],
      test::AsTensor<int32_t>({8, 0, 0, 0}, TensorShape({2, 2})));
}
// A [2,1] input split 2x2 with padding {2,3}: padding dominates, so all
// slices except the first are entirely zero-filled.
TEST(XlaNDSplitterTest, SplitCompletePadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 1});
  const std::vector<int32_t> num_splits = {2, 2};
  const std::vector<int32_t> paddings = {2, 3};
  const int num_outputs = 4;
  auto input_tensor = test::AsTensor<int32_t>({0, 1}, input_shape);
  std::vector<Tensor> output_tensors;
  output_tensors.resize(num_outputs);
  // Allocates storage for the i-th output slice, bounds-checking the index.
  auto allocate_output_fn = [&](int i, const TensorShape& output_slice_shape,
                                Tensor** tensor) {
    if (i < 0 || i >= output_tensors.size()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Index ", i, " out of range [0, ", output_tensors.size(), "]"));
    }
    output_tensors[i] = Tensor(tensorflow::DT_INT32, output_slice_shape);
    *tensor = &output_tensors[i];
    return absl::OkStatus();
  };
  // Only used on the no-split path; not expected to be hit in this test.
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors[0] = input;
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto splitter, (XlaNDSplitter<Eigen::ThreadPoolDevice, int32_t>::Create(
                         num_splits, num_outputs, paddings,
                         true)));
  TF_ASSERT_OK(splitter.Split(&input_tensor, "test", assign_or_copy_value_fn,
                              allocate_output_fn, device));
  ASSERT_EQ(output_tensors.size(), num_outputs);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0],
      test::AsTensor<int32_t>({0, 0, 1, 0}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[1],
      test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[2],
      test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
  test::ExpectTensorEqual<int32_t>(
      output_tensors[3],
      test::AsTensor<int32_t>({0, 0, 0, 0}, TensorShape({2, 2})));
}
// With num_concats == {1,1,1} (a single slice) the concatenator should pass
// the input tensor through unchanged via assign_or_copy_value_fn.
TEST(XlaNDConcatenatorTest, NoConcats) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 2, 2});
  const TensorShape output_shape({2, 2, 2});
  const std::vector<int32_t> num_concats = {1, 1, 1};
  const std::vector<int> paddings(num_concats.size(), 0);
  int num_slices = 1;
  auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7}, input_shape);
  std::vector<Tensor> input_tensors;
  input_tensors.push_back(tensor0);
  std::vector<Tensor> output_tensors;
  output_tensors.reserve(1);
  // Allocates the single concatenated output tensor.
  auto get_output_fn = [&]() {
    output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
    return &output_tensors.back();
  };
  // Pass-through path when no actual concatenation is needed.
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors.push_back(input);
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto concatenator,
      (XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
          num_concats, num_slices, paddings,
          true)));
  TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
                                            assign_or_copy_value_fn,
                                            get_output_fn, device));
  ASSERT_EQ(output_tensors.size(), 1);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0], test::AsTensor<int32_t>({0, 1, 2, 3, 4, 5, 6, 7},
                                                 TensorShape({2, 2, 2})));
}
// Concatenates four [2,2] slices arranged in a 2x2 grid (no padding) into a
// single [4,4] tensor and checks the interleaved row layout.
TEST(XlaNDConcatenatorTest, ConcatNoPadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 2});
  const TensorShape output_shape({4, 4});
  const std::vector<int32_t> num_concats = {2, 2};
  const std::vector<int> paddings(num_concats.size(), 0);
  int num_slices = 4;
  auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
  auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
  auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
  auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
  std::vector<Tensor> input_tensors;
  input_tensors.push_back(tensor0);
  input_tensors.push_back(tensor1);
  input_tensors.push_back(tensor2);
  input_tensors.push_back(tensor3);
  std::vector<Tensor> output_tensors;
  output_tensors.reserve(1);
  auto get_output_fn = [&]() {
    output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
    return &output_tensors.back();
  };
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors.push_back(input);
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto concatenator,
      (XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
          num_concats, num_slices, paddings,
          true)));
  TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
                                            assign_or_copy_value_fn,
                                            get_output_fn, device));
  ASSERT_EQ(output_tensors.size(), 1);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 5, 2, 3, 6, 7, 8, 9,
                                                  12, 13, 10, 11, 14, 15},
                                                 TensorShape({4, 4})));
}
// Concatenates a 2x2 grid of [2,2] slices into a [3,3] output: paddings {1,1}
// drop the last row and column of the assembled [4,4] result.
TEST(XlaNDConcatenatorTest, ConcatPartialPadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 2});
  const TensorShape output_shape({3, 3});
  const std::vector<int32_t> num_concats = {2, 2};
  const std::vector<int> paddings = {1, 1};
  int num_slices = 4;
  auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
  auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
  auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
  auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
  std::vector<Tensor> input_tensors;
  input_tensors.push_back(tensor0);
  input_tensors.push_back(tensor1);
  input_tensors.push_back(tensor2);
  input_tensors.push_back(tensor3);
  std::vector<Tensor> output_tensors;
  output_tensors.reserve(1);
  auto get_output_fn = [&]() {
    output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
    return &output_tensors.back();
  };
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors.push_back(input);
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto concatenator,
      (XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
          num_concats, num_slices, paddings,
          true)));
  TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
                                            assign_or_copy_value_fn,
                                            get_output_fn, device));
  ASSERT_EQ(output_tensors.size(), 1);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0], test::AsTensor<int32_t>({0, 1, 4, 2, 3, 6, 8, 9, 12},
                                                 TensorShape({3, 3})));
}
// Paddings {2,2} trim the assembled [4,4] result down to [2,2], leaving only
// the contents of the first slice.
TEST(XlaNDConcatenatorTest, ConcatCompletePadding) {
  auto device = CreateThreadPoolDevice();
  const TensorShape input_shape({2, 2});
  const TensorShape output_shape({2, 2});
  const std::vector<int32_t> num_concats = {2, 2};
  const std::vector<int> paddings = {2, 2};
  int num_slices = 4;
  auto tensor0 = test::AsTensor<int32_t>({0, 1, 2, 3}, input_shape);
  auto tensor1 = test::AsTensor<int32_t>({4, 5, 6, 7}, input_shape);
  auto tensor2 = test::AsTensor<int32_t>({8, 9, 10, 11}, input_shape);
  auto tensor3 = test::AsTensor<int32_t>({12, 13, 14, 15}, input_shape);
  std::vector<Tensor> input_tensors;
  input_tensors.push_back(tensor0);
  input_tensors.push_back(tensor1);
  input_tensors.push_back(tensor2);
  input_tensors.push_back(tensor3);
  std::vector<Tensor> output_tensors;
  output_tensors.reserve(1);
  auto get_output_fn = [&]() {
    output_tensors.push_back(Tensor(tensorflow::DT_INT32, output_shape));
    return &output_tensors.back();
  };
  auto assign_or_copy_value_fn = [&](const Tensor& input) -> Status {
    output_tensors.push_back(input);
    return absl::OkStatus();
  };
  TF_ASSERT_OK_AND_ASSIGN(
      auto concatenator,
      (XlaNDConcatenator<Eigen::ThreadPoolDevice, int32_t>::Create(
          num_concats, num_slices, paddings,
          true)));
  TF_ASSERT_OK(concatenator.ComputeInternal(absl::MakeSpan(input_tensors),
                                            assign_or_copy_value_fn,
                                            get_output_fn, device));
  ASSERT_EQ(output_tensors.size(), 1);
  test::ExpectTensorEqual<int32_t>(
      output_tensors[0],
      test::AsTensor<int32_t>({0, 1, 2, 3}, TensorShape({2, 2})));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sharding_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tpu/kernels/sharding_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f4bdf13-5af9-4796-999f-d114acc952de | cpp | google/cel-cpp | container_function_registrar | eval/public/container_function_registrar.cc | eval/public/container_function_registrar_test.cc | #include "eval/public/container_function_registrar.h"
#include "eval/public/cel_options.h"
#include "runtime/runtime_options.h"
#include "runtime/standard/container_functions.h"
namespace google::api::expr::runtime {
// Registers the CEL container (list/map) functions on `registry`.  The legacy
// InterpreterOptions are translated into cel::RuntimeOptions before being
// forwarded to the modern registration entry point.
absl::Status RegisterContainerFunctions(CelFunctionRegistry* registry,
                                        const InterpreterOptions& options) {
  return cel::RegisterContainerFunctions(registry->InternalGetRegistry(),
                                         ConvertToRuntimeOptions(options));
}
} | #include "eval/public/container_function_registrar.h"
#include <memory>
#include <string>
#include "eval/public/activation.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/equality_function_registrar.h"
#include "eval/public/testing/matchers.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace google::api::expr::runtime {
namespace {
using google::api::expr::v1alpha1::Expr;
using google::api::expr::v1alpha1::SourceInfo;
using ::testing::ValuesIn;
// One parameterized evaluation case: `expr` is parsed and evaluated, and the
// outcome must equal `result` (defaults to bool true).
struct TestCase {
  std::string test_name;  // Used as the gtest parameterized test-name suffix.
  std::string expr;       // CEL expression source to parse and evaluate.
  absl::StatusOr<CelValue> result = CelValue::CreateBool(true);
};
// Returns a process-lifetime CelList containing the single int64 value 1.
// The backing list is intentionally heap-allocated and never freed so it
// outlives static destruction (safe to reference from test expectations).
const CelList& CelNumberListExample() {
  static const ContainerBackedListImpl* const kSingletonList =
      new ContainerBackedListImpl({CelValue::CreateInt64(1)});
  return *kSingletonList;
}
// Parses `test_case.expr`, builds an evaluator with container and equality
// functions registered, evaluates against an empty Activation, and expects
// the produced value to equal `test_case.result`.
void ExpectResult(const TestCase& test_case) {
  auto parsed_expr = parser::Parse(test_case.expr);
  ASSERT_OK(parsed_expr);
  const Expr& expr_ast = parsed_expr->expr();
  const SourceInfo& source_info = parsed_expr->source_info();
  InterpreterOptions options;
  options.enable_timestamp_duration_overflow_errors = true;
  options.enable_comprehension_list_append = true;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreateCelExpressionBuilder(options);
  // Equality functions are needed so `==` inside the test expressions works.
  ASSERT_OK(RegisterContainerFunctions(builder->GetRegistry(), options));
  ASSERT_OK(RegisterEqualityFunctions(builder->GetRegistry(), options));
  ASSERT_OK_AND_ASSIGN(auto cel_expression,
                       builder->CreateExpression(&expr_ast, &source_info));
  Activation activation;
  google::protobuf::Arena arena;
  ASSERT_OK_AND_ASSIGN(auto value,
                       cel_expression->Evaluate(activation, &arena));
  EXPECT_THAT(value, test::EqualsCelValue(*test_case.result));
}
using ContainerFunctionParamsTest = testing::TestWithParam<TestCase>;
// Runs each parameterized TestCase through the shared evaluation harness.
TEST_P(ContainerFunctionParamsTest, StandardFunctions) {
  ExpectResult(GetParam());
}
// Test matrix covering list filtering, list concatenation (including empty
// operands), and size() on lists and maps.
INSTANTIATE_TEST_SUITE_P(
    ContainerFunctionParamsTest, ContainerFunctionParamsTest,
    ValuesIn<TestCase>(
        {{"FilterNumbers", "[1, 2, 3].filter(num, num == 1)",
          CelValue::CreateList(&CelNumberListExample())},
         {"ListConcatEmptyInputs", "[] + [] == []", CelValue::CreateBool(true)},
         {"ListConcatRightEmpty", "[1] + [] == [1]",
          CelValue::CreateBool(true)},
         {"ListConcatLeftEmpty", "[] + [1] == [1]", CelValue::CreateBool(true)},
         {"ListConcat", "[2] + [1] == [2, 1]", CelValue::CreateBool(true)},
         {"ListSize", "[1, 2, 3].size() == 3", CelValue::CreateBool(true)},
         {"MapSize", "{1: 2, 2: 4}.size() == 2", CelValue::CreateBool(true)},
         {"EmptyListSize", "size({}) == 0", CelValue::CreateBool(true)}}),
    [](const testing::TestParamInfo<ContainerFunctionParamsTest::ParamType>&
           info) { return info.param.test_name; });
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/container_function_registrar.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/container_function_registrar_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
e5e0a49a-1f40-435f-80d7-28442c2c513e | cpp | tensorflow/tensorflow | shape_inference_testutil | tensorflow/core/framework/shape_inference_testutil.cc | tensorflow/core/framework/shape_inference_testutil_test.cc | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include <algorithm>
#include <memory>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/scanner.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace shape_inference {
using errors::Unknown;
// Runs shape inference for `op` with input shapes parsed from `ins`
// (';'-separated specs such as "[1,2];?") and checks each inferred output
// against the corresponding entry of `expected_outs`.  Expected-value syntax:
//   "e"       - shape inference itself must return an error;
//   "in<i>"   - the output must be the same ShapeHandle as input i
//               ("in0|in1" lists acceptable alternatives);
//   "?"       - the output rank must be unknown;
//   "[...]"   - per-dimension specs: a literal size, "?" (unknown value), or
//               "d<i>_<j>" (same DimensionHandle as dim j of input i),
//               with "|" separating alternatives.
// Returns an error describing the first mismatch, or OK on success.
Status ShapeInferenceTestutil::InferShapes(ShapeInferenceTestOp op,
                                           const string& ins,
                                           const string& expected_outs) {
  const OpRegistrationData* op_reg_data;
  TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUp(op.name, &op_reg_data));
  std::vector<string> ins_v = str_util::Split(ins, ';');
  InferenceContext::ShapeManager manager;
  std::vector<ShapeHandle> in_shapes;
  for (const string& spec : ins_v) {
    ShapeHandle shape;
    TF_RETURN_IF_ERROR(MakeShapeFromString(&manager, spec, &shape));
    in_shapes.push_back(shape);
  }
  // Parse the optional resource-handle shape/type annotations for each input.
  std::vector<std::unique_ptr<std::vector<shape_inference::ShapeAndType>>>
      input_resource_handle_shapes_and_types;
  for (const auto p : op.input_resource_handle_shapes_and_types) {
    if (p == nullptr) {
      input_resource_handle_shapes_and_types.push_back(nullptr);
    } else {
      std::unique_ptr<std::vector<ShapeAndType>> v(
          new std::vector<ShapeAndType>());
      for (const auto& shape_and_type : *p) {
        ShapeHandle shape;
        TF_RETURN_IF_ERROR(
            MakeShapeFromString(&manager, shape_and_type.first, &shape));
        v->emplace_back(shape, shape_and_type.second);
      }
      input_resource_handle_shapes_and_types.emplace_back(v.release());
    }
  }
  shape_inference::InferenceContext c(
      op.graph_def_version, op.node_def, op_reg_data->op_def, in_shapes,
      op.input_tensors, {}, std::move(input_resource_handle_shapes_and_types));
  TF_RETURN_IF_ERROR(c.construction_status());
  if (op_reg_data->shape_inference_fn == nullptr) {
    return errors::InvalidArgument(
        "No shape inference function exists for op '", op.name,
        "', did you forget to define it?");
  }
  TF_RETURN_IF_ERROR(c.Run(op_reg_data->shape_inference_fn));
  const int num_outputs = c.num_outputs();
  if (expected_outs == "e") {
    // Inference succeeded but the caller expected it to fail.
    return Unknown("Shape inference should have returned error");
  }
  std::vector<string> expected_outs_v = str_util::Split(expected_outs, ';');
  if (num_outputs != expected_outs_v.size()) {
    return Unknown("The expected output string lists the wrong number of ",
                   "outputs. It lists ", expected_outs_v.size(),
                   " but should list ", num_outputs);
  }
  for (int i = 0; i < num_outputs; ++i) {
    StringPiece expected(expected_outs_v[i]);
    shape_inference::ShapeHandle out = c.output(i);
    string err_prefix = strings::StrCat("Output ", i);
    string err_suffix =
        strings::StrCat(". Output shape was ", c.DebugString(out));

    // Find which input (if any) shares its ShapeHandle with this output.
    int in_index = -1;
    for (int i = 0; i < c.num_inputs(); ++i) {
      if (c.input(i).SameHandle(out)) {
        in_index = i;
      }
    }

    // "in<i>" expectations: output must be passed through by handle.
    if (absl::StartsWith(expected, "in")) {
      if (in_index == -1) {
        return Unknown(err_prefix,
                       " should have matched an input shape by "
                       "handle, but matched no input shape. This means the ",
                       "shape function was expected to pass an input "
                       "ShapeHandle through for this output, but did not",
                       err_suffix);
      }
      auto v = str_util::Split(expected, '|');
      if (std::find(v.begin(), v.end(), strings::StrCat("in", in_index)) ==
          v.end()) {
        return Unknown(
            err_prefix, " matched input ", in_index,
            " by handle, but should have matched one of (", expected,
            ") instead. This means the shape function passed the ShapeHandle ",
            "for input ", in_index,
            " to the output, but should have passed a different input ",
            "ShapeHandle through", err_suffix);
      }
      continue;
    }
    if (in_index != -1) {
      return Unknown(err_prefix, " matched input ", in_index,
                     " by ShapeHandle, but was expected to not match an input ",
                     "shape by handle", err_suffix);
    }
    // "?" expectation: rank must be unknown.
    if (expected == "?") {
      if (c.RankKnown(out)) {
        return Unknown(err_prefix, " expected to be unknown", err_suffix);
      }
      continue;
    }
    // Remaining expectations must be bracketed dimension lists.
    CHECK(absl::StartsWith(expected, "[") && absl::EndsWith(expected, "]"))
        << expected;
    expected.remove_prefix(1);
    expected.remove_suffix(1);
    auto expected_dims = str_util::Split(expected, ',');
    if (!c.RankKnown(out)) {
      return Unknown(err_prefix, " expected rank ", expected_dims.size(),
                     " but was ?", err_suffix);
    }
    if (c.Rank(out) != expected_dims.size()) {
      return Unknown(err_prefix, " expected rank ", expected_dims.size(),
                     " but was ", c.Rank(out), err_suffix);
    }
    // Check each dimension of the output shape against its spec.
    for (int j = 0; j < expected_dims.size(); ++j) {
      err_prefix = strings::StrCat("Output dim ", i, ",", j);
      StringPiece expected_dim(expected_dims[j]);
      DimensionHandle out_dim = c.Dim(out, j);

      // Find the (input, dim) pair sharing this DimensionHandle, if any.
      std::pair<int, int> in_dim_idx(-1, -1);
      for (int i = 0; i < c.num_inputs(); ++i) {
        auto in = c.input(i);
        for (int j = 0; j < c.Rank(in); ++j) {
          if (c.Dim(in, j).SameHandle(out_dim)) {
            in_dim_idx = std::make_pair(i, j);
          }
        }
      }

      if (expected_dim == "?") {
        if (in_dim_idx.first != -1) {
          return Unknown(err_prefix,
                         " expected to be an unknown but matched input d",
                         in_dim_idx.first, "_", in_dim_idx.second,
                         ". The shape function passed through ",
                         "a DimensionHandle from an input instead of making ",
                         "a new unknown dimension", err_suffix);
        } else if (c.ValueKnown(out_dim)) {
          return Unknown(err_prefix, " expected to be unknown but was ",
                         c.Value(out_dim), err_suffix);
        }
      } else if (absl::StartsWith(expected_dim, "d")) {
        // "d<i>_<j>" expectations: dim passed through by DimensionHandle.
        auto v = str_util::Split(expected_dim, '|');
        if (in_dim_idx.first == -1) {
          return Unknown(
              err_prefix, " was expected to match the dimension of an input, ",
              "but did not match any input dimension. The shape ",
              "function was expected to pass through a ",
              "DimensionHandle for an input, but did not", err_suffix);
        }
        if (std::find(v.begin(), v.end(),
                      strings::StrCat("d", in_dim_idx.first, "_",
                                      in_dim_idx.second)) == v.end()) {
          return Unknown(err_prefix, " matched input d", in_dim_idx.first, "_",
                         in_dim_idx.second,
                         ", but should have matched one of (", expected_dim,
                         "). The shape function passed through "
                         "the DimensionHandle for an input, but ",
                         "was expected to pass a different one", err_suffix);
        }
      } else {
        // Literal dimension value: must match exactly, and must NOT be a
        // passed-through input handle (use d#_# syntax for that).
        int64_t value = -1;
        if (!strings::safe_strto64(expected_dim, &value)) {
          return Unknown(err_prefix, ": the expected dimension value '",
                         expected_dim, "' failed to parse as int64",
                         err_suffix);
        }
        if (in_dim_idx.first != -1) {
          return Unknown(
              err_prefix, " expected to be ", value, " but matched input d",
              in_dim_idx.first, "_", in_dim_idx.second,
              ". The shape function was not expected to pass a DimensionHandle "
              "from the input to the output, but did. Note that even if the "
              "passed through output has the same dimension value as the "
              "expected value, this is considered a failure for the test; "
              "switch to using d#_# syntax if passing through the "
              "DimensionHandle should be the expected behavior",
              err_suffix);
        } else if (value != c.Value(out_dim)) {
          return Unknown(err_prefix, " expected to be ", value, " but was ",
                         c.DebugString(out_dim), err_suffix);
        }
      }
    }
  }
  return absl::OkStatus();
}
// Parses a shape spec into a ShapeHandle owned by `manager`.  Accepted forms:
// "?" for a fully-unknown shape, or a bracketed comma-separated list of
// dimensions where each entry is a non-negative integer or "?" (unknown dim),
// e.g. "[2,?,3]".  Returns InvalidArgument on malformed input.
Status ShapeInferenceTestutil::MakeShapeFromString(
    InferenceContext::ShapeManager* manager, const string& spec,
    ShapeHandle* output) {
  if (spec == "?") {
    *output = manager->UnknownShape();
    return absl::OkStatus();
  }
  std::vector<DimensionHandle> dims;
  strings::Scanner scanner(spec);
  scanner.OneLiteral("[");
  while (scanner.Peek() != ']') {
    if (scanner.Peek() == '?') {
      scanner.OneLiteral("?");
      dims.push_back(manager->MakeDim(InferenceContext::kUnknownDim));
    } else {
      // Consume a run of digits and parse it as the dimension size.
      scanner.RestartCapture().Many(strings::Scanner::DIGIT);
      StringPiece match;
      int64_t dim_size = 0;
      if (!scanner.GetResult(nullptr, &match) ||
          !strings::safe_strto64(match, &dim_size)) {
        return errors::InvalidArgument("Could not parse number in ", spec);
      }
      dims.push_back(manager->MakeDim(dim_size));
    }
    if (scanner.Peek() == ',') {
      scanner.OneLiteral(",");
    } else if (scanner.Peek() != ']') {
      return errors::InvalidArgument(
          "Invalid input spec (] not found in dim shape): ", spec);
    }
  }
  // The spec must end exactly at the closing bracket.
  if (!scanner.OneLiteral("]").Eos().GetResult()) {
    return errors::InvalidArgument("Malformed shape spec: did not end in ']'.");
  }
  *output = manager->MakeShape(dims);
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/framework/shape_inference_testutil.h"
#include <string>
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace shape_inference {
namespace {
// Asserts that `str` contains `substr`, printing the full string on failure.
#define EXPECT_CONTAINS(str, substr) \
  do { \
    string s = (str); \
    EXPECT_TRUE(absl::StrContains(s, substr)) << "String: " << s; \
  } while (false)
// Shape function used by the test ops below; each test points it at a lambda
// before running inference.
static OpShapeInferenceFn* global_fn_ptr = nullptr;
REGISTER_OP("OpOneOut")
    .Input("inputs: N * T")
    .Output("o1: T")
    .Attr("N: int >= 1")
    .Attr("T: numbertype")
    .SetShapeFn([](InferenceContext* c) { return (*global_fn_ptr)(c); })
REGISTER_OP("OpTwoOut")
    .Input("inputs: N * T")
    .Output("o1: T")
    .Output("o2: T")
    .Attr("N: int >= 1")
    .Attr("T: numbertype")
    .SetShapeFn([](InferenceContext* c) { return (*global_fn_ptr)(c); });
// Builds a NodeDef for `op_name` with one float input per ';'-separated spec
// in `ins`, installs `fn` as the op's shape function via `global_fn_ptr`, runs
// ShapeInferenceTestutil::InferShapes, and returns the resulting error
// message (empty string on success).
string RunInferShapes(const string& op_name, const string& ins,
                      const string& expected_outs, OpShapeInferenceFn fn) {
  ShapeInferenceTestOp op(op_name);
  // One input per spec; a non-empty `ins` with k ';' separators has k+1 specs.
  const int num_inputs = 1 + std::count(ins.begin(), ins.end(), ';');
  std::vector<NodeDefBuilder::NodeOut> src_list;
  src_list.reserve(num_inputs);
  for (int i = 0; i < num_inputs; ++i) src_list.emplace_back("a", 0, DT_FLOAT);
  // Finalize directly into op.node_def (the previous local NodeDef was unused).
  TF_CHECK_OK(NodeDefBuilder("dummy", op_name)
                  .Input(src_list)
                  .Attr("N", num_inputs)
                  .Finalize(&op.node_def));
  global_fn_ptr = &fn;
  return std::string(
      ShapeInferenceTestutil::InferShapes(op, ins, expected_outs).message());
}
}
// Exercises every failure mode of ShapeInferenceTestutil::InferShapes by
// pairing shape functions (pass-through, unknown-output, fixed-output) with
// deliberately mismatched expectation strings, and checks the diagnostics.
TEST(ShapeInferenceTestutilTest, Failures) {
  // Shape fns used as fixtures: copy input 0 / input 2 through, emit unknown
  // shapes, emit a fixed [1,2], or emit [?,2].
  auto fn_copy_input_0 = [](InferenceContext* c) {
    c->set_output(0, c->input(0));
    return absl::OkStatus();
  };
  auto fn_copy_input_2 = [](InferenceContext* c) {
    c->set_output(0, c->input(2));
    return absl::OkStatus();
  };
  auto fn_output_unknown_shapes = [](InferenceContext* c) {
    for (int i = 0; i < c->num_outputs(); ++i) {
      c->set_output(i, c->UnknownShape());
    }
    return absl::OkStatus();
  };
  auto fn_output_1_2 = [](InferenceContext* c) {
    c->set_output(0, c->Matrix(1, 2));
    return absl::OkStatus();
  };
  auto fn_output_u_2 = [](InferenceContext* c) {
    c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, 2));
    return absl::OkStatus();
  };
  const string& op = "OpOneOut";
  // Whole-shape expectation failures.
  EXPECT_EQ("Shape inference should have returned error",
            RunInferShapes(op, "[1];[2];[1]", "e", fn_copy_input_0));
  EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "[1];[2]", fn_copy_input_0),
                  "wrong number of outputs");
  auto s = ShapeInferenceTestutil::InferShapes(ShapeInferenceTestOp("NoSuchOp"),
                                               "", "");
  EXPECT_TRUE(
      absl::StartsWith(s.message(), "Op type not registered 'NoSuchOp'"));
  // Handle pass-through mismatches ("in<i>" syntax).
  EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "?", fn_copy_input_0),
                  "expected to not match");
  EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "in2", fn_copy_input_0),
                  "should have matched one of (in2)");
  EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "in1|in2", fn_copy_input_0),
                  "should have matched one of (in1|in2)");
  EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "[1]", fn_copy_input_2),
                  "but was expected to not match");
  EXPECT_CONTAINS(RunInferShapes(op, "[1];[2];[1]", "in0|in1", fn_output_1_2),
                  "Output 0 should have matched an input shape");
  // Rank and unknown-shape mismatches.
  EXPECT_EQ("Output 0 expected to be unknown. Output shape was [1,2]",
            RunInferShapes(op, "[1];[2];[1]", "?", fn_output_1_2));
  EXPECT_EQ("Output 0 expected rank 3 but was 2. Output shape was [1,2]",
            RunInferShapes(op, "[1];[2];[1]", "[1,2,3]", fn_output_1_2));
  EXPECT_EQ(
      "Output 0 expected rank 2 but was ?. Output shape was ?",
      RunInferShapes(op, "[1];[2];[1]", "[1,2]", fn_output_unknown_shapes));
  EXPECT_EQ("Output 1 expected rank 3 but was ?. Output shape was ?",
            RunInferShapes("OpTwoOut", "[1];[2];[1]", "?;[1,2,3]",
                           fn_output_unknown_shapes));
  // Per-dimension value mismatches.
  EXPECT_EQ("Output dim 0,1 expected to be 3 but was 2. Output shape was [1,2]",
            RunInferShapes(op, "[1];[2];[1]", "[1,3]", fn_output_1_2));
  EXPECT_EQ("Output dim 0,0 expected to be 2 but was 1. Output shape was [1,2]",
            RunInferShapes(op, "[1];[2];[1]", "[2,2]", fn_output_1_2));
  EXPECT_EQ(
      "Output dim 0,0 expected to be unknown but was 1. Output shape was [1,2]",
      RunInferShapes(op, "[1];[2];[1]", "[?,2]", fn_output_1_2));
  EXPECT_EQ("Output dim 0,1 expected to be 1 but was 2. Output shape was [?,2]",
            RunInferShapes(op, "[1];[2];[1]", "[?,1]", fn_output_u_2));
  EXPECT_EQ("Output dim 0,0 expected to be 1 but was ?. Output shape was [?,2]",
            RunInferShapes(op, "[0,1,?];[2];[1]", "[1,2]", fn_output_u_2));
  // Dimension-handle pass-through mismatches ("d<i>_<j>" syntax).
  auto fn = [](InferenceContext* c) {
    c->set_output(0, c->MakeShape({c->Dim(c->input(0), 1), c->MakeDim(2),
                                   c->UnknownDim(), c->Dim(c->input(2), 0)}));
    return absl::OkStatus();
  };
  const string ins = "[0,1,?];[2];[1]";
  EXPECT_CONTAINS(RunInferShapes(op, ins, "[?,2,?,d2_0]", fn),
                  "Output dim 0,0 expected to be an unknown");
  EXPECT_CONTAINS(RunInferShapes(op, ins, "[0,2,?,d2_0]", fn),
                  "Output dim 0,0 expected to be 0 but matched input d0_1.");
  EXPECT_CONTAINS(
      RunInferShapes(op, ins, "[d0_0,2,?,d2_0]", fn),
      "dim 0,0 matched input d0_1, but should have matched one of (d0_0).");
  EXPECT_CONTAINS(RunInferShapes(op, ins, "[x,2,?,d2_0]", fn),
                  "Output dim 0,0: the expected dimension value 'x' failed to "
                  "parse as int64.");
  EXPECT_CONTAINS(RunInferShapes(op, ins, "[d0_0|d0_2,2,?,d2_0]", fn),
                  "dim 0,0 matched input d0_1, but should have matched one of "
                  "(d0_0|d0_2).");
  EXPECT_CONTAINS(RunInferShapes(op, ins, "[d0_1,?,?,d0_0|d2_0]", fn),
                  ("Output dim 0,1 expected to be unknown but was 2. "
                   "Output shape was [1,2,?,1]"));
  EXPECT_EQ(
      "Output dim 0,2 expected to be 8 but was ?. Output shape was [1,2,?,1]",
      RunInferShapes(op, ins, "[d0_1,2,8,d0_0|d2_0]", fn));
  EXPECT_CONTAINS(RunInferShapes(op, ins, "[d0_1,2,d0_1|d2_0,d0_0|d2_0]", fn),
                  "expected to match");
  // A fully-correct expectation string produces no error.
  EXPECT_EQ("",
            RunInferShapes(op, ins, "[d0_1,2,?,d0_0|d2_0]", fn));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/shape_inference_testutil.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/shape_inference_testutil_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8136dfc5-5900-48d6-82d2-d733ec8bfd59 | cpp | tensorflow/tensorflow | relu | tensorflow/lite/delegates/gpu/gl/kernels/relu.cc | tensorflow/lite/delegates/xnnpack/relu_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h"
#include <algorithm>
#include <any>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Emits GLSL for ReLU-family activations.
//   alpha == 0: plain ReLU whose lower bound is the `activation_min` uniform;
//   alpha != 0: leaky ReLU, with min(alpha * x, 0) as the lower bound.
// An upper clamp is generated only when `activation_max` is non-zero.
class ReLU : public NodeShader {
 public:
  absl::Status GenerateCode(const GenerationContext& ctx,
                            GeneratedCode* generated_code) const final {
    const auto& attr = std::any_cast<const ReLUAttributes&>(ctx.op_attr);
    // Build the lower-bound expression and its uniform parameter.
    std::vector<Variable> params;
    std::string min;
    if (attr.alpha == 0) {
      min = "vec4($activation_min$)";
      params.push_back({"activation_min", attr.activation_min});
    } else {
      min = "min($alpha$ * value_0, 0.0)";
      params.push_back({"alpha", attr.alpha});
    }
    std::string code;
    if (attr.activation_max == 0) {
      // No upper bound: a plain max() suffices.
      code = "value_0 = max(value_0, " + min + ");";
    } else {
      code = "value_0 = clamp(value_0, " + min + ", vec4($activation_max$));";
      params.push_back({"activation_max", attr.activation_max});
    }
    *generated_code = {
        std::move(params),
        {},
        {},
        uint3(),
        uint3(),
        std::move(code),
        IOStructure::AUTO,
        IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
};
}
// Factory for the ReLU node shader defined above.
std::unique_ptr<NodeShader> NewReLUNodeShader() {
  return std::make_unique<ReLU>();
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
// RELU on a random-shaped 4D tensor runs correctly under the XNNPACK delegate.
TEST(Relu, 4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  // Random dims in [2, 5] keep the test fast while varying shapes.
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
// RELU on a random-shaped 3D tensor under the XNNPACK delegate.
TEST(Relu, 3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, width, channels})
      .Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
// RELU on a random-shaped 2D tensor under the XNNPACK delegate.
TEST(Relu, 2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, channels})
      .Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
// RELU on a random-length 1D tensor under the XNNPACK delegate.
TEST(Relu, 1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  UnaryElementwiseTester().Shape({batch}).Test(BuiltinOperator_RELU,
                                               xnnpack_delegate.get());
}
// Same 4D RELU check, but with the delegate configured for two threads.
TEST(Relu, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  UnaryElementwiseTester()
      .Shape({batch, height, width, channels})
      .Test(BuiltinOperator_RELU, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/relu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/relu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0e0bd9f-6772-4b67-a6fa-86f8287c9d5b | cpp | tensorflow/tensorflow | loss | tensorflow/core/kernels/loss.h | tensorflow/core/kernels/loss_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_LOSS_H_
#define TENSORFLOW_CORE_KERNELS_LOSS_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
// Interface for the loss functions used by the dual coordinate-ascent (SDCA)
// solver.  Implementations supply the primal and dual losses, the primal-loss
// derivative, and the dual-variable update for one example.
class DualLossUpdater {
 public:
  virtual ~DualLossUpdater() {}
  // Returns the updated dual variable for one example, given the current
  // dual, the prediction wx, and the example's weighted squared norm.
  virtual double ComputeUpdatedDual(
      const int num_loss_partitions, const double label,
      const double example_weight, const double current_dual, const double wx,
      const double weighted_example_norm) const = 0;
  // Dual loss as a function of the current dual variable.
  virtual double ComputeDualLoss(const double current_dual,
                                 const double example_label,
                                 const double example_weight) const = 0;
  // Primal loss as a function of the prediction wx.
  virtual double ComputePrimalLoss(const double wx, const double example_label,
                                   const double example_weight) const = 0;
  // Derivative of the primal loss with respect to wx.
  virtual double PrimalLossDerivative(const double wx,
                                      const double example_label,
                                      const double example_weight) const = 0;
  // Smoothness constant of the loss (used to bound the dual update step).
  virtual double SmoothnessConstant() const = 0;
  // Converts/validates a raw label into the range this loss expects.
  virtual Status ConvertLabel(float* const example_label) const = 0;
};
}
#endif | #include <limits>
#include "tensorflow/core/kernels/hinge-loss.h"
#include "tensorflow/core/kernels/logistic-loss.h"
#include "tensorflow/core/kernels/poisson-loss.h"
#include "tensorflow/core/kernels/smooth-hinge-loss.h"
#include "tensorflow/core/kernels/squared-loss.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Verifies the dual update's optimality condition: after applying the update,
// the new dual must equal the negated primal-loss derivative at the new wx
// (the stationarity condition of the per-example dual maximization).
void TestComputeUpdatedDual(const DualLossUpdater &loss_updater,
                            const int num_loss_partitions, const double label,
                            const double example_weight,
                            const double current_dual, const double wx,
                            const double weighted_example_norm) {
  double new_dual = loss_updater.ComputeUpdatedDual(
      num_loss_partitions, label, example_weight, current_dual, wx,
      weighted_example_norm);
  // Reconstruct the prediction implied by the dual change.
  double new_wx = wx + (new_dual - current_dual) * num_loss_partitions *
                           weighted_example_norm * example_weight;
  EXPECT_NEAR(new_dual, -loss_updater.PrimalLossDerivative(new_wx, label, 1.0),
              1e-5);
}
TEST(LogisticLoss, ComputePrimalLoss) {
LogisticLossUpdater loss_updater;
EXPECT_NEAR(0.693147,
loss_updater.ComputePrimalLoss(0 , 1 ,
1 ),
1e-3);
EXPECT_NEAR(0.0,
loss_updater.ComputePrimalLoss(70 , 1 ,
1 ),
1e-3);
EXPECT_NEAR(0.0,
loss_updater.ComputePrimalLoss(-70 , -1 ,
1 ),
1e-3);
}
TEST(LogisticLoss, ComputeDualLoss) {
LogisticLossUpdater loss_updater;
EXPECT_NEAR(0.0,
loss_updater.ComputeDualLoss(0 , 1 ,
1 ),
1e-3);
EXPECT_NEAR(0.0,
loss_updater.ComputeDualLoss(1 , 1 ,
1 ),
1e-3);
EXPECT_NEAR(
-0.693147,
loss_updater.ComputeDualLoss(0.5 , 1 ,
1 ),
1e-3);
}
TEST(LogisticLoss, ComputeUpdatedDual) {
LogisticLossUpdater loss_updater;
TestComputeUpdatedDual(loss_updater, 1 , 1.0 ,
1.0 , 0.5 ,
0.3 , 10.0 );
TestComputeUpdatedDual(loss_updater, 2 , -1.0 ,
1.0 , 0.1 ,
-0.8 , 10.0 );
}
TEST(SquaredLoss, ComputePrimalLoss) {
SquaredLossUpdater loss_updater;
EXPECT_NEAR(0.5,
loss_updater.ComputePrimalLoss(0.0 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(40.5,
loss_updater.ComputePrimalLoss(10.0 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(0.125,
loss_updater.ComputePrimalLoss(-0.5 , -1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(4.84,
loss_updater.ComputePrimalLoss(1.2 , -1.0 ,
2.0 ),
1e-3);
}
TEST(SquaredLoss, ComputeDualLoss) {
SquaredLossUpdater loss_updater;
EXPECT_NEAR(
0.0,
loss_updater.ComputeDualLoss(0.0 , -1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
0.66,
loss_updater.ComputeDualLoss(0.2 , -1.0 ,
3.0 ),
1e-3);
EXPECT_NEAR(
-0.375,
loss_updater.ComputeDualLoss(1.5 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
-1.125,
loss_updater.ComputeDualLoss(0.5 , 1.0 ,
3.0 ),
1e-3);
}
TEST(SquaredLoss, ComputeUpdatedDual) {
SquaredLossUpdater loss_updater;
TestComputeUpdatedDual(loss_updater, 1 , 1.0 ,
1.0 , 0.3 ,
0.3 , 10.0 );
TestComputeUpdatedDual(loss_updater, 5 , -1.0 ,
1.0 , -0.4 ,
0.8 , 10.0 );
}
TEST(HingeLoss, ComputePrimalLoss) {
HingeLossUpdater loss_updater;
EXPECT_NEAR(1.0,
loss_updater.ComputePrimalLoss(0.0 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(0.0,
loss_updater.ComputePrimalLoss(10.0 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(0.5,
loss_updater.ComputePrimalLoss(-0.5 , -1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(4.4,
loss_updater.ComputePrimalLoss(1.2 , -1.0 ,
2.0 ),
1e-3);
}
TEST(HingeLoss, ComputeDualLoss) {
HingeLossUpdater loss_updater;
EXPECT_NEAR(
0.0,
loss_updater.ComputeDualLoss(0.0 , -1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
std::numeric_limits<double>::max(),
loss_updater.ComputeDualLoss(0.2 , -1.0 ,
3.0 ),
1e-3);
EXPECT_NEAR(
std::numeric_limits<double>::max(),
loss_updater.ComputeDualLoss(1.5 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
-1.5,
loss_updater.ComputeDualLoss(0.5 , 1.0 ,
3.0 ),
1e-3);
}
TEST(HingeLoss, ConvertLabel) {
HingeLossUpdater loss_updater;
float example_label = 1.0;
Status status;
TF_EXPECT_OK(loss_updater.ConvertLabel(&example_label));
EXPECT_EQ(1.0, example_label);
example_label = 0.0;
TF_EXPECT_OK(loss_updater.ConvertLabel(&example_label));
EXPECT_EQ(-1.0, example_label);
example_label = 0.5;
status = loss_updater.ConvertLabel(&example_label);
EXPECT_FALSE(status.ok());
}
TEST(HingeLoss, ComputeUpdatedDual) {
HingeLossUpdater loss_updater;
EXPECT_NEAR(0.507,
loss_updater.ComputeUpdatedDual(
1 , 1.0 ,
1.0 , 0.5 ,
0.3 , 100.0 ),
1e-3);
EXPECT_NEAR(-0.416,
loss_updater.ComputeUpdatedDual(
10 , -1.0 ,
1.0 , -0.4 ,
0.6 , 10.0 ),
1e-3);
TestComputeUpdatedDual(loss_updater, 1 , 1.0 ,
1.0 , -0.5 ,
0.3 , 10.0 );
TestComputeUpdatedDual(loss_updater, 1 , -1.0 ,
2.0 , -1.0 ,
0.3 , 10.0 );
}
TEST(SmoothHingeLoss, ComputePrimalLoss) {
SmoothHingeLossUpdater loss_updater;
EXPECT_NEAR(0.5,
loss_updater.ComputePrimalLoss(0.0 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(0.0,
loss_updater.ComputePrimalLoss(10.0 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(0.125,
loss_updater.ComputePrimalLoss(-0.5 , -1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(3.4,
loss_updater.ComputePrimalLoss(1.2 , -1.0 ,
2.0 ),
1e-3);
}
TEST(SmoothHingeLoss, ComputeDualLoss) {
SmoothHingeLossUpdater loss_updater;
EXPECT_NEAR(
0.0,
loss_updater.ComputeDualLoss(0.0 , -1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
std::numeric_limits<double>::max(),
loss_updater.ComputeDualLoss(0.2 , -1.0 ,
3.0 ),
1e-3);
EXPECT_NEAR(
std::numeric_limits<double>::max(),
loss_updater.ComputeDualLoss(1.5 , 1.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
-1.125,
loss_updater.ComputeDualLoss(0.5 , 1.0 ,
3.0 ),
1e-3);
}
TEST(SmoothHingeLoss, ComputeUpdatedDual) {
SmoothHingeLossUpdater loss_updater;
TestComputeUpdatedDual(loss_updater, 1 , 1.0 ,
1.0 , 0.3 ,
0.3 , 10.0 );
TestComputeUpdatedDual(loss_updater, 5 , -1.0 ,
1.0 , -0.4 ,
0.8 , 10.0 );
}
TEST(PoissonLoss, ComputePrimalLoss) {
PoissonLossUpdater loss_updater;
EXPECT_NEAR(1.0,
loss_updater.ComputePrimalLoss(0.0 , 3.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(21996.0,
loss_updater.ComputePrimalLoss(10.0 , 3.0 ,
1.0 ),
1.0);
EXPECT_NEAR(0.606,
loss_updater.ComputePrimalLoss(-0.5 , 0.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(6.64,
loss_updater.ComputePrimalLoss(1.2 , 0.0 ,
2.0 ),
1e-2);
}
TEST(PoissonLoss, ComputeDualLoss) {
PoissonLossUpdater loss_updater;
EXPECT_NEAR(
std::numeric_limits<double>::max(),
loss_updater.ComputeDualLoss(1.0 , 0.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
0.0,
loss_updater.ComputeDualLoss(0.0 , 0.0 ,
3.0 ),
1e-3);
EXPECT_NEAR(
-0.847,
loss_updater.ComputeDualLoss(1.5 , 2.0 ,
1.0 ),
1e-3);
EXPECT_NEAR(
-2.675,
loss_updater.ComputeDualLoss(0.5 , 2.0 ,
3.0 ),
1e-3);
}
TEST(PoissonLoss, ConvertLabel) {
PoissonLossUpdater loss_updater;
float example_label = -1.0;
Status status = loss_updater.ConvertLabel(&example_label);
EXPECT_FALSE(status.ok());
}
TEST(PoissonLoss, ComputeUpdatedDual) {
PoissonLossUpdater loss_updater;
TestComputeUpdatedDual(loss_updater, 1 , 2.0 ,
1.0 , 0.5 ,
0.3 , 10.0 );
TestComputeUpdatedDual(loss_updater, 2 , 0.0 ,
1.0 , 0.0 ,
-0.8 , 10.0 );
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/loss.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/loss_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e44e4956-5339-4165-8837-5ff0460ea0e3 | cpp | tensorflow/tensorflow | example_proto_fast_parsing | tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.cc | tensorflow/core/util/example_proto_fast_parsing_test.cc | #include "tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.h"
#include <algorithm>
#include <utility>
namespace tensorflow {
namespace example {
// Returns the name of the n-th example for use in error messages, or a
// placeholder when no example names were supplied.
string ExampleName(const absl::Span<const tstring> example_names, int n) {
  if (example_names.empty()) {
    return "<unknown>";
  }
  return example_names[n];
}
// Accumulates, over all minibatch buffers, the total number of values for
// sparse feature `d` into *total_num_features, and the largest per-example
// value count into *max_num_features.  example_end_indices holds cumulative
// end offsets, so per-example sizes are consecutive differences.
void CountSparseFeatures(
    const std::vector<std::vector<SparseBuffer>>& sparse_buffers, size_t d,
    size_t* total_num_features, size_t* max_num_features) {
  for (const auto& minibatch : sparse_buffers) {
    const std::vector<size_t>& end_indices =
        minibatch[d].example_end_indices;
    *total_num_features += end_indices.back();
    size_t prev_end = 0;
    for (size_t end : end_indices) {
      *max_num_features = std::max(*max_num_features, end - prev_end);
      prev_end = end;
    }
  }
}
// Copies the contents of `src` (whose active list is selected by `dtype`)
// into `dst` starting at element `offset`.
void CopySparseBufferToTensor(DataType dtype, size_t offset, SparseBuffer* src,
                              Tensor* dst) {
  switch (dtype) {
    case DT_INT64:
      std::copy(src->int64_list.begin(), src->int64_list.end(),
                dst->flat<int64_t>().data() + offset);
      break;
    case DT_FLOAT:
      std::copy(src->float_list.begin(), src->float_list.end(),
                dst->flat<float>().data() + offset);
      break;
    case DT_STRING:
      // Strings are moved rather than copied to avoid duplicating the
      // (potentially large) byte payloads.
      std::move(src->bytes_list.begin(), src->bytes_list.end(),
                dst->flat<tstring>().data() + offset);
      break;
    default:
      ReportUnexpectedDataType(dtype);
  }
}
// Returns the next byte (the proto tag) in `stream` without consuming it,
// or 0 if no buffered data is available.
uint8 PeekTag(protobuf::io::CodedInputStream* stream) {
  DCHECK(stream != nullptr);
  const void* buffer = nullptr;
  int buffer_size = 0;
  if (stream->GetDirectBufferPointer(&buffer, &buffer_size)) {
    return *static_cast<const uint8*>(buffer);
  }
  return 0;
}
// Reads a length-delimited byte string from `stream` into `result` WITHOUT
// copying: `result` aliases the stream's underlying buffer, so it is only
// valid while that buffer stays alive (the stream must have aliasing
// enabled).  Returns false on a malformed varint length or truncated data.
bool ParseString(protobuf::io::CodedInputStream* stream, StringPiece* result) {
  DCHECK(stream != nullptr);
  DCHECK(result != nullptr);
  uint32 length;
  if (!stream->ReadVarint32(&length)) return false;
  if (length == 0) {
    // Normalize the empty string to a null StringPiece.
    *result = StringPiece(nullptr, 0);
    return true;
  }
  const void* stream_alias;
  int stream_size;
  if (!stream->GetDirectBufferPointer(&stream_alias, &stream_size)) {
    return false;
  }
  // The whole payload must be available in one contiguous chunk, because the
  // result aliases the buffer rather than copying out of it.
  if (static_cast<uint32>(stream_size) < length) return false;
  *result = StringPiece(static_cast<const char*>(stream_alias), length);
  stream->Skip(length);
  return true;
}
// Parses one map<string, Feature> entry: a length-delimited message whose
// field 1 is the key and field 2 the value.  The value is captured as an
// unparsed byte range (parsed::Feature) for lazy decoding later.  This path
// requires fields in tag order (key then value); out-of-order entries make
// it return false — presumably handled by a fallback path elsewhere (the
// ValueBeforeKeyInMap unit test passes end to end) — TODO confirm.
bool ParseFeatureMapEntry(protobuf::io::CodedInputStream* stream,
                          parsed::FeatureMapEntry* feature_map_entry) {
  DCHECK(stream != nullptr);
  DCHECK(feature_map_entry != nullptr);
  uint32 length;
  if (!stream->ReadVarint32(&length)) return false;
  // Restrict parsing to this entry's payload.
  auto limit = stream->PushLimit(length);
  if (!stream->ExpectTag(kDelimitedTag(1))) return false;
  if (!ParseString(stream, &feature_map_entry->first)) return false;
  if (!stream->ExpectTag(kDelimitedTag(2))) return false;
  StringPiece feature_string_piece;
  if (!ParseString(stream, &feature_string_piece)) return false;
  feature_map_entry->second = parsed::Feature(feature_string_piece);
  if (!stream->ExpectAtEnd()) return false;
  stream->PopLimit(limit);
  return true;
}
// Parses a length-delimited Features message: a sequence of feature-map
// entries (field 1), appended to `example` in encounter order.  Returns
// false on any unexpected tag or malformed entry.
bool ParseFeatures(protobuf::io::CodedInputStream* stream,
                   parsed::Example* example) {
  DCHECK(stream != nullptr);
  DCHECK(example != nullptr);
  uint32 length;
  if (!stream->ReadVarint32(&length)) return false;
  // Restrict parsing to this message's payload.
  auto limit = stream->PushLimit(length);
  while (!stream->ExpectAtEnd()) {
    parsed::FeatureMapEntry feature_map_entry;
    if (!stream->ExpectTag(kDelimitedTag(1))) return false;
    if (!ParseFeatureMapEntry(stream, &feature_map_entry)) return false;
    example->push_back(std::move(feature_map_entry));
  }
  stream->PopLimit(limit);
  return true;
}
// Parses a serialized Example from `stream` until the end of the stream.
// Every `features` field (field 1) encountered is merged into `example`;
// any other top-level field is skipped, so concatenated/extended protos
// still parse.  Returns false on malformed input.
bool ParseExample(protobuf::io::CodedInputStream* stream,
                  parsed::Example* example) {
  DCHECK(stream != nullptr);
  DCHECK(example != nullptr);
  while (!stream->ExpectAtEnd()) {
    if (!stream->ExpectTag(kDelimitedTag(1))) {
      if (!SkipExtraneousTag(stream)) return false;
    } else {
      if (!ParseFeatures(stream, example)) return false;
    }
  }
  return true;
}
// Convenience overload: wraps `serialized` in a CodedInputStream with
// aliasing enabled (so parsed StringPieces point directly into `serialized`)
// and parses it as an Example.  `serialized` must outlive `example`.
bool ParseExample(StringPiece serialized, parsed::Example* example) {
  DCHECK(example != nullptr);
  protobuf::io::CodedInputStream coded_stream(
      reinterpret_cast<const uint8*>(serialized.data()), serialized.size());
  EnableAliasing(&coded_stream);
  return ParseExample(&coded_stream, example);
}
// tstring blocks are moved (not copied) to avoid duplicating string
// payloads.
template <>
void CopyOrMoveBlock(const tstring* b, const tstring* e, tstring* t) {
  std::move(b, e, t);
}
// GetListFromBuffer<T>: selects the SparseBuffer list whose element type
// matches T.
template <>
const SmallVector<int64_t>& GetListFromBuffer<int64_t>(
    const SparseBuffer& buffer) {
  return buffer.int64_list;
}
template <>
const SmallVector<float>& GetListFromBuffer<float>(const SparseBuffer& buffer) {
  return buffer.float_list;
}
template <>
const SmallVector<tstring>& GetListFromBuffer<tstring>(
    const SparseBuffer& buffer) {
  return buffer.bytes_list;
}
}
} | #include "tensorflow/core/util/example_proto_fast_parsing.h"
#include <unordered_set>
#include <utility>
#include <vector>
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/lib/random/philox_random.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/util/example_proto_fast_parsing_test.pb.h"
namespace tensorflow {
namespace example {
namespace {
constexpr char kDenseInt64Key[] = "dense_int64";
constexpr char kDenseFloatKey[] = "dense_float";
constexpr char kDenseStringKey[] = "dense_string";
constexpr char kSparseInt64Key[] = "sparse_int64";
constexpr char kSparseFloatKey[] = "sparse_float";
constexpr char kSparseStringKey[] = "sparse_string";
// Renders a serialized proto as a C-style string literal of "\xNN" escapes
// so a failing input can be pasted straight back into a regression test.
string SerializedToReadable(string serialized) {
  string result;
  result += '"';
  for (char c : serialized) {
    // Cast through uint8: passing a (signed) char directly sign-extends
    // bytes >= 0x80, printing a 16-digit hex value instead of two digits
    // (kZeroPad2 is a minimum width, not a truncation).
    result += strings::StrCat(
        "\\x", strings::Hex(static_cast<uint8>(c), strings::kZeroPad2));
  }
  result += '"';
  return result;
}
// Serializes any proto message to its wire-format byte string.
template <class T>
string Serialize(const T& example) {
  string serialized;
  example.SerializeToString(&serialized);
  return serialized;
}
// Parses `serialized` with both the regular proto parser and the fast
// parser and asserts the resulting Examples are identical.  Unknown fields
// are discarded from the reference parse first, since the fast parser never
// materializes them.  On mismatch, logs the input as a \xNN literal for
// easy reproduction.
void TestCorrectness(const string& serialized) {
  Example example;
  Example fast_example;
  EXPECT_TRUE(example.ParseFromString(serialized));
  example.DiscardUnknownFields();
  EXPECT_TRUE(TestFastParse(serialized, &fast_example));
  EXPECT_EQ(example.DebugString(), fast_example.DebugString());
  if (example.DebugString() != fast_example.DebugString()) {
    LOG(ERROR) << "Bad serialized: " << SerializedToReadable(serialized);
  }
}
TEST(FastParse, IgnoresPrecedingUnknownTopLevelFields) {
ExampleWithExtras example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
example.set_extra1("some_str");
example.set_extra2(123);
example.set_extra3(234);
example.set_extra4(345);
example.set_extra5(4.56);
example.add_extra6(5.67);
example.add_extra6(6.78);
(*example.mutable_extra7()->mutable_feature())["extra7"]
.mutable_int64_list()
->add_value(1337);
Example context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, IgnoresTrailingUnknownTopLevelFields) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
ExampleWithExtras context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
context.set_extra1("some_str");
context.set_extra2(123);
context.set_extra3(234);
context.set_extra4(345);
context.set_extra5(4.56);
context.add_extra6(5.67);
context.add_extra6(6.78);
(*context.mutable_extra7()->mutable_feature())["extra7"]
.mutable_int64_list()
->add_value(1337);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, SingleInt64WithContext) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
Example context;
(*context.mutable_features()->mutable_feature())["zipcode"]
.mutable_int64_list()
->add_value(94043);
TestCorrectness(strings::StrCat(Serialize(example), Serialize(context)));
}
TEST(FastParse, DenseInt64WithContext) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(0);
Example context;
(*context.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(15);
string serialized = Serialize(example) + Serialize(context);
{
Example deserialized;
EXPECT_TRUE(deserialized.ParseFromString(serialized));
EXPECT_EQ(deserialized.DebugString(), context.DebugString());
}
TestCorrectness(serialized);
}
TEST(FastParse, NonPacked) {
TestCorrectness(
"\x0a\x0e\x0a\x0c\x0a\x03\x61\x67\x65\x12\x05\x1a\x03\x0a\x01\x0d");
}
TEST(FastParse, Packed) {
TestCorrectness(
"\x0a\x0d\x0a\x0b\x0a\x03\x61\x67\x65\x12\x04\x1a\x02\x08\x0d");
}
TEST(FastParse, ValueBeforeKeyInMap) {
TestCorrectness("\x0a\x12\x0a\x10\x12\x09\x0a\x07\x0a\x05value\x0a\x03key");
}
TEST(FastParse, EmptyFeatures) {
Example example;
example.mutable_features();
TestCorrectness(Serialize(example));
}
void TestCorrectnessJson(const string& json) {
auto resolver = protobuf::util::NewTypeResolverForDescriptorPool(
"type.googleapis.com", protobuf::DescriptorPool::generated_pool());
string serialized;
auto s = protobuf::util::JsonToBinaryString(
resolver, "type.googleapis.com/tensorflow.Example", json, &serialized);
EXPECT_TRUE(s.ok()) << s;
delete resolver;
TestCorrectness(serialized);
}
TEST(FastParse, JsonUnivalent) {
TestCorrectnessJson(
"{'features': {"
" 'feature': {'age': {'int64_list': {'value': [0]} }}, "
" 'feature': {'flo': {'float_list': {'value': [1.1]} }}, "
" 'feature': {'byt': {'bytes_list': {'value': ['WW8='] }}}"
"}}");
}
TEST(FastParse, JsonMultivalent) {
TestCorrectnessJson(
"{'features': {"
" 'feature': {'age': {'int64_list': {'value': [0, 13, 23]} }}, "
" 'feature': {'flo': {'float_list': {'value': [1.1, 1.2, 1.3]} }}, "
" 'feature': {'byt': {'bytes_list': {'value': ['WW8=', 'WW8K'] }}}"
"}}");
}
TEST(FastParse, SingleInt64) {
Example example;
(*example.mutable_features()->mutable_feature())["age"]
.mutable_int64_list()
->add_value(13);
TestCorrectness(Serialize(example));
}
static string ExampleWithSomeFeatures() {
Example example;
(*example.mutable_features()->mutable_feature())[""];
(*example.mutable_features()->mutable_feature())["empty_bytes_list"]
.mutable_bytes_list();
(*example.mutable_features()->mutable_feature())["empty_float_list"]
.mutable_float_list();
(*example.mutable_features()->mutable_feature())["empty_int64_list"]
.mutable_int64_list();
BytesList* bytes_list =
(*example.mutable_features()->mutable_feature())["bytes_list"]
.mutable_bytes_list();
bytes_list->add_value("bytes1");
bytes_list->add_value("bytes2");
FloatList* float_list =
(*example.mutable_features()->mutable_feature())["float_list"]
.mutable_float_list();
float_list->add_value(1.0);
float_list->add_value(2.0);
Int64List* int64_list =
(*example.mutable_features()->mutable_feature())["int64_list"]
.mutable_int64_list();
int64_list->add_value(3);
int64_list->add_value(270);
int64_list->add_value(86942);
return Serialize(example);
}
TEST(FastParse, SomeFeatures) { TestCorrectness(ExampleWithSomeFeatures()); }
// Appends a dense-feature config entry with the given attributes to
// out_config->dense.
static void AddDenseFeature(const char* feature_name, DataType dtype,
                            PartialTensorShape shape, bool variable_length,
                            size_t elements_per_stride,
                            FastParseExampleConfig* out_config) {
  // C++17 emplace_back returns a reference to the new element.
  auto& feature = out_config->dense.emplace_back();
  feature.feature_name = feature_name;
  feature.dtype = dtype;
  feature.shape = std::move(shape);
  feature.default_value = Tensor(dtype, {});
  feature.variable_length = variable_length;
  feature.elements_per_stride = elements_per_stride;
}
// Appends a sparse-feature config entry with the given name and dtype to
// out_config->sparse.
static void AddSparseFeature(const char* feature_name, DataType dtype,
                             FastParseExampleConfig* out_config) {
  // C++17 emplace_back returns a reference to the new element.
  auto& feature = out_config->sparse.emplace_back();
  feature.feature_name = feature_name;
  feature.dtype = dtype;
}
TEST(FastParse, StatsCollection) {
const size_t kNumExamples = 13;
std::vector<tstring> serialized(kNumExamples, ExampleWithSomeFeatures());
FastParseExampleConfig config_dense;
AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_dense);
AddDenseFeature("float_list", DT_FLOAT, {2}, false, 2, &config_dense);
AddDenseFeature("int64_list", DT_INT64, {3}, false, 3, &config_dense);
config_dense.collect_feature_stats = true;
FastParseExampleConfig config_varlen;
AddDenseFeature("bytes_list", DT_STRING, {-1}, true, 1, &config_varlen);
AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_varlen);
AddDenseFeature("int64_list", DT_INT64, {-1}, true, 1, &config_varlen);
config_varlen.collect_feature_stats = true;
FastParseExampleConfig config_sparse;
AddSparseFeature("bytes_list", DT_STRING, &config_sparse);
AddSparseFeature("float_list", DT_FLOAT, &config_sparse);
AddSparseFeature("int64_list", DT_INT64, &config_sparse);
config_sparse.collect_feature_stats = true;
FastParseExampleConfig config_mixed;
AddDenseFeature("bytes_list", DT_STRING, {2}, false, 2, &config_mixed);
AddDenseFeature("float_list", DT_FLOAT, {-1}, true, 1, &config_mixed);
AddSparseFeature("int64_list", DT_INT64, &config_mixed);
config_mixed.collect_feature_stats = true;
for (const FastParseExampleConfig& config :
{config_dense, config_varlen, config_sparse, config_mixed}) {
{
Result result;
TF_CHECK_OK(FastParseExample(config, serialized, {}, nullptr, &result));
EXPECT_EQ(kNumExamples, result.feature_stats.size());
for (const PerExampleFeatureStats& stats : result.feature_stats) {
EXPECT_EQ(7, stats.features_count);
EXPECT_EQ(7, stats.feature_values_count);
}
}
{
Result result;
TF_CHECK_OK(FastParseSingleExample(config, serialized[0], &result));
EXPECT_EQ(1, result.feature_stats.size());
EXPECT_EQ(7, result.feature_stats[0].features_count);
EXPECT_EQ(7, result.feature_stats[0].feature_values_count);
}
}
}
// Returns a random string of 1..200 characters drawn from key_char_lookup.
// NOTE(review): the modulus uses sizeof(key_char_lookup), which includes the
// trailing '\0' terminator, so roughly 1 in 77 generated characters is a NUL
// byte.  Possibly intentional (fuzz coverage of embedded NULs in keys) —
// confirm; otherwise subtract 1 from the table size.
string RandStr(random::SimplePhilox* rng) {
  static const char key_char_lookup[] =
      "0123456789{}~`!@#$%^&*()"
      "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
      "abcdefghijklmnopqrstuvwxyz";
  auto len = 1 + rng->Rand32() % 200;
  string str;
  str.reserve(len);
  while (len-- > 0) {
    str.push_back(
        key_char_lookup[rng->Rand32() % (sizeof(key_char_lookup) /
                                         sizeof(key_char_lookup[0]))]);
  }
  return str;
}
// Builds one random serialized input and checks fast-parse vs. proto-parse
// parity on it.  Generates up to 4 concatenated Example protos (raw
// concatenation is valid protobuf and merges the messages), each holding a
// random subset of up to 100 shared keys mapped to random bytes / float /
// int64 lists.
void Fuzz(random::SimplePhilox* rng) {
  auto num_keys = 1 + rng->Rand32() % 100;
  std::unordered_set<string> unique_keys;
  for (auto i = 0; i < num_keys; ++i) {
    unique_keys.emplace(RandStr(rng));
  }
  Example example;
  string serialized_example;
  auto num_concats = 1 + rng->Rand32() % 4;
  std::vector<Feature::KindCase> feat_types(
      {Feature::kBytesList, Feature::kFloatList, Feature::kInt64List});
  std::vector<string> all_keys(unique_keys.begin(), unique_keys.end());
  while (num_concats--) {
    example.Clear();
    auto num_active_keys = 1 + rng->Rand32() % all_keys.size();
    for (auto i = 0; i < num_active_keys; ++i) {
      // Keys may repeat within one Example; a later feature overwrites an
      // earlier one in the map.
      auto fkey = all_keys[rng->Rand32() % all_keys.size()];
      auto ftype_idx = rng->Rand32() % feat_types.size();
      auto num_features = 1 + rng->Rand32() % 5;
      switch (static_cast<Feature::KindCase>(feat_types[ftype_idx])) {
        case Feature::kBytesList: {
          BytesList* bytes_list =
              (*example.mutable_features()->mutable_feature())[fkey]
                  .mutable_bytes_list();
          while (num_features--) {
            bytes_list->add_value(RandStr(rng));
          }
          break;
        }
        case Feature::kFloatList: {
          FloatList* float_list =
              (*example.mutable_features()->mutable_feature())[fkey]
                  .mutable_float_list();
          while (num_features--) {
            float_list->add_value(rng->RandFloat());
          }
          break;
        }
        case Feature::kInt64List: {
          Int64List* int64_list =
              (*example.mutable_features()->mutable_feature())[fkey]
                  .mutable_int64_list();
          while (num_features--) {
            int64_list->add_value(rng->Rand64());
          }
          break;
        }
        default: {
          // Unreachable: feat_types only holds the three cases above.
          LOG(QFATAL);
          break;
        }
      }
    }
    serialized_example += example.SerializeAsString();
  }
  TestCorrectness(serialized_example);
}
TEST(FastParse, FuzzTest) {
const uint64 seed = 1337;
random::PhiloxRandom philox(seed);
random::SimplePhilox rng(&philox);
auto num_runs = 200;
while (num_runs--) {
LOG(INFO) << "runs left: " << num_runs;
Fuzz(&rng);
}
}
TEST(TestFastParseExample, Empty) {
Result result;
FastParseExampleConfig config;
config.sparse.push_back({"test", DT_STRING});
Status status =
FastParseExample(config, absl::Span<const tstring>(),
absl::Span<const tstring>(), nullptr, &result);
EXPECT_TRUE(status.ok()) << status;
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/parse_example/example_proto_fast_parsing.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/example_proto_fast_parsing_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3afdaf8b-caec-4d41-8aa6-2983d81f5758 | cpp | tensorflow/tensorflow | coordination_service_barrier_proxy | tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.cc | tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy_test.cc | #include "tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/lib/traceme_encode.h"
namespace tensorflow {
// Blocks the calling thread at the barrier.  Returns {status, last_exit}:
// `status` is the shared barrier outcome observed by every participating
// thread, and `last_exit` is true for exactly one caller — the last thread
// to leave — signalling the owner that this proxy may be removed.
//
// The last local thread to arrive triggers the cross-task agent barrier
// (skipped when there is only one task); earlier arrivals block on the
// condition variable until the status is published or `timeout_` elapses.
std::pair<Status, bool> BarrierProxy::Wait() {
  mutex_lock l(mu_);
  if (status_set_) {
    // The barrier already resolved; late callers get an error.
    return std::make_pair(
        absl::FailedPreconditionError(absl::StrCat(
            "The barrier has already passed or timed out. key=", key_)),
        false);
  }
  if (num_entered_ >= num_local_threads_) {
    return std::make_pair(absl::FailedPreconditionError(absl::StrCat(
                              "Wait() called too many (>", num_local_threads_,
                              ") times. key=", key_)),
                          false);
  }
  ++num_entered_;
  ++num_to_exit_;
  VLOG(1) << "BarrierProxy " << key_ << " enter: num_entered_=" << num_entered_
          << ", num_to_exit_=" << num_to_exit_;
  if (num_entered_ == num_local_threads_) {
    // Last local arrival performs the (blocking) cross-task wait while
    // holding mu_; the other local threads are parked on cv_.
    if (tasks_.size() != 1) {
      tsl::profiler::TraceMe traceme("BarrierProxy::Wait::WaitAtBarrier");
      status_ = agent_->WaitAtBarrier(key_, timeout_, tasks_);
    } else {
      // Single-task job: nothing to coordinate across tasks.
      status_ = absl::OkStatus();
    }
    status_set_ = true;
    cv_.notify_all();
  } else if (WaitForMilliseconds(&l, &cv_, timeout_ / absl::Milliseconds(1)) ==
             kCond_Timeout) {
    // Duration is converted to a millisecond count by the division above.
    if (!status_set_) {
      if (tasks_.size() != 1) {
        // Best-effort cancellation of the pending cross-task barrier.
        agent_->CancelBarrier(key_).IgnoreError();
      }
      status_ = absl::DeadlineExceededError(
          absl::StrCat("BarrierProxy timeout: key=", key_));
      status_set_ = true;
      cv_.notify_all();
    }
  } else {
    // Non-timeout wakeup: the status must already have been published.
    CHECK(status_set_);
  }
  --num_to_exit_;
  // Fixed log label: this message is emitted on exit (was "enter").
  VLOG(1) << "BarrierProxy " << key_ << " exit: num_entered_=" << num_entered_
          << ", num_to_exit_=" << num_to_exit_;
  return std::make_pair(status_, num_to_exit_ == 0);
}
// Returns the number of barrier keys currently in flight (used by tests to
// verify proxies are cleaned up).
size_t BarrierProxyManager::size() const {
  mutex_lock l(mu_);
  return barriers_.size();
}
// Waits at the barrier identified by `key` on behalf of one local thread.
// A shared BarrierProxy is created on first use of a key and found by later
// arrivals; the last thread to leave erases it from the map.  Fast path:
// returns OK immediately when there is nothing to synchronize (single task,
// at most one local thread).
Status BarrierProxyManager::Wait(tsl::CoordinationServiceAgent* agent,
                                 const std::vector<CoordinatedTask>& tasks,
                                 int num_local_threads, absl::string_view key,
                                 absl::Duration timeout) {
  if (tasks.size() == 1 && num_local_threads <= 1) return absl::OkStatus();
  tsl::profiler::TraceMe traceme([&] {
    return tsl::profiler::TraceMeEncode(
        "BarrierProxyManager::Wait",
        {
            {"num_tasks", tasks.size()},
            {"num_local_threads", num_local_threads},
        });
  });
  std::shared_ptr<BarrierProxy> barrier;
  {
    // Create-or-lookup under the lock; the (possibly long) wait itself
    // happens outside it so other keys are not blocked.
    mutex_lock l(mu_);
    auto [iter, inserted] = barriers_.try_emplace(key);
    if (inserted) {
      iter->second = std::make_shared<BarrierProxy>(
          agent, tasks, num_local_threads, key, timeout);
      VLOG(1) << "BarrierProxy key=" << key << " created.";
    }
    barrier = iter->second;
  }
  CHECK(barrier);
  auto [status, last_exit] = barrier->Wait();
  if (last_exit) {
    // Exactly one thread observes last_exit and removes the proxy.
    mutex_lock l(mu_);
    barriers_.erase(key);
    VLOG(1) << "BarrierProxy key=" << key << " removed.";
  }
  return status;
}
} | #include "tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.h"
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "xla/tsl/distributed_runtime/call_options.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_client.h"
#include "xla/tsl/distributed_runtime/coordination/coordination_service_agent.h"
#include "xla/tsl/protobuf/coordination_config.pb.h"
#include "xla/tsl/protobuf/coordination_service.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/threadpool.h"
namespace tensorflow {
namespace {
using ::testing::_;
using ::testing::Return;
using tsl::CallOptions;
using tsl::CoordinationClient;
using tsl::CoordinationServiceAgent;
class MockCoordinationServiceAgent : public CoordinationServiceAgent {
public:
MOCK_METHOD(Status, WaitAtBarrier,
(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks),
(override));
MOCK_METHOD(Status, CancelBarrier, (std::string_view barrier_id), (override));
MOCK_METHOD(Status, Initialize,
(Env * env, std::string_view job_name, int task_id,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn),
(override));
MOCK_METHOD(Status, Initialize,
(Env * env, const CoordinatedTask& task,
const CoordinationServiceConfig& configs,
std::unique_ptr<CoordinationClient> leader_client,
StatusCallback error_fn),
(override));
MOCK_METHOD(bool, IsInitialized, (), (override));
MOCK_METHOD(bool, IsConnected, (), (override));
MOCK_METHOD(bool, IsError, (), (override));
MOCK_METHOD(Status, Connect, (), (override));
MOCK_METHOD(Status, WaitForAllTasks, (const DeviceInfo& local_devices),
(override));
MOCK_METHOD(const DeviceInfo&, GetClusterDeviceInfo, (), (override));
MOCK_METHOD(absl::StatusOr<CoordinatedTask>, GetOwnTask, (), (override));
MOCK_METHOD(absl::StatusOr<std::vector<CoordinatedTaskStateInfo>>,
GetTaskState, (const std::vector<CoordinatedTask>& task),
(override));
MOCK_METHOD(Status, ReportError, (const Status& error), (override));
MOCK_METHOD(Status, Shutdown, (), (override));
MOCK_METHOD(Status, Reset, (), (override));
MOCK_METHOD(absl::StatusOr<std::string>, GetKeyValue, (std::string_view key),
(override));
MOCK_METHOD(absl::StatusOr<std::string>, GetKeyValue,
(std::string_view key, absl::Duration timeout), (override));
MOCK_METHOD(std::shared_ptr<CallOptions>, GetKeyValueAsync,
(std::string_view key, StatusOrValueCallback done), (override));
MOCK_METHOD(absl::StatusOr<std::string>, TryGetKeyValue,
(std::string_view key), (override));
MOCK_METHOD(absl::StatusOr<std::vector<KeyValueEntry>>, GetKeyValueDir,
(std::string_view key), (override));
MOCK_METHOD(void, GetKeyValueDirAsync,
(std::string_view key, StatusOrValueDirCallback done),
(override));
MOCK_METHOD(Status, InsertKeyValue,
(std::string_view key, std::string_view value), (override));
MOCK_METHOD(Status, InsertKeyValue,
(std::string_view key, std::string_view value,
bool allow_overwrite),
(override));
MOCK_METHOD(Status, DeleteKeyValue, (std::string_view key), (override));
MOCK_METHOD(Status, UpdateKeyValue,
(std::string_view key, std::string_view value), (override));
MOCK_METHOD(Status, StartWatchKey,
(std::string_view key, ChangedKeyValuesCallback on_change),
(override));
MOCK_METHOD(Status, StopWatchKey, (std::string_view key), (override));
MOCK_METHOD(void, WaitAtBarrierAsync,
(std::string_view barrier_id, absl::Duration timeout,
const std::vector<CoordinatedTask>& tasks, StatusCallback done),
(override));
MOCK_METHOD(void, CancelBarrierAsync,
(std::string_view barrier_id, StatusCallback done), (override));
MOCK_METHOD(absl::StatusOr<Env*>, GetEnv, (), (override));
MOCK_METHOD(void, SetError, (const Status& error), (override));
MOCK_METHOD(Status, ActivateWatch,
(std::string_view key,
(const std::map<std::string, std::string>&)),
(override));
};
constexpr auto kTestKey = "test_key";
constexpr auto kTestTimeout = absl::Seconds(1);
const int kThreadPoolSize = 32;
// Drives `num_threads_entered` threads into a BarrierProxy configured for
// `num_threads_planned` local threads and `num_tasks` tasks, then checks:
//  - exactly `expected_ok_count` threads observed an OK status, and
//  - exactly one thread was reported as the last to exit.
// `agent_wait_status` is what the mocked agent barrier returns; nullopt
// means the agent must not be called at all (single-task or timeout cases).
// If `expected_same_exit_status_for_all_threads` is set, every thread must
// observe exactly that status.
void TestBarrierProxyWait(
    int num_tasks, int num_threads_planned, int num_threads_entered,
    int expected_ok_count, std::optional<Status> agent_wait_status,
    std::optional<Status> expected_same_exit_status_for_all_threads) {
  auto agent = std::make_unique<MockCoordinationServiceAgent>();
  const std::vector<CoordinatedTask> tasks(num_tasks);
  BarrierProxy barrier(agent.get(), tasks, num_threads_planned, kTestKey,
                       kTestTimeout);
  std::atomic<int> last_exit_count = 0;
  std::atomic<int> actual_ok_count = 0;
  if (agent_wait_status.has_value()) {
    EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _))
        .WillOnce(Return(agent_wait_status.value()));
  } else {
    EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _)).Times(0);
  }
  {
    // Scope ensures the pool joins all workers before counters are checked.
    thread::ThreadPool pool(Env::Default(), "TestPool",
                            kThreadPoolSize);
    for (int i = 0; i < num_threads_entered; ++i) {
      pool.Schedule([&]() {
        auto [status, last_exit] = barrier.Wait();
        if (expected_same_exit_status_for_all_threads.has_value()) {
          ASSERT_EQ(status, expected_same_exit_status_for_all_threads.value());
        }
        actual_ok_count += status.ok();
        last_exit_count += last_exit;
      });
    }
  }
  ASSERT_EQ(actual_ok_count, expected_ok_count);
  ASSERT_EQ(last_exit_count, 1);
}
TEST(BarrierProxyTest, AllThreadsExitBarrier) {
TestBarrierProxyWait(
2,
8,
8,
8,
absl::OkStatus(),
absl::OkStatus());
}
TEST(BarrierProxyTest, AgentErrorBroadcastedToAllThreads) {
TestBarrierProxyWait(
2,
8,
8,
0,
errors::Internal(""),
errors::Internal(""));
}
TEST(BarrierProxyTest, AgentIsIgnoredIfThereIsOnlyOneTask) {
TestBarrierProxyWait(
1,
8,
8,
8,
{},
absl::OkStatus());
}
TEST(BarrierProxyTest, TimeoutIfNotEnoughThreadEntered) {
TestBarrierProxyWait(
2,
8,
7,
0,
{},
errors::DeadlineExceeded("BarrierProxy timeout: key=", kTestKey));
}
TEST(BarrierProxyTest, ExtraThreadsEnteringTheBarrierGetErrors) {
TestBarrierProxyWait(
2,
8,
10,
8,
absl::OkStatus(),
{});
}
void TestBarrierProxyManagerWaitSingleKey(
int num_threads_planned, int num_threads_entered,
std::optional<Status> agent_wait_status, int expected_ok_count) {
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks;
BarrierProxyManager mgr;
std::atomic<int> actual_ok_count = 0;
if (agent_wait_status.has_value()) {
EXPECT_CALL(*agent, WaitAtBarrier(kTestKey, kTestTimeout, _))
.WillOnce(Return(agent_wait_status.value()));
}
{
thread::ThreadPool pool(Env::Default(), "TestPool",
num_threads_planned);
for (int i = 0; i < num_threads_entered; ++i) {
pool.Schedule([&]() {
actual_ok_count += mgr.Wait(agent.get(), tasks, num_threads_planned,
kTestKey, kTestTimeout)
.ok();
});
}
}
ASSERT_EQ(actual_ok_count, expected_ok_count);
ASSERT_EQ(mgr.size(), 0);
}
TEST(BarrierProxyManagerTest, AllThreadExited) {
TestBarrierProxyManagerWaitSingleKey(
8,
8,
absl::OkStatus(),
8);
}
TEST(BarrierProxyManagerTest, AllThreadTimedOut) {
TestBarrierProxyManagerWaitSingleKey(
8,
7,
{},
0);
}
TEST(BarrierProxyManagerTest, CoordinationServiceError) {
TestBarrierProxyManagerWaitSingleKey(
8,
8,
errors::Internal(""),
0);
}
TEST(BarrierProxyManagerTest, ExtraThreadsEnteringTheSameKeyGetErrors) {
TestBarrierProxyManagerWaitSingleKey(
8,
10,
absl::OkStatus(),
8);
}
TEST(BarrierProxyManagerTest, DifferentKeysDoNotInterfereWithEachOther) {
constexpr int kNumThreads = 8;
auto agent = std::make_unique<MockCoordinationServiceAgent>();
const std::vector<CoordinatedTask> tasks;
BarrierProxyManager mgr;
EXPECT_CALL(*agent, WaitAtBarrier("key0", kTestTimeout, _))
.WillOnce(Return(absl::OkStatus()));
EXPECT_CALL(*agent, WaitAtBarrier("key1", kTestTimeout, _))
.WillOnce(Return(absl::OkStatus()));
{
thread::ThreadPool pool(Env::Default(), "TestPool",
kThreadPoolSize);
for (int i = 0; i < kNumThreads * 2; ++i) {
pool.Schedule([&, key = absl::StrCat("key", i % 2)]() {
ASSERT_EQ(mgr.Wait(agent.get(), tasks, kNumThreads, key, kTestTimeout),
absl::OkStatus());
});
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/coordination/coordination_service_barrier_proxy_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9f1540d2-c47f-4247-95d8-8101f4e8582d | cpp | google/arolla | string | arolla/util/string.cc | arolla/util/string_test.cc | #include "arolla/util/string.h"
#include <cstddef>
#include <string>
#include "absl/log/check.h"
namespace arolla {
std::string Truncate(std::string str, size_t max_length) {
DCHECK_GT(max_length, 3);
if (str.size() > max_length) {
str.resize(max_length);
str.replace(max_length - 3, 3, "...");
}
return str;
}
} | #include "arolla/util/string.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace arolla {
namespace {
using ::testing::Eq;
TEST(StringTest, Truncate) {
EXPECT_THAT(Truncate("", 7), Eq(""));
EXPECT_THAT(Truncate("fifty seven", 7), Eq("fift..."));
EXPECT_THAT(Truncate("fifty seven", 10), Eq("fifty s..."));
EXPECT_THAT(Truncate("fifty seven", 11), Eq("fifty seven"));
EXPECT_THAT(Truncate("fifty seven", 20), Eq("fifty seven"));
}
TEST(StringTest, IsQualifiedIdentifier) {
static_assert(IsQualifiedIdentifier("foo"));
static_assert(!IsQualifiedIdentifier(".bar"));
static_assert(!IsQualifiedIdentifier("0.bar"));
static_assert(!IsQualifiedIdentifier("9.bar"));
static_assert(!IsQualifiedIdentifier("-.bar"));
static_assert(IsQualifiedIdentifier("_.bar"));
static_assert(IsQualifiedIdentifier("A.bar"));
static_assert(IsQualifiedIdentifier("Z.bar"));
static_assert(IsQualifiedIdentifier("a.bar"));
static_assert(IsQualifiedIdentifier("z.bar"));
static_assert(IsQualifiedIdentifier("_0.bar"));
static_assert(IsQualifiedIdentifier("_9.bar"));
static_assert(!IsQualifiedIdentifier("_-.bar"));
static_assert(IsQualifiedIdentifier("__.bar"));
static_assert(IsQualifiedIdentifier("_A.bar"));
static_assert(IsQualifiedIdentifier("_Z.bar"));
static_assert(IsQualifiedIdentifier("_a.bar"));
static_assert(IsQualifiedIdentifier("_z.bar"));
static_assert(!IsQualifiedIdentifier("foo..bar"));
static_assert(!IsQualifiedIdentifier("foo.0.bar"));
static_assert(!IsQualifiedIdentifier("foo.9.bar"));
static_assert(!IsQualifiedIdentifier("foo.-.bar"));
static_assert(IsQualifiedIdentifier("foo._.bar"));
static_assert(IsQualifiedIdentifier("foo.A.bar"));
static_assert(IsQualifiedIdentifier("foo.Z.bar"));
static_assert(IsQualifiedIdentifier("foo.a.bar"));
static_assert(IsQualifiedIdentifier("foo.z.bar"));
static_assert(IsQualifiedIdentifier("foo._0.bar"));
static_assert(IsQualifiedIdentifier("foo._9.bar"));
static_assert(!IsQualifiedIdentifier("foo._-.bar"));
static_assert(IsQualifiedIdentifier("foo.__.bar"));
static_assert(IsQualifiedIdentifier("foo._A.bar"));
static_assert(IsQualifiedIdentifier("foo._Z.bar"));
static_assert(IsQualifiedIdentifier("foo._a.bar"));
static_assert(IsQualifiedIdentifier("foo._z.bar"));
static_assert(!IsQualifiedIdentifier("foo.bar."));
static_assert(IsQualifiedIdentifier("test.add"));
static_assert(IsQualifiedIdentifier("test.subtest.add"));
}
TEST(StringTest, NonFirstComma) {
bool first_call = true;
EXPECT_EQ(NonFirstComma(first_call), "");
EXPECT_FALSE(first_call);
EXPECT_EQ(NonFirstComma(first_call), ", ");
EXPECT_FALSE(first_call);
}
TEST(StringTest, ContainerAccessString) {
EXPECT_EQ(ContainerAccessString("bar"), ".bar");
EXPECT_EQ(ContainerAccessString("bar.baz"), "['bar.baz']");
EXPECT_EQ(ContainerAccessString(""), "['']");
}
TEST(StringTest, starts_with) {
constexpr bool compile_time_true = starts_with("", "");
EXPECT_TRUE(compile_time_true);
constexpr bool compile_time_false = starts_with("foo", "bar");
EXPECT_FALSE(compile_time_false);
EXPECT_TRUE(starts_with("", ""));
EXPECT_TRUE(starts_with("Hello, World!", "Hello"));
EXPECT_TRUE(starts_with("Hello, World!", "Hello, World!"));
EXPECT_FALSE(starts_with("Hello, World!", "Hello, World! "));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/string.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/string_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ae9adf8a-7c8c-424b-ab3f-557b514e76f7 | cpp | tensorflow/tensorflow | manip_grad | tensorflow/cc/gradients/manip_grad.cc | tensorflow/cc/gradients/manip_grad_test.cc | #include "tensorflow/cc/framework/grad_op_registry.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/ops/manip_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
namespace tensorflow {
namespace ops {
namespace {
Status RollGrad(const Scope& scope, const Operation& op,
const std::vector<Output>& grad_inputs,
std::vector<Output>* grad_outputs) {
auto shift = op.input(1);
auto axis = op.input(2);
auto grad_op = Roll(scope, grad_inputs[0], Neg(scope, shift), axis);
grad_outputs->push_back(grad_op);
grad_outputs->push_back(NoGradient());
grad_outputs->push_back(NoGradient());
return scope.status();
}
REGISTER_GRADIENT_OP("Roll", RollGrad);
}
}
} | #include "tensorflow/cc/framework/gradient_checker.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/manip_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
using ops::Placeholder;
using ops::Roll;
class ManipGradTest : public ::testing::Test {
protected:
ManipGradTest() : scope_(Scope::NewRootScope()) {}
void RunTest(const Output& x, const TensorShape& x_shape, const Output& y,
const TensorShape& y_shape) {
TF_ASSERT_OK(scope_.status());
float max_error;
TF_ASSERT_OK((ComputeGradientError<float, float, float>(
scope_, {x}, {x_shape}, {y}, {y_shape}, &max_error)));
EXPECT_LT(max_error, 1e-4);
}
Scope scope_;
};
TEST_F(ManipGradTest, RollGrad) {
TensorShape shape({5, 4, 3});
auto x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(shape));
auto y = Roll(scope_, x, {2, 1}, {0, 1});
RunTest(x, shape, y, shape);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/manip_grad.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/gradients/manip_grad_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c98c390d-dd6d-4bfc-9171-631eabaf0891 | cpp | tensorflow/tensorflow | op_converter | tensorflow/compiler/tf2tensorrt/convert/op_converter.h | tensorflow/compiler/tf2tensorrt/convert/op_converter_test.cc | #ifndef TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OP_CONVERTER_H_
#define TENSORFLOW_COMPILER_TF2TENSORRT_CONVERT_OP_CONVERTER_H_
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include <memory>
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/tf2tensorrt/convert/trt_parameters.h"
#include "tensorflow/compiler/tf2tensorrt/convert/weights.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
class Converter;
enum class TrtInputArg { kTensor = 1, kWeight = 2, kBoth = 3, kResource = 4 };
struct OpConverterParams {
OpConverterParams(const NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs,
TrtWeightStore* weight_store,
TrtPrecisionMode precision_mode, bool use_calibration,
bool use_implicit_batch, bool use_explicit_precision);
OpConverterParams(Converter* converter, const NodeDef& node_def,
const std::vector<TRT_TensorOrWeights>& inputs,
std::vector<TRT_TensorOrWeights>* outputs,
TrtWeightStore* weight_store);
Converter* converter = nullptr;
const NodeDef& node_def;
const std::vector<TRT_TensorOrWeights>& inputs;
std::vector<TRT_TensorOrWeights>* outputs;
const bool validation_only;
TrtWeightStore* weight_store;
const TrtPrecisionMode precision_mode;
const bool use_calibration;
const bool use_implicit_batch;
const bool use_explicit_precision;
};
using OpConverter = std::function<Status(const OpConverterParams*)>;
struct InputArgSpec {
absl::string_view name;
TrtInputArg allowed_roles;
static constexpr InputArgSpec Create(absl::string_view n, TrtInputArg role) {
return InputArgSpec{n, role};
}
};
template <typename T>
std::string convert_not_supported_dtype_msg(const T& allowed_types,
DataType tf_type,
const NodeDef& node) {
string allowed_types_string =
absl::StrJoin(allowed_types, ", ", [](string* out, const DataType& type) {
absl::StrAppendFormat(out, "%s", DataTypeString(type));
});
return absl::StrCat("Data type ", DataTypeString(tf_type),
" is not supported for ", node.op(), ", must be one of [",
allowed_types_string, "]");
}
std::string convert_not_supported_implicit(const std::string& pOpName,
const std::string& pNodeName,
const char* pOpType = NULL);
template <typename Impl>
class OpConverterBase {
public:
explicit OpConverterBase(const OpConverterParams* params,
const std::vector<DataType>& data_types =
{DataType::DT_FLOAT, DataType::DT_HALF})
: params_(params),
node_def_attrs_(params->node_def),
allowed_dtypes_(data_types) {}
static constexpr const char* NodeDefDataTypeAttributeName() { return "T"; }
Status ValidateNodeDefDataType() {
if (absl::string_view(Impl::NodeDefDataTypeAttributeName()).empty()) {
return OkStatus();
}
auto dtype = GetAttrValue<DataType>(Impl::NodeDefDataTypeAttributeName());
if (!dtype.ok()) {
return errors::InvalidArgument("Attribute with name ",
Impl::NodeDefDataTypeAttributeName(),
" not found.");
}
if (std::find(allowed_dtypes_.begin(), allowed_dtypes_.end(), *dtype) ==
allowed_dtypes_.end()) {
return errors::Unimplemented(convert_not_supported_dtype_msg(
allowed_dtypes_, *dtype, params_->node_def));
}
return OkStatus();
}
static constexpr bool HasFixNumberOfInputs() { return true; }
Status ValidateInputs() {
const NodeDef& node_def = params_->node_def;
const auto& inputs = params_->inputs;
if (Impl::HasFixNumberOfInputs()) {
TRT_ENSURE(inputs.size() == Impl::InputSpec().size());
} else {
TRT_ENSURE(inputs.size() <= Impl::InputSpec().size());
}
for (int i = 0; i < inputs.size(); i++) {
const InputArgSpec arg_spec = Impl::InputSpec()[i];
if (arg_spec.allowed_roles == TrtInputArg::kWeight &&
inputs.at(i).is_tensor()) {
return errors::Unimplemented("The input \"", arg_spec.name, "\" for ",
node_def.op(), " must be a constant, at ",
node_def.name());
}
if (arg_spec.allowed_roles == TrtInputArg::kTensor &&
inputs.at(i).is_weights()) {
return errors::Unimplemented("The input \"", arg_spec.name, "\" for ",
node_def.op(), " must be a tensor, at ",
node_def.name());
}
}
return OkStatus();
}
Status operator()() {
TF_RETURN_IF_ERROR(this->ValidateNodeDefDataType());
TF_RETURN_IF_ERROR(this->ValidateInputs());
TF_RETURN_IF_ERROR(reinterpret_cast<Impl*>(this)->Validate());
if (params_->validation_only) {
return OkStatus();
}
return reinterpret_cast<Impl*>(this)->Convert();
}
protected:
Status NotSupportedInImplicitBatch(const char* pOpType = nullptr) {
if (params_->use_implicit_batch) {
const auto& op = params_->node_def.op();
const auto& nodeName = params_->node_def.name();
const auto& error = convert_not_supported_implicit(op, nodeName, pOpType);
return errors::Unimplemented(error);
}
return OkStatus();
}
void AddOutput(const TRT_TensorOrWeights& out) {
params_->outputs->push_back(out);
}
template <typename T>
StatusOr<T> GetAttrValue(absl::string_view key) const {
T result;
TF_RETURN_IF_ERROR(GetNodeAttr(node_def_attrs_, key, &result));
return result;
}
const OpConverterParams* const params_;
const AttrSlice node_def_attrs_;
const std::vector<DataType> allowed_dtypes_;
};
template <typename T>
OpConverter MakeConverterFunction() {
return [](const OpConverterParams* params) -> Status {
T converter(params);
return converter();
};
}
}
}
}
#endif
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h"
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
using ::testing::HasSubstr;
class ExampleOpConverter : public OpConverterBase<ExampleOpConverter> {
public:
explicit ExampleOpConverter(const OpConverterParams* params)
: OpConverterBase<ExampleOpConverter>(params, {DataType::DT_FLOAT}) {}
static constexpr const char* NodeDefDataTypeAttributeName() {
return "data_type";
}
static constexpr std::array<InputArgSpec, 2> InputSpec() {
return std::array<InputArgSpec, 2>{
InputArgSpec::Create("input_tensor", TrtInputArg::kTensor),
InputArgSpec::Create("weight", TrtInputArg::kWeight)};
}
Status Validate() { return OkStatus(); }
Status Convert() {
AddOutput(TRT_TensorOrWeights(nvinfer1::DataType::kFLOAT,
nvinfer1::Dims{1, {1, 1, 1}}, 1));
return OkStatus();
}
};
TEST(TestOpConverterBase, TestOpConverterBase) {
GetOpConverterRegistry()->Register(
"FakeFunc", 1, MakeConverterFunction<ExampleOpConverter>());
NodeDef def;
def.set_op("FakeFunc");
auto converter = Converter::Create(TrtPrecisionMode::FP32, false,
Logger::GetLogger(), false, "test_engine");
EXPECT_THAT(converter, IsOk());
Status conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(conversion_status,
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("Attribute with name data_type not found")));
def.mutable_input()->Add("input1");
conversion_status = (*converter)
->AddInputTensor("input1", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims{4, {1, 1, 1, 1}}, 1);
EXPECT_THAT(conversion_status, IsOk());
AddNodeAttr("data_type", DT_FLOAT, &def);
conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(conversion_status, StatusIs(error::INTERNAL));
def.mutable_input()->Add("input2");
conversion_status = (*converter)
->AddInputTensor("input2", nvinfer1::DataType::kFLOAT,
nvinfer1::Dims{4, {1, 1, 1, 1}}, 1);
EXPECT_THAT(conversion_status, IsOk());
conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(
conversion_status,
StatusIs(error::UNIMPLEMENTED,
HasSubstr("input \"weight\" for FakeFunc must be a constant")));
(*converter)->TensorsMap().erase("input2");
(*converter)
->TensorsMap()
.insert(std::make_pair("input2", TRT_TensorOrWeights(TRT_ShapedWeights(
nvinfer1::DataType::kFLOAT))));
conversion_status = (*converter)->ConvertNode(def);
EXPECT_THAT(conversion_status, IsOk());
EXPECT_EQ((*converter)->TensorsMap().size(), 3U);
GetOpConverterRegistry()->Clear("FakeFunc");
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dcdef421-b677-448b-ba64-e6868f89a248 | cpp | abseil/abseil-cpp | stack_consumption | absl/debugging/internal/stack_consumption.cc | absl/debugging/internal/stack_consumption_test.cc | #include "absl/debugging/internal/stack_consumption.h"
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include "absl/base/attributes.h"
#include "absl/base/internal/raw_logging.h"
#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
#if defined(__i386__) || defined(__x86_64__) || defined(__ppc__) || \
defined(__aarch64__) || defined(__riscv)
constexpr bool kStackGrowsDown = true;
#else
#error Need to define kStackGrowsDown
#endif
void EmptySignalHandler(int) {}
constexpr int kAlternateStackSize = 64 << 10;
constexpr int kSafetyMargin = 32;
constexpr char kAlternateStackFillValue = 0x55;
int GetStackConsumption(const void* const altstack) {
const char* begin;
int increment;
if (kStackGrowsDown) {
begin = reinterpret_cast<const char*>(altstack);
increment = 1;
} else {
begin = reinterpret_cast<const char*>(altstack) + kAlternateStackSize - 1;
increment = -1;
}
for (int usage_count = kAlternateStackSize; usage_count > 0; --usage_count) {
if (*begin != kAlternateStackFillValue) {
ABSL_RAW_CHECK(usage_count <= kAlternateStackSize - kSafetyMargin,
"Buffer has overflowed or is about to overflow");
return usage_count;
}
begin += increment;
}
ABSL_RAW_LOG(FATAL, "Unreachable code");
return -1;
}
}
int GetSignalHandlerStackConsumption(void (*signal_handler)(int)) {
void* altstack = mmap(nullptr, kAlternateStackSize, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
ABSL_RAW_CHECK(altstack != MAP_FAILED, "mmap() failed");
stack_t sigstk;
memset(&sigstk, 0, sizeof(sigstk));
sigstk.ss_sp = altstack;
sigstk.ss_size = kAlternateStackSize;
sigstk.ss_flags = 0;
stack_t old_sigstk;
memset(&old_sigstk, 0, sizeof(old_sigstk));
ABSL_RAW_CHECK(sigaltstack(&sigstk, &old_sigstk) == 0,
"sigaltstack() failed");
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
struct sigaction old_sa1, old_sa2;
sigemptyset(&sa.sa_mask);
sa.sa_flags = SA_ONSTACK;
sa.sa_handler = EmptySignalHandler;
ABSL_RAW_CHECK(sigaction(SIGUSR1, &sa, &old_sa1) == 0, "sigaction() failed");
sa.sa_handler = signal_handler;
ABSL_RAW_CHECK(sigaction(SIGUSR2, &sa, &old_sa2) == 0, "sigaction() failed");
ABSL_RAW_CHECK(kill(getpid(), SIGUSR1) == 0, "kill() failed");
memset(altstack, kAlternateStackFillValue, kAlternateStackSize);
ABSL_RAW_CHECK(kill(getpid(), SIGUSR1) == 0, "kill() failed");
int base_stack_consumption = GetStackConsumption(altstack);
ABSL_RAW_CHECK(kill(getpid(), SIGUSR2) == 0, "kill() failed");
int signal_handler_stack_consumption = GetStackConsumption(altstack);
if (old_sigstk.ss_sp == nullptr && old_sigstk.ss_size == 0 &&
(old_sigstk.ss_flags & SS_DISABLE)) {
old_sigstk.ss_size = static_cast<size_t>(MINSIGSTKSZ);
}
ABSL_RAW_CHECK(sigaltstack(&old_sigstk, nullptr) == 0,
"sigaltstack() failed");
ABSL_RAW_CHECK(sigaction(SIGUSR1, &old_sa1, nullptr) == 0,
"sigaction() failed");
ABSL_RAW_CHECK(sigaction(SIGUSR2, &old_sa2, nullptr) == 0,
"sigaction() failed");
ABSL_RAW_CHECK(munmap(altstack, kAlternateStackSize) == 0, "munmap() failed");
if (signal_handler_stack_consumption != -1 && base_stack_consumption != -1) {
return signal_handler_stack_consumption - base_stack_consumption;
}
return -1;
}
}
ABSL_NAMESPACE_END
}
#else
#ifdef __APPLE__
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
extern const char kAvoidEmptyStackConsumptionLibraryWarning;
const char kAvoidEmptyStackConsumptionLibraryWarning = 0;
}
ABSL_NAMESPACE_END
}
#endif
#endif | #include "absl/debugging/internal/stack_consumption.h"
#ifdef ABSL_INTERNAL_HAVE_DEBUGGING_STACK_CONSUMPTION
#include <string.h>
#include "gtest/gtest.h"
#include "absl/log/log.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
static void SimpleSignalHandler(int signo) {
char buf[100];
memset(buf, 'a', sizeof(buf));
if (signo == 0) {
LOG(INFO) << static_cast<void*>(buf);
}
}
TEST(SignalHandlerStackConsumptionTest, MeasuresStackConsumption) {
EXPECT_GE(GetSignalHandlerStackConsumption(SimpleSignalHandler), 100);
}
}
}
ABSL_NAMESPACE_END
}
#endif | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/stack_consumption.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/internal/stack_consumption_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
6a2fe14c-ebdc-42e6-b9c5-437c37204503 | cpp | google/tensorstore | transform_rep | tensorstore/index_space/internal/transform_rep.cc | tensorstore/index_space/transform_rep_test.cc | #include "tensorstore/index_space/internal/transform_rep.h"
#include <memory>
#include <new>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/internal/dimension_labels.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_index_space {
namespace {
void FreeIndexArrayData(IndexArrayData* data) {
std::destroy_at(data);
std::free(data);
}
void CopyTrivialFields(TransformRep* source, TransformRep* dest) {
assert(dest->input_rank_capacity >= source->input_rank &&
dest->output_rank_capacity >= source->output_rank);
const DimensionIndex input_rank = dest->input_rank = source->input_rank;
dest->output_rank = source->output_rank;
std::copy_n(source->input_origin().begin(), input_rank,
dest->input_origin().begin());
std::copy_n(source->input_shape().begin(), input_rank,
dest->input_shape().begin());
dest->implicit_lower_bounds = source->implicit_lower_bounds;
dest->implicit_upper_bounds = source->implicit_upper_bounds;
}
}
void CopyInputLabels(TransformRep* source, TransformRep* dest, bool can_move) {
assert(dest->input_rank_capacity >= source->input_rank);
const DimensionIndex input_rank = source->input_rank;
if (can_move) {
std::copy_n(std::make_move_iterator(source->input_labels().begin()),
input_rank, dest->input_labels().begin());
} else {
std::copy_n(source->input_labels().begin(), input_rank,
dest->input_labels().begin());
}
}
void OutputIndexMap::SetConstant() {
if (method() == OutputIndexMethod::array) {
FreeIndexArrayData(&index_array_data());
}
value_ = 0;
}
void OutputIndexMap::SetSingleInputDimension(DimensionIndex input_dim) {
if (method() == OutputIndexMethod::array) {
FreeIndexArrayData(&index_array_data());
}
value_ = (input_dim << 1) | 1;
}
IndexArrayData& OutputIndexMap::SetArrayIndexing(DimensionIndex rank) {
IndexArrayData* data;
if (method() == OutputIndexMethod::array) {
data = &index_array_data();
if (data->rank_capacity >= rank) return *data;
SharedElementPointer<const Index> element_pointer =
std::move(data->element_pointer);
auto bounds = data->index_range;
std::destroy_at(data);
IndexArrayData* new_data = static_cast<IndexArrayData*>(
std::realloc(static_cast<void*>(data),
sizeof(IndexArrayData) + sizeof(Index) * rank));
if (new_data) data = new_data;
new (data) IndexArrayData;
data->element_pointer = std::move(element_pointer);
data->index_range = bounds;
if (!new_data) TENSORSTORE_THROW_BAD_ALLOC;
data->rank_capacity = rank;
} else {
data = static_cast<IndexArrayData*>(
std::malloc(sizeof(IndexArrayData) + sizeof(Index) * rank));
if (!data) {
TENSORSTORE_THROW_BAD_ALLOC;
}
new (data) IndexArrayData;
data->rank_capacity = rank;
}
value_ = reinterpret_cast<std::uintptr_t>(data);
return *data;
}
IndexArrayData& OutputIndexMap::SetArrayIndexing(DimensionIndex rank,
const IndexArrayData& other) {
assert(other.rank_capacity >= rank);
auto& data = SetArrayIndexing(rank);
data.element_pointer = other.element_pointer;
data.index_range = other.index_range;
std::memcpy(data.byte_strides, other.byte_strides, sizeof(Index) * rank);
return data;
}
void OutputIndexMap::Assign(DimensionIndex rank, const OutputIndexMap& other) {
if (other.method() == OutputIndexMethod::array) {
SetArrayIndexing(rank, other.index_array_data());
} else {
value_ = other.value_;
}
offset_ = other.offset_;
stride_ = other.stride_;
}
TransformRep::Ptr<> TransformRep::Allocate(
DimensionIndex input_rank_capacity, DimensionIndex output_rank_capacity) {
ABSL_CHECK(input_rank_capacity >= 0 && output_rank_capacity >= 0 &&
input_rank_capacity <= kMaxRank &&
output_rank_capacity <= kMaxRank);
const size_t total_size =
sizeof(TransformRep) +
sizeof(OutputIndexMap) * output_rank_capacity +
input_rank_capacity * (sizeof(Index) * 2 + sizeof(std::string));
char* base_ptr = static_cast<char*>(::operator new(total_size));
TransformRep* ptr =
new (base_ptr + sizeof(OutputIndexMap) * output_rank_capacity)
TransformRep;
ptr->reference_count.store(1, std::memory_order_relaxed);
ptr->input_rank_capacity = input_rank_capacity;
ptr->output_rank_capacity = output_rank_capacity;
std::uninitialized_default_construct_n(ptr->output_index_maps().begin(),
output_rank_capacity);
std::uninitialized_default_construct_n(ptr->input_labels().begin(),
input_rank_capacity);
return TransformRep::Ptr<>(ptr, internal::adopt_object_ref);
}
void DestroyLabelFields(TransformRep* ptr) {
std::destroy_n(ptr->input_labels().begin(), ptr->input_rank_capacity);
}
void TransformRep::Free(TransformRep* ptr) {
assert(ptr->reference_count == 0);
DestroyLabelFields(ptr);
std::destroy_n(ptr->output_index_maps().begin(), ptr->output_rank_capacity);
::operator delete(static_cast<void*>(ptr->output_index_maps().data()));
}
void CopyTransformRep(TransformRep* source, TransformRep* dest) {
assert(source != nullptr);
assert(dest != nullptr);
assert(dest->output_rank_capacity >= source->output_rank);
CopyTransformRepDomain(source, dest);
const DimensionIndex input_rank = source->input_rank;
const DimensionIndex output_rank = dest->output_rank = source->output_rank;
span<const OutputIndexMap> source_maps =
source->output_index_maps().first(output_rank);
span<OutputIndexMap> dest_maps = dest->output_index_maps().first(output_rank);
for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
dest_maps[output_dim].Assign(input_rank, source_maps[output_dim]);
}
}
void CopyTransformRepDomain(TransformRep* source, TransformRep* dest) {
assert(source != nullptr);
assert(dest != nullptr);
assert(dest->input_rank_capacity >= source->input_rank);
const DimensionIndex input_rank = dest->input_rank = source->input_rank;
std::copy_n(source->input_origin().begin(), input_rank,
dest->input_origin().begin());
std::copy_n(source->input_shape().begin(), input_rank,
dest->input_shape().begin());
dest->implicit_lower_bounds = source->implicit_lower_bounds;
dest->implicit_upper_bounds = source->implicit_upper_bounds;
std::copy_n(source->input_labels().begin(), input_rank,
dest->input_labels().begin());
}
void MoveTransformRep(TransformRep* source, TransformRep* dest) {
CopyTrivialFields(source, dest);
std::copy_n(std::make_move_iterator(source->output_index_maps().begin()),
source->output_rank, dest->output_index_maps().begin());
CopyInputLabels(source, dest, true);
}
void ResetOutputIndexMaps(TransformRep* ptr) {
auto output_index_maps = ptr->output_index_maps();
for (DimensionIndex output_dim = 0, output_rank = ptr->output_rank;
output_dim < output_rank; ++output_dim) {
output_index_maps[output_dim].SetConstant();
}
ptr->output_rank = 0;
}
TransformRep::Ptr<> MutableRep(TransformRep::Ptr<> ptr, bool domain_only) {
if (!ptr) return ptr;
if (ptr->is_unique()) {
if (domain_only) {
ResetOutputIndexMaps(ptr.get());
ptr->output_rank = 0;
}
return ptr;
}
if (domain_only) {
auto new_rep = TransformRep::Allocate(ptr->input_rank, 0);
CopyTransformRepDomain(ptr.get(), new_rep.get());
new_rep->output_rank = 0;
internal_index_space::DebugCheckInvariants(new_rep.get());
return new_rep;
} else {
auto new_rep = TransformRep::Allocate(ptr->input_rank, ptr->output_rank);
CopyTransformRep(ptr.get(), new_rep.get());
internal_index_space::DebugCheckInvariants(new_rep.get());
return new_rep;
}
}
TransformRep::Ptr<> NewOrMutableRep(TransformRep* ptr,
DimensionIndex input_rank_capacity,
DimensionIndex output_rank_capacity,
bool domain_only) {
assert(ptr);
if (ptr->input_rank_capacity >= input_rank_capacity &&
ptr->output_rank_capacity >= output_rank_capacity && ptr->is_unique()) {
if (domain_only) {
ResetOutputIndexMaps(ptr);
}
return TransformRep::Ptr<>(ptr);
} else {
return TransformRep::Allocate(input_rank_capacity,
domain_only ? 0 : output_rank_capacity);
}
}
bool IsDomainExplicitlyEmpty(TransformRep* ptr) {
DimensionSet implicit_dims = ptr->implicit_dimensions();
const Index* input_shape = ptr->input_shape().data();
for (DimensionIndex input_dim = 0, input_rank = ptr->input_rank;
input_dim < input_rank; ++input_dim) {
if (!implicit_dims[input_dim] && input_shape[input_dim] == 0) {
return true;
}
}
return false;
}
void ReplaceAllIndexArrayMapsWithConstantMaps(TransformRep* ptr) {
for (DimensionIndex output_dim = 0, output_rank = ptr->output_rank;
output_dim < output_rank; ++output_dim) {
auto& map = ptr->output_index_maps()[output_dim];
if (map.method() != OutputIndexMethod::array) continue;
map.SetConstant();
map.offset() = 0;
map.stride() = 0;
}
}
// Returns `true` if output index maps `a` and `b` are equivalent over
// `input_domain`.  For array maps, equality requires matching stride and
// index range plus element-wise equality of the two index arrays when both
// are viewed over `input_domain`.
bool AreIndexMapsEqual(const OutputIndexMap& a, const OutputIndexMap& b,
                       BoxView<> input_domain) {
  const auto method = a.method();
  // The mapping method and offset must agree regardless of kind.
  if (method != b.method() || a.offset() != b.offset()) return false;
  switch (method) {
    case OutputIndexMethod::constant:
      return true;
    case OutputIndexMethod::single_input_dimension:
      return a.input_dimension() == b.input_dimension() &&
             a.stride() == b.stride();
    case OutputIndexMethod::array: {
      const auto& index_array_data_a = a.index_array_data();
      const auto& index_array_data_b = b.index_array_data();
      if (a.stride() != b.stride()) return false;
      if (index_array_data_a.index_range != index_array_data_b.index_range) {
        return false;
      }
      // Compare the index arrays element-wise; `input_domain` supplies the
      // origin/shape over which both byte-strided arrays are interpreted.
      return ArrayView<const Index, dynamic_rank, offset_origin>(
                 index_array_data_a.element_pointer,
                 StridedLayoutView<dynamic_rank, offset_origin>(
                     input_domain.rank(), input_domain.origin().data(),
                     input_domain.shape().data(),
                     index_array_data_a.byte_strides)) ==
             ArrayView<const Index, dynamic_rank, offset_origin>(
                 index_array_data_b.element_pointer,
                 StridedLayoutView<dynamic_rank, offset_origin>(
                     input_domain.rank(), input_domain.origin().data(),
                     input_domain.shape().data(),
                     index_array_data_b.byte_strides));
    }
  }
  ABSL_UNREACHABLE();
}
// Returns `true` if `a` and `b` represent identical index domains (rank,
// bounds, implicit-bound bits, and labels).  Two null reps compare equal;
// a null and a non-null rep do not.
bool AreDomainsEqual(TransformRep* a, TransformRep* b) {
  if (!a || !b) return !a && !b;
  const DimensionIndex input_rank = a->input_rank;
  if (input_rank != b->input_rank) return false;
  if (a->input_domain(input_rank) != b->input_domain(input_rank)) {
    return false;
  }
  if (a->implicit_lower_bounds != b->implicit_lower_bounds) return false;
  if (a->implicit_upper_bounds != b->implicit_upper_bounds) return false;
  const auto labels_a = a->input_labels().first(input_rank);
  return std::equal(labels_a.begin(), labels_a.end(),
                    b->input_labels().begin());
}
// Returns `true` if `a` and `b` are fully equivalent transforms: equal
// domains plus equivalent output index maps for every output dimension.
bool AreEqual(TransformRep* a, TransformRep* b) {
  if (!AreDomainsEqual(a, b)) return false;
  if (!a) return true;  // Both null: equal domains implies both are null.
  const DimensionIndex output_rank = a->output_rank;
  if (output_rank != b->output_rank) return false;
  const BoxView<> domain = a->input_domain(a->input_rank);
  auto maps_a = a->output_index_maps().first(output_rank);
  auto maps_b = b->output_index_maps().first(output_rank);
  for (DimensionIndex i = 0; i < output_rank; ++i) {
    if (!AreIndexMapsEqual(maps_a[i], maps_b[i], domain)) {
      return false;
    }
  }
  return true;
}
// Prints a multi-line human-readable description of `transform` to `os`:
// ranks, the input domain (with labels), and each output index map.
// A null `transform` prints a placeholder string.
void PrintToOstream(std::ostream& os, TransformRep* transform) {
  if (!transform) {
    os << "<Invalid index space transform>";
    return;
  }
  const DimensionIndex input_rank = transform->input_rank;
  const DimensionIndex output_rank = transform->output_rank;
  os << "Rank " << transform->input_rank << " -> " << transform->output_rank
     << " index space transform:\n";
  os << " Input domain:\n";
  const BoxView<> input_domain = transform->input_domain(input_rank);
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    const auto d = transform->input_dimension(input_dim);
    os << " " << input_dim << ": " << d.optionally_implicit_domain();
    if (!d.label().empty()) {
      os << " " << QuoteString(d.label());
    }
    os << '\n';
  }
  span<const OutputIndexMap> maps =
      transform->output_index_maps().first(output_rank);
  // Scratch shape used to print index arrays; dimensions with a zero byte
  // stride are collapsed to extent 1.
  Index index_array_shape[kMaxRank];
  os << " Output index maps:\n";
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto& map = maps[output_dim];
    os << " out[" << output_dim << "] = " << map.offset();
    if (map.method() != OutputIndexMethod::constant) {
      os << " + " << map.stride() << " * ";
    }
    switch (map.method()) {
      case OutputIndexMethod::constant:
        break;
      case OutputIndexMethod::single_input_dimension:
        os << "in[" << map.input_dimension() << "]";
        break;
      case OutputIndexMethod::array: {
        const auto& index_array_data = map.index_array_data();
        for (DimensionIndex input_dim = 0; input_dim < input_rank;
             ++input_dim) {
          index_array_shape[input_dim] =
              index_array_data.byte_strides[input_dim] == 0
                  ? 1
                  : input_domain.shape()[input_dim];
        }
        // Offset the element pointer so the array is addressed relative to
        // the domain origin.
        ArrayView<const Index, dynamic_rank> index_array(
            AddByteOffset(
                ElementPointer<const Index>(index_array_data.element_pointer),
                IndexInnerProduct(input_rank, input_domain.origin().data(),
                                  index_array_data.byte_strides)),
            StridedLayoutView<>(input_rank, &index_array_shape[0],
                                index_array_data.byte_strides));
        os << "bounded(" << index_array_data.index_range
           << ", array(in)), where array =\n";
        os << " " << index_array;
        break;
      }
    }
    os << '\n';
  }
}
void PrintDomainToOstream(std::ostream& os, TransformRep* transform) {
if (!transform) {
os << "<invalid index domain>";
return;
}
os << "{ ";
for (DimensionIndex i = 0, rank = transform->input_rank; i < rank; ++i) {
if (i != 0) os << ", ";
const InputDimensionRef dim_ref = transform->input_dimension(i);
const IndexDomainDimension<view> d{dim_ref.optionally_implicit_domain(),
dim_ref.label()};
os << d;
}
os << " }";
}
// Computes the output index for `input_indices` under this map:
// `base * stride() + offset()`, where `base` is 0 (constant), the selected
// input index (single_input_dimension), or an index array lookup (array).
// Array lookups are validated against `index_range`.
Result<Index> OutputIndexMap::operator()(
    span<const Index> input_indices) const {
  Index base_output_index;
  switch (method()) {
    case OutputIndexMethod::constant:
      base_output_index = 0;
      break;
    case OutputIndexMethod::single_input_dimension: {
      const DimensionIndex input_dim = input_dimension();
      assert(input_dim >= 0 && input_dim < input_indices.size());
      base_output_index = input_indices[input_dim];
      break;
    }
    case OutputIndexMethod::array: {
      const IndexArrayData& data = index_array_data();
      assert(data.element_pointer &&
             input_indices.size() <= data.rank_capacity);
      // Gather the array element addressed by `input_indices` via the
      // per-dimension byte strides.
      base_output_index =
          data.element_pointer.byte_strided_pointer()[IndexInnerProduct(
              input_indices.size(), input_indices.data(), data.byte_strides)];
      TENSORSTORE_RETURN_IF_ERROR(
          CheckContains(data.index_range, base_output_index),
          MaybeAnnotateStatus(
              _, "Checking result of index array output index map"));
      break;
    }
  }
  return base_output_index * stride() + offset();
}
// Maps `input_indices` through `data`, writing results to `output_indices`.
// Returns OutOfRangeError if any input index lies outside its dimension's
// effective interval; propagates (annotated) errors from the per-output maps.
absl::Status TransformIndices(TransformRep* data,
                              span<const Index> input_indices,
                              span<Index> output_indices) {
  assert(data && data->input_rank == input_indices.size() &&
         data->output_rank == output_indices.size());
  const DimensionIndex output_rank = data->output_rank;
  const DimensionIndex input_rank = data->input_rank;
  span<const OutputIndexMap> output_index_maps =
      data->output_index_maps().first(output_rank);
  // Validate all inputs before computing any output.
  for (DimensionIndex i = 0; i < input_rank; ++i) {
    auto oi_interval = data->input_dimension(i).optionally_implicit_domain();
    if (!Contains(oi_interval.effective_interval(), input_indices[i])) {
      return absl::OutOfRangeError(tensorstore::StrCat(
          "Index ", input_indices[i], " is not contained in the domain ",
          oi_interval, " for input dimension ", i));
    }
  }
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    TENSORSTORE_ASSIGN_OR_RETURN(
        output_indices[output_dim],
        output_index_maps[output_dim](input_indices),
        MaybeAnnotateStatus(
            _, tensorstore::StrCat("Computing index for output dimension ",
                                   output_dim)));
  }
  return absl::OkStatus();
}
// Folds a rank-0 index array map whose single value is `index` into an
// equivalent constant map: `*output_offset += index * *output_stride`, then
// sets the stride to 0.  Fails if `index` is outside `bounds` or the
// arithmetic overflows; on failure the outputs are left unmodified except
// possibly `*output_offset` when AddOverflow partially wrote it.
absl::Status ReplaceZeroRankIndexArrayIndexMap(Index index,
                                               IndexInterval bounds,
                                               Index* output_offset,
                                               Index* output_stride) {
  TENSORSTORE_RETURN_IF_ERROR(CheckContains(bounds, index));
  Index new_offset;
  if (internal::MulOverflow(index, *output_stride, &new_offset) ||
      internal::AddOverflow(new_offset, *output_offset, output_offset)) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Integer overflow computing offset for output dimension."));
  }
  *output_stride = 0;
  return absl::OkStatus();
}
// Returns a new domain-only rep containing the dimensions of `rep` selected
// by `dims`, in the given order.  `dims` must hold distinct, valid dimension
// indices; this is checked only in debug builds.
TransformRep::Ptr<> GetSubDomain(TransformRep* rep,
                                 span<const DimensionIndex> dims) {
  assert(rep);
  [[maybe_unused]] const DimensionIndex old_rank = rep->input_rank;
  const DimensionIndex new_rank = dims.size();
  auto new_rep = TransformRep::Allocate(new_rank, 0);
  new_rep->output_rank = 0;
  new_rep->input_rank = new_rank;
#ifndef NDEBUG
  DimensionSet seen_dims;
#endif
  for (DimensionIndex new_dim = 0; new_dim < dims.size(); ++new_dim) {
    const DimensionIndex old_dim = dims[new_dim];
    assert(old_dim >= 0 && old_dim < old_rank);
#ifndef NDEBUG
    // Each source dimension may be selected at most once.
    assert(!seen_dims[old_dim]);
    seen_dims[old_dim] = true;
#endif
    new_rep->input_dimension(new_dim) = rep->input_dimension(old_dim);
  }
  return new_rep;
}
// Returns `true` if every label in `labels` is the empty string.
bool IsUnlabeled(span<const std::string> labels) {
  for (const std::string& label : labels) {
    if (!label.empty()) return false;
  }
  return true;
}
// Returns the set of input dimensions that are actually referenced (nonzero
// byte stride) by at least one array-method output index map of `transform`.
DimensionSet GetIndexArrayInputDimensions(TransformRep* transform) {
  DimensionSet result;
  const DimensionIndex input_rank = transform->input_rank;
  const DimensionIndex output_rank = transform->output_rank;
  auto maps = transform->output_index_maps();
  for (DimensionIndex out_dim = 0; out_dim < output_rank; ++out_dim) {
    auto& map = maps[out_dim];
    if (map.method() == OutputIndexMethod::array) {
      const auto& data = map.index_array_data();
      for (DimensionIndex in_dim = 0; in_dim < input_rank; ++in_dim) {
        if (data.byte_strides[in_dim] != 0) {
          result[in_dim] = true;
        }
      }
    }
  }
  return result;
}
// Returns `transform` with its implicit lower/upper bound bits replaced.
// Bits beyond `input_rank` are masked off.  When output index maps are
// retained (`domain_only == false`), dimensions referenced by index arrays
// are forced to remain explicit.
TransformRep::Ptr<> WithImplicitDimensions(TransformRep::Ptr<> transform,
                                           DimensionSet implicit_lower_bounds,
                                           DimensionSet implicit_upper_bounds,
                                           bool domain_only) {
  transform = MutableRep(std::move(transform), domain_only);
  if (!domain_only && (implicit_lower_bounds || implicit_upper_bounds)) {
    auto index_array_dims =
        internal_index_space::GetIndexArrayInputDimensions(transform.get());
    implicit_lower_bounds &= ~index_array_dims;
    implicit_upper_bounds &= ~index_array_dims;
  }
  const auto mask = DimensionSet::UpTo(transform->input_rank);
  transform->implicit_lower_bounds = implicit_lower_bounds & mask;
  transform->implicit_upper_bounds = implicit_upper_bounds & mask;
  return transform;
}
#ifndef NDEBUG
// Debug-only validation of every TransformRep invariant: rank/capacity
// relationships, implicit-bound masks, unique labels, valid input intervals,
// and per-method output index map constraints.
void DebugCheckInvariants(TransformRep* rep) {
  assert(rep);
  assert(rep->reference_count > 0);
  const DimensionIndex input_rank = rep->input_rank,
                       output_rank = rep->output_rank;
  assert(rep->input_rank_capacity <= kMaxRank);
  assert(rep->output_rank_capacity <= kMaxRank);
  assert(input_rank <= rep->input_rank_capacity);
  assert(output_rank <= rep->output_rank_capacity);
  assert(input_rank >= 0);
  assert(output_rank >= 0);
  // Implicit-bound bits must not be set beyond `input_rank`.
  const auto mask = DimensionSet::UpTo(rep->input_rank);
  assert((rep->implicit_lower_bounds & mask) == rep->implicit_lower_bounds);
  assert((rep->implicit_upper_bounds & mask) == rep->implicit_upper_bounds);
  TENSORSTORE_CHECK_OK(internal::ValidateDimensionLabelsAreUnique(
      rep->input_labels().first(input_rank)));
  auto input_origin = rep->input_origin().data();
  auto input_shape = rep->input_shape().data();
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    TENSORSTORE_CHECK_OK(
        IndexInterval::Sized(input_origin[input_dim], input_shape[input_dim]));
  }
  const bool is_domain_explicitly_empty = IsDomainExplicitlyEmpty(rep);
  const auto implicit_dims = rep->implicit_dimensions();
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    auto& map = rep->output_index_maps()[output_dim];
    switch (map.method()) {
      case OutputIndexMethod::constant: {
        assert(map.stride() == 0);
        break;
      }
      case OutputIndexMethod::single_input_dimension: {
        const DimensionIndex input_dim = map.input_dimension();
        assert(input_dim >= 0 && input_dim < input_rank);
        assert(map.stride() != 0);
        break;
      }
      case OutputIndexMethod::array: {
        assert(map.stride() != 0);
        const auto& index_array_data = map.index_array_data();
        assert(index_array_data.rank_capacity >= input_rank);
        assert(index_array_data.rank_capacity <= kMaxRank);
        assert(!is_domain_explicitly_empty);
        for (DimensionIndex input_dim = 0; input_dim < input_rank;
             ++input_dim) {
          const Index byte_stride = index_array_data.byte_strides[input_dim];
          if (byte_stride == 0) continue;
          // Dimensions actually indexed by an array must have finite,
          // explicit bounds.
          const auto bounds = IndexInterval::UncheckedSized(
              input_origin[input_dim], input_shape[input_dim]);
          assert(IsFinite(bounds));
          assert(!implicit_dims[input_dim]);
        }
        break;
      }
    }
  }
  // Unused map slots past `output_rank` must not hold index arrays.
  for (DimensionIndex output_dim = output_rank,
                      output_rank_capacity = rep->output_rank_capacity;
       output_dim < output_rank_capacity; ++output_dim) {
    assert(rep->output_index_maps()[output_dim].method() !=
           OutputIndexMethod::array);
  }
}
#endif
}
} | #include "tensorstore/index_space/internal/transform_rep.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/macros.h"
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/container_kind.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/internal/transform_rep_impl.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
// With exceptions enabled, allocation failure surfaces as std::bad_alloc;
// without exceptions the process is expected to die with "Out of memory".
#if ABSL_HAVE_EXCEPTIONS
#define TENSORSTORE_EXPECT_OOM(expr) EXPECT_THROW(expr, std::bad_alloc);
#else
#define TENSORSTORE_EXPECT_OOM(expr) EXPECT_DEATH(expr, "Out of memory");
#endif
using ::tensorstore::Box;
using ::tensorstore::DimensionIndex;
using ::tensorstore::Index;
using ::tensorstore::IndexInterval;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kInfIndex;
using ::tensorstore::kInfSize;
using ::tensorstore::kMaxFiniteIndex;
using ::tensorstore::kMinFiniteIndex;
using ::tensorstore::MatchesStatus;
using ::tensorstore::OutputIndexMethod;
using ::tensorstore::internal_index_space::CopyTransformRep;
using ::tensorstore::internal_index_space::MoveTransformRep;
using ::tensorstore::internal_index_space::MutableRep;
using ::tensorstore::internal_index_space::NewOrMutableRep;
using ::tensorstore::internal_index_space::OutputIndexMap;
using ::tensorstore::internal_index_space::ReplaceZeroRankIndexArrayIndexMap;
using ::tensorstore::internal_index_space::TransformAccess;
using ::tensorstore::internal_index_space::TransformRep;
using ::tensorstore::internal_index_space::ValidateAndIntersectBounds;
using ::tensorstore::internal_testing::TestConcurrent;
// Exercises OutputIndexMap method-state transitions (constant <->
// single_input_dimension <-> array) and verifies that index array state is
// preserved when capacity grows.  Assertion order matters: each step mutates
// the same `map`.
TEST(OutputIndexMapTest, Basic) {
  OutputIndexMap map;
  EXPECT_EQ(OutputIndexMethod::constant, map.method());
  map.SetSingleInputDimension(2);
  EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
  EXPECT_EQ(2, map.input_dimension());
  map.SetSingleInputDimension(3);
  EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
  EXPECT_EQ(3, map.input_dimension());
  map.SetConstant();
  EXPECT_EQ(OutputIndexMethod::constant, map.method());
  {
    auto& index_array_data = map.SetArrayIndexing(3);
    EXPECT_EQ(OutputIndexMethod::array, map.method());
    EXPECT_EQ(3, index_array_data.rank_capacity);
    EXPECT_EQ(IndexInterval(), index_array_data.index_range);
    EXPECT_EQ(nullptr, index_array_data.element_pointer);
    // Requesting a smaller capacity must reuse the same allocation.
    EXPECT_EQ(&index_array_data, &map.SetArrayIndexing(1));
    EXPECT_EQ(3, index_array_data.rank_capacity);
    auto ptr = std::make_shared<Index>();
    index_array_data.element_pointer = ptr;
    index_array_data.index_range = IndexInterval::UncheckedClosed(1, 10);
    index_array_data.byte_strides[0] = 1;
    index_array_data.byte_strides[1] = 2;
    index_array_data.byte_strides[2] = 3;
    // Growing the capacity reallocates but preserves all fields.
    auto& new_index_array_data = map.SetArrayIndexing(4);
    EXPECT_EQ(4, new_index_array_data.rank_capacity);
    EXPECT_EQ(ptr, new_index_array_data.element_pointer.pointer());
    EXPECT_EQ(IndexInterval::UncheckedClosed(1, 10),
              new_index_array_data.index_range);
    EXPECT_EQ(1, new_index_array_data.byte_strides[0]);
    EXPECT_EQ(2, new_index_array_data.byte_strides[1]);
    EXPECT_EQ(3, new_index_array_data.byte_strides[2]);
  }
  map.SetSingleInputDimension(3);
  EXPECT_EQ(OutputIndexMethod::single_input_dimension, map.method());
  EXPECT_EQ(3, map.input_dimension());
  {
    auto& index_array_data = map.SetArrayIndexing(3);
    EXPECT_EQ(OutputIndexMethod::array, map.method());
    EXPECT_EQ(3, index_array_data.rank_capacity);
  }
}
// Verifies that an absurdly large rank request fails as out-of-memory, both
// from the initial state and after a successful smaller allocation.
TEST(OutputIndexMapDeathTest, Basic) {
  OutputIndexMap map;
  TENSORSTORE_EXPECT_OOM(
      map.SetArrayIndexing(static_cast<DimensionIndex>(1) << 60));
  map.SetArrayIndexing(5);
  TENSORSTORE_EXPECT_OOM(
      map.SetArrayIndexing(static_cast<DimensionIndex>(1) << 60));
}
// In-bounds case: offset becomes offset + index * stride, stride becomes 0.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, Basic) {
  Index output_offset = 5, output_stride = 3;
  EXPECT_EQ(absl::OkStatus(), ReplaceZeroRankIndexArrayIndexMap(
                                  10, IndexInterval::UncheckedClosed(3, 15),
                                  &output_offset, &output_stride));
  EXPECT_EQ(5 + 10 * 3, output_offset);
  EXPECT_EQ(0, output_stride);
}
// An index below the allowed interval yields kOutOfRange.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OutOfBounds) {
  Index output_offset = 5, output_stride = 3;
  EXPECT_THAT(ReplaceZeroRankIndexArrayIndexMap(
                  10, IndexInterval::UncheckedClosed(11, 15), &output_offset,
                  &output_stride),
              MatchesStatus(absl::StatusCode::kOutOfRange,
                            "Index 10 is outside valid range \\[11, 16\\)"));
}
// Addition overflow (max offset + index*stride) yields kInvalidArgument.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OverflowOffset) {
  Index output_offset = std::numeric_limits<Index>::max(), output_stride = 3;
  EXPECT_THAT(
      ReplaceZeroRankIndexArrayIndexMap(10,
                                        IndexInterval::UncheckedClosed(5, 15),
                                        &output_offset, &output_stride),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          ".*Integer overflow computing offset for output dimension.*"));
}
// Multiplication overflow (kMaxFiniteIndex * stride) yields kInvalidArgument.
TEST(ReplaceZeroRankIndexArrayIndexMapTest, OverflowStride) {
  Index output_offset = 5, output_stride = 100;
  EXPECT_THAT(
      ReplaceZeroRankIndexArrayIndexMap(kMaxFiniteIndex, IndexInterval(),
                                        &output_offset, &output_stride),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          ".*Integer overflow computing offset for output dimension.*"));
}
// A freshly allocated rep has the requested capacities, constant-method
// output maps, and empty input labels.
TEST(Allocate, Basic) {
  auto ptr = TransformRep::Allocate(3, 2);
  EXPECT_EQ(3, ptr->input_rank_capacity);
  EXPECT_EQ(2, ptr->output_rank_capacity);
  EXPECT_EQ(OutputIndexMethod::constant, ptr->output_index_maps()[0].method());
  EXPECT_EQ(OutputIndexMethod::constant, ptr->output_index_maps()[1].method());
  EXPECT_TRUE(ptr->input_labels()[0].empty());
  EXPECT_TRUE(ptr->input_labels()[1].empty());
  EXPECT_TRUE(ptr->input_labels()[2].empty());
}
// Copying a rep leaves the source untouched, duplicates all fields into the
// destination, and shares (rather than clones) the index array allocation.
TEST(CopyTransformRep, Basic) {
  auto source = TransformRep::Allocate(1, 2);
  source->input_rank = 1;
  source->output_rank = 2;
  source->input_origin()[0] = 5;
  source->input_shape()[0] = 2;
  auto& source_map = source->output_index_maps()[0];
  source_map.offset() = 3;
  source_map.stride() = 4;
  auto index_array_ptr = std::make_shared<Index>();
  auto& source_index_array_data = source_map.SetArrayIndexing(1);
  source_index_array_data.element_pointer = index_array_ptr;
  source_index_array_data.byte_strides[0] = 0;
  source->input_labels()[0] = "source";
  auto dest = TransformRep::Allocate(1, 2);
  dest->input_rank = 0;
  dest->output_rank = 0;
  dest->input_origin()[0] = 6;
  dest->input_shape()[0] = 7;
  dest->input_labels()[0] = "dest";
  auto& dest_map = dest->output_index_maps()[0];
  dest_map.offset() = 10;
  dest_map.stride() = 11;
  CopyTransformRep(source.get(), dest.get());
  // Source must be unmodified.
  EXPECT_EQ(5, source->input_origin()[0]);
  EXPECT_EQ(2, source->input_shape()[0]);
  EXPECT_EQ(3, source_map.offset());
  EXPECT_EQ(4, source_map.stride());
  EXPECT_EQ(OutputIndexMethod::array, source_map.method());
  EXPECT_EQ(&source_index_array_data, &source_map.index_array_data());
  EXPECT_EQ(index_array_ptr, source_index_array_data.element_pointer.pointer());
  EXPECT_EQ(0, source_index_array_data.byte_strides[0]);
  EXPECT_EQ("source", source->input_labels()[0]);
  // Destination must mirror the source.
  EXPECT_EQ(1, dest->input_rank);
  EXPECT_EQ(2, dest->output_rank);
  EXPECT_EQ(5, dest->input_origin()[0]);
  EXPECT_EQ(2, dest->input_shape()[0]);
  EXPECT_EQ(3, dest_map.offset());
  EXPECT_EQ(4, dest_map.stride());
  EXPECT_EQ(OutputIndexMethod::array, dest_map.method());
  auto& dest_index_array_data = dest_map.index_array_data();
  EXPECT_EQ(index_array_ptr, dest_index_array_data.element_pointer.pointer());
  EXPECT_EQ(0, dest_index_array_data.byte_strides[0]);
  // test-local shared_ptr + source + dest = 3 owners.
  EXPECT_EQ(3, index_array_ptr.use_count());
  EXPECT_EQ("source", dest->input_labels()[0]);
}
// Moving a rep transfers the index array data (same IndexArrayData object,
// no extra shared_ptr owner) and resets the source map to constant.
TEST(MoveTransformRep, Basic) {
  using ::tensorstore::DimensionSet;
  auto source = TransformRep::Allocate(1, 2);
  source->input_rank = 1;
  source->output_rank = 2;
  source->implicit_lower_bounds = DimensionSet::UpTo(source->input_rank);
  source->implicit_upper_bounds = DimensionSet::UpTo(source->input_rank);
  source->input_origin()[0] = 5;
  source->input_shape()[0] = 2;
  auto& source_map = source->output_index_maps()[0];
  source_map.SetSingleInputDimension(0);
  source_map.offset() = 3;
  source_map.stride() = 4;
  auto index_array_ptr = std::make_shared<Index>();
  auto& source_index_array_data = source_map.SetArrayIndexing(1);
  source_index_array_data.element_pointer = index_array_ptr;
  source_index_array_data.byte_strides[0] = 0;
  source->input_labels()[0] = "source";
  auto dest = TransformRep::Allocate(1, 2);
  dest->input_rank = 0;
  dest->output_rank = 0;
  dest->input_origin()[0] = 6;
  dest->input_shape()[0] = 7;
  dest->input_labels()[0] = "dest";
  auto& dest_map = dest->output_index_maps()[0];
  dest_map.offset() = 10;
  dest_map.stride() = 11;
  MoveTransformRep(source.get(), dest.get());
  EXPECT_EQ(5, source->input_origin()[0]);
  EXPECT_EQ(2, source->input_shape()[0]);
  EXPECT_EQ(3, source_map.offset());
  EXPECT_EQ(4, source_map.stride());
  // The moved-from map no longer owns the index array.
  EXPECT_EQ(OutputIndexMethod::constant, source_map.method());
  EXPECT_EQ(1, dest->input_rank);
  EXPECT_EQ(2, dest->output_rank);
  EXPECT_EQ(5, dest->input_origin()[0]);
  EXPECT_EQ(2, dest->input_shape()[0]);
  EXPECT_EQ(3, dest_map.offset());
  EXPECT_EQ(4, dest_map.stride());
  EXPECT_EQ(OutputIndexMethod::array, dest_map.method());
  auto& dest_index_array_data = dest_map.index_array_data();
  // Ownership transferred: the very same IndexArrayData object.
  EXPECT_EQ(&dest_index_array_data, &source_index_array_data);
  EXPECT_EQ(index_array_ptr, dest_index_array_data.element_pointer.pointer());
  EXPECT_EQ(0, dest_index_array_data.byte_strides[0]);
  EXPECT_EQ(2, index_array_ptr.use_count());
  EXPECT_EQ("source", dest->input_labels()[0]);
}
// Builds a representative rank-3 transform exercising all three output index
// map methods (constant, single_input_dimension, array) with mixed implicit
// bounds; shared by the MutableRep/NewOrMutableRep tests below.
tensorstore::IndexTransform<> MakeTestTransform() {
  return IndexTransformBuilder<>(3, 3)
      .input_origin({1, 2, 3})
      .input_shape({2, 3, 4})
      .input_labels({"a", "b", "c"})
      .implicit_lower_bounds({0, 1, 0})
      .implicit_upper_bounds({0, 1, 1})
      .output_constant(2, 5)
      .output_single_input_dimension(1, 5, 7, 2)
      .output_index_array(0, 8, 11,
                          tensorstore::MakeArray<Index>({{{8}}, {{9}}}),
                          tensorstore::IndexInterval::Sized(7, 3))
      .Finalize()
      .value();
}
// MutableRep copies when the rep is shared and returns the same rep when it
// is already uniquely owned.
TEST(MutableRepTest, Basic) {
  auto transform = MakeTestTransform();
  EXPECT_TRUE(TransformAccess::rep(transform)->is_unique());
  auto rep1 = TransformAccess::rep_ptr<tensorstore::container>(transform);
  EXPECT_FALSE(TransformAccess::rep(transform)->is_unique());
  auto rep2 = MutableRep(std::move(rep1));
  EXPECT_NE(TransformAccess::rep(transform), rep2.get());
  EXPECT_EQ(transform, TransformAccess::Make<tensorstore::IndexTransformView<>>(
                           rep2.get()));
  EXPECT_TRUE(rep2->is_unique());
  TransformRep* rep2_ptr = rep2.get();
  auto rep3 = MutableRep(std::move(rep2));
  EXPECT_EQ(rep2_ptr, rep3.get());
}
// Races MutableRep against a concurrent reader releasing its reference, to
// check that the copy-vs-reuse decision is safe under concurrent refcount
// changes.  Statement order between the phases is load-bearing.
TEST(MutableRepTest, Concurrent) {
  auto orig = IndexTransformBuilder<>(1, 1)
                  .input_origin({1})
                  .input_shape({2})
                  .input_labels({"a"})
                  .implicit_lower_bounds({0})
                  .implicit_upper_bounds({0})
                  .output_constant(0, 5)
                  .Finalize()
                  .value();
  TransformRep* orig_ptr;
  TransformRep::Ptr<> write_ptr = TransformAccess::rep_ptr(orig);
  write_ptr->output_rank = 0;
  TransformRep::Ptr<> read_ptr;
  [[maybe_unused]] size_t num_reads_before_write = 0;
  const size_t num_iterations = 1000;
  TestConcurrent(
      num_iterations,
      [&] {
        write_ptr->input_rank = 1;
        orig_ptr = write_ptr.get();
        read_ptr = write_ptr;
      },
      [&] { EXPECT_EQ(0, write_ptr->input_rank); },
      [&] {
        write_ptr = MutableRep(std::move(write_ptr));
        if (orig_ptr == write_ptr.get()) {
          ++num_reads_before_write;
        }
        write_ptr->input_rank = 0;
      },
      [&] {
        EXPECT_EQ(1, read_ptr->input_rank);
        read_ptr.reset();
      });
#if 0
  // Histogram-style checks disabled: both outcomes are timing-dependent.
  EXPECT_LT(0, num_reads_before_write);
  EXPECT_LT(num_reads_before_write, num_iterations);
#endif
}
// NewOrMutableRep reuses the existing rep when uniquely owned with enough
// capacity, and allocates a new rep (with the requested capacities) when the
// rep is shared or a capacity is insufficient.
TEST(NewOrMutableRepTest, Basic) {
  auto transform = MakeTestTransform();
  {
    // Unique + exact capacity: reuse.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 3);
    EXPECT_EQ(TransformAccess::rep(transform), mutable_rep.get());
  }
  {
    // Unique + smaller request: reuse.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 2, 2);
    EXPECT_EQ(TransformAccess::rep(transform), mutable_rep.get());
  }
  {
    // Shared (extra copy alive): must allocate.
    auto transform_copy = transform;
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 3);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(3, mutable_rep->input_rank_capacity);
    EXPECT_EQ(3, mutable_rep->output_rank_capacity);
  }
  {
    auto transform_copy = transform;
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 1, 2);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(1, mutable_rep->input_rank_capacity);
    EXPECT_EQ(2, mutable_rep->output_rank_capacity);
  }
  {
    // Unique but output capacity too small: must allocate.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 4);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(3, mutable_rep->input_rank_capacity);
    EXPECT_EQ(4, mutable_rep->output_rank_capacity);
  }
  {
    // Unique but input capacity too small: must allocate.
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 4, 3);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(4, mutable_rep->input_rank_capacity);
    EXPECT_EQ(3, mutable_rep->output_rank_capacity);
  }
  {
    auto transform_copy = transform;
    auto mutable_rep = NewOrMutableRep(TransformAccess::rep(transform), 3, 4);
    EXPECT_NE(TransformAccess::rep(transform), mutable_rep.get());
    EXPECT_EQ(3, mutable_rep->input_rank_capacity);
    EXPECT_EQ(4, mutable_rep->output_rank_capacity);
  }
}
// Compatible bounds intersect in place: `combined` becomes the per-dimension
// intersection of the two boxes.
TEST(ValidateAndIntersectBoundsTest, Success) {
  const Box<> inner({-kInfIndex, 6}, {kInfIndex + 8, 3});
  Box<> combined({1, 5}, {9, kInfIndex - 5 + 1});
  auto status = ValidateAndIntersectBounds(
      inner, combined, [](IndexInterval outer, IndexInterval inner) {
        return ContainsOrUnbounded(outer, inner);
      });
  TENSORSTORE_CHECK_OK(status);
  EXPECT_EQ(Box<>({1, 6}, {7, 3}), combined);
}
// Incompatible bounds in dimension 1 yield kOutOfRange with a message that
// names the offending dimension.
TEST(ValidateAndIntersectBoundsTest, Failure) {
  const Box<> inner({-kInfIndex, 4}, {kInfIndex + 8, 3});
  Box<> combined({1, 5}, {9, kInfIndex - 5 + 1});
  auto status = ValidateAndIntersectBounds(
      inner, combined, [](IndexInterval outer, IndexInterval inner) {
        return ContainsOrUnbounded(outer, inner);
      });
  EXPECT_THAT(
      status,
      MatchesStatus(
          absl::StatusCode::kOutOfRange,
          ".*Propagated bounds are incompatible with existing bounds in "
          "dimension 1 bounds .* vs. propagated bounds.*"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/internal/transform_rep.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/index_space/transform_rep_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1b9b18b5-5623-4596-a97b-1e51ff9ed9ac | cpp | google/cel-cpp | parsed_message_value | common/values/parsed_message_value.cc | common/values/parsed_message_value_test.cc | #include "common/values/parsed_message_value.h"
#include <cstdint>
#include <string>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/status_macros.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel {
bool ParsedMessageValue::IsZeroValue() const {
ABSL_DCHECK(*this);
return ABSL_PREDICT_TRUE(value_ != nullptr) ? value_->ByteSizeLong() == 0
: true;
}
// Returns a human-readable rendering of the wrapped message, or "INVALID"
// when no message is present.
std::string ParsedMessageValue::DebugString() const {
  return ABSL_PREDICT_FALSE(value_ == nullptr) ? std::string("INVALID")
                                               : absl::StrCat(*value_);
}
// Stub: serialization is not implemented yet; always returns kUnimplemented.
// `converter` and `value` are intentionally unused.
absl::Status ParsedMessageValue::SerializeTo(AnyToJsonConverter& converter,
                                             absl::Cord& value) const {
  return absl::UnimplementedError("SerializeTo is not yet implemented");
}
// Stub: JSON conversion is not implemented yet; always returns
// kUnimplemented.
absl::StatusOr<Json> ParsedMessageValue::ConvertToJson(
    AnyToJsonConverter& converter) const {
  return absl::UnimplementedError("ConvertToJson is not yet implemented");
}
// Stub: equality against `other` is not implemented yet; always returns
// kUnimplemented without touching `result`.
absl::Status ParsedMessageValue::Equal(ValueManager& value_manager,
                                       const Value& other,
                                       Value& result) const {
  return absl::UnimplementedError("Equal is not yet implemented");
}
// Convenience overload: delegates to the out-parameter Equal() and wraps the
// result in StatusOr (currently propagates its kUnimplemented status).
absl::StatusOr<Value> ParsedMessageValue::Equal(ValueManager& value_manager,
                                                const Value& other) const {
  Value result;
  CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
  return result;
}
// Stub: field access by name is not implemented yet; always returns
// kUnimplemented.
absl::Status ParsedMessageValue::GetFieldByName(
    ValueManager& value_manager, absl::string_view name, Value& result,
    ProtoWrapperTypeOptions unboxing_options) const {
  return absl::UnimplementedError("GetFieldByName is not yet implemented");
}
// Convenience overload: delegates to the out-parameter GetFieldByName() and
// wraps the result in StatusOr.
absl::StatusOr<Value> ParsedMessageValue::GetFieldByName(
    ValueManager& value_manager, absl::string_view name,
    ProtoWrapperTypeOptions unboxing_options) const {
  Value result;
  CEL_RETURN_IF_ERROR(
      GetFieldByName(value_manager, name, result, unboxing_options));
  return result;
}
// Stub: field access by field number is not implemented yet; always returns
// kUnimplemented.
absl::Status ParsedMessageValue::GetFieldByNumber(
    ValueManager& value_manager, int64_t number, Value& result,
    ProtoWrapperTypeOptions unboxing_options) const {
  return absl::UnimplementedError("GetFieldByNumber is not yet implemented");
}
// Convenience overload: delegates to the out-parameter GetFieldByNumber()
// and wraps the result in StatusOr.
absl::StatusOr<Value> ParsedMessageValue::GetFieldByNumber(
    ValueManager& value_manager, int64_t number,
    ProtoWrapperTypeOptions unboxing_options) const {
  Value result;
  CEL_RETURN_IF_ERROR(
      GetFieldByNumber(value_manager, number, result, unboxing_options));
  return result;
}
// Stub: presence test by name is not implemented yet.
absl::StatusOr<bool> ParsedMessageValue::HasFieldByName(
    absl::string_view name) const {
  return absl::UnimplementedError("HasFieldByName is not yet implemented");
}
// Stub: presence test by field number is not implemented yet.
absl::StatusOr<bool> ParsedMessageValue::HasFieldByNumber(
    int64_t number) const {
  return absl::UnimplementedError("HasFieldByNumber is not yet implemented");
}
// Stub: field iteration is not implemented yet; `callback` is never invoked.
absl::Status ParsedMessageValue::ForEachField(
    ValueManager& value_manager, ForEachFieldCallback callback) const {
  return absl::UnimplementedError("ForEachField is not yet implemented");
}
// Stub: qualifier traversal is not implemented yet; always returns
// kUnimplemented.
absl::StatusOr<int> ParsedMessageValue::Qualify(
    ValueManager& value_manager, absl::Span<const SelectQualifier> qualifiers,
    bool presence_test, Value& result) const {
  return absl::UnimplementedError("Qualify is not yet implemented");
}
// Convenience overload: delegates to the out-parameter Qualify() and pairs
// the resulting value with the count of qualifiers consumed.
absl::StatusOr<std::pair<Value, int>> ParsedMessageValue::Qualify(
    ValueManager& value_manager, absl::Span<const SelectQualifier> qualifiers,
    bool presence_test) const {
  Value result;
  CEL_ASSIGN_OR_RETURN(
      auto count, Qualify(value_manager, qualifiers, presence_test, result));
  return std::pair{std::move(result), count};
}
} | #include "absl/base/nullability.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "common/value_manager.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel {
namespace {
using ::absl_testing::StatusIs;
using ::cel::internal::DynamicParseTextProto;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::testing::_;
using ::testing::PrintToStringParamName;
using ::testing::TestWithParam;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
// Parameterized fixture that runs each test under both arena-based and
// new/delete-based allocation, constructing a matching ValueManager.
class ParsedMessageValueTest : public TestWithParam<AllocatorKind> {
 public:
  void SetUp() override {
    switch (GetParam()) {
      case AllocatorKind::kArena:
        arena_.emplace();
        value_manager_ = NewThreadCompatibleValueManager(
            MemoryManager::Pooling(arena()),
            NewThreadCompatibleTypeReflector(MemoryManager::Pooling(arena())));
        break;
      case AllocatorKind::kNewDelete:
        value_manager_ = NewThreadCompatibleValueManager(
            MemoryManager::ReferenceCounting(),
            NewThreadCompatibleTypeReflector(
                MemoryManager::ReferenceCounting()));
        break;
    }
  }
  void TearDown() override {
    // Release the value manager before the arena it may allocate from.
    value_manager_.reset();
    arena_.reset();
  }
  Allocator<> allocator() {
    return arena_ ? ArenaAllocator(&*arena_) : NewDeleteAllocator();
  }
  absl::Nullable<google::protobuf::Arena*> arena() { return allocator().arena(); }
  absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
    return GetTestingDescriptorPool();
  }
  absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
    return GetTestingMessageFactory();
  }
  ValueManager& value_manager() { return **value_manager_; }
  // Parses `text` as a text-format proto of type `T` and wraps it.
  // Fix: previously `text` was ignored and an empty text proto was always
  // parsed; now the supplied text is actually used (all existing callers
  // pass an empty text proto, so their behavior is unchanged).
  template <typename T>
  ParsedMessageValue MakeParsedMessage(absl::string_view text) {
    return ParsedMessageValue(DynamicParseTextProto<T>(
        allocator(), text, descriptor_pool(), message_factory()));
  }

 private:
  absl::optional<google::protobuf::Arena> arena_;
  absl::optional<Shared<ValueManager>> value_manager_;
};
// A default-constructed ParsedMessageValue is falsy (holds no message).
TEST_P(ParsedMessageValueTest, Default) {
  ParsedMessageValue value;
  EXPECT_FALSE(value);
}
// A value constructed from a parsed message is truthy.
TEST_P(ParsedMessageValueTest, Field) {
  ParsedMessageValue value(DynamicParseTextProto<TestAllTypesProto3>(
      allocator(), R"pb()pb", descriptor_pool(), message_factory()));
  EXPECT_TRUE(value);
}
// kind() reports kStruct, matching the class constant.
TEST_P(ParsedMessageValueTest, Kind) {
  ParsedMessageValue value(DynamicParseTextProto<TestAllTypesProto3>(
      allocator(), R"pb()pb", descriptor_pool(), message_factory()));
  EXPECT_EQ(value.kind(), ParsedMessageValue::kKind);
  EXPECT_EQ(value.kind(), ValueKind::kStruct);
}
// GetTypeName() yields the fully-qualified proto message name.
TEST_P(ParsedMessageValueTest, GetTypeName) {
  ParsedMessageValue value(DynamicParseTextProto<TestAllTypesProto3>(
      allocator(), R"pb()pb", descriptor_pool(), message_factory()));
  EXPECT_EQ(value.GetTypeName(), "google.api.expr.test.v1.proto3.TestAllTypes");
}
// GetRuntimeType() matches a MessageType built from the same descriptor.
TEST_P(ParsedMessageValueTest, GetRuntimeType) {
  ParsedMessageValue value(DynamicParseTextProto<TestAllTypesProto3>(
      allocator(), R"pb()pb", descriptor_pool(), message_factory()));
  EXPECT_EQ(value.GetRuntimeType(), MessageType(value.GetDescriptor()));
}
// DebugString() merely has to produce something without crashing.
TEST_P(ParsedMessageValueTest, DebugString) {
  ParsedMessageValue valid_value(DynamicParseTextProto<TestAllTypesProto3>(
      allocator(), R"pb()pb", descriptor_pool(), message_factory()));
  EXPECT_THAT(valid_value.DebugString(), _);
}
// An empty message is its type's zero value.
TEST_P(ParsedMessageValueTest, IsZeroValue) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_TRUE(valid_value.IsZeroValue());
}
// The operations below are not implemented yet; each is expected to return
// kUnimplemented until support lands.
TEST_P(ParsedMessageValueTest, SerializeTo) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  absl::Cord serialized;
  EXPECT_THAT(valid_value.SerializeTo(value_manager(), serialized),
              StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMessageValueTest, ConvertToJson) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_THAT(valid_value.ConvertToJson(value_manager()),
              StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMessageValueTest, Equal) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_THAT(valid_value.Equal(value_manager(), BoolValue()),
              StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMessageValueTest, GetFieldByName) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_THAT(valid_value.GetFieldByName(value_manager(), "does_not_exist"),
              StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMessageValueTest, GetFieldByNumber) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_THAT(valid_value.GetFieldByNumber(value_manager(), 1),
              StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMessageValueTest, ForEachField) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_THAT(valid_value.ForEachField(
                  value_manager(),
                  [](absl::string_view, const Value&) -> absl::StatusOr<bool> {
                    return true;
                  }),
              StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMessageValueTest, Qualify) {
  MessageValue valid_value = MakeParsedMessage<TestAllTypesProto3>(R"pb()pb");
  EXPECT_THAT(valid_value.Qualify(value_manager(), {}, false),
              StatusIs(absl::StatusCode::kUnimplemented));
}
// Run every case once per allocator kind.
INSTANTIATE_TEST_SUITE_P(ParsedMessageValueTest, ParsedMessageValueTest,
                         ::testing::Values(AllocatorKind::kArena,
                                           AllocatorKind::kNewDelete),
                         PrintToStringParamName());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_message_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_message_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
219f0a76-c451-412b-a998-8677ee7adc06 | cpp | tensorflow/tensorflow | mutable_op_resolver_utils | tensorflow/lite/mutable_op_resolver_utils.cc | tensorflow/lite/mutable_op_resolver_utils_test.cc | #include "tensorflow/lite/mutable_op_resolver_utils.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Registers `op` with `mutable_op_resolver` for versions in
// [min_version, max_version], wrapping the opaque TfLiteOperator in a
// classic TfLiteRegistration whose `registration_external` points at `op`.
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op,
           int min_version, int max_version) {
  TfLiteRegistration registration{};
  registration.builtin_code = TfLiteOperatorGetBuiltInCode(op);
  registration.custom_name = TfLiteOperatorGetCustomName(op);
  registration.version = TfLiteOperatorGetVersion(op);
  // const_cast is required because TfLiteRegistration stores a mutable
  // pointer; `op` itself is not modified here.
  registration.registration_external = const_cast<TfLiteOperator*>(op);
  // A non-null custom name marks a custom op; otherwise the builtin code
  // identifies the operator.
  if (registration.custom_name != nullptr) {
    mutable_op_resolver->AddCustom(registration.custom_name, &registration,
                                   min_version, max_version);
  } else {
    mutable_op_resolver->AddBuiltin(BuiltinOperator(registration.builtin_code),
                                    &registration, min_version, max_version);
  }
}
// Convenience overload: registers `op` for exactly the single version the
// operator reports about itself.
void AddOp(MutableOpResolver* mutable_op_resolver, const TfLiteOperator* op) {
  const int op_version = TfLiteOperatorGetVersion(op);
  AddOp(mutable_op_resolver, op, /*min_version=*/op_version,
        /*max_version=*/op_version);
}
} | #include "tensorflow/lite/mutable_op_resolver_utils.h"
#include <stddef.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/c/c_api_opaque.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/common_internal.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/test_util.h"
namespace tflite {
namespace {
// No-op kernel invoke callback; always reports success.
TfLiteStatus DummyInvoke(void* user_data, TfLiteOpaqueContext* context,
                         TfLiteOpaqueNode* node) {
  return kTfLiteOk;
}
// No-op kernel prepare callback; always reports success.
TfLiteStatus DummyPrepare(void* user_data, TfLiteOpaqueContext* context,
                          TfLiteOpaqueNode* node) {
  return kTfLiteOk;
}
// Returns a lazily-created, process-lifetime registration for a custom op
// named "dummy" (version 1) backed by the no-op callbacks above.
TfLiteOperator* GetDummyRegistration() {
  static TfLiteOperator* registration = []() {
    auto* op = TfLiteOperatorCreate(kTfLiteBuiltinCustom, "dummy",
                                    1, nullptr);
    TfLiteOperatorSetPrepareWithData(op, DummyPrepare);
    TfLiteOperatorSetInvokeWithData(op, DummyInvoke);
    return op;
  }();
  return registration;
}
// Returns a lazily-created registration for the builtin ADD op (version 1);
// only the invoke callback is populated.
TfLiteOperator* GetAdditionOpRegistration() {
  static TfLiteOperator* registration = []() {
    auto* r = TfLiteOperatorCreate(kTfLiteBuiltinAdd, nullptr,
                                   1, nullptr);
    TfLiteOperatorSetInvokeWithData(r, DummyInvoke);
    return r;
  }();
  return registration;
}
using MutableOpResolverTest = tflite::testing::Test;
// A builtin registered via AddOp is findable and round-trips its code,
// version, and invoke callback.
TEST_F(MutableOpResolverTest, FindOp) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetAdditionOpRegistration());
  const TfLiteRegistration* found_registration =
      resolver.FindOp(BuiltinOperator_ADD, 1);
  ASSERT_NE(found_registration, nullptr);
  EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
              DummyInvoke);
  EXPECT_EQ(
      TfLiteOperatorGetBuiltInCode(found_registration->registration_external),
      kTfLiteBuiltinAdd);
  EXPECT_EQ(TfLiteOperatorGetVersion(found_registration->registration_external),
            1);
  EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_ADD);
  EXPECT_EQ(found_registration->version, 1);
}
// Looking up a builtin that was never registered yields nullptr.
TEST_F(MutableOpResolverTest, FindMissingOp) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetAdditionOpRegistration());
  const TfLiteRegistration* found_registration =
      resolver.FindOp(BuiltinOperator_CONV_2D, 1);
  EXPECT_EQ(found_registration, nullptr);
}
// Registering [2, 2] makes only version 2 visible.
TEST_F(MutableOpResolverTest, RegisterOpWithSingleVersion) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetAdditionOpRegistration(), 2, 2);
  const TfLiteRegistration* found_registration;
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
  ASSERT_EQ(found_registration, nullptr);
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
  ASSERT_NE(found_registration, nullptr);
  EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
              DummyInvoke);
  EXPECT_EQ(found_registration->version, 2);
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
  ASSERT_EQ(found_registration, nullptr);
}
// Registering [2, 3] makes both 2 and 3 visible, each with its own version.
TEST_F(MutableOpResolverTest, RegisterOpWithMultipleVersions) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetAdditionOpRegistration(), 2, 3);
  const TfLiteRegistration* found_registration;
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 2);
  ASSERT_NE(found_registration, nullptr);
  EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
              DummyInvoke);
  EXPECT_EQ(found_registration->version, 2);
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 3);
  ASSERT_NE(found_registration, nullptr);
  EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
              DummyInvoke);
  EXPECT_EQ(found_registration->version, 3);
}
// Versions outside the registered [2, 3] range are not found.
TEST_F(MutableOpResolverTest, FindOpWithUnsupportedVersions) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetAdditionOpRegistration(), 2, 3);
  const TfLiteRegistration* found_registration;
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 1);
  EXPECT_EQ(found_registration, nullptr);
  found_registration = resolver.FindOp(BuiltinOperator_ADD, 4);
  EXPECT_EQ(found_registration, nullptr);
}
// A custom op is findable by name and reports BuiltinOperator_CUSTOM.
TEST_F(MutableOpResolverTest, FindCustomOp) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetDummyRegistration());
  const TfLiteRegistration* found_registration = resolver.FindOp("dummy", 1);
  ASSERT_NE(found_registration, nullptr);
  EXPECT_EQ(found_registration->builtin_code, BuiltinOperator_CUSTOM);
  EXPECT_TRUE(found_registration->registration_external->invoke_with_data ==
              DummyInvoke);
  EXPECT_EQ(found_registration->version, 1);
}
// Unknown custom names resolve to nullptr.
TEST_F(MutableOpResolverTest, FindMissingCustomOp) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetDummyRegistration());
  const TfLiteRegistration* found_registration = resolver.FindOp("whatever", 1);
  EXPECT_EQ(found_registration, nullptr);
}
// A custom op registered at version 1 is not visible at version 2.
TEST_F(MutableOpResolverTest, FindCustomOpWithUnsupportedVersion) {
  MutableOpResolver resolver;
  AddOp(&resolver, GetDummyRegistration());
  const TfLiteRegistration* found_registration = resolver.FindOp("dummy", 2);
  EXPECT_EQ(found_registration, nullptr);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/mutable_op_resolver_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
51a7fed9-c52a-4155-b34f-5a1bf3110f66 | cpp | tensorflow/tensorflow | hlo_liveness_analysis | third_party/xla/xla/service/hlo_liveness_analysis.cc | third_party/xla/xla/service/hlo_liveness_analysis_test.cc | #include "xla/service/hlo_liveness_analysis.h"
#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
#include <memory>
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
using Worklist = std::deque<const HloInstruction*>;
using Workset = absl::flat_hash_set<const HloInstruction*>;
// Enqueues `instruction` for visiting unless it is already pending; the
// workset deduplicates worklist membership.
void AddToWorklist(const HloInstruction* instruction, Worklist* worklist,
                   Workset* workset) {
  const bool newly_inserted = workset->insert(instruction).second;
  if (newly_inserted) {
    worklist->push_back(instruction);
    VLOG(3) << "ADD instruction: " << instruction->name();
  }
}
using VisitorFunction = absl::FunctionRef<void(const ShapeIndex& )>;
void ForEachLiveIndex(const ShapeTree<bool>& index_tree, VisitorFunction func) {
index_tree.ForEachElement([&](const ShapeIndex& shape_index, bool live) {
if (live) {
func(shape_index);
}
});
}
// Marks `instruction` live at `shape_index`. The per-instruction
// ShapeTree<bool> is created lazily (all-false) on first touch, and the
// instruction is re-enqueued only when the bit actually flips, which keeps
// the fixed-point iteration monotone and terminating.
void MarkLiveAtIndex(const HloInstruction* instruction,
                     const ShapeIndex& shape_index,
                     HloLivenessAnalysis::HloIndexMap* live_index_map,
                     Worklist* worklist, Workset* workset) {
  std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
  if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(),
                                                 false);
  }
  bool& alive = *liveness->mutable_element(shape_index);
  if (!alive) {
    AddToWorklist(instruction, worklist, workset);
    alive = true;
    VLOG(3) << "MARK instruction: " << instruction->name()
            << " shape_index: " << shape_index;
  }
}
// Marks `instruction` live at every shape index. If no liveness tree exists
// yet one is created with all entries true; otherwise each false entry is
// flipped. The instruction is enqueued iff anything changed.
void MarkLiveAtAllIndices(const HloInstruction* instruction,
                          HloLivenessAnalysis::HloIndexMap* live_index_map,
                          Worklist* worklist, Workset* workset) {
  bool add_to_worklist = false;
  std::unique_ptr<ShapeTree<bool>>& liveness = (*live_index_map)[instruction];
  if (liveness == nullptr) {
    liveness = std::make_unique<ShapeTree<bool>>(instruction->shape(),
                                                 true);
    add_to_worklist = true;
  } else {
    for (auto& entry : *liveness) {
      if (!entry.second) {
        add_to_worklist = true;
        entry.second = true;
        VLOG(3) << "MARK instruction: " << instruction->name()
                << " shape_index: " << entry.first;
      }
    }
  }
  if (add_to_worklist) {
    AddToWorklist(instruction, worklist, workset);
  }
}
// Propagates liveness from a kTuple to its operands: a live tuple index
// {i, rest...} makes operand i live both at its top level {} and at the
// sub-index {rest...}.
void PropagateLivenessThroughTuple(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kTuple);
  const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
  ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
    const size_t size = shape_index.size();
    if (size == 0) {
      // Liveness of the tuple value itself names no particular operand.
      return;
    }
    const int64_t operand_index = shape_index[0];
    if (operand_index >= instruction->operand_count()) {
      // Leading index out of range for this tuple; nothing to propagate.
      return;
    }
    MarkLiveAtIndex(instruction->operand(operand_index), {}, live_index_map,
                    worklist, workset);
    // Strip the leading tuple index to address into the operand's shape.
    ShapeIndex operand_shape_index(size - 1);
    for (int i = 1; i < size; ++i) {
      operand_shape_index[i - 1] = shape_index[i];
    }
    MarkLiveAtIndex(instruction->operand(operand_index), operand_shape_index,
                    live_index_map, worklist, workset);
  });
}
// Propagates liveness from a kGetTupleElement to its tuple operand: the
// operand is live at top level, and each live GTE index {rest...} makes the
// operand live at {tuple_index, rest...}.
void PropagateLivenessThroughGTE(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kGetTupleElement);
  MarkLiveAtIndex(instruction->operand(0), {}, live_index_map, worklist,
                  workset);
  const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
  ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
    // Prepend the element index to address into the tuple operand.
    ShapeIndex operand_shape_index(shape_index);
    operand_shape_index.push_front(instruction->tuple_index());
    MarkLiveAtIndex(instruction->operand(0), operand_shape_index,
                    live_index_map, worklist, workset);
  });
}
// Propagates liveness from a kWhile: every live index of the while result is
// forwarded to the body root and to the init operand, and the condition root
// is unconditionally live.
void PropagateLivenessThroughWhile(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kWhile);
  const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
  ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
    // The body produces the value flowing out of the loop at this index.
    MarkLiveAtIndex(instruction->while_body()->root_instruction(), shape_index,
                    live_index_map, worklist, workset);
    // The init operand feeds the first iteration at this index.
    MarkLiveAtIndex(instruction->operand(0), shape_index, live_index_map,
                    worklist, workset);
  });
  // The loop predicate is always evaluated.
  MarkLiveAtIndex(instruction->while_condition()->root_instruction(), {},
                  live_index_map, worklist, workset);
}
// Propagates liveness from a parameter inside a while body/condition back to
// each calling kWhile: live parameter indices make the while result, its body
// root, and its init operand live at the same indices. Only control-flow
// call sites are considered.
void PropagateLivenessToParameterCallers(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset, CallGraph* call_graph) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
  const CallGraphNode& call_graph_node =
      call_graph->GetNode(instruction->parent());
  if (call_graph_node.context() == CallContext::kControlFlow) {
    for (const CallSite& callsite : call_graph_node.caller_callsites()) {
      if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
        auto* xla_while = callsite.instruction();
        const ShapeTree<bool>& index_tree = *live_index_map->at(instruction);
        ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
          MarkLiveAtIndex(xla_while, shape_index, live_index_map, worklist,
                          workset);
          MarkLiveAtIndex(xla_while->while_body()->root_instruction(),
                          shape_index, live_index_map, worklist, workset);
          MarkLiveAtIndex(xla_while->operand(0), shape_index, live_index_map,
                          worklist, workset);
        });
      }
    }
  }
}
// For an instruction inside a control-flow computation, marks the parts of
// each caller required to reach it: for a while caller, the condition root;
// for a conditional caller, the predicate, the conditional itself, and the
// branch operand feeding this computation (with live parameter indices
// forwarded to that operand).
void PropagateLivenessThroughControlFlow(
    const HloInstruction* instruction,
    HloLivenessAnalysis::HloIndexMap* live_index_map, Worklist* worklist,
    Workset* workset, CallGraph* call_graph) {
  const CallGraphNode& call_graph_node =
      call_graph->GetNode(instruction->parent());
  if (call_graph_node.context() == CallContext::kControlFlow) {
    for (const CallSite& callsite : call_graph_node.caller_callsites()) {
      HloInstruction* caller = callsite.instruction();
      if (caller->opcode() == HloOpcode::kWhile) {
        MarkLiveAtIndex(caller->while_condition()->root_instruction(), {},
                        live_index_map, worklist, workset);
      } else if (caller->opcode() == HloOpcode::kConditional) {
        // The predicate and the conditional result must stay live.
        MarkLiveAtIndex(caller->operand(0), {}, live_index_map, worklist,
                        workset);
        MarkLiveAtIndex(caller, {}, live_index_map, worklist, workset);
        // Find which branch computation we are inside of; operand i+1 of the
        // conditional feeds branch computation i.
        const HloComputation* callee_comp = instruction->parent();
        int64_t operand_index = 1;
        for (auto* caller_comp : caller->called_computations()) {
          if (callee_comp == caller_comp) {
            MarkLiveAtIndex(caller->operand(operand_index), {}, live_index_map,
                            worklist, workset);
            if (instruction->opcode() == HloOpcode::kParameter) {
              // Forward live parameter indices into the branch operand.
              const ShapeTree<bool>& index_tree =
                  *live_index_map->at(instruction);
              ForEachLiveIndex(index_tree, [&](const ShapeIndex& shape_index) {
                MarkLiveAtIndex(caller->operand(operand_index), shape_index,
                                live_index_map, worklist, workset);
              });
            }
            break;
          }
          ++operand_index;
        }
      }
    }
  }
}
// Builds the call graph eagerly; the analysis itself runs in RunAnalysis().
HloLivenessAnalysis::HloLivenessAnalysis(const HloModule& module)
    : module_(module), call_graph_(CallGraph::Build(&module)) {}
// Fixed-point worklist propagation. Seeds liveness at the entry root and at
// every side-effecting instruction, then drains the worklist dispatching on
// opcode. Tuple-shape-aware opcodes (tuple, GTE, while, parameter) propagate
// per-index; everything else conservatively marks all operands and called
// computation roots fully live. Control-flow caller requirements are applied
// to every visited instruction.
void HloLivenessAnalysis::RunAnalysis() {
  Worklist worklist;
  Workset workset;
  MarkLiveAtAllIndices(module_.entry_computation()->root_instruction(),
                       &live_index_map_, &worklist, &workset);
  for (auto* computation : module_.computations()) {
    for (auto* instruction : computation->instructions()) {
      if (instruction->HasSideEffectNoRecurse()) {
        // Side effects must be preserved regardless of reachability from the
        // entry root.
        MarkLiveAtAllIndices(instruction, &live_index_map_, &worklist,
                             &workset);
      }
    }
  }
  while (!worklist.empty()) {
    const HloInstruction* instruction = worklist.front();
    worklist.pop_front();
    workset.erase(workset.find(instruction));
    VLOG(1) << "VISIT instruction: " << instruction->name();
    if (instruction->opcode() == HloOpcode::kTuple) {
      PropagateLivenessThroughTuple(instruction, &live_index_map_, &worklist,
                                    &workset);
    } else if (instruction->opcode() == HloOpcode::kGetTupleElement) {
      PropagateLivenessThroughGTE(instruction, &live_index_map_, &worklist,
                                  &workset);
    } else if (instruction->opcode() == HloOpcode::kWhile) {
      PropagateLivenessThroughWhile(instruction, &live_index_map_, &worklist,
                                    &workset);
    } else if (instruction->opcode() == HloOpcode::kParameter) {
      PropagateLivenessToParameterCallers(instruction, &live_index_map_,
                                          &worklist, &workset,
                                          call_graph_.get());
    } else {
      // Conservative default: everything this instruction reads or calls is
      // fully live.
      for (auto* called_computation : instruction->called_computations()) {
        MarkLiveAtAllIndices(called_computation->root_instruction(),
                             &live_index_map_, &worklist, &workset);
      }
      for (HloInstruction* operand : instruction->operands()) {
        MarkLiveAtAllIndices(operand, &live_index_map_, &worklist, &workset);
      }
    }
    PropagateLivenessThroughControlFlow(instruction, &live_index_map_,
                                        &worklist, &workset, call_graph_.get());
  }
}
// Returns true iff `instruction` was marked live at `shape_index`; an
// instruction with no liveness entry at all is dead everywhere.
bool HloLivenessAnalysis::IsLive(const HloInstruction* instruction,
                                 const ShapeIndex& shape_index) const {
  const auto it = live_index_map_.find(instruction);
  if (it == live_index_map_.end()) {
    return false;
  }
  return it->second->element(shape_index);
}
// Factory: constructs the analysis for `module`, runs it to fixed point, and
// returns the populated result.
absl::StatusOr<std::unique_ptr<HloLivenessAnalysis>> HloLivenessAnalysis::Run(
    const HloModule& module) {
  VLOG(1) << "HloLivenessAnalysis::Run on module " << module.name();
  XLA_VLOG_LINES(2, module.ToString());
  auto liveness_analysis = absl::WrapUnique(new HloLivenessAnalysis(module));
  liveness_analysis->RunAnalysis();
  return std::move(liveness_analysis);
}
} | #include "xla/service/hlo_liveness_analysis.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Fixture that runs HloLivenessAnalysis on a parsed module and looks up
// instructions by name for assertions.
class HloLivenessAnalysisTest : public HloTestBase {
 protected:
  HloLivenessAnalysisTest() {}
  // Runs liveness analysis on `module`, caching the result in `liveness_` so
  // it outlives the returned reference.
  const HloLivenessAnalysis& RunLiveness(HloModule* module) {
    liveness_ = HloLivenessAnalysis::Run(*module).value();
    return *liveness_;
  }
  // Returns the instruction named `name`; CHECK-fails if none exists. Scans
  // all computations; within a computation the first match is taken, but a
  // match in a later computation overwrites an earlier one.
  HloInstruction* GetInstruction(HloModule* module, const std::string& name) {
    HloInstruction* to_return = nullptr;
    for (auto* comp : module->computations()) {
      for (auto* inst : comp->instructions()) {
        if (inst->name() == name) {
          to_return = inst;
          break;
        }
      }
    }
    return CHECK_NOTNULL(to_return);
  }
  std::unique_ptr<HloLivenessAnalysis> liveness_;
};
// Everything feeding the entry root is live.
TEST_F(HloLivenessAnalysisTest, AddAtEntryRoot) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(0)
    constant.2 = s32[] constant(1)
    ROOT add = s32[] add(constant.1, constant.2)
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
// An add whose result is never used is dead.
TEST_F(HloLivenessAnalysisTest, DeadAdd) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(0)
    constant.2 = s32[] constant(1)
    add.1 = s32[] add(constant.1, constant.2)
    ROOT add.2 = s32[] add(constant.1, constant.2)
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {}));
}
// A tuple at the entry root is live at every index.
TEST_F(HloLivenessAnalysisTest, TupleAtEntryRoot) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(0)
    constant.2 = s32[] constant(1)
    ROOT tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
// Nested tuple at the entry root is live at all nested indices too.
TEST_F(HloLivenessAnalysisTest, NestedTupleAtEntryRoot) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(1)
    constant.2 = s32[] constant(2)
    constant.3 = s32[] constant(3)
    tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
    ROOT tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
// A GTE keeps only the selected tuple element (and its producer) live.
TEST_F(HloLivenessAnalysisTest, GteOfTuple) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(0)
    constant.2 = s32[] constant(1)
    tuple.1 = (s32[], s32[]) tuple(constant.1, constant.2)
    ROOT get-tuple-element.1 = s32[] get-tuple-element(tuple.1), index=0
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
}
// GTE of a nested tuple keeps the selected subtree live; the sibling dies.
TEST_F(HloLivenessAnalysisTest, GteOfNestedTuple) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(0)
    constant.2 = s32[] constant(1)
    constant.3 = s32[] constant(2)
    tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
    tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
    ROOT get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
  EXPECT_TRUE(liveness.IsLive(
      GetInstruction(module.get(), "get-tuple-element.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(
      GetInstruction(module.get(), "get-tuple-element.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
// Chained GTEs narrow liveness to exactly the accessed leaf.
TEST_F(HloLivenessAnalysisTest, GteOfGteOfNestedTuple) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleModule
  ENTRY SimpleComputation {
    constant.1 = s32[] constant(0)
    constant.2 = s32[] constant(1)
    constant.3 = s32[] constant(2)
    tuple.1 = (s32[], s32[]) tuple(constant.2, constant.3)
    tuple.2 = (s32[], (s32[], s32[])) tuple(constant.1, tuple.1)
    get-tuple-element.1 = (s32[], s32[]) get-tuple-element(tuple.2), index=1
    ROOT get-tuple-element.2 = s32[] get-tuple-element(get-tuple-element.1), index=0
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.2"), {}));
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.1"), {}));
  EXPECT_TRUE(liveness.IsLive(
      GetInstruction(module.get(), "get-tuple-element.1"), {0}));
  EXPECT_FALSE(liveness.IsLive(
      GetInstruction(module.get(), "get-tuple-element.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 0}));
  EXPECT_FALSE(
      liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1, 1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.2"), {}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
// A loop-carried tuple element unused outside the loop (and not read by the
// condition) is dead throughout the while.
TEST_F(HloLivenessAnalysisTest, WhileWithDeadTupleElement) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.1 = s32[] constant(1)
    add.0 = s32[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    constant.2 = s32[] constant(5)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
    while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT get-tuple-element.4 = s32[] get-tuple-element(while.0), index=0
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.4"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.0"), {}));
  EXPECT_FALSE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {}));
}
// An element read by the loop condition stays live even if unused outside.
TEST_F(HloLivenessAnalysisTest, WhileCondPropagatesLiveness) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleLoop
  add_S32 {
    lhs = s32[] parameter(0)
    rhs = s32[] parameter(1)
    ROOT add = s32[] add(lhs, rhs)
  }
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    constant.1 = s32[] constant(1)
    add.0 = s32[] add(get-tuple-element.1, constant.1)
    get-tuple-element.2 = s32[3]{0} get-tuple-element(loop_var.1), index=1
    multiply.0 = s32[3]{0} multiply(get-tuple-element.2, get-tuple-element.2)
    ROOT tuple.0 = (s32[], s32[3]{0}) tuple(add.0, multiply.0)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[3]{0}) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.2), index=0
    get-tuple-element.4 = s32[3]{0} get-tuple-element(loop_var.2), index=1
    zero = s32[] constant(0)
    reduce = s32[] reduce(get-tuple-element.4, zero), dimensions={0}, to_apply=add_S32
    add.1 = s32[] add(get-tuple-element.3, reduce)
    constant.2 = s32[] constant(5)
    ROOT less-than = pred[] compare(add.1, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(0)
    constant.4 = s32[3]{0} constant({0, 1, 2})
    tuple.1 = (s32[], s32[3]{0}) tuple(constant.3, constant.4)
    while.0 = (s32[], s32[3]{0}) while(tuple.1), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT get-tuple-element.5 = s32[] get-tuple-element(while.0), index=0
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.0"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.4"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.0"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "multiply.0"), {}));
}
// All three loop-tuple elements feed the live output across iterations
// (element 2 feeds element 0 via the body), so every element of the while
// state — and of the body parameter — must be reported live.
TEST_F(HloLivenessAnalysisTest, WhileWithLiveTupleElements) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule SimpleLoop
  SimpleLoop.body {
    loop_var.1 = (s32[], s32[], s32[]) parameter(0)
    get-tuple-element.1 = s32[] get-tuple-element(loop_var.1), index=0
    get-tuple-element.2 = s32[] get-tuple-element(loop_var.1), index=1
    add.1 = s32[] add(get-tuple-element.1, get-tuple-element.2)
    get-tuple-element.3 = s32[] get-tuple-element(loop_var.1), index=2
    multiply.1 = s32[] multiply(get-tuple-element.3, get-tuple-element.3)
    ROOT tuple.1 = (s32[], s32[], s32[]) tuple(add.1, get-tuple-element.3, multiply.1)
  }
  SimpleLoop.condition {
    loop_var.2 = (s32[], s32[], s32[]) parameter(0)
    get-tuple-element.4 = s32[] get-tuple-element(loop_var.2), index=0
    constant.1 = s32[] constant(5)
    ROOT less-than = pred[] compare(get-tuple-element.4, constant.1), direction=LT
  }
  ENTRY SimpleLoop {
    constant.2 = s32[] constant(0)
    constant.3 = s32[] constant(1)
    constant.4 = s32[] constant(2)
    tuple.2 = (s32[], s32[], s32[]) tuple(constant.2, constant.3, constant.4)
    while.1 = (s32[], s32[], s32[]) while(tuple.2), condition=
      SimpleLoop.condition, body=SimpleLoop.body
    ROOT get-tuple-element.5 = s32[] get-tuple-element(while.1), index=0
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "get-tuple-element.5"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.1"), {2}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.2"), {2}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.1"), {2}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {0}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {1}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "loop_var.1"), {2}));
}
// The entry root discards the while result entirely, but the loop body
// contains an outfeed (a side effect), so the loop — and the state driving
// its trip count — must still be considered live.
TEST_F(HloLivenessAnalysisTest, WhileWithOutfeed) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule OutfeedLoop
  WhileBody {
    body_param = (s32[]) parameter(0)
    token0 = token[] after-all()
    constant.2 = s32[] constant(2)
    outfeed_tuple = (s32[]) outfeed(constant.2, token0)
    get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    ROOT tuple = (s32[]) tuple(add)
  }
  WhileCondition {
    cond_param = (s32[]) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
    constant.2 = s32[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(0)
    tuple.1 = (s32[]) tuple(constant.3)
    while = (s32[]) while(tuple.1), condition=WhileCondition,
      body=WhileBody
    ROOT rtuple = () tuple()
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
// Same as WhileWithOutfeed, but with the outfeeding loop nested inside an
// outer loop whose result is also discarded: side-effect liveness must
// propagate through both levels of nesting.
TEST_F(HloLivenessAnalysisTest, NestedWhileWithOutfeed) {
  auto module = ParseAndReturnVerifiedModule(R"(
  HloModule OutfeedLoop
  InnerWhileBody {
    body_param = (s32[]) parameter(0)
    token0 = token[] after-all()
    constant.2 = s32[] constant(2)
    outfeed_tuple = (s32[]) outfeed(constant.2, token0)
    get-tuple-element.1 = s32[] get-tuple-element(body_param), index=0
    constant.1 = s32[] constant(1)
    add = s32[] add(get-tuple-element.1, constant.1)
    ROOT tuple = (s32[]) tuple(add)
  }
  InnerWhileCondition {
    cond_param = (s32[]) parameter(0)
    get-tuple-element.3 = s32[] get-tuple-element(cond_param), index=0
    constant.2 = s32[] constant(10)
    ROOT less-than = pred[] compare(get-tuple-element.3, constant.2), direction=LT
  }
  OuterWhileCondition {
    cond_param.2 = (s32[]) parameter(0)
    get-tuple-element.5 = s32[] get-tuple-element(cond_param.2), index=0
    constant.5 = s32[] constant(5)
    ROOT less-than.2 = pred[] compare(get-tuple-element.5, constant.5), direction=LT
  }
  OuterWhileBody {
    body_param.2 = (s32[]) parameter(0)
    get-tuple-element.8 = s32[] get-tuple-element(body_param.2), index=0
    constant.6 = s32[] constant(0)
    tuple.2 = (s32[]) tuple(constant.6)
    inner_while = (s32[]) while(tuple.2), condition=InnerWhileCondition,
      body=InnerWhileBody
    constant.7 = s32[] constant(1)
    add.2 = s32[] add(get-tuple-element.8, constant.7)
    ROOT rtuple = (s32[]) tuple(add.2)
  }
  ENTRY SimpleLoop {
    constant.3 = s32[] constant(0)
    tuple.1 = (s32[]) tuple(constant.3)
    while = (s32[]) while(tuple.1), condition=OuterWhileCondition,
      body=OuterWhileBody
    ROOT rtuple = () tuple()
  })")
                    .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "add.2"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "constant.3"), {}));
}
// Regression-style test from a real model: the entry root is an empty
// tuple, but the conditional's true branch performs a host send (side
// effect). Liveness must flow from the conditional's branch computations
// back through its operands, the while loop, and the dynamic-update-slice
// that produces the sent data.
TEST_F(HloLivenessAnalysisTest, PropagateLivenessFromConditionalComputation) {
  auto module = ParseAndReturnVerifiedModule(R"(
HloModule main.67
%region_0.10 (Arg_0.11: (s32[], s32[], f32[1024,3], s32[1])) -> (s32[], s32[], f32[1024,3], s32[1]) {
  %Arg_0.11 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)
  %get-tuple-element.17 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=0, metadata={op_name="while"}
  %constant.13 = s32[] constant(1)
  %add.25 = s32[] add(s32[] %get-tuple-element.17, s32[] %constant.13), metadata={op_name="while/add_1"}
  %get-tuple-element.18 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=1, metadata={op_name="while"}
  %add.22 = s32[] add(s32[] %get-tuple-element.18, s32[] %constant.13), metadata={op_name="while/add"}
  %get-tuple-element.19 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=2, metadata={op_name="while"}
  %constant.16 = f32[] constant(0)
  %constant.15 = f32[] constant(1)
  %rng.21 = f32[3]{0} rng(f32[] %constant.16, f32[] %constant.15), distribution=rng_uniform, metadata={op_name="while/random_uniform/RandomUniform"}
  %reshape.23 = f32[1,3]{1,0} reshape(f32[3]{0} %rng.21), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"}
  %constant.12 = s32[] constant(0)
  %dynamic-update-slice.24 = f32[1024,3]{1,0} dynamic-update-slice(f32[1024,3]{1,0} %get-tuple-element.19, f32[1,3]{1,0} %reshape.23, s32[] %get-tuple-element.18, s32[] %constant.12), metadata={op_name="while/TensorArrayV2Write/TensorListSetItem"}
  %get-tuple-element.20 = s32[1]{0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.11), index=3, metadata={op_name="while"}
  ROOT %tuple.26 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %add.25, s32[] %add.22, f32[1024,3]{1,0} %dynamic-update-slice.24, s32[1]{0} %get-tuple-element.20), metadata={op_name="while"}
}
%region_1.27 (Arg_0.28: (s32[], s32[], f32[1024,3], s32[1])) -> pred[] {
  %Arg_0.28 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) parameter(0)
  %get-tuple-element.30 = s32[] get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %Arg_0.28), index=1, metadata={op_name="while"}
  %constant.29 = s32[] constant(1024)
  ROOT %compare.31 = pred[] compare(s32[] %get-tuple-element.30, s32[] %constant.29), direction=LT, metadata={op_name="while/Less"}
}
%region_2.42 (Arg_0.43: (f32[3,32,32,3], token[])) -> (pred[], token[]) {
  %constant.44 = pred[] constant(true)
  %Arg_0.43 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)
  %get-tuple-element.52 = f32[3,32,32,3]{3,2,1,0} get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=0, metadata={op_name="image_sample/write_summary/summary_cond"}
  %constant.49 = f32[] constant(255.5)
  %broadcast.50 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.49), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"}
  %multiply.53 = f32[3,32,32,3]{3,2,1,0} multiply(f32[3,32,32,3]{3,2,1,0} %get-tuple-element.52, f32[3,32,32,3]{3,2,1,0} %broadcast.50), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Mul"}
  %constant.47 = f32[] constant(0)
  %broadcast.48 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.47), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"}
  %maximum.54 = f32[3,32,32,3]{3,2,1,0} maximum(f32[3,32,32,3]{3,2,1,0} %multiply.53, f32[3,32,32,3]{3,2,1,0} %broadcast.48), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Maximum"}
  %constant.45 = f32[] constant(255)
  %broadcast.46 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[] %constant.45), dimensions={}, metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"}
  %minimum.55 = f32[3,32,32,3]{3,2,1,0} minimum(f32[3,32,32,3]{3,2,1,0} %maximum.54, f32[3,32,32,3]{3,2,1,0} %broadcast.46), metadata={op_name="image_sample/write_summary/summary_cond/convert_image/Minimum"}
  %convert.56 = u8[3,32,32,3]{3,2,1,0} convert(f32[3,32,32,3]{3,2,1,0} %minimum.55), metadata={op_name="image_sample/write_summary/summary_cond/convert_image"}
  %get-tuple-element.51 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.43), index=1, metadata={op_name="image_sample/write_summary/summary_cond"}
  %send.57 = (u8[3,32,32,3]{3,2,1,0}, u32[], token[]) send(u8[3,32,32,3]{3,2,1,0} %convert.56, token[] %get-tuple-element.51), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"}
  %send-done.58 = token[] send-done((u8[3,32,32,3]{3,2,1,0}, u32[], token[]) %send.57), channel_id=2, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="host_compute_channel_0_args_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond/encode_each_image/TensorArrayUnstack/TensorListFromTensor"}
  ROOT %tuple.59 = (pred[], token[]) tuple(pred[] %constant.44, token[] %send-done.58), metadata={op_name="image_sample/write_summary/summary_cond"}
}
%region_3.60 (Arg_0.61: (f32[3,32,32,3], token[])) -> (pred[], token[]) {
  %constant.62 = pred[] constant(false)
  %Arg_0.61 = (f32[3,32,32,3]{3,2,1,0}, token[]) parameter(0)
  %get-tuple-element.63 = token[] get-tuple-element((f32[3,32,32,3]{3,2,1,0}, token[]) %Arg_0.61), index=1, metadata={op_name="image_sample/write_summary/summary_cond"}
  ROOT %tuple.64 = (pred[], token[]) tuple(pred[] %constant.62, token[] %get-tuple-element.63), metadata={op_name="image_sample/write_summary/summary_cond"}
}
ENTRY %main.67 (arg_tuple.1: (s32[])) -> () {
  %arg_tuple.1 = (s32[]{:T(256)}) parameter(0)
  %get-tuple-element.2 = s32[]{:T(256)} get-tuple-element((s32[]{:T(256)}) %arg_tuple.1), index=0
  %constant.3 = s32[] constant(0)
  %compare.8 = pred[]{:T(256)} compare(s32[]{:T(256)} %get-tuple-element.2, s32[] %constant.3), direction=EQ, metadata={op_name="image_sample/write_summary/Equal"}
  %constant.5 = f32[] constant(0)
  %broadcast.6 = f32[1024,3]{1,0} broadcast(f32[] %constant.5), dimensions={}, metadata={op_name="tokens_accumulator"}
  %constant.4 = s32[1]{0} constant({1024})
  %tuple.9 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) tuple(s32[] %constant.3, s32[] %constant.3, f32[1024,3]{1,0} %broadcast.6, s32[1]{0} %constant.4), metadata={op_name="while"}
  %while.32 = (s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) while((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %tuple.9), condition=%region_1.27, body=%region_0.10, metadata={op_name="while"}
  %get-tuple-element.33 = f32[1024,3]{1,0} get-tuple-element((s32[], s32[], f32[1024,3]{1,0}, s32[1]{0}) %while.32), index=2, metadata={op_name="while"}
  %transpose.34 = f32[3,1024]{0,1} transpose(f32[1024,3]{1,0} %get-tuple-element.33), dimensions={1,0}, metadata={op_name="transpose.transpose/perm"}
  %reshape.35 = f32[3,32,32,1]{3,2,1,0} reshape(f32[3,1024]{0,1} %transpose.34), metadata={op_name="Reshape"}
  %broadcast.36 = f32[3,32,32,1]{3,2,1,0} broadcast(f32[3,32,32,1]{3,2,1,0} %reshape.35), dimensions={0,1,2,3}, metadata={op_name="Tile"}
  %reshape.37 = f32[3,32,32]{2,1,0} reshape(f32[3,32,32,1]{3,2,1,0} %broadcast.36), metadata={op_name="Tile"}
  %broadcast.38 = f32[3,32,32,3]{3,2,1,0} broadcast(f32[3,32,32]{2,1,0} %reshape.37), dimensions={0,1,2}, metadata={op_name="Tile"}
  %after-all.7 = token[] after-all(), metadata={op_name="image_sample/write_summary/summary_cond"}
  %send.39 = (pred[]{:T(256)}, u32[], token[]) send(pred[]{:T(256)} %compare.8, token[] %after-all.7), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"}
  %send-done.40 = token[] send-done((pred[]{:T(256)}, u32[], token[]) %send.39), channel_id=1, is_host_transfer=true, frontend_attributes={_xla_host_transfer_rendezvous="if_predicate_channel_1_dtoh_0"}, metadata={op_name="image_sample/write_summary/summary_cond"}
  %tuple.41 = (f32[3,32,32,3]{3,2,1,0}, token[]) tuple(f32[3,32,32,3]{3,2,1,0} %broadcast.38, token[] %send-done.40), metadata={op_name="image_sample/write_summary/summary_cond"}
  %conditional.65 = (pred[], token[]) conditional(pred[]{:T(256)} %compare.8, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41, (f32[3,32,32,3]{3,2,1,0}, token[]) %tuple.41), true_computation=%region_2.42, false_computation=%region_3.60, metadata={op_name="image_sample/write_summary/summary_cond"}
  ROOT %tuple.66 = () tuple()
}
)")
                  .value();
  const HloLivenessAnalysis& liveness = RunLiveness(module.get());
  // The host send in the true branch keeps the conditional, its operand
  // tuple, and the while-produced buffer it reads all live.
  EXPECT_TRUE(
      liveness.IsLive(GetInstruction(module.get(), "conditional.65"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "tuple.41"), {}));
  EXPECT_TRUE(liveness.IsLive(
      GetInstruction(module.get(), "get-tuple-element.33"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "while.32"), {}));
  EXPECT_TRUE(liveness.IsLive(
      GetInstruction(module.get(), "dynamic-update-slice.24"), {}));
  EXPECT_TRUE(liveness.IsLive(GetInstruction(module.get(), "send.57"), {}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_liveness_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0ededd8e-de7b-4c1d-bda1-bcfd77a634ba | cpp | google/quiche | crypto_handshake_message | quiche/quic/core/crypto/crypto_handshake_message.cc | quiche/quic/core/crypto/crypto_handshake_message_test.cc | #include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include <memory>
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/crypto_framer.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/crypto/crypto_utils.h"
#include "quiche/quic/core/quic_socket_address_coder.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
// Default-constructed message: no tag, no entries, no minimum-size padding.
CryptoHandshakeMessage::CryptoHandshakeMessage() : tag_(0), minimum_size_(0) {}
// Copy construction deliberately does not copy `serialized_`; the cached
// wire encoding is rebuilt lazily by the next GetSerialized() call.
CryptoHandshakeMessage::CryptoHandshakeMessage(
    const CryptoHandshakeMessage& other)
    : tag_(other.tag_),
      tag_value_map_(other.tag_value_map_),
      minimum_size_(other.minimum_size_) {
}
CryptoHandshakeMessage::CryptoHandshakeMessage(CryptoHandshakeMessage&& other) =
    default;
CryptoHandshakeMessage::~CryptoHandshakeMessage() {}
// Copy assignment also drops the serialization cache, since the contents
// now differ from whatever was previously encoded.
CryptoHandshakeMessage& CryptoHandshakeMessage::operator=(
    const CryptoHandshakeMessage& other) {
  tag_ = other.tag_;
  tag_value_map_ = other.tag_value_map_;
  serialized_.reset();
  minimum_size_ = other.minimum_size_;
  return *this;
}
CryptoHandshakeMessage& CryptoHandshakeMessage::operator=(
    CryptoHandshakeMessage&& other) = default;
// Equality compares the logical contents (tag, entries, minimum size); the
// lazily-built serialization cache is intentionally excluded.
bool CryptoHandshakeMessage::operator==(
    const CryptoHandshakeMessage& rhs) const {
  return tag_ == rhs.tag_ && tag_value_map_ == rhs.tag_value_map_ &&
         minimum_size_ == rhs.minimum_size_;
}
bool CryptoHandshakeMessage::operator!=(
    const CryptoHandshakeMessage& rhs) const {
  return !(*this == rhs);
}
// Resets the message to an empty, tag-less state and drops any cached
// serialization.
void CryptoHandshakeMessage::Clear() {
  tag_ = 0;
  tag_value_map_.clear();
  minimum_size_ = 0;
  serialized_.reset();
}
// Returns the wire encoding of this message, serializing on first use and
// caching the result until MarkDirty() (or a mutating call that resets the
// cache) invalidates it.
const QuicData& CryptoHandshakeMessage::GetSerialized() const {
  if (!serialized_) {
    serialized_ = CryptoFramer::ConstructHandshakeMessage(*this);
  }
  return *serialized_;
}
// Invalidates the cached serialization; the next GetSerialized() re-encodes.
void CryptoHandshakeMessage::MarkDirty() { serialized_.reset(); }
// Stores `versions` under `tag` as a list of QUIC version labels converted
// to network byte order before being written into the message.
void CryptoHandshakeMessage::SetVersionVector(
    QuicTag tag, ParsedQuicVersionVector versions) {
  QuicVersionLabelVector version_labels;
  for (const ParsedQuicVersion& version : versions) {
    version_labels.push_back(
        quiche::QuicheEndian::HostToNet32(CreateQuicVersionLabel(version)));
  }
  SetVector(tag, version_labels);
}
// Stores a single version label under `tag`, in network byte order.
void CryptoHandshakeMessage::SetVersion(QuicTag tag,
                                        ParsedQuicVersion version) {
  SetValue(tag,
           quiche::QuicheEndian::HostToNet32(CreateQuicVersionLabel(version)));
}
// Copies `value` into the message under `tag`; the caller's view need not
// outlive this call since the bytes are stored in an owned std::string.
void CryptoHandshakeMessage::SetStringPiece(QuicTag tag,
                                            absl::string_view value) {
  tag_value_map_[tag] = std::string(value);
}
// Removes `tag` (and its value) if present; no-op otherwise.
void CryptoHandshakeMessage::Erase(QuicTag tag) { tag_value_map_.erase(tag); }
// Fills `out_tags` with the sequence of QuicTags stored under `tag`.
// On failure (tag missing, or value length not a multiple of a tag size)
// `out_tags` is cleared and an error code is returned.
QuicErrorCode CryptoHandshakeMessage::GetTaglist(
    QuicTag tag, QuicTagVector* out_tags) const {
  auto entry = tag_value_map_.find(tag);
  if (entry == tag_value_map_.end()) {
    out_tags->clear();
    return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
  }
  if (entry->second.size() % sizeof(QuicTag) != 0) {
    out_tags->clear();
    return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
  }
  const size_t num_tags = entry->second.size() / sizeof(QuicTag);
  out_tags->resize(num_tags);
  for (size_t i = 0; i < num_tags; ++i) {
    // Copy byte-wise: the stored string is not guaranteed to be aligned
    // for a direct QuicTag load.
    memcpy(&(*out_tags)[i], entry->second.data() + i * sizeof(QuicTag),
           sizeof(QuicTag));
  }
  return QUIC_NO_ERROR;
}
// Reads the tag list stored under `tag` and byte-swaps each entry between
// network and host representation, yielding QUIC version labels.
QuicErrorCode CryptoHandshakeMessage::GetVersionLabelList(
    QuicTag tag, QuicVersionLabelVector* out) const {
  QuicErrorCode error = GetTaglist(tag, out);
  if (error != QUIC_NO_ERROR) {
    return error;
  }
  for (size_t i = 0; i < out->size(); ++i) {
    (*out)[i] = quiche::QuicheEndian::HostToNet32((*out)[i]);
  }
  return QUIC_NO_ERROR;
}
// Reads a single 4-byte version label stored under `tag` and byte-swaps it
// (HostToNet32 is its own inverse, so it also serves for net-to-host).
QuicErrorCode CryptoHandshakeMessage::GetVersionLabel(
    QuicTag tag, QuicVersionLabel* out) const {
  QuicErrorCode error = GetUint32(tag, out);
  if (error != QUIC_NO_ERROR) {
    return error;
  }
  *out = quiche::QuicheEndian::HostToNet32(*out);
  return QUIC_NO_ERROR;
}
// Points `*out` at the raw bytes stored under `tag`. The view aliases the
// message's internal storage, so it is invalidated by any mutation of the
// message. Returns false (leaving `*out` untouched) when the tag is absent.
bool CryptoHandshakeMessage::GetStringPiece(QuicTag tag,
                                            absl::string_view* out) const {
  auto entry = tag_value_map_.find(tag);
  if (entry == tag_value_map_.end()) {
    return false;
  }
  *out = entry->second;
  return true;
}
// Returns whether any value (possibly empty) is stored under `tag`.
bool CryptoHandshakeMessage::HasStringPiece(QuicTag tag) const {
  return tag_value_map_.count(tag) != 0;
}
// Returns the `index`-th record of the value stored under `tag`, where the
// value is a sequence of records each prefixed by a 3-byte little-endian
// length. Fails if the tag is absent, a length prefix is truncated or
// claims more bytes than remain, or fewer than `index + 1` records exist.
QuicErrorCode CryptoHandshakeMessage::GetNthValue24(
    QuicTag tag, unsigned index, absl::string_view* out) const {
  absl::string_view value;
  if (!GetStringPiece(tag, &value)) {
    return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
  }
  for (unsigned i = 0;; i++) {
    if (value.empty()) {
      // Ran out of records before reaching `index`.
      return QUIC_CRYPTO_MESSAGE_INDEX_NOT_FOUND;
    }
    if (value.size() < 3) {
      // Not enough bytes left for a complete 3-byte length prefix.
      return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
    }
    const unsigned char* data =
        reinterpret_cast<const unsigned char*>(value.data());
    // Assemble the little-endian 24-bit record length.
    size_t size = static_cast<size_t>(data[0]) |
                  (static_cast<size_t>(data[1]) << 8) |
                  (static_cast<size_t>(data[2]) << 16);
    value.remove_prefix(3);
    if (value.size() < size) {
      // Length prefix claims more payload than the buffer holds.
      return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
    }
    if (i == index) {
      // `*out` aliases this message's storage; valid only until mutation.
      *out = absl::string_view(value.data(), size);
      return QUIC_NO_ERROR;
    }
    value.remove_prefix(size);
  }
}
// Copies the 4-byte value stored under `tag` into `*out`; zero-fills `*out`
// and returns an error if the tag is missing or has the wrong length.
QuicErrorCode CryptoHandshakeMessage::GetUint32(QuicTag tag,
                                                uint32_t* out) const {
  return GetPOD(tag, out, sizeof(uint32_t));
}
// Same as GetUint32 but for an 8-byte value.
QuicErrorCode CryptoHandshakeMessage::GetUint64(QuicTag tag,
                                                uint64_t* out) const {
  return GetPOD(tag, out, sizeof(uint64_t));
}
// Copies a stateless reset token (fixed length) stored under `tag` into
// `*out`, with the same zero-fill-on-error behavior as GetUint32.
QuicErrorCode CryptoHandshakeMessage::GetStatelessResetToken(
    QuicTag tag, StatelessResetToken* out) const {
  return GetPOD(tag, out, kStatelessResetTokenLength);
}
// Returns the number of bytes the serialized form of this message occupies,
// before any padding required to reach minimum_size().
size_t CryptoHandshakeMessage::size() const {
  // Fixed-size message header fields.
  size_t total = sizeof(QuicTag) + sizeof(uint16_t) + sizeof(uint16_t);
  // Per-entry index overhead: a tag plus a 4-byte offset for each entry.
  total += (sizeof(QuicTag) + sizeof(uint32_t)) * tag_value_map_.size();
  // Plus the raw value bytes of every entry.
  for (const auto& [entry_tag, entry_value] : tag_value_map_) {
    total += entry_value.size();
  }
  return total;
}
// Sets the minimum number of bytes the serialized form must occupy.
// Invalidates the cached serialization only when the value actually changes.
void CryptoHandshakeMessage::set_minimum_size(size_t min_bytes) {
  if (min_bytes == minimum_size_) {
    return;
  }
  serialized_.reset();
  minimum_size_ = min_bytes;
}
size_t CryptoHandshakeMessage::minimum_size() const { return minimum_size_; }
// Human-readable rendering of the message, for logging and tests.
std::string CryptoHandshakeMessage::DebugString() const {
  return DebugStringInternal(0);
}
// Copies exactly `len` bytes of the value stored under `tag` into `out`.
// On any failure (missing tag or length mismatch) the output buffer is
// zero-filled before the error code is returned, so `out` is always
// fully initialized.
QuicErrorCode CryptoHandshakeMessage::GetPOD(QuicTag tag, void* out,
                                             size_t len) const {
  auto entry = tag_value_map_.find(tag);
  if (entry == tag_value_map_.end()) {
    memset(out, 0, len);
    return QUIC_CRYPTO_MESSAGE_PARAMETER_NOT_FOUND;
  }
  if (entry->second.size() != len) {
    memset(out, 0, len);
    return QUIC_INVALID_CRYPTO_MESSAGE_PARAMETER;
  }
  memcpy(out, entry->second.data(), len);
  return QUIC_NO_ERROR;
}
// Renders the message as "TAG<\n  key: value\n...>", two spaces of indent
// per nesting level. Known tags are pretty-printed by type; anything
// unrecognized falls back to a hex dump.
std::string CryptoHandshakeMessage::DebugStringInternal(size_t indent) const {
  std::string ret =
      std::string(2 * indent, ' ') + QuicTagToString(tag_) + "<\n";
  ++indent;
  for (auto it = tag_value_map_.begin(); it != tag_value_map_.end(); ++it) {
    ret += std::string(2 * indent, ' ') + QuicTagToString(it->first) + ": ";
    bool done = false;
    switch (it->first) {
      // Tags whose value is a single 4-byte integer.
      case kICSL:
      case kCFCW:
      case kSFCW:
      case kIRTT:
      case kMIUS:
      case kMIBS:
      case kTCID:
      case kMAD:
        if (it->second.size() == 4) {
          uint32_t value;
          memcpy(&value, it->second.data(), sizeof(value));
          absl::StrAppend(&ret, value);
          done = true;
        }
        break;
      // Tags whose value is a list of QuicTags, rendered comma-separated.
      case kKEXS:
      case kAEAD:
      case kCOPT:
      case kPDMD:
      case kVER:
        if (it->second.size() % sizeof(QuicTag) == 0) {
          for (size_t j = 0; j < it->second.size(); j += sizeof(QuicTag)) {
            QuicTag tag;
            memcpy(&tag, it->second.data() + j, sizeof(tag));
            if (j > 0) {
              ret += ",";
            }
            ret += "'" + QuicTagToString(tag) + "'";
          }
          done = true;
        }
        break;
      // Rejection reasons: a list of 4-byte failure-reason codes.
      case kRREJ:
        if (it->second.size() % sizeof(uint32_t) == 0) {
          for (size_t j = 0; j < it->second.size(); j += sizeof(uint32_t)) {
            uint32_t value;
            memcpy(&value, it->second.data() + j, sizeof(value));
            if (j > 0) {
              ret += ",";
            }
            ret += CryptoUtils::HandshakeFailureReasonToString(
                static_cast<HandshakeFailureReason>(value));
          }
          done = true;
        }
        break;
      // Client address: decoded via the socket-address coder if valid.
      case kCADR:
        if (!it->second.empty()) {
          QuicSocketAddressCoder decoder;
          if (decoder.Decode(it->second.data(), it->second.size())) {
            ret += QuicSocketAddress(decoder.ip(), decoder.port()).ToString();
            done = true;
          }
        }
        break;
      // Server config: itself a handshake message, rendered recursively.
      case kSCFG:
        if (!it->second.empty()) {
          std::unique_ptr<CryptoHandshakeMessage> msg(
              CryptoFramer::ParseMessage(it->second));
          if (msg) {
            ret += "\n";
            ret += msg->DebugStringInternal(indent + 1);
            done = true;
          }
        }
        break;
      // Padding: show the size only, not the (meaningless) bytes.
      case kPAD:
        ret += absl::StrFormat("(%d bytes of padding)", it->second.size());
        done = true;
        break;
      // Human-readable string tags.
      case kSNI:
      case kUAID:
        ret += "\"" + it->second + "\"";
        done = true;
        break;
    }
    if (!done) {
      // Unknown tag or malformed value: fall back to a hex dump.
      ret += "0x" + absl::BytesToHexString(it->second);
    }
    ret += "\n";
  }
  --indent;
  ret += std::string(2 * indent, ' ') + ">";
  return ret;
}
} | #include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include <utility>
#include <vector>
#include "quiche/quic/core/crypto/crypto_handshake.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
namespace test {
namespace {
// DebugString of an empty SHLO message; also exercises copy/move
// construction and assignment, which must preserve the rendered contents.
TEST(CryptoHandshakeMessageTest, DebugString) {
  const char* str = "SHLO<\n>";
  CryptoHandshakeMessage message;
  message.set_tag(kSHLO);
  EXPECT_EQ(str, message.DebugString());
  CryptoHandshakeMessage message2(message);
  EXPECT_EQ(str, message2.DebugString());
  CryptoHandshakeMessage message3(std::move(message));
  EXPECT_EQ(str, message3.DebugString());
  CryptoHandshakeMessage message4 = message3;
  EXPECT_EQ(str, message4.DebugString());
  CryptoHandshakeMessage message5 = std::move(message3);
  EXPECT_EQ(str, message5.DebugString());
}
// kRREJ values must render as named handshake-failure reasons.
TEST(CryptoHandshakeMessageTest, DebugStringWithUintVector) {
  const char* str =
      "REJ <\n  RREJ: "
      "SOURCE_ADDRESS_TOKEN_DIFFERENT_IP_ADDRESS_FAILURE,"
      "CLIENT_NONCE_NOT_UNIQUE_FAILURE\n>";
  CryptoHandshakeMessage message;
  message.set_tag(kREJ);
  std::vector<uint32_t> reasons = {
      SOURCE_ADDRESS_TOKEN_DIFFERENT_IP_ADDRESS_FAILURE,
      CLIENT_NONCE_NOT_UNIQUE_FAILURE};
  message.SetVector(kRREJ, reasons);
  EXPECT_EQ(str, message.DebugString());
  CryptoHandshakeMessage message2(message);
  EXPECT_EQ(str, message2.DebugString());
  CryptoHandshakeMessage message3(std::move(message));
  EXPECT_EQ(str, message3.DebugString());
  CryptoHandshakeMessage message4 = message3;
  EXPECT_EQ(str, message4.DebugString());
  CryptoHandshakeMessage message5 = std::move(message3);
  EXPECT_EQ(str, message5.DebugString());
}
// kCOPT values must render as quoted, comma-separated tag names.
TEST(CryptoHandshakeMessageTest, DebugStringWithTagVector) {
  const char* str = "CHLO<\n  COPT: 'TBBR','PAD ','BYTE'\n>";
  CryptoHandshakeMessage message;
  message.set_tag(kCHLO);
  message.SetVector(kCOPT, QuicTagVector{kTBBR, kPAD, kBYTE});
  EXPECT_EQ(str, message.DebugString());
  CryptoHandshakeMessage message2(message);
  EXPECT_EQ(str, message2.DebugString());
  CryptoHandshakeMessage message3(std::move(message));
  EXPECT_EQ(str, message3.DebugString());
  CryptoHandshakeMessage message4 = message3;
  EXPECT_EQ(str, message4.DebugString());
  CryptoHandshakeMessage message5 = std::move(message3);
  EXPECT_EQ(str, message5.DebugString());
}
// HasStringPiece reports presence without inspecting the value.
TEST(CryptoHandshakeMessageTest, HasStringPiece) {
  CryptoHandshakeMessage message;
  EXPECT_FALSE(message.HasStringPiece(kALPN));
  message.SetStringPiece(kALPN, "foo");
  EXPECT_TRUE(message.HasStringPiece(kALPN));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/crypto_handshake_message.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/crypto_handshake_message_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
3d4c0666-2f4c-4f65-b796-b910e48f5143 | cpp | tensorflow/tensorflow | fp_util | third_party/xla/xla/fp_util.h | third_party/xla/xla/fp_util_test.cc | #ifndef XLA_FP_UTIL_H_
#define XLA_FP_UTIL_H_
#include <algorithm>
#define _USE_MATH_DEFINES
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <optional>
#include <utility>
#include "xla/types.h"
#include "xla/util.h"
namespace xla {
template <typename T>
constexpr bool IsZero(T x) {
return x == static_cast<T>(0.0f);
}
template <typename T>
constexpr bool IsSignMinus(T x) {
return x < 0;
}
template <typename T>
constexpr T Abs(T x) {
if (IsZero(x)) {
return x + static_cast<T>(0.0f);
}
return IsSignMinus(x) ? -x : x;
}
template <typename T>
constexpr bool IsNaN(T x) {
return x != x;
}
template <typename T>
constexpr bool IsInfinite(T x) {
return x == std::numeric_limits<T>::infinity() ||
x == -std::numeric_limits<T>::infinity();
}
template <typename T>
constexpr bool IsFinite(T x) {
return !IsNaN(x) && !IsInfinite(x);
}
template <typename T>
constexpr bool IsNormal(T x) {
T abs_x = Abs(x);
return abs_x >= std::numeric_limits<T>::min() &&
abs_x <= std::numeric_limits<T>::max();
}
template <typename T>
constexpr bool IsSubnormal(T x) {
T abs_x = Abs(x);
return abs_x > static_cast<T>(0) && abs_x < std::numeric_limits<T>::min();
}
// Multiplies `x` by radix^n (like std::scalbn) using only arithmetic, so
// it stays constexpr and works for non-standard floating-point types.
// Overflow saturates to infinity and deep underflow produces a signed zero
// through the ordinary arithmetic.
template <typename T>
constexpr T ScaleBase(T x, int n) {
  static_assert(is_specialized_floating_point_v<T>);
  // Scale up in chunks small enough that each multiplier is itself finite.
  while (n > 0 && IsFinite(x) && !IsZero(x)) {
    int multiplier_exponent =
        std::min(n, std::numeric_limits<T>::max_exponent - 1);
    x *= IPow(static_cast<T>(std::numeric_limits<T>::radix),
              multiplier_exponent);
    n -= multiplier_exponent;
  }
  // Scale down one digit at a time so the crossing into the subnormal
  // range can be detected and the final step handled without double
  // rounding.
  for (; n < 0 && IsFinite(x) && !IsZero(x); ++n) {
    T shifted_x = x / std::numeric_limits<T>::radix;
    if (IsSubnormal(shifted_x)) {
      // Remaining scale expressed relative to denorm_min's exponent.
      int scale_exponent = -((std::numeric_limits<T>::min_exponent - 1) -
                             (std::numeric_limits<T>::digits - 1)) +
                           n;
      if (scale_exponent < 0) {
        // Underflows completely; preserve the sign of x in the zero.
        return x * static_cast<T>(0);
      }
      return x *
             ScaleBase(std::numeric_limits<T>::denorm_min(), scale_exponent);
    }
    x = shifted_x;
  }
  return x;
}
// Returns the radix exponent e of `x` such that radix^e <= |x| < radix^(e+1)
// (like std::ilogb). Sentinels: nullopt for NaN, INT_MAX for +/-inf, and
// INT_MIN for zero.
template <typename T>
constexpr std::optional<int> LogBase(T x) {
  if (IsNaN(x)) {
    return std::nullopt;
  }
  if (IsInfinite(x)) {
    return std::numeric_limits<int>::max();
  }
  if (IsZero(x)) {
    return std::numeric_limits<int>::min();
  }
  T magnitude = Abs(x);
  int exponent = 0;
  // Bring the magnitude up into [1, radix) ...
  for (; magnitude < static_cast<T>(1); --exponent) {
    magnitude *= std::numeric_limits<T>::radix;
  }
  // ... or down into it, counting digits either way.
  for (; magnitude >= std::numeric_limits<T>::radix; ++exponent) {
    magnitude /= std::numeric_limits<T>::radix;
  }
  return exponent;
}
// Rounding rule applied when a value cannot be represented exactly.
enum class RoundingDirection {
  // IEEE-754 default: round to nearest, ties to the even significand.
  kRoundTiesToEven,
  // Truncate toward zero.
  kRoundTowardsZero,
};
// Splits `to_split` into a (high, low) pair of DstT values whose sum
// approximates it: `high` holds the top bits of the significand, rounded so
// that its bottom `num_high_trailing_zeros` significand bits are zero
// (useful for exact-product "double-double" style tricks), and `low` holds
// the remainder. Returns (NaN, NaN) if the request cannot be satisfied
// (negative zero-count, non-finite input, or not enough precision left).
template <typename DstT, typename SrcT>
constexpr std::pair<DstT, DstT> SplitToFpPair(
    SrcT to_split, int num_high_trailing_zeros,
    RoundingDirection rounding_direction =
        RoundingDirection::kRoundTiesToEven) {
  constexpr auto kError =
      std::make_pair(std::numeric_limits<DstT>::quiet_NaN(),
                     std::numeric_limits<DstT>::quiet_NaN());
  if (num_high_trailing_zeros < 0) {
    return kError;
  }
  if (!IsFinite(to_split)) {
    return kError;
  }
  if (IsZero(to_split)) {
    DstT zero = static_cast<DstT>(to_split);
    return std::make_pair(zero, zero);
  }
  // Handle negatives by splitting the magnitude and negating both halves.
  if (IsSignMinus(to_split)) {
    auto [high, low] =
        SplitToFpPair<DstT, SrcT>(Abs(to_split), num_high_trailing_zeros);
    return std::make_pair(-high, -low);
  }
  auto maybe_exponent = LogBase(to_split);
  if (!maybe_exponent.has_value()) {
    return kError;
  }
  int exponent = *maybe_exponent;
  constexpr int kMinNormalExponent =
      std::numeric_limits<DstT>::min_exponent - 1;
  // Subnormal destinations lose precision bit-for-bit with the exponent
  // deficit below the minimum normal exponent.
  const int effective_precision = std::numeric_limits<DstT>::digits -
                                  std::max(kMinNormalExponent - exponent, 0);
  const int high_bits_to_keep = effective_precision - num_high_trailing_zeros;
  if (high_bits_to_keep < 1) {
    return kError;
  }
  // SrcT must have enough exponent range to scale a DstT significand into
  // the integer range without overflow.
  static_assert(std::numeric_limits<SrcT>::max_exponent - 1 >=
                std::numeric_limits<DstT>::digits);
  // Scale so the bits to keep land in the integer part.
  SrcT scaled_significand =
      ScaleBase(to_split, high_bits_to_keep - (exponent + 1));
  uint64_t integer_part = static_cast<uint64_t>(scaled_significand);
  SrcT fractional_part = scaled_significand - static_cast<SrcT>(integer_part);
  switch (rounding_direction) {
    case RoundingDirection::kRoundTiesToEven: {
      // Round up on a clear majority, or on an exact tie with an odd
      // integer part (ties-to-even).
      if (fractional_part > static_cast<SrcT>(0.5f) ||
          (fractional_part == static_cast<SrcT>(0.5f) &&
           integer_part % 2 == 1)) {
        integer_part += 1;
      }
      break;
    }
    case RoundingDirection::kRoundTowardsZero: {
      break;
    }
  }
  // Undo the scaling to recover the rounded high part in SrcT.
  SrcT rounded = ScaleBase(static_cast<SrcT>(integer_part),
                           (exponent + 1) - high_bits_to_keep);
  DstT high = static_cast<DstT>(rounded);
  // The conversion to DstT must be exact, or the split failed.
  if (static_cast<SrcT>(high) != rounded) {
    return kError;
  }
  DstT low = static_cast<DstT>(to_split - double{high});
  return std::make_pair(high, low);
}
template <typename DstT, typename SrcT>
constexpr DstT RoundToPrecision(
SrcT to_round, int precision = std::numeric_limits<DstT>::digits,
RoundingDirection rounding_direction =
RoundingDirection::kRoundTiesToEven) {
auto [high, low] = SplitToFpPair<DstT, SrcT>(
to_round,
std::numeric_limits<DstT>::digits - precision,
rounding_direction);
return high;
}
// Returns the natural logarithm of 2 (M_LN2) split into a (high, low)
// DstT pair; see SplitToFpPair for the meaning of
// `num_high_trailing_zeros`.
template <typename DstT>
constexpr std::pair<DstT, DstT> Log2FloatPair(int num_high_trailing_zeros) {
  constexpr double kLn2 = M_LN2;
  return SplitToFpPair<DstT>(kLn2, num_high_trailing_zeros);
}
// Returns the "Goldberg ulp" of x: epsilon scaled by x's exponent, i.e.
// the spacing of representable values in x's binade (after Goldberg,
// "What Every Computer Scientist Should Know About Floating-Point
// Arithmetic").  Zeros and subnormals are mapped to the ulp of the
// smallest normal value.  When the exponent cannot be extracted,
// returns NaN (or infinity, or the ulp of max(), for types lacking
// those).
template <typename T>
constexpr T GoldbergUlp(T x) {
  if (IsZero(x) || IsSubnormal(x)) {
    return GoldbergUlp(std::numeric_limits<T>::min());
  }
  const std::optional<int> maybe_exponent = LogBase(x);
  // BUG FIX: this previously read
  //   if (maybe_exponent.has_value(); const int exponent = *maybe_exponent)
  // which discards has_value() (it is only the init-statement) and
  // branches on `exponent != 0`.  Any x in [1, 2) -- exponent 0 -- thus
  // fell through to the NaN branch, and an empty optional would have
  // been dereferenced.  Branch on has_value() instead.
  if (maybe_exponent.has_value()) {
    return ScaleBase(std::numeric_limits<T>::epsilon(), *maybe_exponent);
  }
  if constexpr (std::numeric_limits<T>::has_quiet_NaN) {
    return std::numeric_limits<T>::quiet_NaN();
  } else if constexpr (std::numeric_limits<T>::has_infinity) {
    return std::numeric_limits<T>::infinity();
  } else {
    return GoldbergUlp(std::numeric_limits<T>::max());
  }
}
// Returns the number of representable values of T between a and b
// (0 when a == b).  Each value is mapped onto a linear "floats away
// from zero" scale -- negating the magnitude for negative values -- and
// the distance is the absolute difference of the two ordinals.
template <typename T>
int64_t CalculateDistanceInFloats(T a, T b) {
  const auto [a_is_negative, a_magnitude] = SignAndMagnitude(a);
  const auto [b_is_negative, b_magnitude] = SignAndMagnitude(b);
  const uint64_t a_ordinal = a_is_negative ? -a_magnitude : a_magnitude;
  const uint64_t b_ordinal = b_is_negative ? -b_magnitude : b_magnitude;
  const int64_t signed_distance = a_ordinal - b_ordinal;
  return std::abs(signed_distance);
}
}
#endif | #include "xla/fp_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
#include "absl/numeric/bits.h"
#include "xla/bit_cast.h"
#include "xla/test.h"
#include "xla/util.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
// Parameterized over a single double input; DropBits exercises
// SplitToFpPair<float> at every legal trailing-zero count for that
// input.
class FixedValueTest : public testing::TestWithParam<double> {};
TEST_P(FixedValueTest, DropBits) {
  double input = GetParam();
  int exponent = std::ilogb(input);
  constexpr int kMinNormalExponent =
      std::numeric_limits<float>::min_exponent - 1;
  // Results landing in the float subnormal range lose precision
  // proportional to how far the exponent sits below the normal range.
  int normalization_loss =
      std::isnormal(input) ? std::max(kMinNormalExponent - exponent, 0) : 0;
  int max_precision = std::numeric_limits<float>::digits - normalization_loss;
  for (int i = 0; i < max_precision; ++i) {
    auto result = SplitToFpPair<float>(input,
                                       i);
    auto [high_float, low_float] = result;
    if (!std::isfinite(input)) {
      // Non-finite inputs must yield the (NaN, NaN) error pair.
      EXPECT_TRUE(std::isnan(high_float));
      EXPECT_TRUE(std::isnan(low_float));
      continue;
    }
    EXPECT_FALSE(std::isnan(high_float));
    EXPECT_FALSE(std::isnan(low_float));
    // The high half must carry at least `i` trailing zero bits.
    EXPECT_GE(absl::countr_zero(absl::bit_cast<uint32_t>(high_float)), i);
    double sum = double{high_float} + double{low_float};
    if (input == 0.0) {
      EXPECT_EQ(high_float, 0.0f);
      EXPECT_EQ(low_float, 0.0f);
    } else {
      // The high half alone is accurate to the retained precision.
      EXPECT_LT(std::fabs(input - double{high_float}),
                std::scalbn(input, -(max_precision - i)));
      // The pair together recovers roughly twice that precision.
      if (std::abs(input) >= std::numeric_limits<float>::min()) {
        EXPECT_LT(std::fabs(input - sum),
                  std::scalbn(std::fabs(input), -(2 * max_precision + 1 - i)));
      }
    }
    if (i == 0) {
      // With no dropped bits the low half must be negligible next to
      // the high half.
      EXPECT_EQ(high_float + low_float, high_float);
    }
    if (input == high_float) {
      EXPECT_EQ(low_float, 0.0f);
    } else {
      // The two halves must not overlap in magnitude.
      EXPECT_GT(std::fabs(high_float),
                std::scalbn(low_float, max_precision - i))
          << "input: " << RoundTripFpToString(input)
          << " high_float: " << RoundTripFpToString(high_float)
          << " low_float: " << RoundTripFpToString(low_float);
      // Splitting an already-split high half must be a no-op.
      auto no_op_split = SplitToFpPair<float>(high_float,
                                              i);
      EXPECT_EQ(no_op_split.first, high_float);
      EXPECT_EQ(no_op_split.second, 0.0f);
    }
    if (input != sum) {
      // Residual error is only allowed when the input carries more
      // significant bits than a float pair can represent.
      EXPECT_LT(absl::countr_zero(absl::bit_cast<uint64_t>(input)),
                std::numeric_limits<double>::digits - (2 * max_precision + 1))
          << "input: " << RoundTripFpToString(input)
          << " high_float: " << RoundTripFpToString(high_float)
          << " low_float: " << RoundTripFpToString(low_float);
    }
  }
}
// Single-precision inputs: signed zeros, irrational constants, a value
// at the edge of the subnormal range, and non-finite values.
INSTANTIATE_TEST_SUITE_P(
    SinglePrecisionInputs, FixedValueTest,
    testing::Values(+0.0f, -0.0f, 1.0f, static_cast<float>(M_PI),
                    static_cast<float>(M_1_PI), static_cast<float>(M_E),
                    static_cast<float>(M_LN2), static_cast<float>(M_LOG2E),
                    static_cast<float>(M_SQRT2), static_cast<float>(M_SQRT1_2),
                    static_cast<float>(M_2_SQRTPI), 0x1.555554p+1f,
                    0x1.aaaaaap+1f, 0x1.fffffcp-127f,
                    std::numeric_limits<float>::infinity(),
                    std::numeric_limits<float>::quiet_NaN()));
// Double-precision inputs that are not exactly representable in float,
// including values near the float subnormal boundary.
INSTANTIATE_TEST_SUITE_P(DoublePrecisionInputs, FixedValueTest,
                         testing::Values(+0.0, -0.0, 1.0, M_PI, M_1_PI, M_E,
                                         M_LN2, M_LOG2E, M_SQRT2, M_SQRT1_2,
                                         M_2_SQRTPI, 0x1.5555555555555p+1,
                                         0x1.aaaaaaaaaaaaap+1,
                                         0x1.fffffffffffffp-127,
                                         0x1.aaaaaaaaaaaaap-127));
// The two FP8 E4M3 flavors share their distance behavior, so the tests
// below run once per type via a typed test suite.
template <typename T>
class FP8E4M3DistanceTest : public ::testing::Test {};
using F8E4M3Types = ::testing::Types<tsl::float8_e4m3, tsl::float8_e4m3fn>;
TYPED_TEST_SUITE(FP8E4M3DistanceTest, F8E4M3Types);
// float8_e3m4: 4 mantissa bits, hence 16 representable steps from 0 to
// the smallest normal value and 16 values per binade.
TEST(FPDistanceTest, F8E3M4Distance) {
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(tsl::float8_e3m4(8.0),
                                                        tsl::float8_e3m4(8.0)),
            0);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(tsl::float8_e3m4(8.0),
                                                        tsl::float8_e3m4(15.5)),
            15);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(tsl::float8_e3m4(8.0),
                                                        tsl::float8_e3m4(6)),
            8);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
                std::numeric_limits<tsl::float8_e3m4>::denorm_min(),
                tsl::float8_e3m4(0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
                -std::numeric_limits<tsl::float8_e3m4>::denorm_min(),
                tsl::float8_e3m4(0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
                -std::numeric_limits<tsl::float8_e3m4>::denorm_min(),
                std::numeric_limits<tsl::float8_e3m4>::denorm_min()),
            2);
  EXPECT_EQ(
      CalculateDistanceInFloats<tsl::float8_e3m4>(
          std::numeric_limits<tsl::float8_e3m4>::min(), tsl::float8_e3m4(0)),
      16);
  EXPECT_EQ(
      CalculateDistanceInFloats<tsl::float8_e3m4>(
          -std::numeric_limits<tsl::float8_e3m4>::min(), tsl::float8_e3m4(0)),
      16);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e3m4>(
                -std::numeric_limits<tsl::float8_e3m4>::min(),
                std::numeric_limits<tsl::float8_e3m4>::min()),
            32);
}
// float8_e4m3(/fn): 3 mantissa bits, hence 8 steps from 0 to min() and
// 8 values per binade.
TYPED_TEST(FP8E4M3DistanceTest, F8E4M3Distance) {
  EXPECT_EQ(
      CalculateDistanceInFloats<TypeParam>(TypeParam(8.0), TypeParam(8.0)), 0);
  EXPECT_EQ(
      CalculateDistanceInFloats<TypeParam>(TypeParam(8.0), TypeParam(15.0)), 7);
  EXPECT_EQ(
      CalculateDistanceInFloats<TypeParam>(TypeParam(8.0), TypeParam(6.0)), 4);
  EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
                std::numeric_limits<TypeParam>::denorm_min(), TypeParam(0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
                -std::numeric_limits<TypeParam>::denorm_min(), TypeParam(0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
                -std::numeric_limits<TypeParam>::denorm_min(),
                std::numeric_limits<TypeParam>::denorm_min()),
            2);
  EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
                std::numeric_limits<TypeParam>::min(), TypeParam(0)),
            8);
  EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
                -std::numeric_limits<TypeParam>::min(), TypeParam(0)),
            8);
  EXPECT_EQ(CalculateDistanceInFloats<TypeParam>(
                -std::numeric_limits<TypeParam>::min(),
                std::numeric_limits<TypeParam>::min()),
            16);
}
// float8_e5m2: 2 mantissa bits, hence 4 steps from 0 to min() and 4
// values per binade.
TEST(FPDistanceTest, F8E5M2Distance) {
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(tsl::float8_e5m2(8.0),
                                                        tsl::float8_e5m2(8.0)),
            0);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(tsl::float8_e5m2(8.0),
                                                        tsl::float8_e5m2(14)),
            3);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(tsl::float8_e5m2(8.0),
                                                        tsl::float8_e5m2(6)),
            2);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
                std::numeric_limits<tsl::float8_e5m2>::denorm_min(),
                tsl::float8_e5m2(0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
                -std::numeric_limits<tsl::float8_e5m2>::denorm_min(),
                tsl::float8_e5m2(0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
                -std::numeric_limits<tsl::float8_e5m2>::denorm_min(),
                std::numeric_limits<tsl::float8_e5m2>::denorm_min()),
            2);
  EXPECT_EQ(
      CalculateDistanceInFloats<tsl::float8_e5m2>(
          std::numeric_limits<tsl::float8_e5m2>::min(), tsl::float8_e5m2(0)),
      4);
  EXPECT_EQ(
      CalculateDistanceInFloats<tsl::float8_e5m2>(
          -std::numeric_limits<tsl::float8_e5m2>::min(), tsl::float8_e5m2(0)),
      4);
  EXPECT_EQ(CalculateDistanceInFloats<tsl::float8_e5m2>(
                -std::numeric_limits<tsl::float8_e5m2>::min(),
                std::numeric_limits<tsl::float8_e5m2>::min()),
            8);
}
// double: 52 mantissa bits.  Covers adjacency across the
// subnormal/normal boundary, symmetry around signed zero, and two NaN
// bit patterns differing only in the sign bit (expected two ordinals
// apart).
TEST(FPDistanceTest, F64Distance) {
  EXPECT_EQ(CalculateDistanceInFloats<double>(8.0, 8.0), 0);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                std::numeric_limits<double>::denorm_min(),
                std::nextafter(std::numeric_limits<double>::denorm_min(), 1.0)),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                std::numeric_limits<double>::min(),
                std::numeric_limits<double>::denorm_min()),
            (1ULL << 52) - 1);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                std::numeric_limits<double>::denorm_min(), 0.0),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                -std::numeric_limits<double>::denorm_min(), 0.0),
            1);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                -std::numeric_limits<double>::denorm_min(),
                std::numeric_limits<double>::denorm_min()),
            2);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                std::numeric_limits<double>::min(), 0.0),
            1ULL << 52);
  EXPECT_EQ(CalculateDistanceInFloats<double>(
                -std::numeric_limits<double>::min(), 0.0),
            1ULL << 52);
  EXPECT_EQ(
      CalculateDistanceInFloats<double>(-std::numeric_limits<double>::min(),
                                        std::numeric_limits<double>::min()),
      2 * (1ULL << 52));
  EXPECT_EQ(
      CalculateDistanceInFloats<double>(BitCast<double>(0x7fffffffffffffff),
                                        BitCast<double>(0xffffffffffffffff)),
      2);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/fp_util.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/fp_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cee18ae8-9b62-4adf-9023-605307ccd725 | cpp | google/tensorstore | type_traits | tensorstore/internal/type_traits.h | tensorstore/internal/type_traits_test.cc | #ifndef TENSORSTORE_INTERNAL_TYPE_TRAITS_H_
#define TENSORSTORE_INTERNAL_TYPE_TRAITS_H_
#include <cstddef>
#include <initializer_list>
#include <iosfwd>
#include <type_traits>
#include <utility>
#if defined(__has_builtin)
#if __has_builtin(__type_pack_element)
#define TENSORSTORE_HAS_TYPE_PACK_ELEMENT
#endif
#endif
#ifndef TENSORSTORE_HAS_TYPE_PACK_ELEMENT
#include <tuple>
#endif
#include "absl/meta/type_traits.h"
#include "tensorstore/index.h"
namespace tensorstore {
namespace internal {
// Placeholder indicating detection failure; deliberately not
// constructible, destructible, copyable, or assignable (mirrors
// std::experimental::nonesuch).
struct not_detected {
  ~not_detected() = delete;
  not_detected(not_detected const&) = delete;
  void operator=(not_detected const&) = delete;
};
// Primary template: chosen when `Op<Args...>` is ill-formed.
template <class AlwaysVoid, template <class...> class Op, class... Args>
struct detector_impl {
  using value_t = std::false_type;
  using type = not_detected;
};
// Specialization selected (via SFINAE on void_t) when `Op<Args...>` is
// well-formed.
template <template <class...> class Op, class... Args>
struct detector_impl<std::void_t<Op<Args...>>, Op, Args...> {
  using value_t = std::true_type;
  using type = Op<Args...>;
};
// Equivalents of std::experimental::is_detected / detected_t.
template <template <class...> class Op, class... Args>
using is_detected = typename detector_impl<void, Op, Args...>::value_t;
template <template <class...> class Op, class... Args>
using detected_t = typename detector_impl<void, Op, Args...>::type;
// True when `t == u` is well-formed and yields something convertible to
// bool.
template <typename T, typename U = T, typename = void>
constexpr inline bool IsEqualityComparable = false;
template <typename T, typename U>
constexpr inline bool IsEqualityComparable<
    T, U,
    std::enable_if_t<std::is_convertible_v<
        decltype(std::declval<T>() == std::declval<U>()), bool>>> = true;
template <typename To, typename, typename... From>
constexpr inline bool IsPackConvertibleWithoutNarrowingHelper = false;
// Detected via list-initialization, which rejects narrowing
// conversions.
template <typename To, typename... From>
constexpr inline bool IsPackConvertibleWithoutNarrowingHelper<
    To,
    std::void_t<decltype(std::initializer_list<To>{std::declval<From>()...})>,
    From...> = true;
// Target is constructible, but NOT implicitly convertible, from Source.
template <typename Source, typename Target>
constexpr inline bool IsOnlyExplicitlyConvertible =
    (std::is_constructible_v<Target, Source> &&
     !std::is_convertible_v<Source, Target>);
// Every type in `From...` converts to `To` without narrowing.
template <typename To, typename... From>
constexpr inline bool IsPackConvertibleWithoutNarrowing =
    IsPackConvertibleWithoutNarrowingHelper<To, void, From...>;
// Every argument type can serve as an Index without narrowing.
template <typename... IndexType>
constexpr inline bool IsIndexPack =
    IsPackConvertibleWithoutNarrowing<Index, IndexType...>;
// Component-wise implicit convertibility for pair-like conversions.
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairImplicitlyConvertible =
    std::is_convertible_v<ASource, ADest> &&
    std::is_convertible_v<BSource, BDest>;
// Component-wise (possibly explicit) constructibility.
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairExplicitlyConvertible =
    std::is_constructible_v<ADest, ASource> &&
    std::is_constructible_v<BDest, BSource>;
// Constructible but not implicitly convertible, component-wise.
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairOnlyExplicitlyConvertible =
    IsPairExplicitlyConvertible<ASource, BSource, ADest, BDest> &&
    !IsPairImplicitlyConvertible<ASource, BSource, ADest, BDest>;
// Component-wise assignability (to lvalues of the destination types).
template <typename ASource, typename BSource, typename ADest, typename BDest>
constexpr inline bool IsPairAssignable =
    std::is_assignable_v<ADest&, ASource> &&
    std::is_assignable_v<BDest&, BSource>;
// Like std::is_convertible, except a `void` destination accepts
// anything.
template <typename From, typename To>
constexpr inline bool IsConvertibleOrVoid = std::is_convertible_v<From, To>;
template <typename From>
constexpr inline bool IsConvertibleOrVoid<From, void> = true;
// True when `os << t` is well-formed for a const T&.
template <typename T, typename = void>
constexpr inline bool IsOstreamable = false;
template <typename T>
constexpr inline bool
    IsOstreamable<T, std::void_t<decltype(std::declval<std::ostream&>()
                                          << std ::declval<const T&>())>> =
        true;
// CopyQualifiersHelper<Qualified, T>::type is T with the cv- and
// ref-qualifiers of Qualified layered on top, recursing through
// const/volatile and lvalue/rvalue references.
template <typename Qualified, typename T>
struct CopyQualifiersHelper {
  using type = T;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<const Qualified, T> {
  using type = const typename CopyQualifiersHelper<Qualified, T>::type;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<volatile Qualified, T> {
  using type = volatile typename CopyQualifiersHelper<Qualified, T>::type;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<const volatile Qualified, T> {
  using type = const volatile typename CopyQualifiersHelper<Qualified, T>::type;
};
template <typename Qualified, typename T>
struct CopyQualifiersHelper<Qualified&, T> {
  using type = typename CopyQualifiersHelper<Qualified, T>::type&;
};
// Consistency fix: this specialization previously declared its template
// parameters in the opposite order (<typename T, typename Qualified>)
// from every sibling.  Partial specialization matching is positional,
// so reordering the declarations is purely cosmetic and behavior is
// unchanged.
template <typename Qualified, typename T>
struct CopyQualifiersHelper<Qualified&&, T> {
  using type = typename CopyQualifiersHelper<Qualified, T>::type&&;
};
// CopyQualifiers<Qualified, T>: T stripped of its own cv/ref
// qualifiers, then given those of Qualified.
template <typename Qualified, typename T>
using CopyQualifiers =
    typename CopyQualifiersHelper<Qualified, absl::remove_cvref_t<T>>::type;
// Returns an lvalue reference to `value`, allowing a temporary to be
// passed to an API that requires an lvalue.  The returned reference is
// only valid for the lifetime of the (possibly temporary) argument.
template <typename U>
inline U& GetLValue(U&& value) {
  U& ref = value;
  return ref;
}
// Evaluates to the first type of the pack; useful for naming a fixed
// type while expanding a sibling parameter pack.
template <typename T, typename... U>
using FirstType = T;
// True when Dest is Source or const Source (a "const-preserving"
// conversion).
template <typename Source, typename Dest>
constexpr inline bool IsConstConvertible =
    (std::is_same_v<Source, Dest> || std::is_same_v<const Source, Dest>);
// As above, but a (possibly const) void destination is also accepted.
template <typename Source, typename Dest>
constexpr inline bool IsConstConvertibleOrVoid =
    (std::is_same_v<Source, Dest> || std::is_same_v<const Source, Dest> ||
     std::is_void_v<Dest>);
// TypePackElement<I, Ts...> is the I-th type in Ts.  Uses the
// __type_pack_element builtin when available; otherwise falls back to
// std::tuple_element.
#ifdef TENSORSTORE_HAS_TYPE_PACK_ELEMENT
#if __clang__
template <size_t I, typename... Ts>
using TypePackElement = __type_pack_element<I, Ts...>;
#else
// Non-Clang compilers get an extra level of indirection here,
// presumably to satisfy restrictions on where the builtin may appear --
// TODO confirm against the supported compiler set.
template <std::size_t I, typename... Ts>
struct TypePackElementImpl {
  using type = __type_pack_element<I, Ts...>;
};
template <size_t I, typename... Ts>
using TypePackElement = typename TypePackElementImpl<I, Ts...>::type;
#endif
#else
template <size_t I, typename... Ts>
using TypePackElement = typename std::tuple_element<I, std::tuple<Ts...>>::type;
#endif
// Provides a reference to an object of an empty type T even when T is
// not default constructible.
//
// The union overlays T1 {char c;} with T2 : T {char c;}.  T is empty
// and standard layout, so T2's `c` (and the T base subobject) share
// T1's layout; constructing `t1` provides storage from which a T2*
// (and hence a T*) can be recovered without ever running a T
// constructor.
template <typename T>
class EmptyObject {
  static_assert(std::is_empty_v<T>, "T must be an empty type.");
  static_assert(std::is_standard_layout_v<T>, "T must be standard layout.");
  struct T1 {
    char c;
  };
  struct T2 : T {
    char c;
  };
  union Storage {
    constexpr Storage() : t1{} {}
    T1 t1;
    T2 t2;
  };
  Storage storage{};
 public:
  // Returns a reference to an object of type T.  The unused pointer
  // parameter keeps the signature interchangeable with
  // NonEmptyObjectGetter::get.
  T& get(T* = nullptr) {
    char* c = &storage.t2.c;
    T2* t2 = reinterpret_cast<T2*>(c);
    return *static_cast<T*>(t2);
  }
};
// Pass-through counterpart of EmptyObject: simply dereferences the
// caller-supplied pointer.
class NonEmptyObjectGetter {
 public:
  template <typename T>
  static T& get(T* pointer) {
    return *pointer;
  }
};
// helper.get(pointer): returns a T& fabricated from internal storage
// when T is empty (pointer may be null), otherwise *pointer.
template <typename T>
using PossiblyEmptyObjectGetter =
    std::conditional_t<std::is_empty_v<T>, EmptyObject<T>,
                       NonEmptyObjectGetter>;
// Default-constructible adapter around an empty, possibly
// non-default-constructible function object (e.g. a capture-less
// lambda); calls are forwarded to an instance materialized via
// EmptyObject.
template <typename T>
struct DefaultConstructibleFunction {
  constexpr DefaultConstructibleFunction() = default;
  constexpr DefaultConstructibleFunction(const T&) {}
  template <typename... Arg>
  constexpr std::invoke_result_t<T&, Arg...> operator()(Arg&&... arg) const {
    EmptyObject<T> obj;
    return obj.get()(static_cast<Arg&&>(arg)...);
  }
};
// T itself when it is already default constructible (or is non-empty);
// the adapter above otherwise.
template <typename T>
using DefaultConstructibleFunctionIfEmpty =
    std::conditional_t<(std::is_empty_v<T> &&
                        !std::is_default_constructible_v<T>),
                       DefaultConstructibleFunction<T>, T>;
// Backport of C++20 std::type_identity: wrapping a parameter in
// type_identity_t blocks template argument deduction through it.
template <typename T>
struct type_identity {
  using type = T;
};
template <typename T>
using type_identity_t = typename type_identity<T>::type;
// Perfect-forwarding identity function object (cf. C++20
// std::identity).
struct identity {
  template <typename T>
  constexpr T&& operator()(T&& t) const noexcept {
    return static_cast<T&&>(t);
  }
};
// Upcasts to `Base` using only the implicit derived-to-base conversion,
// so an accidental downcast or cross-cast is a compile error (which a
// static_cast would silently permit).  Overloads cover const and
// non-const pointers and references.
template <typename Base, typename Derived>
Base* BaseCast(Derived* derived) {
  Base* base = derived;  // implicit conversion only
  return base;
}
template <typename Base, typename Derived>
const Base* BaseCast(const Derived* derived) {
  const Base* base = derived;  // implicit conversion only
  return base;
}
template <typename Base, typename Derived>
Base& BaseCast(Derived& derived) {
  Base& base = derived;  // implicit conversion only
  return base;
}
template <typename Base, typename Derived>
const Base& BaseCast(const Derived& derived) {
  const Base& base = derived;  // implicit conversion only
  return base;
}
// Identity alias; presumably marks types deliberately excluded from the
// generated API documentation -- confirm against the doc tooling.
template <typename T>
using Undocumented = T;
}
}
#endif | #include "tensorstore/internal/type_traits.h"
#include <stddef.h>
#include <tuple>
#include <type_traits>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::CopyQualifiers;
using ::tensorstore::internal::FirstType;
using ::tensorstore::internal::GetLValue;
using ::tensorstore::internal::IsConstConvertible;
using ::tensorstore::internal::IsConstConvertibleOrVoid;
using ::tensorstore::internal::IsEqualityComparable;
using ::tensorstore::internal::PossiblyEmptyObjectGetter;
using ::tensorstore::internal::type_identity_t;
using ::tensorstore::internal::TypePackElement;
// IsEqualityComparable: true only when operator== exists and yields
// something convertible to bool.
namespace equality_comparable_tests {
struct X {};
static_assert(IsEqualityComparable<float, float>);
static_assert(IsEqualityComparable<float, int>);
static_assert(!IsEqualityComparable<X, X>);
}
// CopyQualifiers moves cv/ref qualifiers from the first argument onto
// the (unqualified form of the) second.
static_assert(std::is_same_v<CopyQualifiers<float, int>, int>);
static_assert(std::is_same_v<CopyQualifiers<const float, int>, const int>);
static_assert(std::is_same_v<CopyQualifiers<const float&, int>, const int&>);
static_assert(std::is_same_v<CopyQualifiers<const float, int&>, const int>);
static_assert(std::is_same_v<CopyQualifiers<float&&, const int&>, int&&>);
// GetLValue yields an lvalue reference even for a temporary.
static_assert(std::is_same_v<int&, decltype(GetLValue(3))>);
static_assert(std::is_same_v<int*, decltype(&GetLValue(3))>);
static_assert(std::is_same_v<FirstType<void>, void>);
static_assert(std::is_same_v<FirstType<int, void>, int>);
// IsConstConvertible<A, B>: B is A or const A.
static_assert(IsConstConvertible<int, int>);
static_assert(IsConstConvertible<void, void>);
static_assert(IsConstConvertible<void, const void>);
static_assert(IsConstConvertible<int, const int>);
static_assert(!IsConstConvertible<const int, int>);
static_assert(!IsConstConvertible<int, float>);
static_assert(!IsConstConvertible<int, const float>);
static_assert(!IsConstConvertible<int, const void>);
static_assert(!IsConstConvertible<const int, void>);
static_assert(!IsConstConvertible<int, void>);
// IsConstConvertibleOrVoid additionally accepts a (possibly const)
// void destination.
static_assert(IsConstConvertibleOrVoid<int, int>);
static_assert(IsConstConvertibleOrVoid<int, const int>);
static_assert(IsConstConvertibleOrVoid<int, void>);
static_assert(IsConstConvertibleOrVoid<const int, void>);
static_assert(IsConstConvertibleOrVoid<int, const void>);
static_assert(!IsConstConvertibleOrVoid<const int, int>);
static_assert(!IsConstConvertibleOrVoid<int, float>);
static_assert(!IsConstConvertibleOrVoid<int, const float>);
static_assert(std::is_same_v<TypePackElement<0, int, float>, int>);
static_assert(std::is_same_v<TypePackElement<1, int, float>, float>);
// Reference implementation used to cross-check the possibly
// builtin-backed TypePackElement.
template <size_t I, typename... Ts>
using NonBuiltinTypePackElement =
    typename std::tuple_element<I, std::tuple<Ts...>>::type;
static_assert(std::is_same_v<NonBuiltinTypePackElement<0, int, float>, int>);
static_assert(std::is_same_v<NonBuiltinTypePackElement<1, int, float>, float>);
// PossiblyEmptyObjectGetter must fabricate empty (even
// non-default-constructible) objects and pass through pointers for
// non-empty types.
TEST(PossiblyEmptyObjectGetterTest, Basic) {
  struct Empty {
    Empty() = delete;
    int foo() { return 3; }
  };
  {
    // Empty, non-default-constructible type: get(nullptr) still yields
    // a usable object.
    PossiblyEmptyObjectGetter<Empty> helper;
    Empty& obj = helper.get(nullptr);
    EXPECT_EQ(3, obj.foo());
  }
  {
    // Capture-less lambdas are empty types as well.
    auto lambda = [](int x, int y) { return x + y; };
    using Lambda = decltype(lambda);
    PossiblyEmptyObjectGetter<Lambda> helper;
    Lambda& obj = helper.get(nullptr);
    EXPECT_EQ(3, obj(1, 2));
  }
  {
    // Non-empty type: the getter must return exactly the given object.
    int value = 3;
    PossiblyEmptyObjectGetter<int> helper;
    auto& obj = helper.get(&value);
    EXPECT_EQ(&value, &obj);
  }
}
static_assert(std::is_same_v<int, type_identity_t<int>>);
namespace explict_conversion_tests {
using ::tensorstore::internal::IsOnlyExplicitlyConvertible;
using ::tensorstore::internal::IsPairExplicitlyConvertible;
using ::tensorstore::internal::IsPairImplicitlyConvertible;
using ::tensorstore::internal::IsPairOnlyExplicitlyConvertible;
// X: implicitly constructible from int, only explicitly from float*.
struct X {
  X(int) {}
  explicit X(float*) {}
};
static_assert(IsOnlyExplicitlyConvertible<float*, X>);
static_assert(std::is_convertible_v<int, X>);
static_assert(std::is_constructible_v<X, int>);
static_assert(!IsOnlyExplicitlyConvertible<int, X>);
// Y: implicitly constructible from int*, only explicitly from double*.
struct Y {
  Y(int*) {}
  explicit Y(double*) {}
};
static_assert(IsPairImplicitlyConvertible<int, int*, X, Y>);
static_assert(IsPairExplicitlyConvertible<int, int*, X, Y>);
static_assert(IsPairExplicitlyConvertible<int, double*, X, Y>);
static_assert(IsPairExplicitlyConvertible<float*, int*, X, Y>);
static_assert(IsPairExplicitlyConvertible<float*, double*, X, Y>);
// A pair conversion is implicit only when BOTH components are.
static_assert(!IsPairImplicitlyConvertible<int, double*, X, Y>);
static_assert(!IsPairImplicitlyConvertible<float*, int*, X, Y>);
static_assert(!IsPairImplicitlyConvertible<float*, double*, X, Y>);
static_assert(IsPairOnlyExplicitlyConvertible<int, double*, X, Y>);
static_assert(IsPairOnlyExplicitlyConvertible<float*, int*, X, Y>);
static_assert(IsPairOnlyExplicitlyConvertible<float*, double*, X, Y>);
}
// The wrapper around a capture-less lambda must be default
// constructible and callable both ways.
TEST(DefaultConstructibleFunctionIfEmptyTest, Basic) {
  auto fn = [](int x) { return x + 1; };
  using Wrapper =
      tensorstore::internal::DefaultConstructibleFunctionIfEmpty<decltype(fn)>;
  static_assert(std::is_default_constructible_v<Wrapper>);
  EXPECT_EQ(4, Wrapper()(3));
  EXPECT_EQ(4, Wrapper(fn)(3));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/type_traits.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/type_traits_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3f822d69-6c9f-46ae-ab6f-031ccb5e5af2 | cpp | tensorflow/tensorflow | tensor_coding | tensorflow/core/distributed_runtime/tensor_coding.cc | tensorflow/core/distributed_runtime/tensor_coding_test.cc | #include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
namespace tensorflow {
TensorResponse::Source::~Source() {}
// Resets all state, including the device/allocator bindings, so the
// object can be re-initialized via InitAlloc().
void TensorResponse::Clear() {
  on_host_ = false;
  device_ = nullptr;
  alloc_attrs_ = AllocatorAttributes();
  allocator_ = nullptr;
  already_used_ = false;
  ClearTensor();
}
// Drops only the parsed metadata and tensor; the device/allocator
// bindings are kept so the response can be reused for another parse.
void TensorResponse::ClearTensor() {
  meta_.Clear();
  tensor_ = Tensor();
}
// Binds this response to device `d` with allocation attributes `aa` and
// caches its allocator.  The response is "on host" when the attributes
// request host memory or the device reports itself as a CPU.
void TensorResponse::InitAlloc(DeviceBase* d, const AllocatorAttributes& aa) {
  Clear();
  device_ = d;
  alloc_attrs_ = aa;
  const DeviceAttributes& da = d->attributes();
  if (alloc_attrs_.on_host() || da.device_type() == "CPU") {
    on_host_ = true;
  }
  allocator_ = device_->GetAllocator(alloc_attrs_);
}
// Takes ownership of an already-parsed RecvTensorResponse (via Swap)
// and materializes its tensor through this response's allocator (host)
// or the device's proto conversion (non-host).
Status TensorResponse::InitFrom(RecvTensorResponse* response) {
  Status s;
  meta_.Swap(response);
  if (on_host_) {
    if (!tensor_.FromProto(allocator_, meta_.tensor())) {
      s = errors::InvalidArgument("Cannot parse tensor from response");
    }
  } else {
    s = device_->MakeTensorFromProto(meta_.tensor(), alloc_attrs_, &tensor_);
  }
  {
    // Swap with a default-constructed proto to actually release the
    // (possibly large) serialized payload's memory.
    TensorProto empty;
    meta_.mutable_tensor()->Swap(&empty);
  }
  meta_.clear_tensor();
  return s;
}
// Copies the metadata and pre-allocates an uninitialized tensor of the
// advertised dtype/shape; the payload is expected to be filled in
// separately.
void TensorResponse::InitPartial(const RecvTensorResponse& response,
                                 const AllocationAttributes& allocation_attr) {
  meta_ = response;
  TensorShape shape(meta_.tensor().tensor_shape());
  Tensor t(allocator_, meta_.tensor().dtype(), shape, allocation_attr);
  tensor_ = std::move(t);
}
// Parses a serialized RecvTensorResponse from `source`.  Non-host
// responses go through full proto parsing plus the device's proto
// conversion; host responses first try the copy-avoiding fast path and
// fall back to regular proto parsing if it fails.
Status TensorResponse::ParseFrom(Source* source) {
  if (!on_host_) {
    protobuf::io::CodedInputStream input(source->contents());
    if (!meta_.ParseFromCodedStream(&input) || !input.ConsumedEntireMessage()) {
      return errors::InvalidArgument("Cannot parse tensor from response");
    }
    Status s =
        device_->MakeTensorFromProto(meta_.tensor(), alloc_attrs_, &tensor_);
    {
      // Release the serialized payload's memory via swap-with-empty.
      TensorProto empty;
      meta_.mutable_tensor()->Swap(&empty);
    }
    meta_.clear_tensor();
    return s;
  }
  // Host path: drop any previously-parsed tensor before reuse.
  if (already_used_) {
    ClearTensor();
  }
  already_used_ = true;
  if (ParseFast(source)) return absl::OkStatus();
  // Fast path rejected the input (e.g. string tensors or unexpected
  // fields); retry with the general proto parser.
  meta_.Clear();
  if (ParseSlow(source)) return absl::OkStatus();
  return errors::InvalidArgument("Cannot parse tensor from response");
}
namespace {
// Minimal subset of protobuf wire-format constants needed by the
// hand-rolled parsers below.
enum WireType {
  WIRETYPE_VARINT = 0,
  WIRETYPE_LENGTH_DELIMITED = 2,
};
// A tag packs the field number in the upper bits...
inline int GetTagFieldNumber(uint32 tag) { return tag >> 3; }
// ...and the wire type in the low 3 bits.
inline WireType GetTagWireType(uint32 tag) {
  return static_cast<WireType>(tag & 0x7);
}
// Reads a varint-encoded size, rejecting values that do not fit in int.
bool ReadVarintSizeAsInt(protobuf::io::CodedInputStream* input, int* result) {
  protobuf_uint64 v;
  if (input->ReadVarint64(&v) && v <= static_cast<uint64>(INT_MAX)) {
    *result = static_cast<int>(v);
    return true;
  } else {
    return false;
  }
}
// Parses a length-delimited sub-message into `value`, maintaining the
// stream's recursion-depth and limit bookkeeping.
bool ReadNestedMessage(protobuf::io::CodedInputStream* input,
                       protobuf::Message* value) {
  int length;
  if (!ReadVarintSizeAsInt(input, &length)) return false;
  std::pair<protobuf::io::CodedInputStream::Limit, int> p =
      input->IncrementRecursionDepthAndPushLimit(length);
  if (p.second < 0 || !value->MergePartialFromCodedStream(input)) return false;
  return input->DecrementRecursionDepthAndPopLimit(p.first);
}
}
// Hand-rolled parser for the TensorProto submessage.  Only dtype,
// tensor_shape, version_number and tensor_content are accepted; any
// other field, wire-type mismatch, or non-memcpy dtype returns false so
// the caller can fall back to ParseSlow.  The payload is read directly
// into freshly allocated Tensor memory, avoiding a copy through the
// proto.
bool TensorResponse::ParseTensorSubmessage(
    protobuf::io::CodedInputStream* input, TensorProto* tensor_meta) {
  bool seen_tensor_content = false;
  while (true) {
    auto p = input->ReadTagWithCutoff(127);
    int tag = GetTagFieldNumber(p.first);
    WireType wt = GetTagWireType(p.first);
    if (!p.second) {
      // End of the submessage; tag 0 means a clean boundary.
      bool ok = (tag == 0);
      if (ok && !seen_tensor_content) {
        // No payload seen: produce an uninitialized tensor of the
        // advertised dtype/shape (valid for empty tensors).
        TensorShape shape(tensor_meta->tensor_shape());
        Tensor t(allocator_, tensor_meta->dtype(), shape);
        tensor_ = std::move(t);
      }
      return ok;
    }
    switch (tag) {
      case TensorProto::kDtypeFieldNumber: {
        uint32 v;
        if ((wt != WIRETYPE_VARINT) || !input->ReadVarint32(&v)) return false;
        // Metadata fields must precede the payload.
        if (seen_tensor_content) return false;
        tensor_meta->set_dtype(static_cast<DataType>(static_cast<int>(v)));
        // The fast path only handles memcpy-able dtypes (no strings).
        if (!DataTypeCanUseMemcpy(tensor_meta->dtype())) return false;
        break;
      }
      case TensorProto::kTensorShapeFieldNumber: {
        if ((wt != WIRETYPE_LENGTH_DELIMITED) ||
            !ReadNestedMessage(input, tensor_meta->mutable_tensor_shape()))
          return false;
        if (seen_tensor_content) return false;
        break;
      }
      case TensorProto::kVersionNumberFieldNumber: {
        uint32 v;
        if ((wt != WIRETYPE_VARINT) || !input->ReadVarint32(&v)) return false;
        if (seen_tensor_content) return false;
        tensor_meta->set_version_number(static_cast<int32>(v));
        break;
      }
      case TensorProto::kTensorContentFieldNumber: {
        // The payload may appear only once, and the shape must be known
        // before it.
        if (seen_tensor_content) return false;
        if (wt != WIRETYPE_LENGTH_DELIMITED ||
            !tensor_meta->has_tensor_shape()) {
          return false;
        }
        int num_bytes;
        if (!ReadVarintSizeAsInt(input, &num_bytes)) return false;
        seen_tensor_content = true;
        TensorShape shape(tensor_meta->tensor_shape());
        Tensor t(allocator_, tensor_meta->dtype(), shape);
        StringPiece buf = t.tensor_data();
        // The payload length must match the tensor's byte size exactly.
        if (static_cast<size_t>(num_bytes) != buf.size()) return false;
        // Read the bytes straight into the tensor's buffer.
        if (!input->ReadRaw(const_cast<char*>(buf.data()), num_bytes))
          return false;
        tensor_ = std::move(t);
        break;
      }
      default: {
        // Unknown field: bail out to the slow path.
        return false;
      }
    }
  }
}
// Hand-rolled parser for the top-level RecvTensorResponse wire format.
// Returns false on anything unexpected, in which case the caller falls
// back to ParseSlow.
bool TensorResponse::ParseFast(Source* source) {
  protobuf::io::CodedInputStream input(source->contents());
  while (true) {
    auto p = input.ReadTagWithCutoff(127);
    int tag = GetTagFieldNumber(p.first);
    WireType wt = GetTagWireType(p.first);
    if (!p.second) {
      // End of stream: success only on a clean tag boundary.
      return (tag == 0);
    }
    switch (tag) {
      case RecvTensorResponse::kTensorFieldNumber: {
        if (wt != WIRETYPE_LENGTH_DELIMITED) return false;
        int length;
        if (!ReadVarintSizeAsInt(&input, &length)) return false;
        // NOTE: this inner `p` shadows the loop-scope `p` above.
        std::pair<protobuf::io::CodedInputStream::Limit, int> p =
            input.IncrementRecursionDepthAndPushLimit(length);
        if (p.second < 0 ||
            !ParseTensorSubmessage(&input, meta_.mutable_tensor())) {
          return false;
        }
        if (!input.DecrementRecursionDepthAndPopLimit(p.first)) {
          return false;
        }
        break;
      }
      case RecvTensorResponse::kIsDeadFieldNumber: {
        uint32 v;
        if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) return false;
        meta_.set_is_dead(v != 0);
        break;
      }
      case RecvTensorResponse::kSendStartMicrosFieldNumber: {
        protobuf_uint64 v;
        if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) return false;
        meta_.set_send_start_micros(static_cast<int64_t>(v));
        break;
      }
      case RecvTensorResponse::kTransportOptionsFieldNumber: {
        if ((wt != WIRETYPE_LENGTH_DELIMITED) ||
            !ReadNestedMessage(&input, meta_.mutable_transport_options()))
          return false;
        break;
      }
      case RecvTensorResponse::kRequireAckFieldNumber: {
        uint32 v;
        if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) return false;
        meta_.set_require_ack(v != 0);
        break;
      }
      default: {
        return false;
      }
    }
  }
  // Unreachable: the loop above only exits via return statements.
  return false;
}
// Slow path: fully parse the proto, then convert the TensorProto into a
// Tensor via Tensor::FromProto (handles all dtypes, including strings).
bool TensorResponse::ParseSlow(Source* source) {
  if (!meta_.ParseFromZeroCopyStream(source->contents())) {
    return false;
  }
  Tensor parsed(meta_.tensor().dtype());
  if (!parsed.FromProto(allocator_, meta_.tensor())) {
    return false;
  }
  tensor_ = std::move(parsed);
  {
    // Swap-with-empty releases the serialized payload's memory.
    TensorProto empty;
    meta_.mutable_tensor()->Swap(&empty);
  }
  meta_.clear_tensor();
  return true;
}
} | #include "tensorflow/core/distributed_runtime/tensor_coding.h"
#include "tensorflow/core/framework/device_attributes.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
// Minimal device stub: reports device type "CPU" and hands out the host
// allocator -- just enough for TensorResponse::InitAlloc to take the
// on-host path.
class DummyDevice : public DeviceBase {
 public:
  explicit DummyDevice(Env* env) : DeviceBase(env) {
    attr_.set_device_type("CPU");
  }
  const DeviceAttributes& attributes() const override { return attr_; }
  Allocator* GetAllocator(AllocatorAttributes attr) override {
    return cpu_allocator();
  }
 private:
  DeviceAttributes attr_;
};
// TensorResponse::Source backed by an in-memory string.  Each call to
// contents() placement-constructs a fresh ArrayInputStream (destroying
// the previous one) in the preallocated `space_` buffer, so the same
// source can be parsed repeatedly.
class StringSource : public TensorResponse::Source {
 public:
  explicit StringSource(const string* s, int block_size)
      : s_(s), stream_(nullptr), block_size_(block_size) {}
  ~StringSource() override { DeleteStream(); }
  protobuf::io::ZeroCopyInputStream* contents() override {
    DeleteStream();
    stream_ = new (&space_)
        protobuf::io::ArrayInputStream(s_->data(), s_->size(), block_size_);
    return stream_;
  }
  // Destroys the current stream, if any, in place.
  void DeleteStream() {
    if (stream_) {
      stream_->~ArrayInputStream();
    }
  }
 private:
  const string* s_;  // not owned
  protobuf::io::ArrayInputStream* stream_;
  // Raw storage for the placement-new'd stream above.
  char space_[sizeof(protobuf::io::ArrayInputStream)];
  int block_size_;
};
class TensorResponseTest : public ::testing::Test {
 public:
  // Serializes `src` into a RecvTensorResponse, parses it back through
  // TensorResponse (twice, verifying the response object is reusable),
  // and checks that metadata and tensor contents survive the round
  // trip.
  void Validate(const Tensor& src, bool is_dead, bool use_tensor_content) {
    RecvTensorResponse proto;
    proto.set_is_dead(is_dead);
    proto.set_send_start_micros(123456);
    if (use_tensor_content) {
      src.AsProtoTensorContent(proto.mutable_tensor());
    } else {
      src.AsProtoField(proto.mutable_tensor());
    }
    string encoded;
    proto.AppendToString(&encoded);
    StringSource source(&encoded, 1024);
    TensorResponse response;
    DummyDevice cpu_device(Env::Default());
    response.InitAlloc(&cpu_device, AllocatorAttributes());
    for (int i = 0; i < 2; i++) {
      Status s = response.ParseFrom(&source);
      EXPECT_TRUE(s.ok());
      const RecvTensorResponse& meta = response.metadata();
      EXPECT_EQ(meta.is_dead(), is_dead);
      EXPECT_EQ(meta.send_start_micros(), 123456);
      const Tensor& result = response.tensor();
      EXPECT_EQ(result.dtype(), src.dtype());
      EXPECT_EQ(result.shape().DebugString(), src.shape().DebugString());
      EXPECT_EQ(result.DebugString(), src.DebugString());
    }
  }
  // Validates 1-D tensors of T at a range of sizes; the empty tensor is
  // additionally marked is_dead.
  template <typename T>
  void DoTest(DataType dt) {
    gtl::InlinedVector<T, 4> v;
    LOG(ERROR) << "DT: " << static_cast<int>(dt);
    for (int elems = 0; elems <= 10000; elems++) {
      // Test all small sizes, then sample every 1000th.
      if (elems < 100 || (elems % 1000 == 0)) {
        Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
        test::FillValues<T>(&a, v);
        Validate(a, (elems == 0), true);
      }
      v.push_back(static_cast<T>(elems));
    }
  }
  // String tensors cannot take the memcpy fast path, so they are
  // exercised separately.
  void DoTestForStrings(DataType dt) {
    absl::InlinedVector<tstring, 4UL> v;
    LOG(ERROR) << "DT: string";
    for (int elems = 0; elems <= 10000; elems++) {
      if (elems < 100 || (elems % 1000 == 0)) {
        Tensor a(dt, TensorShape({1, static_cast<int64_t>(v.size())}));
        test::FillValues<tstring>(&a, v);
        Validate(a, (elems == 0), true);
      }
      v.push_back(strings::StrCat("This is string ", elems));
    }
  }
};
// Exercises the round trip for every numeric/bool/quantized/half dtype.
TEST_F(TensorResponseTest, Simple) {
  DoTest<float>(DT_FLOAT);
  DoTest<double>(DT_DOUBLE);
  DoTest<int32>(DT_INT32);
  DoTest<uint16>(DT_UINT16);
  DoTest<uint8>(DT_UINT8);
  DoTest<int16>(DT_INT16);
  DoTest<int8>(DT_INT8);
  DoTest<complex64>(DT_COMPLEX64);
  DoTest<complex128>(DT_COMPLEX128);
  DoTest<int64_t>(DT_INT64);
  DoTest<bool>(DT_BOOL);
  DoTest<qint8>(DT_QINT8);
  DoTest<quint8>(DT_QUINT8);
  DoTest<qint16>(DT_QINT16);
  DoTest<quint16>(DT_QUINT16);
  DoTest<qint32>(DT_QINT32);
  DoTest<bfloat16>(DT_BFLOAT16);
  DoTest<Eigen::half>(DT_HALF);
}

// String tensors take the separate non-static_cast fill path.
TEST_F(TensorResponseTest, StringTensor) { DoTestForStrings(DT_STRING); }
// Builds a serialized RecvTensorResponse holding a 1 x num_elems tensor for
// the benchmarks below.
// NOTE(review): despite the "Float" in the name, this builds a DT_INT8
// tensor — the name is kept because both benchmarks below call it; consider
// renaming in a follow-up touching all call sites.
string MakeFloatTensorTestCase(int num_elems) {
  std::vector<int8> v(num_elems);
  for (int i = 0; i < num_elems; i++) {
    v[i] = i % 10;  // Arbitrary repeating payload.
  }
  Tensor src(DT_INT8, TensorShape({1, static_cast<int64_t>(v.size())}));
  test::FillValues<int8>(&src, v);

  RecvTensorResponse proto;
  proto.set_is_dead(false);
  proto.set_send_start_micros(123456);
  src.AsProtoTensorContent(proto.mutable_tensor());
  string encoded;
  proto.AppendToString(&encoded);
  return encoded;
}
// Benchmarks parsing through TensorResponse (the zero-copy fast path).
// block_size -1 means the ArrayInputStream serves the whole buffer at once.
static void BM_TensorResponse(::testing::benchmark::State& state) {
  const int arg = state.range(0);

  string encoded = MakeFloatTensorTestCase(arg);
  DummyDevice cpu_device(Env::Default());
  size_t bytes = 0;
  for (auto i : state) {
    TensorResponse response;
    response.InitAlloc(&cpu_device, AllocatorAttributes());
    StringSource source(&encoded, -1);
    Status s = response.ParseFrom(&source);
    bytes = response.tensor().TotalBytes();
  }
  state.SetLabel(strings::StrCat("Bytes: ", bytes));
}
BENCHMARK(BM_TensorResponse)->Arg(0)->Arg(1000)->Arg(100000);
// Baseline benchmark: parse via plain protobuf + Tensor::FromProto, for
// comparison against the TensorResponse path above.
static void BM_TensorViaTensorProto(::testing::benchmark::State& state) {
  const int arg = state.range(0);

  std::string encoded = MakeFloatTensorTestCase(arg);
  size_t bytes = 0;
  for (auto s : state) {
    RecvTensorResponse r;
    r.ParseFromString(encoded);
    Tensor t;
    CHECK(t.FromProto(r.tensor()));
    bytes = t.TotalBytes();
  }
  state.SetLabel(strings::StrCat("Bytes: ", bytes));
}
BENCHMARK(BM_TensorViaTensorProto)->Arg(0)->Arg(1000)->Arg(100000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/tensor_coding.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/tensor_coding_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7ec514c-d713-4b48-a241-615bd39daf6d | cpp | google/quiche | continuation_payload_decoder | quiche/http2/decoder/payload_decoders/continuation_payload_decoder.cc | quiche/http2/decoder/payload_decoders/continuation_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/continuation_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
// Begins decoding a CONTINUATION frame payload: resets the per-frame
// remainders, notifies the listener of the frame start, then hands the
// available bytes to ResumeDecodingPayload.
DecodeStatus ContinuationPayloadDecoder::StartDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  const Http2FrameHeader& frame_header = state->frame_header();
  const uint32_t total_length = frame_header.payload_length;
  QUICHE_DVLOG(2) << "ContinuationPayloadDecoder::StartDecodingPayload: "
                  << frame_header;
  QUICHE_DCHECK_EQ(Http2FrameType::CONTINUATION, frame_header.type);
  QUICHE_DCHECK_LE(db->Remaining(), total_length);
  // END_HEADERS is the only flag defined for CONTINUATION frames.
  QUICHE_DCHECK_EQ(0, frame_header.flags & ~(Http2FrameFlag::END_HEADERS));

  state->InitializeRemainders();
  state->listener()->OnContinuationStart(frame_header);
  return ResumeDecodingPayload(state, db);
}
// Forwards whatever payload bytes are currently buffered to the listener as
// an HPACK fragment. Returns kDecodeDone once the whole payload has been
// consumed, otherwise kDecodeInProgress to be resumed with more data.
DecodeStatus ContinuationPayloadDecoder::ResumeDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2) << "ContinuationPayloadDecoder::ResumeDecodingPayload"
                  << "  remaining_payload=" << state->remaining_payload()
                  << "  db->Remaining=" << db->Remaining();
  QUICHE_DCHECK_EQ(Http2FrameType::CONTINUATION, state->frame_header().type);
  QUICHE_DCHECK_LE(state->remaining_payload(),
                   state->frame_header().payload_length);
  QUICHE_DCHECK_LE(db->Remaining(), state->remaining_payload());

  size_t avail = db->Remaining();
  QUICHE_DCHECK_LE(avail, state->remaining_payload());
  if (avail > 0) {
    // Deliver the fragment before advancing so the pointer passed to the
    // listener stays valid.
    state->listener()->OnHpackFragment(db->cursor(), avail);
    db->AdvanceCursor(avail);
    state->ConsumePayload(avail);
  }
  if (state->remaining_payload() == 0) {
    state->listener()->OnContinuationEnd();
    return DecodeStatus::kDecodeDone;
  }
  return DecodeStatus::kDecodeInProgress;
}
} | #include "quiche/http2/decoder/payload_decoders/continuation_payload_decoder.h"
#include <stddef.h>
#include <string>
#include <type_traits>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
// Test-harness traits consumed by AbstractPayloadDecoderTest: identifies the
// frame type under test and which flags alter payload decoding (none for
// CONTINUATION).
class ContinuationPayloadDecoderPeer {
 public:
  static constexpr Http2FrameType FrameType() {
    return Http2FrameType::CONTINUATION;
  }

  static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
// Records decoder callbacks into FrameParts so the test can compare the
// observed event sequence against expectations.
struct Listener : public FramePartsCollector {
  void OnContinuationStart(const Http2FrameHeader& header) override {
    QUICHE_VLOG(1) << "OnContinuationStart: " << header;
    StartFrame(header)->OnContinuationStart(header);
  }

  void OnHpackFragment(const char* data, size_t len) override {
    QUICHE_VLOG(1) << "OnHpackFragment: len=" << len;
    CurrentFrame()->OnHpackFragment(data, len);
  }

  void OnContinuationEnd() override {
    QUICHE_VLOG(1) << "OnContinuationEnd";
    EndFrame()->OnContinuationEnd();
  }
};
// Parameterized over the payload length (the test-suite parameter below);
// each instantiation decodes a random payload of that length.
class ContinuationPayloadDecoderTest
    : public AbstractPayloadDecoderTest<
          ContinuationPayloadDecoder, ContinuationPayloadDecoderPeer, Listener>,
      public ::testing::WithParamInterface<uint32_t> {
 protected:
  ContinuationPayloadDecoderTest() : length_(GetParam()) {
    QUICHE_VLOG(1) << "################  length_=" << length_
                   << "  ################";
  }

  const uint32_t length_;
};

// Small lengths suffice: 0 covers the empty payload, the rest cover
// fragment-boundary handling in the randomized decode driver.
INSTANTIATE_TEST_SUITE_P(VariousLengths, ContinuationPayloadDecoderTest,
                         ::testing::Values(0, 1, 2, 3, 4, 5, 6));
// Decodes a CONTINUATION frame with a random HPACK payload of the
// parameterized length, under random flags and stream id, and checks the
// collected FrameParts match.
TEST_P(ContinuationPayloadDecoderTest, ValidLength) {
  std::string hpack_payload = Random().RandString(length_);
  Http2FrameHeader frame_header(length_, Http2FrameType::CONTINUATION,
                                RandFlags(), RandStreamId());
  set_frame_header(frame_header);
  FrameParts expected(frame_header, hpack_payload);
  EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(hpack_payload, expected));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/continuation_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/continuation_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d33b30d0-a91b-487d-8ffe-621250906013 | cpp | google/arolla | operator_factory | arolla/qexpr/operator_factory.h | arolla/qexpr/operator_factory_test.cc | #ifndef AROLLA_QEXPR_OPERATOR_FACTORY_H_
#define AROLLA_QEXPR_OPERATOR_FACTORY_H_
#include <cstddef>
#include <memory>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qexpr/result_type_traits.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/demangle.h"
#include "arolla/util/meta.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
template <typename FUNC>
absl::StatusOr<OperatorPtr> QExprOperatorFromFunction(FUNC func);
template <typename FUNC>
absl::StatusOr<OperatorPtr> QExprOperatorBuildFromFunction(
FUNC func, const QExprOperatorSignature* signature);
template <typename FUNC, typename... ARG_Ts>
absl::StatusOr<OperatorPtr> QExprOperatorFromFunctor();
template <typename FUNC>
std::unique_ptr<arolla::OperatorFamily> MakeVariadicInputOperatorFamily(
FUNC eval_func);
namespace operator_factory_impl {
template <typename T>
using Slot = FrameLayout::Slot<T>;
// Adapter that normalizes a user functor so it always accepts an
// EvaluationContext* as its first argument.
//
// Primary template: FUNC does not take a context, so operator() accepts and
// ignores one before delegating. Private inheritance gives the empty-base
// optimization for stateless functors.
template <typename FUNC, typename... OTHER_ARGs>
struct ContextFunc : private FUNC {
  explicit ContextFunc(FUNC func) : FUNC(std::move(func)) {}

  auto operator()(EvaluationContext*, OTHER_ARGs... args) const {
    return static_cast<const FUNC&>(*this)(args...);
  }
};

// Specialization: FUNC already takes EvaluationContext* first, so its own
// operator() is used unchanged (note the public inheritance).
template <typename FUNC, typename... OTHER_ARGs>
struct ContextFunc<FUNC, EvaluationContext*, OTHER_ARGs...> : FUNC {
  explicit ContextFunc(FUNC func) : FUNC(std::move(func)) {}
};

// Wraps `func` into a ContextFunc. Non-class callables (function pointers,
// captureless lambdas decayed to pointers) are first wrapped in a lambda
// because ContextFunc inherits from FUNC, which requires a class type.
template <typename FUNC, typename... FUNC_ARGs>
auto WrapIntoContextFunc(FUNC func, meta::type_list<FUNC_ARGs...>) {
  if constexpr (std::is_class_v<FUNC>) {
    return ContextFunc<FUNC, FUNC_ARGs...>(std::move(func));
  } else {
    auto fn = [func = std::forward<FUNC>(func)](FUNC_ARGs... args) {
      return func(args...);
    };
    return ContextFunc<decltype(fn), FUNC_ARGs...>(std::move(fn));
  }
}
// Compile-time walker that checks a runtime list of QTypePtrs matches the
// C++ types Ts... (by type_info), producing a descriptive error otherwise.
// NOTE: the exact wording of the error strings below is relied upon by
// tests (see OperatorFactory.Errors); do not change it.
template <typename... Ts>
struct QTypesVerifier;

template <typename T, typename... Ts>
struct QTypesVerifier<T, Ts...> {
  static absl::Status Verify(absl::Span<const QTypePtr> qtypes) {
    if (qtypes.size() != sizeof...(Ts) + 1) {
      return absl::Status(
          absl::StatusCode::kInvalidArgument,
          absl::StrFormat(
              "unexpected number of types: expected %d types %s, got %d",
              qtypes.size(), FormatTypeVector(qtypes), sizeof...(Ts) + 1));
    }
    DCHECK_GT(qtypes.size(), size_t{0});
    if (qtypes[0]->type_info() != typeid(T)) {
      return absl::Status(
          absl::StatusCode::kInvalidArgument,
          absl::StrFormat(
              "unexpected type: expected %s with C++ type %s, got %s",
              qtypes[0]->name(), TypeName(qtypes[0]->type_info()),
              TypeName<T>()));
    }
    // Recurse on the remaining types.
    return QTypesVerifier<Ts...>::Verify(qtypes.subspan(1));
  }
};

// Base case: all C++ types consumed; the QType list must also be exhausted.
template <>
struct QTypesVerifier<> {
  static absl::Status Verify(absl::Span<const QTypePtr> qtypes) {
    if (!qtypes.empty()) {
      return absl::Status(
          absl::StatusCode::kInvalidArgument,
          absl::StrFormat(
              "unexpected number of types: expected %d types %s, got 0",
              qtypes.size(), FormatTypeVector(qtypes)));
    }
    return absl::OkStatus();
  }
};

// Convenience: unpack a meta::type_list into the variadic form.
template <typename... Ts>
struct QTypesVerifier<meta::type_list<Ts...>> {
  static absl::Status Verify(absl::Span<const QTypePtr> qtypes) {
    return QTypesVerifier<Ts...>::Verify(qtypes);
  }
};
// Converts a span of TypedSlots into the statically-typed tuple `Slots`.
// "Unsafe": the caller must have already verified the QTypes; each
// UnsafeToSlot call is unchecked.
template <typename Slots, std::size_t... Is>
Slots UnsafeToSlotsTupleImpl(absl::Span<const TypedSlot> slots,
                             std::index_sequence<Is...>) {
  DCHECK_EQ(slots.size(), sizeof...(Is));
  return {
      slots[Is]
          .UnsafeToSlot<
              typename std::tuple_element<Is, Slots>::type::value_type>()...};
}

// Public entry point: generates the index sequence from tuple_size<Slots>.
template <typename Slots>
Slots UnsafeToSlotsTuple(absl::Span<const TypedSlot> slots) {
  return UnsafeToSlotsTupleImpl<Slots>(
      slots, std::make_index_sequence<std::tuple_size<Slots>::value>{});
}
// Builds a QExprOperatorSignature from the C++ result/argument types of a
// context-taking functor.
template <typename FUNC, typename RES, typename... ARGs>
const QExprOperatorSignature* DeduceOperatorSignatureImpl(
    meta::type_list<RES>, meta::type_list<ARGs...>) {
  return QExprOperatorSignature::Get(
      {GetQType<std::decay_t<ARGs>>()...},
      qexpr_impl::ResultTypeTraits<RES>::GetOutputType());
}

// Deduces the signature of FUNC, skipping its first (EvaluationContext*)
// argument via meta::tail_t.
template <typename FUNC>
const QExprOperatorSignature* DeduceOperatorSignature() {
  return DeduceOperatorSignatureImpl<FUNC>(
      meta::type_list<typename meta::function_traits<FUNC>::return_type>(),
      meta::tail_t<typename meta::function_traits<FUNC>::arg_types>());
}
// Checks that an explicitly supplied signature is compatible with FUNC's
// C++ argument and result types. Tuple outputs are verified field-by-field.
template <typename FUNC>
absl::Status VerifyOperatorSignature(const QExprOperatorSignature* signature) {
  // Inputs: compare against FUNC's args after the EvaluationContext*.
  RETURN_IF_ERROR(QTypesVerifier<meta::tail_t<typename meta::function_traits<
                      FUNC>::arg_types>>::Verify(signature->input_types()))
      << "in input types of " << signature << ".";
  std::vector<QTypePtr> output_types = {signature->output_type()};
  if (IsTupleQType(signature->output_type())) {
    output_types = SlotsToTypes(signature->output_type()->type_fields());
  }
  RETURN_IF_ERROR(
      QTypesVerifier<typename qexpr_impl::ResultTypeTraits<
          typename meta::function_traits<FUNC>::return_type>::Types>::
          Verify(output_types))
      << "in output types of " << signature << ".";
  return absl::OkStatus();
}
// QExprOperator implementation wrapping a context-taking functor CTX_FUNC
// with result RES and (context-less) argument types ARGs...
template <typename CTX_FUNC, typename RES, typename... ARGs>
class OpImpl : public QExprOperator {
 public:
  OpImpl(const QExprOperatorSignature* signature, CTX_FUNC func)
      : QExprOperator(signature), func_(std::move(func)) {}

 private:
  // Binds the typed slots and captures everything needed for evaluation in
  // a single BoundOpData object (functor + input/output slots).
  absl::StatusOr<std::unique_ptr<BoundOperator>> DoBind(
      absl::Span<const TypedSlot> input_slots,
      TypedSlot output_slot) const override {
    auto inputs = UnsafeToSlotsTuple<InputSlots>(input_slots);
    auto outputs =
        qexpr_impl::ResultTypeTraits<RES>::UnsafeToSlots(output_slot);
    return MakeBoundOperator(
        [data = BoundOpData(func_, std::move(inputs), std::move(outputs))](
            EvaluationContext* ctx, FramePtr frame) {
          RunImpl(data, ctx, frame, std::index_sequence_for<ARGs...>{});
        });
  }

 private:
  using InputSlots = std::tuple<Slot<absl::decay_t<ARGs>>...>;
  using OutputSlots = typename qexpr_impl::ResultTypeTraits<RES>::Slots;

  // Private inheritance from CTX_FUNC enables the empty-base optimization
  // for stateless functors.
  struct BoundOpData : private CTX_FUNC {
    BoundOpData(CTX_FUNC func, InputSlots input_slots, OutputSlots output_slots)
        : CTX_FUNC(std::move(func)),
          input_slots(input_slots),
          output_slots(output_slots) {}

    const CTX_FUNC& func() const { return static_cast<const CTX_FUNC&>(*this); }

    const InputSlots input_slots;
    const OutputSlots output_slots;
  };

  // Reads each input from the frame, invokes the functor, and writes the
  // (possibly StatusOr / tuple) result back via ResultTypeTraits.
  template <std::size_t... Is>
  static void RunImpl(const BoundOpData& data, EvaluationContext* ctx,
                      FramePtr frame, std::index_sequence<Is...>) {
    qexpr_impl::ResultTypeTraits<RES>::SaveAndReturn(
        ctx, frame, data.output_slots,
        data.func()(ctx, frame.Get(std::get<Is>(data.input_slots))...));
  }

  const CTX_FUNC func_;
};
// Instantiates OpImpl for the given context functor and (context-less)
// argument type list.
template <typename CTX_FUNC, typename... ARGs>
absl::StatusOr<OperatorPtr> QExprOperatorFromFunctionImpl(
    CTX_FUNC func, const QExprOperatorSignature* signature,
    meta::type_list<ARGs...>) {
  return OperatorPtr(
      new operator_factory_impl::OpImpl<
          CTX_FUNC, typename meta::function_traits<CTX_FUNC>::return_type,
          ARGs...>(signature, std::move(func)));
}
// Traits mapping a variadic-operator functor's argument list to its input
// container/slot types. This primary template is selected only for
// unsupported argument lists and fails instantiation with a readable error.
// std::nullptr_t is used as a placeholder; note that <cstddef> only
// guarantees the std-qualified name (the unqualified ::nullptr_t is not
// portable).
template <typename T>
struct VariadicInputTypeTraits {
  using Container = std::nullptr_t;
  using Slot = std::nullptr_t;
  static_assert(sizeof(T) == 0,
                "unsupported input for VariadicInputOperatorFamily");
};
// Specialization for functors taking absl::Span<const T* const>: inputs are
// passed by pointer, avoiding copies of the frame values.
template <typename T>
struct VariadicInputTypeTraits<meta::type_list<absl::Span<const T* const>>> {
  using Container = absl::InlinedVector<const T*, 4>;
  using Slot = FrameLayout::Slot<T>;

  static QTypePtr GetInputType() ABSL_ATTRIBUTE_ALWAYS_INLINE {
    return GetQType<T>();
  }

  // Collects pointers to the slot values in `frame`.
  static Container GetInputs(arolla::FramePtr frame,
                             absl::Span<const Slot> input_slots) {
    Container inputs;
    inputs.reserve(input_slots.size());
    for (const auto& input_slot : input_slots) {
      inputs.push_back(&frame.Get(input_slot));
    }
    return inputs;
  }

  static Slot UnsafeToSlot(TypedSlot output_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
    return output_slot.UnsafeToSlot<T>();
  }
};
// Specialization for functors taking std::vector<T>: inputs are copied out
// of the frame by value.
template <typename T>
struct VariadicInputTypeTraits<meta::type_list<std::vector<T>>> {
  using Container = std::vector<T>;
  using Slot = FrameLayout::Slot<T>;

  static QTypePtr GetInputType() ABSL_ATTRIBUTE_ALWAYS_INLINE {
    return GetQType<T>();
  }

  // Copies the slot values out of `frame`.
  static Container GetInputs(arolla::FramePtr frame,
                             absl::Span<const Slot> input_slots) {
    Container inputs;
    inputs.reserve(input_slots.size());
    for (const auto& input_slot : input_slots) {
      inputs.push_back(frame.Get(input_slot));
    }
    return inputs;
  }

  static Slot UnsafeToSlot(TypedSlot output_slot) ABSL_ATTRIBUTE_ALWAYS_INLINE {
    return output_slot.UnsafeToSlot<T>();
  }
};
// Bundles the input traits (from FUNC's single container argument) and the
// result traits (from FUNC's return type) of a variadic-input functor.
template <typename FUNC>
struct VariadicInputFuncTraits {
  using input =
      VariadicInputTypeTraits<typename meta::function_traits<FUNC>::arg_types>;
  using result = qexpr_impl::ResultTypeTraits<
      typename meta::function_traits<FUNC>::return_type>;
};
// QExprOperator that forwards all of its (homogeneously-typed) inputs to
// `eval_func` as a single container argument.
template <typename FUNC>
class VariadicInputOperator : public arolla::QExprOperator {
  // `typename` is required on these dependent aliases in C++17 (C++20 only
  // makes it optional here).
  using input_traits = typename VariadicInputFuncTraits<FUNC>::input;
  using result_traits = typename VariadicInputFuncTraits<FUNC>::result;

 public:
  explicit VariadicInputOperator(FUNC eval_func,
                                 absl::Span<const arolla::QTypePtr> input_types)
      : arolla::QExprOperator(arolla::QExprOperatorSignature::Get(
            input_types, result_traits::GetOutputType())),
        eval_func_(std::move(eval_func)) {}

 private:
  // Converts the typed slots once at bind time; evaluation then gathers the
  // inputs into the traits' container and invokes eval_func.
  absl::StatusOr<std::unique_ptr<arolla::BoundOperator>> DoBind(
      absl::Span<const arolla::TypedSlot> typed_input_slots,
      arolla::TypedSlot typed_output_slot) const final {
    std::vector<typename input_traits::Slot> input_slots;
    input_slots.reserve(typed_input_slots.size());
    for (const auto& input_slot : typed_input_slots) {
      input_slots.push_back(input_traits::UnsafeToSlot(input_slot));
    }
    return arolla::MakeBoundOperator(
        [input_slots = std::move(input_slots),
         output_slot = result_traits::UnsafeToSlots(typed_output_slot),
         eval_func = eval_func_](arolla::EvaluationContext* ctx,
                                 arolla::FramePtr frame) {
          auto inputs = input_traits::GetInputs(frame, input_slots);
          result_traits::SaveAndReturn(ctx, frame, output_slot,
                                       eval_func(std::move(inputs)));
        });
  }

  FUNC eval_func_;
};
// OperatorFamily that accepts any arity, provided every input has the one
// QType that FUNC's container argument dictates.
template <typename FUNC>
class VariadicInputOperatorFamily : public arolla::OperatorFamily {
  // `typename` is required on this dependent alias in C++17 (C++20 only
  // makes it optional here).
  using input_traits = typename VariadicInputFuncTraits<FUNC>::input;

 public:
  explicit VariadicInputOperatorFamily(FUNC eval_func)
      : eval_func_(std::move(eval_func)) {}

 private:
  absl::StatusOr<arolla::OperatorPtr> DoGetOperator(
      absl::Span<const arolla::QTypePtr> input_types,
      arolla::QTypePtr output_type) const final {
    // Reject heterogeneous inputs up front with a precise error.
    for (const auto& input_type : input_types) {
      if (input_type != input_traits::GetInputType()) {
        return absl::InvalidArgumentError(absl::StrFormat(
            "expected only %s, got %s", input_traits::GetInputType()->name(),
            input_type->name()));
      }
    }
    return arolla::EnsureOutputQTypeMatches(
        std::make_shared<VariadicInputOperator<FUNC>>(eval_func_, input_types),
        input_types, output_type);
  }

  FUNC eval_func_;
};
}
// Creates a QExprOperator from `func`, deducing the operator signature from
// the functor's C++ argument and result types. An optional leading
// EvaluationContext* argument is supported (and supplied automatically when
// absent).
template <typename FUNC>
absl::StatusOr<OperatorPtr> QExprOperatorFromFunction(FUNC func) {
  auto context_func = operator_factory_impl::WrapIntoContextFunc(
      std::move(func), typename meta::function_traits<FUNC>::arg_types());
  using CtxFunc = decltype(context_func);
  const QExprOperatorSignature* signature =
      operator_factory_impl::DeduceOperatorSignature<CtxFunc>();
  return QExprOperatorFromFunctionImpl(
      std::move(context_func), signature,
      meta::tail_t<typename meta::function_traits<CtxFunc>::arg_types>());
}
// Overload taking an explicit signature: verifies that the signature's
// input/output QTypes are consistent with the functor's C++ types before
// constructing the operator.
template <typename FUNC>
absl::StatusOr<OperatorPtr> QExprOperatorFromFunction(
    FUNC func, const QExprOperatorSignature* signature) {
  auto context_func = operator_factory_impl::WrapIntoContextFunc(
      std::move(func), typename meta::function_traits<FUNC>::arg_types());
  using CtxFunc = decltype(context_func);
  RETURN_IF_ERROR(
      operator_factory_impl::VerifyOperatorSignature<CtxFunc>(signature));
  return QExprOperatorFromFunctionImpl(
      std::move(context_func), signature,
      meta::tail_t<typename meta::function_traits<CtxFunc>::arg_types>());
}
// Creates a QExprOperator from a default-constructible functor type FUNC
// with explicit argument types ARG_Ts. Works for both plain functors and
// functors whose operator() takes a leading EvaluationContext*.
template <typename FUNC, typename... ARG_Ts>
absl::StatusOr<OperatorPtr> QExprOperatorFromFunctor() {
  return QExprOperatorFromFunction(
      [](EvaluationContext* ctx, const ARG_Ts&... args) {
        if constexpr (std::is_invocable_v<FUNC, ARG_Ts...>) {
          ((void)(ctx));  // Silence unused-parameter warning.
          return FUNC()(args...);
        } else {
          return FUNC()(ctx, args...);
        }
      });
}
// Creates an OperatorFamily whose operators forward all inputs to
// `eval_func` as one container argument (std::vector<T> by value, or
// absl::Span<const T* const> by pointer).
template <typename FUNC>
std::unique_ptr<arolla::OperatorFamily> MakeVariadicInputOperatorFamily(
    FUNC eval_func) {
  return std::make_unique<
      operator_factory_impl::VariadicInputOperatorFamily<FUNC>>(
      std::move(eval_func));
}
}
#endif | #include "arolla/qexpr/operator_factory.h"
#include <cstdint>
#include <tuple>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qexpr/qexpr_operator_signature.h"
#include "arolla/qexpr/testing/operator_fixture.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/simple_qtype.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
using ::absl_testing::IsOk;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::Field;
using ::testing::MatchesRegex;
// Value type that counts how many times it has been copied: each copy
// (construction or assignment) stores the source's count plus one, while
// moves keep the count unchanged. Used to assert how many copies the
// operator machinery performs.
struct CopyCounter {
  CopyCounter() = default;
  CopyCounter(const CopyCounter& other) : count(other.count + 1) {}
  CopyCounter& operator=(const CopyCounter& other) {
    count = other.count + 1;
    return *this;
  }
  // Moves are free: the count travels along untouched.
  CopyCounter(CopyCounter&& other) = default;
  CopyCounter& operator=(CopyCounter&& other) = default;

  int count = 0;
};
// Register CopyCounter with Arolla's fingerprinting and QType machinery so
// it can flow through operator slots in the tests below.
AROLLA_DECLARE_FINGERPRINT_HASHER_TRAITS(CopyCounter);
void FingerprintHasherTraits<CopyCounter>::operator()(
    FingerprintHasher* hasher, const CopyCounter& value) const {
  hasher->Combine(value.count);
}
AROLLA_DECLARE_SIMPLE_QTYPE(COPY_COUNTER, CopyCounter);
AROLLA_DEFINE_SIMPLE_QTYPE(COPY_COUNTER, CopyCounter);
namespace {
// A lambda becomes an operator; the signature is deduced from the C++ types.
TEST(OperatorFactory, SimpleOperator) {
  ASSERT_OK_AND_ASSIGN(
      auto op,
      QExprOperatorFromFunction([](int64_t a, int64_t b) { return a * b; }));
  ASSERT_THAT(op->signature(), Eq(QExprOperatorSignature::Get(
                                   {GetQType<int64_t>(), GetQType<int64_t>()},
                                   GetQType<int64_t>())));
  EXPECT_THAT(InvokeOperator<int64_t>(*op, int64_t{3}, int64_t{19}),
              IsOkAndHolds(Eq(57)));
}

int64_t Multiply(int64_t a, int64_t b) { return a * b; }

// A plain function pointer (non-class callable) works too.
TEST(OperatorFactory, NotAFunctor) {
  ASSERT_OK_AND_ASSIGN(auto op, QExprOperatorFromFunction(Multiply));
  ASSERT_THAT(op->signature(), Eq(QExprOperatorSignature::Get(
                                   {GetQType<int64_t>(), GetQType<int64_t>()},
                                   GetQType<int64_t>())));
  EXPECT_THAT(InvokeOperator<int64_t>(*op, int64_t{3}, int64_t{19}),
              IsOkAndHolds(Eq(57)));
}
// std::tuple results map to a tuple QType output.
TEST(OperatorFactory, ReturnsTuple) {
  using Pair = std::tuple<int64_t, int64_t>;
  ASSERT_OK_AND_ASSIGN(auto op,
                       QExprOperatorFromFunction([](int64_t a, int64_t b) {
                         return std::make_tuple(b, a % b);
                       }));
  ASSERT_THAT(op->signature(),
              Eq(QExprOperatorSignature::Get(
                  {GetQType<int64_t>(), GetQType<int64_t>()},
                  MakeTupleQType({GetQType<int64_t>(), GetQType<int64_t>()}))));
  ASSERT_OK_AND_ASSIGN(auto fixture,
                       (OperatorFixture<Pair, Pair>::Create(*op)));
  EXPECT_THAT(fixture.Call(57, 20), IsOkAndHolds(Eq(std::make_tuple(20, 17))));
}

// absl::StatusOr results propagate the error status through evaluation.
TEST(OperatorFactory, ReturnsStatusOr) {
  ASSERT_OK_AND_ASSIGN(
      auto op, QExprOperatorFromFunction([]() -> absl::StatusOr<int64_t> {
        return absl::Status(absl::StatusCode::kFailedPrecondition, "failed");
      }));
  ASSERT_THAT(op->signature(),
              Eq(QExprOperatorSignature::Get({}, GetQType<int64_t>())));
  EXPECT_THAT(InvokeOperator<int64_t>(*op),
              StatusIs(absl::StatusCode::kFailedPrecondition));
}
// StatusOr<tuple> combines both: an explicit signature is supplied and the
// error path surfaces through OperatorFixture::Call.
TEST(OperatorFactory, ReturnsStatusOrTuple) {
  using Pair = std::tuple<int64_t, int64_t>;
  auto qtype = QExprOperatorSignature::Get(
      {GetQType<int64_t>(), GetQType<int64_t>()},
      MakeTupleQType({GetQType<int64_t>(), GetQType<int64_t>()}));
  ASSERT_OK_AND_ASSIGN(
      auto op, QExprOperatorFromFunction(
                   [](int64_t a, int64_t b) -> absl::StatusOr<Pair> {
                     if (b == 0) {
                       return absl::Status(absl::StatusCode::kInvalidArgument,
                                           "b is 0");
                     }
                     return std::make_tuple(b, a % b);
                   },
                   qtype));
  EXPECT_THAT(op->signature(),
              Eq(QExprOperatorSignature::Get(
                  {GetQType<int64_t>(), GetQType<int64_t>()},
                  MakeTupleQType({GetQType<int64_t>(), GetQType<int64_t>()}))));
  ASSERT_OK_AND_ASSIGN(auto fixture,
                       (OperatorFixture<Pair, Pair>::Create(*op)));
  EXPECT_THAT(fixture.Call(57, 20), IsOkAndHolds(Eq(std::tuple(20, 17))));
  EXPECT_THAT(fixture.Call(57, 0),
              StatusIs(absl::StatusCode::kInvalidArgument));
}
// Pins down how many copies the factory machinery makes of an argument for
// each functor shape: by-ref parameters cost 1 copy (into the output slot);
// a by-value parameter without an explicit EvaluationContext* costs one
// extra copy in the wrapping lambda.
TEST(OperatorFactory, NumberOfCopies) {
  using Fixture = OperatorFixture<std::tuple<CopyCounter>, CopyCounter>;
  ASSERT_OK_AND_ASSIGN(
      auto by_ref_with_eval_context_op,
      QExprOperatorFromFunction(
          [](EvaluationContext*, const CopyCounter& c) { return c; }));
  ASSERT_OK_AND_ASSIGN(auto by_ref_with_eval_context_op_fixture,
                       Fixture::Create(*by_ref_with_eval_context_op));
  EXPECT_THAT(by_ref_with_eval_context_op_fixture.Call(CopyCounter()),
              IsOkAndHolds(Field(&CopyCounter::count, 1)));
  ASSERT_OK_AND_ASSIGN(
      auto by_ref_without_eval_context_op,
      QExprOperatorFromFunction([](const CopyCounter& c) { return c; }));
  ASSERT_OK_AND_ASSIGN(auto by_ref_without_eval_context_op_fixture,
                       Fixture::Create(*by_ref_without_eval_context_op));
  EXPECT_THAT(by_ref_without_eval_context_op_fixture.Call(CopyCounter()),
              IsOkAndHolds(Field(&CopyCounter::count, 1)));
  ASSERT_OK_AND_ASSIGN(
      auto by_val_with_eval_context_op,
      QExprOperatorFromFunction(
          [](EvaluationContext*, CopyCounter c) { return c; }));
  ASSERT_OK_AND_ASSIGN(auto by_val_with_eval_context_op_fixture,
                       Fixture::Create(*by_val_with_eval_context_op));
  EXPECT_THAT(by_val_with_eval_context_op_fixture.Call(CopyCounter()),
              IsOkAndHolds(Field(&CopyCounter::count, 1)));
  ASSERT_OK_AND_ASSIGN(
      auto by_val_without_eval_context_op,
      QExprOperatorFromFunction([](CopyCounter c) { return c; }));
  ASSERT_OK_AND_ASSIGN(auto by_val_without_eval_context_op_fixture,
                       Fixture::Create(*by_val_without_eval_context_op));
  // One extra copy: the context-adding wrapper forwards by value.
  EXPECT_THAT(by_val_without_eval_context_op_fixture.Call(CopyCounter()),
              IsOkAndHolds(Field(&CopyCounter::count, 2)));
  ASSERT_OK_AND_ASSIGN(auto returns_tuple_op,
                       QExprOperatorFromFunction([](const CopyCounter& c) {
                         return std::make_tuple(c);
                       }));
  ASSERT_OK_AND_ASSIGN(auto returns_tuple_op_fixture,
                       Fixture::Create(*returns_tuple_op));
  EXPECT_THAT(returns_tuple_op_fixture.Call(CopyCounter()),
              IsOkAndHolds(Field(&CopyCounter::count, 1)));
  ASSERT_OK_AND_ASSIGN(
      auto returns_status_or_tuple_op,
      QExprOperatorFromFunction([](const CopyCounter& c) {
        return absl::StatusOr<std::tuple<CopyCounter>>(std::make_tuple(c));
      }));
  ASSERT_OK_AND_ASSIGN(auto returns_status_or_tuple_op_fixture,
                       Fixture::Create(*returns_status_or_tuple_op));
  EXPECT_THAT(returns_status_or_tuple_op_fixture.Call(CopyCounter()),
              IsOkAndHolds(Field(&CopyCounter::count, 1)));
}
// A functor may request the EvaluationContext explicitly as first argument.
TEST(OperatorFactory, TakesContext) {
  ASSERT_OK_AND_ASSIGN(
      auto op, QExprOperatorFromFunction([](EvaluationContext* ctx, float x) {
        ctx->buffer_factory().CreateRawBuffer(0);
        return 1;
      }));
  EXPECT_THAT(InvokeOperator<int32_t>(*op, 5.7f), IsOkAndHolds(1));
}

// Functor types used by the FromFunctor test below: a templated functor, a
// non-template functor, and one that takes an EvaluationContext*.
struct AddOp {
  template <typename T>
  T operator()(T a, T b) const {
    return a + b;
  }
};
struct Int64AddOp {
  int64_t operator()(int64_t a, int64_t b) const { return a + b; }
};
struct ContextAddOp {
  template <typename T>
  T operator()(EvaluationContext* ctx, T a, T b) const {
    return a + b;
  }
};
// QExprOperatorFromFunctor instantiates a default-constructed functor with
// explicit argument types; all three functor shapes above are accepted.
TEST(OperatorFactory, FromFunctor) {
  ASSERT_OK_AND_ASSIGN(auto op,
                       (QExprOperatorFromFunctor<AddOp, int32_t, int32_t>()));
  EXPECT_THAT(InvokeOperator<int32_t>(*op, 1, 2), IsOkAndHolds(Eq(3)));
  ASSERT_OK_AND_ASSIGN(
      auto non_template_op,
      (QExprOperatorFromFunctor<Int64AddOp, int64_t, int64_t>()));
  EXPECT_THAT(InvokeOperator<int64_t>(*non_template_op, int64_t{1}, int64_t{2}),
              IsOkAndHolds(Eq(3)));
  ASSERT_OK_AND_ASSIGN(
      auto context_op,
      (QExprOperatorFromFunctor<ContextAddOp, int32_t, int32_t>()));
  EXPECT_THAT(InvokeOperator<int32_t>(*context_op, 1, 2), IsOkAndHolds(Eq(3)));
}

// Mismatch between the explicit signature and the functor's C++ types is
// reported with the exact message produced by QTypesVerifier.
TEST(OperatorFactory, Errors) {
  EXPECT_THAT(
      QExprOperatorFromFunction([](int64_t a, int64_t b) { return a * b; }),
      IsOk());
  auto qtype = QExprOperatorSignature::Get(
      {GetQType<float>(), GetQType<int32_t>()}, GetQType<int>());
  EXPECT_THAT(
      QExprOperatorFromFunction(
          [](float arg1, float arg2) -> int32_t { return 57; }, qtype),
      StatusIs(
          absl::StatusCode::kInvalidArgument,
          MatchesRegex("unexpected type: expected INT32 with C\\+\\+ type int, "
                       "got float; in input types of .*->.*\\.")));
}
// Covers the variadic-input family: vector and span input forms, plain,
// StatusOr and tuple results, error propagation, input-type mismatch, and
// output-type mismatch.
TEST(VariadicInputOperatorTest, MakeVariadicInputOperatorFamily) {
  {
    // std::vector<T> input form.
    auto op_family = MakeVariadicInputOperatorFamily(
        [](std::vector<int32_t> args) -> int64_t { return args[0] * args[1]; });
    ASSERT_OK_AND_ASSIGN(auto op, op_family->GetOperator({GetQType<int32_t>(),
                                                          GetQType<int32_t>()},
                                                         GetQType<int64_t>()));
    EXPECT_THAT(InvokeOperator<int64_t>(*op, 3, 19), IsOkAndHolds(Eq(57)));
  }
  {
    // absl::Span<const T* const> input form (by pointer).
    auto op_family = MakeVariadicInputOperatorFamily(
        [](absl::Span<const int32_t* const> args) -> int64_t {
          return *args[0] * *args[1];
        });
    ASSERT_OK_AND_ASSIGN(auto op, op_family->GetOperator({GetQType<int32_t>(),
                                                          GetQType<int32_t>()},
                                                         GetQType<int64_t>()));
    EXPECT_THAT(InvokeOperator<int64_t>(*op, 3, 19), IsOkAndHolds(Eq(57)));
  }
  {
    // StatusOr result, success path.
    auto op_family = MakeVariadicInputOperatorFamily(
        [](absl::Span<const int32_t* const> args) -> absl::StatusOr<int64_t> {
          return *args[0] * *args[1];
        });
    ASSERT_OK_AND_ASSIGN(auto op, op_family->GetOperator({GetQType<int32_t>(),
                                                          GetQType<int32_t>()},
                                                         GetQType<int64_t>()));
    EXPECT_THAT(InvokeOperator<int64_t>(*op, 3, 19), IsOkAndHolds(Eq(57)));
  }
  {
    // StatusOr result, error path propagates to the caller.
    auto op_family = MakeVariadicInputOperatorFamily(
        [](absl::Span<const int32_t* const> args) -> absl::StatusOr<int64_t> {
          return absl::InvalidArgumentError("failed");
        });
    ASSERT_OK_AND_ASSIGN(auto op, op_family->GetOperator({GetQType<int32_t>(),
                                                          GetQType<int32_t>()},
                                                         GetQType<int64_t>()));
    EXPECT_THAT(InvokeOperator<int64_t>(*op, 3, 19),
                StatusIs(absl::StatusCode::kInvalidArgument, "failed"));
  }
  {
    // Tuple result maps to a tuple QType output.
    auto op_family = MakeVariadicInputOperatorFamily(
        [](absl::Span<const int32_t* const> args) {
          return std::make_tuple(*args[0], *args[1]);
        });
    ASSERT_OK_AND_ASSIGN(
        auto op,
        op_family->GetOperator(
            {GetQType<int32_t>(), GetQType<int32_t>()},
            MakeTupleQType({GetQType<int32_t>(), GetQType<int32_t>()})));
    ASSERT_OK_AND_ASSIGN(
        auto fixture,
        (OperatorFixture<std::tuple<int32_t, int32_t>,
                         std::tuple<int32_t, int32_t>>::Create(*op)));
    EXPECT_THAT(fixture.Call(57, 20),
                IsOkAndHolds(Eq(std::make_tuple(57, 20))));
  }
  {
    // Heterogeneous inputs are rejected with a precise message.
    auto op_family = MakeVariadicInputOperatorFamily(
        [](absl::Span<const int32_t* const> args) -> absl::StatusOr<int64_t> {
          return *args[0] + *args[1];
        });
    EXPECT_THAT(
        op_family->GetOperator({GetQType<int32_t>(), GetQType<int64_t>()},
                               GetQType<int64_t>()),
        StatusIs(absl::StatusCode::kInvalidArgument,
                 "expected only INT32, got INT64"));
  }
  {
    // Requesting the wrong output type fails QType matching.
    auto op_family = MakeVariadicInputOperatorFamily(
        [](absl::Span<const int32_t* const> args) -> absl::StatusOr<int64_t> {
          return *args[0] + *args[1];
        });
    EXPECT_THAT(
        op_family->GetOperator({GetQType<int32_t>(), GetQType<int32_t>()},
                               GetQType<int32_t>()),
        StatusIs(absl::StatusCode::kNotFound));
  }
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operator_factory.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operator_factory_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
97bc17f6-6af4-452c-a3ae-cc0e453a192e | cpp | google/tsl | scanner | tsl/platform/scanner.cc | tsl/platform/scanner_test.cc | #include "tsl/platform/scanner.h"
namespace tsl {
namespace strings {
// Advances `cur_` until `end_ch` is reached; the terminator itself is left
// unconsumed for the caller.  When `escaped` is true, a backslash consumes
// the character that follows it, so an escaped `end_ch` does not terminate
// the scan.  Hitting end-of-input first puts the scanner into error state.
void Scanner::ScanUntilImpl(char end_ch, bool escaped) {
  while (true) {
    if (cur_.empty()) {
      Error();
      return;
    }
    const char front = cur_.front();
    if (front == end_ch) {
      return;  // Terminator found; do not consume it.
    }
    cur_.remove_prefix(1);
    const bool is_escape = escaped && front == '\\';
    if (!is_escape) {
      continue;
    }
    // A backslash must be followed by the character it escapes.
    if (cur_.empty()) {
      Error();
      return;
    }
    cur_.remove_prefix(1);
  }
}
// Reports whether the scan so far succeeded.  On success, optionally writes
// the unconsumed input to `remaining` and the captured region to `capture`;
// on failure neither output parameter is modified.
bool Scanner::GetResult(absl::string_view* remaining,
                        absl::string_view* capture) {
  if (error_) {
    return false;
  }
  if (remaining != nullptr) {
    *remaining = cur_;
  }
  if (capture != nullptr) {
    // A capture that has not been explicitly stopped extends to the current
    // scan position.
    const char* limit = capture_end_;
    if (limit == nullptr) {
      limit = cur_.data();
    }
    *capture = absl::string_view(capture_start_, limit - capture_start_);
  }
  return true;
}
}
} | #include "tsl/platform/scanner.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace strings {
// Fixture for Scanner tests.  ClassStr() renders a CharClass as the string
// of every byte value it matches (in ascending order), which makes the
// character-class definitions easy to assert on literally.
class ScannerTest : public ::testing::Test {
 protected:
  string ClassStr(Scanner::CharClass clz) {
    string result;
    for (int c = 0; c < 256; ++c) {
      const char candidate = c;
      if (Scanner::Matches(clz, candidate)) {
        result.push_back(candidate);
      }
    }
    return result;
  }
};
TEST_F(ScannerTest, Any) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(" horse0123")
.Any(Scanner::SPACE)
.Any(Scanner::DIGIT)
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ(" horse", match);
EXPECT_EQ("0123", remaining);
EXPECT_TRUE(Scanner("")
.Any(Scanner::SPACE)
.Any(Scanner::DIGIT)
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("", match);
EXPECT_TRUE(Scanner("----")
.Any(Scanner::SPACE)
.Any(Scanner::DIGIT)
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("----", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, AnySpace) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(" a b ")
.AnySpace()
.One(Scanner::LETTER)
.AnySpace()
.GetResult(&remaining, &match));
EXPECT_EQ(" a ", match);
EXPECT_EQ("b ", remaining);
}
TEST_F(ScannerTest, AnyEscapedNewline) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("\\\n")
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &match));
EXPECT_EQ("\\\n", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, AnyEmptyString) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("")
.Any(Scanner::LETTER_DIGIT_UNDERSCORE)
.GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("", match);
}
// Eos() succeeds only when the entire input has been consumed.
TEST_F(ScannerTest, Eos) {
  EXPECT_TRUE(Scanner("").Eos().GetResult());
  EXPECT_FALSE(Scanner("a").Eos().GetResult());
  EXPECT_TRUE(Scanner("abc").OneLiteral("abc").Eos().GetResult());
  EXPECT_FALSE(Scanner("abc").OneLiteral("ab").Eos().GetResult());
}
TEST_F(ScannerTest, Many) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("abc").Many(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("0").Many(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("").Many(Scanner::LETTER).GetResult());
EXPECT_TRUE(
Scanner("abc ").Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ(" ", remaining);
EXPECT_EQ("abc", match);
EXPECT_TRUE(
Scanner("abc").Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("abc", match);
}
TEST_F(ScannerTest, One) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner("abc").One(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("0").One(Scanner::LETTER).GetResult());
EXPECT_FALSE(Scanner("").One(Scanner::LETTER).GetResult());
EXPECT_TRUE(Scanner("abc")
.One(Scanner::LETTER)
.One(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("c", remaining);
EXPECT_EQ("ab", match);
EXPECT_TRUE(Scanner("a").One(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("a", match);
}
// OneLiteral() consumes an exact (case-sensitive) literal and can be
// chained; any mismatch fails the whole scan.
TEST_F(ScannerTest, OneLiteral) {
  EXPECT_TRUE(Scanner("abc").OneLiteral("ab").OneLiteral("c").GetResult());
  EXPECT_FALSE(Scanner("abc").OneLiteral("abC").GetResult());
}
TEST_F(ScannerTest, ScanUntil) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(R"(' \1 \2 \3 \' \\'rest)")
.OneLiteral("'")
.ScanUntil('\'')
.OneLiteral("'")
.GetResult(&remaining, &match));
EXPECT_EQ(R"( \\'rest)", remaining);
EXPECT_EQ(R"(' \1 \2 \3 \')", match);
remaining = match = "unset";
EXPECT_FALSE(Scanner(R"(' \1 \2 \3 \\rest)")
.OneLiteral("'")
.ScanUntil('\'')
.GetResult(&remaining, &match));
EXPECT_EQ("unset", remaining);
EXPECT_EQ("unset", match);
remaining = match = "";
EXPECT_TRUE(
Scanner(R"(123\456)").ScanUntil('\\').GetResult(&remaining, &match));
EXPECT_EQ(R"(\456)", remaining);
EXPECT_EQ("123", match);
}
TEST_F(ScannerTest, ScanEscapedUntil) {
absl::string_view remaining, match;
EXPECT_TRUE(Scanner(R"(' \1 \2 \3 \' \\'rest)")
.OneLiteral("'")
.ScanEscapedUntil('\'')
.OneLiteral("'")
.GetResult(&remaining, &match));
EXPECT_EQ("rest", remaining);
EXPECT_EQ(R"(' \1 \2 \3 \' \\')", match);
remaining = match = "unset";
EXPECT_FALSE(Scanner(R"(' \1 \2 \3 \' \\rest)")
.OneLiteral("'")
.ScanEscapedUntil('\'')
.GetResult(&remaining, &match));
EXPECT_EQ("unset", remaining);
EXPECT_EQ("unset", match);
}
TEST_F(ScannerTest, ZeroOrOneLiteral) {
absl::string_view remaining, match;
EXPECT_TRUE(
Scanner("abc").ZeroOrOneLiteral("abC").GetResult(&remaining, &match));
EXPECT_EQ("abc", remaining);
EXPECT_EQ("", match);
EXPECT_TRUE(
Scanner("abcd").ZeroOrOneLiteral("ab").ZeroOrOneLiteral("c").GetResult(
&remaining, &match));
EXPECT_EQ("d", remaining);
EXPECT_EQ("abc", match);
EXPECT_TRUE(
Scanner("").ZeroOrOneLiteral("abc").GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("", match);
}
TEST_F(ScannerTest, CaptureAndGetResult) {
absl::string_view remaining, match;
Scanner scan(" first second");
EXPECT_TRUE(scan.Any(Scanner::SPACE)
.RestartCapture()
.One(Scanner::LETTER)
.Any(Scanner::LETTER_DIGIT)
.StopCapture()
.Any(Scanner::SPACE)
.GetResult(&remaining, &match));
EXPECT_EQ("second", remaining);
EXPECT_EQ("first", match);
EXPECT_TRUE(scan.GetResult());
remaining = "";
EXPECT_TRUE(scan.GetResult(&remaining));
EXPECT_EQ("second", remaining);
remaining = "";
match = "";
EXPECT_TRUE(scan.GetResult(&remaining, &match));
EXPECT_EQ("second", remaining);
EXPECT_EQ("first", match);
scan.RestartCapture().One(Scanner::LETTER).One(Scanner::LETTER);
remaining = "";
match = "";
EXPECT_TRUE(scan.GetResult(&remaining, &match));
EXPECT_EQ("cond", remaining);
EXPECT_EQ("se", match);
}
TEST_F(ScannerTest, MultipleGetResultExtendsCapture) {
absl::string_view remaining, match;
Scanner scan("one2three");
EXPECT_TRUE(scan.Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("2three", remaining);
EXPECT_EQ("one", match);
EXPECT_TRUE(scan.Many(Scanner::DIGIT).GetResult(&remaining, &match));
EXPECT_EQ("three", remaining);
EXPECT_EQ("one2", match);
EXPECT_TRUE(scan.Many(Scanner::LETTER).GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("one2three", match);
}
// A failed GetResult() must leave the caller's output variables untouched.
TEST_F(ScannerTest, FailedMatchDoesntChangeResult) {
  Scanner scan("name");
  absl::string_view remaining = "rem";
  absl::string_view match = "match";
  // "name" does not start with a space, so the scan is in error state.
  EXPECT_FALSE(scan.One(Scanner::SPACE).GetResult(&remaining, &match));
  EXPECT_EQ("rem", remaining);
  EXPECT_EQ("match", match);
}
TEST_F(ScannerTest, DefaultCapturesAll) {
Scanner scan("a b");
absl::string_view remaining = "rem";
absl::string_view match = "match";
EXPECT_TRUE(scan.Any(Scanner::LETTER)
.AnySpace()
.Any(Scanner::LETTER)
.GetResult(&remaining, &match));
EXPECT_EQ("", remaining);
EXPECT_EQ("a b", match);
}
TEST_F(ScannerTest, AllCharClasses) {
EXPECT_EQ(256, ClassStr(Scanner::ALL).size());
EXPECT_EQ("0123456789", ClassStr(Scanner::DIGIT));
EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER));
EXPECT_EQ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT));
EXPECT_EQ(
"-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
"abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DASH_UNDERSCORE));
EXPECT_EQ(
"-./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DASH_DOT_SLASH));
EXPECT_EQ(
"-./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_"
"abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DASH_DOT_SLASH_UNDERSCORE));
EXPECT_EQ(".0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DOT));
EXPECT_EQ("+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DOT_PLUS_MINUS));
EXPECT_EQ(".0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_DOT_UNDERSCORE));
EXPECT_EQ("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LETTER_DIGIT_UNDERSCORE));
EXPECT_EQ("abcdefghijklmnopqrstuvwxyz", ClassStr(Scanner::LOWERLETTER));
EXPECT_EQ("0123456789abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LOWERLETTER_DIGIT));
EXPECT_EQ("0123456789_abcdefghijklmnopqrstuvwxyz",
ClassStr(Scanner::LOWERLETTER_DIGIT_UNDERSCORE));
EXPECT_EQ("123456789", ClassStr(Scanner::NON_ZERO_DIGIT));
EXPECT_EQ("\t\n\v\f\r ", ClassStr(Scanner::SPACE));
EXPECT_EQ("ABCDEFGHIJKLMNOPQRSTUVWXYZ", ClassStr(Scanner::UPPERLETTER));
EXPECT_EQ(">", ClassStr(Scanner::RANGLE));
}
// Peek() returns the next unconsumed character without consuming it, or the
// supplied default ('\0' when none is given) at end of input.
TEST_F(ScannerTest, Peek) {
  EXPECT_EQ('a', Scanner("abc").Peek());
  EXPECT_EQ('a', Scanner("abc").Peek('b'));
  EXPECT_EQ('\0', Scanner("").Peek());
  EXPECT_EQ('z', Scanner("").Peek('z'));
  // Peek() observes the position after prior consumption.
  EXPECT_EQ('A', Scanner("0123A").Any(Scanner::DIGIT).Peek());
  EXPECT_EQ('\0', Scanner("0123A").Any(Scanner::LETTER_DIGIT).Peek());
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/scanner.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/scanner_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
08aabd66-003a-4e32-bd14-891d13de3ca4 | cpp | google/quiche | quic_receive_control_stream | quiche/quic/core/http/quic_receive_control_stream.cc | quiche/quic/core/http/quic_receive_control_stream_test.cc | #include "quiche/quic/core/http/quic_receive_control_stream.h"
#include <optional>
#include <utility>
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/http/http_decoder.h"
#include "quiche/quic/core/http/quic_spdy_session.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
// Adopts a pending unidirectional stream (already identified as the control
// stream by its stream type) as this connection's receive control stream.
// The stream is static (is_static() == true) and must stay open for the
// connection's lifetime.
QuicReceiveControlStream::QuicReceiveControlStream(
    PendingStream* pending, QuicSpdySession* spdy_session)
    : QuicStream(pending, spdy_session,
                 /*is_static=*/true),
      settings_frame_received_(false),
      decoder_(this),
      spdy_session_(spdy_session) {
  // Level-triggered sequencing: OnDataAvailable() is expected to keep being
  // invoked while unconsumed data remains buffered.
  sequencer()->set_level_triggered(true);
}
QuicReceiveControlStream::~QuicReceiveControlStream() {}
// A RESET_STREAM on the control stream is never legal: the control stream is
// a critical stream, so this escalates to a connection-level error.
void QuicReceiveControlStream::OnStreamReset(
    const QuicRstStreamFrame& /*frame*/) {
  stream_delegate()->OnStreamError(
      QUIC_HTTP_CLOSED_CRITICAL_STREAM,
      "RESET_STREAM received for receive control stream");
}
// Drains all readable data through the HTTP/3 frame decoder.  Loops until
// the readable region is exhausted, reading has been stopped, or the
// decoder has recorded an error.
void QuicReceiveControlStream::OnDataAvailable() {
  iovec iov;
  while (!reading_stopped() && decoder_.error() == QUIC_NO_ERROR &&
         sequencer()->GetReadableRegion(&iov)) {
    QUICHE_DCHECK(!sequencer()->IsClosed());
    QuicByteCount processed_bytes = decoder_.ProcessInput(
        reinterpret_cast<const char*>(iov.iov_base), iov.iov_len);
    sequencer()->MarkConsumed(processed_bytes);
    if (!session()->connection()->connected()) {
      // A decoder callback closed the connection; stop touching the stream.
      return;
    }
    // Unless the connection was closed above, the decoder is expected to
    // have consumed the entire readable region.
    QUICHE_DCHECK_EQ(iov.iov_len, processed_bytes);
  }
}
// HttpDecoder error callback: escalates the decoder's error code and detail
// string to a connection-level error via the stream delegate.
void QuicReceiveControlStream::OnError(HttpDecoder* decoder) {
  stream_delegate()->OnStreamError(decoder->error(), decoder->error_detail());
}
bool QuicReceiveControlStream::OnMaxPushIdFrame() {
return ValidateFrameType(HttpFrameType::MAX_PUSH_ID);
}
// Handles a GOAWAY frame: notifies the debug visitor (if any), checks frame
// ordering via ValidateFrameType(), then forwards the frame's id to the
// session's GOAWAY handler.
bool QuicReceiveControlStream::OnGoAwayFrame(const GoAwayFrame& frame) {
  if (spdy_session()->debug_visitor()) {
    spdy_session()->debug_visitor()->OnGoAwayFrameReceived(frame);
  }
  if (!ValidateFrameType(HttpFrameType::GOAWAY)) {
    return false;
  }
  spdy_session()->OnHttp3GoAway(frame.id);
  return true;
}
bool QuicReceiveControlStream::OnSettingsFrameStart(
QuicByteCount ) {
return ValidateFrameType(HttpFrameType::SETTINGS);
}
// Forwards a (position-validated) SETTINGS frame to the session and
// propagates the session's result back to the decoder.
bool QuicReceiveControlStream::OnSettingsFrame(const SettingsFrame& frame) {
  QUIC_DVLOG(1) << "Control Stream " << id()
                << " received settings frame: " << frame;
  return spdy_session_->OnSettingsFrame(frame);
}
bool QuicReceiveControlStream::OnDataFrameStart(QuicByteCount ,
QuicByteCount
) {
return ValidateFrameType(HttpFrameType::DATA);
}
bool QuicReceiveControlStream::OnDataFramePayload(
absl::string_view ) {
QUICHE_NOTREACHED();
return false;
}
bool QuicReceiveControlStream::OnDataFrameEnd() {
QUICHE_NOTREACHED();
return false;
}
bool QuicReceiveControlStream::OnHeadersFrameStart(
QuicByteCount , QuicByteCount
) {
return ValidateFrameType(HttpFrameType::HEADERS);
}
bool QuicReceiveControlStream::OnHeadersFramePayload(
absl::string_view ) {
QUICHE_NOTREACHED();
return false;
}
bool QuicReceiveControlStream::OnHeadersFrameEnd() {
QUICHE_NOTREACHED();
return false;
}
bool QuicReceiveControlStream::OnPriorityUpdateFrameStart(
QuicByteCount ) {
return ValidateFrameType(HttpFrameType::PRIORITY_UPDATE_REQUEST_STREAM);
}
// Parses the Priority Field Value of a PRIORITY_UPDATE frame and forwards
// the resulting priority to the session for the referenced request stream.
// A malformed payload closes the connection.
bool QuicReceiveControlStream::OnPriorityUpdateFrame(
    const PriorityUpdateFrame& frame) {
  if (spdy_session()->debug_visitor()) {
    spdy_session()->debug_visitor()->OnPriorityUpdateFrameReceived(frame);
  }
  std::optional<HttpStreamPriority> priority =
      ParsePriorityFieldValue(frame.priority_field_value);
  if (!priority.has_value()) {
    stream_delegate()->OnStreamError(QUIC_INVALID_PRIORITY_UPDATE,
                                     "Invalid PRIORITY_UPDATE frame payload.");
    return false;
  }
  const QuicStreamId stream_id = frame.prioritized_element_id;
  return spdy_session_->OnPriorityUpdateForRequestStream(stream_id, *priority);
}
bool QuicReceiveControlStream::OnOriginFrameStart(
QuicByteCount ) {
return ValidateFrameType(HttpFrameType::ORIGIN);
}
// Handles an ORIGIN frame.  ORIGIN frames flow server-to-client only, so
// only a client should reach this point (the server side is rejected by
// ValidateFrameType() before dispatch).
bool QuicReceiveControlStream::OnOriginFrame(const OriginFrame& frame) {
  QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, spdy_session()->perspective());
  if (spdy_session()->debug_visitor()) {
    spdy_session()->debug_visitor()->OnOriginFrameReceived(frame);
  }
  spdy_session()->OnOriginFrame(frame);
  // Fix: previously returned false, which tells HttpDecoder to pause input
  // processing even though the frame was handled successfully.  Returning
  // true matches the sibling OnAcceptChFrame() handler and keeps the
  // read loop's expectation (OnDataAvailable's DCHECK that the full
  // readable region is consumed) intact when more frames follow ORIGIN.
  return true;
}
bool QuicReceiveControlStream::OnAcceptChFrameStart(
QuicByteCount ) {
return ValidateFrameType(HttpFrameType::ACCEPT_CH);
}
bool QuicReceiveControlStream::OnAcceptChFrame(const AcceptChFrame& frame) {
QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, spdy_session()->perspective());
if (spdy_session()->debug_visitor()) {
spdy_session()->debug_visitor()->OnAcceptChFrameReceived(frame);
}
spdy_session()->OnAcceptChFrame(frame);
return true;
}
void QuicReceiveControlStream::OnWebTransportStreamFrameType(
QuicByteCount , WebTransportSessionId ) {
QUIC_BUG(WEBTRANSPORT_STREAM on Control Stream)
<< "Parsed WEBTRANSPORT_STREAM on a control stream.";
}
bool QuicReceiveControlStream::OnMetadataFrameStart(
QuicByteCount , QuicByteCount ) {
return ValidateFrameType(HttpFrameType::METADATA);
}
bool QuicReceiveControlStream::OnMetadataFramePayload(
absl::string_view ) {
return true;
}
bool QuicReceiveControlStream::OnMetadataFrameEnd() {
return true;
}
// Unknown (reserved/greased) frame types are tolerated after SETTINGS; they
// are reported to the debug visitor and then checked only for ordering.
bool QuicReceiveControlStream::OnUnknownFrameStart(
    uint64_t frame_type, QuicByteCount /*header_length*/,
    QuicByteCount payload_length) {
  if (spdy_session()->debug_visitor()) {
    spdy_session()->debug_visitor()->OnUnknownFrameReceived(id(), frame_type,
                                                            payload_length);
  }
  return ValidateFrameType(static_cast<HttpFrameType>(frame_type));
}
bool QuicReceiveControlStream::OnUnknownFramePayload(
absl::string_view ) {
return true;
}
bool QuicReceiveControlStream::OnUnknownFrameEnd() {
return true;
}
// Enforces which HTTP/3 frame types may appear on the control stream and in
// what order: DATA and HEADERS are never allowed; MAX_PUSH_ID is invalid at
// a client, and ORIGIN (flag-gated) / ACCEPT_CH are invalid at a server;
// SETTINGS must be the first frame and may appear only once.  On violation,
// closes the connection via the stream delegate and returns false.
bool QuicReceiveControlStream::ValidateFrameType(HttpFrameType frame_type) {
  // Frame types that are never allowed on the control stream, or not allowed
  // from this perspective.
  if (frame_type == HttpFrameType::DATA ||
      frame_type == HttpFrameType::HEADERS ||
      (spdy_session()->perspective() == Perspective::IS_CLIENT &&
       frame_type == HttpFrameType::MAX_PUSH_ID) ||
      (spdy_session()->perspective() == Perspective::IS_SERVER &&
       ((GetQuicReloadableFlag(enable_h3_origin_frame) &&
         frame_type == HttpFrameType::ORIGIN) ||
        frame_type == HttpFrameType::ACCEPT_CH))) {
    stream_delegate()->OnStreamError(
        QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM,
        absl::StrCat("Invalid frame type ", static_cast<int>(frame_type),
                     " received on control stream."));
    return false;
  }
  if (settings_frame_received_) {
    if (frame_type == HttpFrameType::SETTINGS) {
      // SETTINGS may appear at most once on the control stream.
      stream_delegate()->OnStreamError(
          QUIC_HTTP_INVALID_FRAME_SEQUENCE_ON_CONTROL_STREAM,
          "SETTINGS frame can only be received once.");
      return false;
    }
    return true;
  }
  if (frame_type == HttpFrameType::SETTINGS) {
    settings_frame_received_ = true;
    return true;
  }
  // Any other frame arriving before SETTINGS is a missing-SETTINGS error.
  stream_delegate()->OnStreamError(
      QUIC_HTTP_MISSING_SETTINGS_FRAME,
      absl::StrCat("First frame received on control stream is type ",
                   static_cast<int>(frame_type), ", but it must be SETTINGS."));
  return false;
}
} | #include "quiche/quic/core/http/quic_receive_control_stream.h"
#include <ostream>
#include <string>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/qpack/qpack_header_table.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/test_tools/qpack/qpack_encoder_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/simple_buffer_allocator.h"
namespace quic {
class QpackEncoder;
namespace test {
namespace {
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::StrictMock;
// Test parameterization: a (QUIC version, perspective) pair, with a
// printable form for test naming.
struct TestParams {
  TestParams(const ParsedQuicVersion& version, Perspective perspective)
      : version(version), perspective(perspective) {
    QUIC_LOG(INFO) << "TestParams: " << *this;
  }

  // Rule of Zero/Five: the hand-written copy constructor duplicated the
  // memberwise default (and intentionally did not log), so default it.
  TestParams(const TestParams& other) = default;

  friend std::ostream& operator<<(std::ostream& os, const TestParams& tp) {
    os << "{ version: " << ParsedQuicVersionToString(tp.version)
       << ", perspective: "
       << (tp.perspective == Perspective::IS_CLIENT ? "client" : "server")
       << "}";
    return os;
  }

  ParsedQuicVersion version;
  Perspective perspective;
};
// gtest hook: renders a TestParams as "<version>_<client|server>" so each
// parameterized test instance gets a readable name.
std::string PrintToString(const TestParams& tp) {
  const char* role =
      tp.perspective == Perspective::IS_CLIENT ? "client" : "server";
  return absl::StrCat(ParsedQuicVersionToString(tp.version), "_", role);
}
// Builds the cartesian product of every HTTP/3-capable supported QUIC
// version with both perspectives (server and client).
std::vector<TestParams> GetTestParams() {
  std::vector<TestParams> params;
  // Fix: dropped the unused local copy of AllSupportedVersions() that the
  // original materialized and never read.
  for (const auto& version : AllSupportedVersions()) {
    if (!VersionUsesHttp3(version.transport_version)) {
      continue;  // The HTTP/3 control stream does not exist pre-HTTP/3.
    }
    for (Perspective p : {Perspective::IS_SERVER, Perspective::IS_CLIENT}) {
      params.emplace_back(version, p);
    }
  }
  return params;
}
// Minimal bidirectional QuicSpdyStream used as a request stream in these
// tests; body notifications are ignored.
class TestStream : public QuicSpdyStream {
 public:
  TestStream(QuicStreamId id, QuicSpdySession* session)
      : QuicSpdyStream(id, session, BIDIRECTIONAL) {}
  ~TestStream() override = default;
  // No-op: the tests never read request/response bodies.
  void OnBodyAvailable() override {}
};
class QuicReceiveControlStreamTest : public QuicTestWithParam<TestParams> {
public:
QuicReceiveControlStreamTest()
: connection_(new StrictMock<MockQuicConnection>(
&helper_, &alarm_factory_, perspective(),
SupportedVersions(GetParam().version))),
session_(connection_) {
EXPECT_CALL(session_, OnCongestionWindowChange(_)).Times(AnyNumber());
session_.Initialize();
EXPECT_CALL(
static_cast<const MockQuicCryptoStream&>(*session_.GetCryptoStream()),
encryption_established())
.WillRepeatedly(testing::Return(true));
QuicStreamId id = perspective() == Perspective::IS_SERVER
? GetNthClientInitiatedUnidirectionalStreamId(
session_.transport_version(), 3)
: GetNthServerInitiatedUnidirectionalStreamId(
session_.transport_version(), 3);
char type[] = {kControlStream};
QuicStreamFrame data1(id, false, 0, absl::string_view(type, 1));
session_.OnStreamFrame(data1);
receive_control_stream_ =
QuicSpdySessionPeer::GetReceiveControlStream(&session_);
stream_ = new TestStream(GetNthClientInitiatedBidirectionalStreamId(
GetParam().version.transport_version, 0),
&session_);
session_.ActivateStream(absl::WrapUnique(stream_));
}
Perspective perspective() const { return GetParam().perspective; }
QuicStreamOffset NumBytesConsumed() {
return QuicStreamPeer::sequencer(receive_control_stream_)
->NumBytesConsumed();
}
MockQuicConnectionHelper helper_;
MockAlarmFactory alarm_factory_;
StrictMock<MockQuicConnection>* connection_;
StrictMock<MockQuicSpdySession> session_;
QuicReceiveControlStream* receive_control_stream_;
TestStream* stream_;
};
INSTANTIATE_TEST_SUITE_P(Tests, QuicReceiveControlStreamTest,
::testing::ValuesIn(GetTestParams()),
::testing::PrintToStringParamName());
TEST_P(QuicReceiveControlStreamTest, ResetControlStream) {
EXPECT_TRUE(receive_control_stream_->is_static());
QuicRstStreamFrame rst_frame(kInvalidControlFrameId,
receive_control_stream_->id(),
QUIC_STREAM_CANCELLED, 1234);
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM, _, _));
receive_control_stream_->OnStreamReset(rst_frame);
}
TEST_P(QuicReceiveControlStreamTest, ReceiveSettings) {
SettingsFrame settings;
settings.values[10] = 2;
settings.values[SETTINGS_MAX_FIELD_SECTION_SIZE] = 5;
settings.values[SETTINGS_QPACK_BLOCKED_STREAMS] = 12;
settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = 37;
std::string data = HttpEncoder::SerializeSettingsFrame(settings);
QuicStreamFrame frame(receive_control_stream_->id(), false, 1, data);
QpackEncoder* qpack_encoder = session_.qpack_encoder();
QpackEncoderHeaderTable* header_table =
QpackEncoderPeer::header_table(qpack_encoder);
EXPECT_EQ(std::numeric_limits<size_t>::max(),
session_.max_outbound_header_list_size());
EXPECT_EQ(0u, QpackEncoderPeer::maximum_blocked_streams(qpack_encoder));
EXPECT_EQ(0u, header_table->maximum_dynamic_table_capacity());
receive_control_stream_->OnStreamFrame(frame);
EXPECT_EQ(5u, session_.max_outbound_header_list_size());
EXPECT_EQ(12u, QpackEncoderPeer::maximum_blocked_streams(qpack_encoder));
EXPECT_EQ(37u, header_table->maximum_dynamic_table_capacity());
}
TEST_P(QuicReceiveControlStreamTest, ReceiveSettingsTwice) {
SettingsFrame settings;
settings.values[0x21] = 100;
settings.values[0x40] = 200;
std::string settings_frame = HttpEncoder::SerializeSettingsFrame(settings);
QuicStreamOffset offset = 1;
EXPECT_EQ(offset, NumBytesConsumed());
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false, offset,
settings_frame));
offset += settings_frame.length();
EXPECT_EQ(offset, NumBytesConsumed());
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_HTTP_INVALID_FRAME_SEQUENCE_ON_CONTROL_STREAM,
"SETTINGS frame can only be received once.", _))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false, offset,
settings_frame));
QuicByteCount settings_frame_header_length = 2;
EXPECT_EQ(offset + settings_frame_header_length, NumBytesConsumed());
}
TEST_P(QuicReceiveControlStreamTest, ReceiveSettingsFragments) {
SettingsFrame settings;
settings.values[10] = 2;
settings.values[SETTINGS_MAX_FIELD_SECTION_SIZE] = 5;
std::string data = HttpEncoder::SerializeSettingsFrame(settings);
std::string data1 = data.substr(0, 1);
std::string data2 = data.substr(1, data.length() - 1);
QuicStreamFrame frame(receive_control_stream_->id(), false, 1, data1);
QuicStreamFrame frame2(receive_control_stream_->id(), false, 2, data2);
EXPECT_NE(5u, session_.max_outbound_header_list_size());
receive_control_stream_->OnStreamFrame(frame);
receive_control_stream_->OnStreamFrame(frame2);
EXPECT_EQ(5u, session_.max_outbound_header_list_size());
}
TEST_P(QuicReceiveControlStreamTest, ReceiveWrongFrame) {
quiche::QuicheBuffer data = HttpEncoder::SerializeDataFrameHeader(
2, quiche::SimpleBufferAllocator::Get());
QuicStreamFrame frame(receive_control_stream_->id(), false, 1,
data.AsStringView());
EXPECT_CALL(
*connection_,
CloseConnection(QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM, _, _));
receive_control_stream_->OnStreamFrame(frame);
}
TEST_P(QuicReceiveControlStreamTest,
ReceivePriorityUpdateFrameBeforeSettingsFrame) {
std::string serialized_frame = HttpEncoder::SerializePriorityUpdateFrame({});
QuicStreamFrame data(receive_control_stream_->id(), false,
1, serialized_frame);
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HTTP_MISSING_SETTINGS_FRAME,
"First frame received on control stream is type "
"984832, but it must be SETTINGS.",
_))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
receive_control_stream_->OnStreamFrame(data);
}
TEST_P(QuicReceiveControlStreamTest, ReceiveGoAwayFrame) {
StrictMock<MockHttp3DebugVisitor> debug_visitor;
session_.set_debug_visitor(&debug_visitor);
QuicStreamOffset offset = 1;
SettingsFrame settings;
std::string settings_frame = HttpEncoder::SerializeSettingsFrame(settings);
EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(settings));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false, offset,
settings_frame));
offset += settings_frame.length();
GoAwayFrame goaway{ 0};
std::string goaway_frame = HttpEncoder::SerializeGoAwayFrame(goaway);
QuicStreamFrame frame(receive_control_stream_->id(), false, offset,
goaway_frame);
EXPECT_FALSE(session_.goaway_received());
EXPECT_CALL(debug_visitor, OnGoAwayFrameReceived(goaway));
receive_control_stream_->OnStreamFrame(frame);
EXPECT_TRUE(session_.goaway_received());
}
TEST_P(QuicReceiveControlStreamTest, PushPromiseOnControlStreamShouldClose) {
std::string push_promise_frame;
ASSERT_TRUE(
absl::HexStringToBytes("05"
"01"
"00",
&push_promise_frame));
QuicStreamFrame frame(receive_control_stream_->id(), false, 1,
push_promise_frame);
EXPECT_CALL(*connection_, CloseConnection(QUIC_HTTP_FRAME_ERROR, _, _))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
receive_control_stream_->OnStreamFrame(frame);
}
TEST_P(QuicReceiveControlStreamTest, ConsumeUnknownFrame) {
EXPECT_EQ(1u, NumBytesConsumed());
QuicStreamOffset offset = 1;
std::string settings_frame = HttpEncoder::SerializeSettingsFrame({});
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false, offset,
settings_frame));
offset += settings_frame.length();
EXPECT_EQ(offset, NumBytesConsumed());
std::string unknown_frame;
ASSERT_TRUE(
absl::HexStringToBytes("21"
"03"
"666f6f",
&unknown_frame));
receive_control_stream_->OnStreamFrame(QuicStreamFrame(
receive_control_stream_->id(), false, offset, unknown_frame));
offset += unknown_frame.size();
EXPECT_EQ(offset, NumBytesConsumed());
}
TEST_P(QuicReceiveControlStreamTest, ReceiveUnknownFrame) {
StrictMock<MockHttp3DebugVisitor> debug_visitor;
session_.set_debug_visitor(&debug_visitor);
const QuicStreamId id = receive_control_stream_->id();
QuicStreamOffset offset = 1;
SettingsFrame settings;
std::string settings_frame = HttpEncoder::SerializeSettingsFrame(settings);
EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(settings));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(id, false, offset, settings_frame));
offset += settings_frame.length();
std::string unknown_frame;
ASSERT_TRUE(
absl::HexStringToBytes("21"
"03"
"666f6f",
&unknown_frame));
EXPECT_CALL(debug_visitor, OnUnknownFrameReceived(id, 0x21,
3));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(id, false, offset, unknown_frame));
}
TEST_P(QuicReceiveControlStreamTest, CancelPushFrameBeforeSettings) {
std::string cancel_push_frame;
ASSERT_TRUE(
absl::HexStringToBytes("03"
"01"
"01",
&cancel_push_frame));
EXPECT_CALL(*connection_, CloseConnection(QUIC_HTTP_FRAME_ERROR,
"CANCEL_PUSH frame received.", _))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false,
1, cancel_push_frame));
}
TEST_P(QuicReceiveControlStreamTest, AcceptChFrameBeforeSettings) {
std::string accept_ch_frame;
ASSERT_TRUE(
absl::HexStringToBytes("4089"
"00",
&accept_ch_frame));
if (perspective() == Perspective::IS_SERVER) {
EXPECT_CALL(*connection_,
CloseConnection(
QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM,
"Invalid frame type 137 received on control stream.", _))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
} else {
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HTTP_MISSING_SETTINGS_FRAME,
"First frame received on control stream is "
"type 137, but it must be SETTINGS.",
_))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
}
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false,
1, accept_ch_frame));
}
TEST_P(QuicReceiveControlStreamTest, ReceiveAcceptChFrame) {
StrictMock<MockHttp3DebugVisitor> debug_visitor;
session_.set_debug_visitor(&debug_visitor);
const QuicStreamId id = receive_control_stream_->id();
QuicStreamOffset offset = 1;
SettingsFrame settings;
std::string settings_frame = HttpEncoder::SerializeSettingsFrame(settings);
EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(settings));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(id, false, offset, settings_frame));
offset += settings_frame.length();
std::string accept_ch_frame;
ASSERT_TRUE(
absl::HexStringToBytes("4089"
"00",
&accept_ch_frame));
if (perspective() == Perspective::IS_CLIENT) {
EXPECT_CALL(debug_visitor, OnAcceptChFrameReceived(_));
} else {
EXPECT_CALL(*connection_,
CloseConnection(
QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM,
"Invalid frame type 137 received on control stream.", _))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
}
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(id, false, offset, accept_ch_frame));
}
TEST_P(QuicReceiveControlStreamTest, ReceiveOriginFrame) {
StrictMock<MockHttp3DebugVisitor> debug_visitor;
session_.set_debug_visitor(&debug_visitor);
const QuicStreamId id = receive_control_stream_->id();
QuicStreamOffset offset = 1;
SettingsFrame settings;
std::string settings_frame = HttpEncoder::SerializeSettingsFrame(settings);
EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(settings));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(id, false, offset, settings_frame));
offset += settings_frame.length();
std::string origin_frame;
ASSERT_TRUE(
absl::HexStringToBytes("0C"
"00",
&origin_frame));
if (GetQuicReloadableFlag(enable_h3_origin_frame)) {
if (perspective() == Perspective::IS_CLIENT) {
EXPECT_CALL(debug_visitor, OnOriginFrameReceived(_));
} else {
EXPECT_CALL(*connection_,
CloseConnection(
QUIC_HTTP_FRAME_UNEXPECTED_ON_CONTROL_STREAM,
"Invalid frame type 12 received on control stream.", _))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
}
} else {
EXPECT_CALL(debug_visitor,
OnUnknownFrameReceived(id, 0x0c,
0));
}
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(id, false, offset, origin_frame));
}
TEST_P(QuicReceiveControlStreamTest, UnknownFrameBeforeSettings) {
std::string unknown_frame;
ASSERT_TRUE(
absl::HexStringToBytes("21"
"03"
"666f6f",
&unknown_frame));
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HTTP_MISSING_SETTINGS_FRAME,
"First frame received on control stream is type "
"33, but it must be SETTINGS.",
_))
.WillOnce(
Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, SendConnectionClosePacket(_, _, _));
EXPECT_CALL(session_, OnConnectionClosed(_, _));
receive_control_stream_->OnStreamFrame(
QuicStreamFrame(receive_control_stream_->id(), false,
1, unknown_frame));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_receive_control_stream.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_receive_control_stream_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
1c34f11c-bc6d-43c9-ba89-f90098acaca7 | cpp | tensorflow/tensorflow | cluster_scoping_pass | tensorflow/compiler/jit/cluster_scoping_pass.cc | tensorflow/compiler/jit/cluster_scoping_pass_test.cc | #include "tensorflow/compiler/jit/cluster_scoping_pass.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
namespace {
class ClusterScopingPassImpl {
public:
ClusterScopingPassImpl(Graph* graph,
OptimizerOptions::GlobalJitLevel global_jit_level)
: graph_(graph),
global_jit_level_(global_jit_level),
unique_scope_id_(0) {}
Status Run();
private:
Status ScopingForPipelineStages();
size_t GetUniqueScopeId() { return unique_scope_id_++; }
void AddScopeToAllTransitivePredecessors(Node* start);
void AddScopeToAllTransitiveSuccessors(Node* start);
private:
Graph* graph_;
OptimizerOptions::GlobalJitLevel global_jit_level_;
size_t unique_scope_id_;
};
std::optional<string> GetXlaInternalScope(Node* node) {
string scope;
if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok()) {
return scope;
}
return std::nullopt;
}
void SetXlaInternalScope(Node* node, StringPiece scope) {
node->AddAttr(kXlaInternalScopeAttr, scope);
}
void AddOrAppendXlaInternalScope(Node* node, absl::string_view suffix) {
string updated_scope;
std::optional<string> cur_scope = GetXlaInternalScope(node);
if (cur_scope == std::nullopt) {
updated_scope = std::string(suffix);
} else {
updated_scope = absl::StrCat(cur_scope.value(), "&", suffix);
}
SetXlaInternalScope(node, updated_scope);
}
void ClusterScopingPassImpl::AddScopeToAllTransitivePredecessors(Node* start) {
const string unique_suffix = absl::StrCat("_", GetUniqueScopeId());
std::vector<Node*> starts;
starts.push_back(start);
auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); };
ReverseDFSFrom(*graph_, starts, enter, nullptr,
NodeComparatorName());
}
void ClusterScopingPassImpl::AddScopeToAllTransitiveSuccessors(Node* start) {
const string unique_suffix = absl::StrCat("_", GetUniqueScopeId());
std::vector<Node*> starts;
starts.push_back(start);
auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); };
DFSFrom(*graph_, starts, enter, nullptr,
NodeComparatorName(),
nullptr);
}
Status ClusterScopingPassImpl::ScopingForPipelineStages() {
for (Node* n : graph_->nodes()) {
DCHECK(n);
if (n->type_string() == "Unstage") {
AddScopeToAllTransitiveSuccessors(n);
}
if (n->type_string() == "Stage") {
AddScopeToAllTransitivePredecessors(n);
}
}
return absl::OkStatus();
}
Status ClusterScopingPassImpl::Run() {
if (global_jit_level_ == OptimizerOptions::OFF) {
return absl::OkStatus();
}
return ScopingForPipelineStages();
}
}
Status ClusterScopingPass::Run(const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
return ClusterScopingPassImpl{graph, GetGlobalJitLevelForGraph(options)}
.Run();
}
} | #include "tensorflow/compiler/jit/cluster_scoping_pass.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
Status ClusterScoping(std::unique_ptr<Graph>* graph) {
FixupSourceAndSinkEdges(graph->get());
GraphOptimizationPassWrapper wrapper;
wrapper.session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
GraphOptimizationPassOptions opt_options =
wrapper.CreateGraphOptimizationPassOptions(graph);
ClusterScopingPass pass;
return pass.Run(opt_options);
}
absl::flat_hash_map<string, string> GetXlaInternalScopes(const Graph& graph) {
absl::flat_hash_map<string, string> scopes;
for (Node* node : graph.nodes()) {
string scope;
if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok()) {
scopes[node->name()] = scope;
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "_XlaInternalScopes:";
for (const auto& p : scopes) {
VLOG(2) << " " << p.first << " -> " << p.second;
}
}
return scopes;
}
Node* BuildStageNode(GraphDefBuilder& builder, string name,
std::initializer_list<DataType> dtypes,
absl::Span<const ops::NodeOut> values) {
auto opts = builder.opts()
.WithName(std::move(name))
.WithAttr("dtypes", std::move(dtypes));
if (opts.HaveError()) {
return nullptr;
}
NodeBuilder node_builder(name, "Stage", opts.op_registry());
node_builder.Input(values);
return opts.FinalizeBuilder(&node_builder);
}
TEST(XlaCompilationTest, StagePipelinePreserved) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0"));
Node* add1 =
ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
auto scopes = GetXlaInternalScopes(*graph);
EXPECT_NE(scopes["add0"], scopes["add1"]);
EXPECT_EQ(scopes["add0"], scopes["relu0"]);
EXPECT_EQ(scopes["add1"], scopes["relu1"]);
}
TEST(XlaCompilationTest, StagePipelinePreservedAndInitialScopesRespected) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b,
builder.opts().WithName("add0").WithAttr(
kXlaInternalScopeAttr, "ClusterA"));
Node* add1 = ops::BinaryOp("Add", unstage, b,
builder.opts().WithName("add1").WithAttr(
kXlaInternalScopeAttr, "ClusterA"));
Node* relu0 = ops::UnaryOp("Relu", add0,
builder.opts().WithName("relu0").WithAttr(
kXlaInternalScopeAttr, "ClusterB"));
ops::UnaryOp("Relu", add1,
builder.opts().WithName("relu1").WithAttr(
kXlaInternalScopeAttr, "ClusterD"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
auto scopes = GetXlaInternalScopes(*graph);
EXPECT_NE(scopes["add0"], scopes["add1"]);
EXPECT_NE(scopes["add0"], scopes["relu0"]);
EXPECT_NE(scopes["add1"], scopes["relu1"]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/cluster_scoping_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/cluster_scoping_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f5a8fa47-b710-49c7-b98f-355ccd37a6f2 | cpp | tensorflow/tensorflow | ifrt_device_utils | tensorflow/core/tfrt/ifrt/ifrt_device_utils.cc | tensorflow/core/tfrt/ifrt/ifrt_device_utils_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_device_utils.h"
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/tf2xla/host_compute_metadata.pb.h"
#include "xla/python/ifrt/array.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/executable.h"
#include "xla/python/ifrt/host_callback.h"
#include "xla/service/computation_placer.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tfrt/ifrt/grid.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_config.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
static constexpr int kTpuTopologyRank = 4;
absl::StatusOr<std::vector<xla::ifrt::Device*>> GetAssignedIfrtDevices(
const xla::ifrt::Client& ifrt_client, int num_replicas,
int num_cores_per_replica,
std::optional<std::vector<int>> device_assignment) {
const int num_devices = num_replicas * num_cores_per_replica;
bool no_device_coordinates = false;
for (auto* device : ifrt_client.devices()) {
if (!device->Attributes().map().contains("coords") ||
!device->Attributes().map().contains("core_on_chip")) {
no_device_coordinates = true;
break;
}
}
if (!device_assignment || device_assignment->empty() ||
no_device_coordinates) {
TF_ASSIGN_OR_RETURN(xla::DeviceAssignment xla_device_assignment,
ifrt_client.GetDefaultDeviceAssignment(
num_replicas, num_cores_per_replica));
VLOG(3) << "Getting default device lists";
std::vector<xla::ifrt::Device*> devices;
devices.reserve(num_devices);
for (int replica_idx = 0; replica_idx < num_replicas; replica_idx++) {
for (int core_idx = 0; core_idx < num_cores_per_replica; core_idx++) {
auto device_id = xla_device_assignment(replica_idx, core_idx);
TF_ASSIGN_OR_RETURN(
xla::ifrt::Device * device,
ifrt_client.LookupDevice(xla::ifrt::DeviceId(device_id)));
devices.push_back(device);
}
}
return devices;
}
absl::flat_hash_map<GridCoords, int> devices_from_attribute;
std::vector<int> coord;
coord.reserve(kTpuTopologyRank);
int device_index = 0;
for (auto coord_attr : *device_assignment) {
coord.push_back(coord_attr);
if (coord.size() == kTpuTopologyRank) {
devices_from_attribute.insert(
{GridCoords(coord[0], coord[1], coord[2], coord[3]), device_index});
device_index++;
coord.clear();
}
}
if (!coord.empty()) {
return absl::FailedPreconditionError(
absl::StrCat("Device assignment attribute is expected to be a multiple "
"of 4, but got ",
device_assignment->size()));
}
if (devices_from_attribute.size() != num_devices) {
return absl::FailedPreconditionError(
absl::StrCat("Device assignment has ", devices_from_attribute.size(),
" devices, but expected ", num_devices));
}
struct IfrtDeviceGrid {
xla::ifrt::Device* device;
GridCoords grid;
int index_at_attribute;
};
std::vector<IfrtDeviceGrid> ifrt_devices;
ifrt_devices.reserve(num_devices);
for (auto* device : ifrt_client.devices()) {
GridCoords grid;
auto coords_it = device->Attributes().map().find("coords");
auto core_on_chip_it = device->Attributes().map().find("core_on_chip");
if (coords_it != device->Attributes().map().end() &&
core_on_chip_it != device->Attributes().map().end()) {
VLOG(3) << "Adding coords and core_on_chip attributes:"
<< device->DebugString();
auto coords_list =
std::get<xla::ifrt::AttributeMap::Int64ListValue>(coords_it->second);
auto core_on_chip = std::get<xla::ifrt::AttributeMap::Int64Value>(
core_on_chip_it->second);
if (coords_list.value.size() != 3) {
return absl::InternalError(absl::StrCat(
"Expected coords to be of size 3, but got ",
coords_list.value.size(), " for device ", device->DebugString()));
}
grid = GridCoords(coords_list.value[0], coords_list.value[1],
coords_list.value[2], core_on_chip.value);
} else {
return absl::InternalError(
absl::StrCat("Device ", device->DebugString(),
" does not have coords or core_on_chip attribute."));
}
auto device_it_from_attribute = devices_from_attribute.find(grid);
if (device_it_from_attribute == devices_from_attribute.end()) {
VLOG(1) << "Device coordinates " << grid.ToString()
<< " does not match any TPU device assigned "
<< absl::StrJoin(*device_assignment, " ");
continue;
}
ifrt_devices.push_back(
{.device = device,
.grid = grid,
.index_at_attribute = device_it_from_attribute->second});
}
if (ifrt_devices.size() != num_devices) {
return absl::FailedPreconditionError(absl::StrCat(
"Match ", ifrt_devices.size(), " devices, but expected ", num_devices));
}
absl::c_sort(ifrt_devices, [&](const auto& lhs, const auto& rhs) {
return lhs.index_at_attribute < rhs.index_at_attribute;
});
std::vector<xla::ifrt::Device*> result;
result.reserve(ifrt_devices.size());
for (auto& device_grid : ifrt_devices) {
result.push_back(device_grid.device);
VLOG(3) << "Device: " << device_grid.device->DebugString()
<< " is assigned";
}
return result;
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_device_utils.h"
#include <memory>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/python/ifrt/attribute_map.h"
#include "xla/python/ifrt/device.h"
#include "xla/python/ifrt/mock.h"
#include "xla/service/computation_placer.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::testing::ElementsAre;
using ::testing::Return;
using ::testing::ReturnRef;
using ::tsl::testing::StatusIs;
static constexpr int kNumReplicas = 1;
static constexpr int kNumCoresPerReplica = 2;
static constexpr int kNumDevices = 4;
static constexpr int kDeviceIdOffset = 8;
class IfrtDeviceUtilsTest : public ::testing::Test {
protected:
void SetUp() override {
mocked_devices_.reserve(kNumDevices);
devices_.reserve(kNumDevices);
for (int i = 0; i < kNumDevices; ++i) {
mocked_devices_.push_back(std::make_unique<xla::ifrt::MockDevice>());
ON_CALL(*mocked_devices_[i], Attributes())
.WillByDefault(ReturnRef(device_attributes_maps_[i]));
ON_CALL(*mocked_devices_[i], Id())
.WillByDefault(Return(xla::ifrt::DeviceId(kDeviceIdOffset + i)));
ON_CALL(client_, LookupDevice(xla::ifrt::DeviceId(kDeviceIdOffset + i)))
.WillByDefault(Return(mocked_devices_[i].get()));
devices_.push_back(mocked_devices_[i].get());
};
ON_CALL(client_, devices()).WillByDefault(Return(devices_));
xla::DeviceAssignment assignment(kNumReplicas, kNumCoresPerReplica);
assignment(0, 0) = kDeviceIdOffset + 2;
assignment(0, 1) = kDeviceIdOffset + 3;
ON_CALL(client_,
GetDefaultDeviceAssignment(kNumReplicas, kNumCoresPerReplica))
.WillByDefault(Return(assignment));
}
xla::ifrt::MockClient client_;
std::vector<std::unique_ptr<xla::ifrt::MockDevice>> mocked_devices_;
std::vector<xla::ifrt::Device*> devices_;
std::vector<xla::ifrt::AttributeMap> device_attributes_maps_ = {
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({1, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(0)}}),
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({1, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(1)}}),
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({2, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(0)}}),
xla::ifrt::AttributeMap(xla::ifrt::AttributeMap::Map{
{"coords", xla::ifrt::AttributeMap::Int64ListValue({2, 0, 0})},
{"core_on_chip", xla::ifrt::AttributeMap::Int64Value(1)}}),
};
};
TEST_F(IfrtDeviceUtilsTest, Basic) {
std::vector<int> device_assignment_attr = {1, 0, 0, 1, 1, 0, 0, 0};
TF_ASSERT_OK_AND_ASSIGN(
auto devices_from_attribute,
GetAssignedIfrtDevices(client_, kNumReplicas, kNumCoresPerReplica,
device_assignment_attr));
EXPECT_THAT(devices_from_attribute, ElementsAre(devices_[1], devices_[0]));
}
TEST_F(IfrtDeviceUtilsTest, SeparateXCoordinates) {
std::vector<int> device_assignment_attr = {1, 0, 0, 1, 2, 0, 0, 0};
TF_ASSERT_OK_AND_ASSIGN(
auto devices_from_attribute,
GetAssignedIfrtDevices(client_, kNumReplicas, kNumCoresPerReplica,
device_assignment_attr));
EXPECT_THAT(devices_from_attribute, ElementsAre(devices_[1], devices_[2]));
}
TEST_F(IfrtDeviceUtilsTest, EmptyDeviceAssignmentShallReturnDefault) {
TF_ASSERT_OK_AND_ASSIGN(
auto devices_from_attribute,
GetAssignedIfrtDevices(client_, kNumReplicas, kNumCoresPerReplica,
std::nullopt));
EXPECT_THAT(devices_from_attribute, ElementsAre(devices_[2], devices_[3]));
}
TEST_F(IfrtDeviceUtilsTest, MismatchCoordinatesShallFail) {
std::vector<int> device_assignment_attr = {1, 0, 0, 1, 3, 0, 0, 0};
auto status = GetAssignedIfrtDevices(client_, 1, 2, device_assignment_attr);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kFailedPrecondition));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_device_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_device_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
80bcb2c4-7146-42ea-ae36-aa47df8769b3 | cpp | tensorflow/tensorflow | array | tensorflow/lite/array.cc | tensorflow/lite/array_test.cc | #include "tensorflow/lite/array.h"
namespace tflite {
namespace array_internal {
void TfLiteArrayDeleter::operator()(TfLiteIntArray* a) {
if (a) {
TfLiteIntArrayFree(a);
}
}
void TfLiteArrayDeleter::operator()(TfLiteFloatArray* a) {
if (a) {
TfLiteFloatArrayFree(a);
}
}
}
} | #include "tensorflow/lite/array.h"
#include <algorithm>
#include <initializer_list>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/types/span.h"
#include "tensorflow/lite/core/c/common.h"
using testing::ElementsAreArray;
using testing::Eq;
namespace tflite {
namespace {
absl::Span<int> GetSpan(TfLiteIntArray& array) {
return {array.data, static_cast<size_t>(array.size)};
}
absl::Span<float> GetSpan(TfLiteFloatArray& array) {
return {array.data, static_cast<size_t>(array.size)};
}
template <class T>
class TfLiteArrayTest : public testing::Test {
static_assert(
std::is_same_v<TfLiteIntArray, TfLiteArrayUniquePtr<int>::element_type>,
"TfLiteArrayUniquePtr<int>::element_type should be TfLiteIntArray");
static_assert(
std::is_same_v<TfLiteFloatArray,
TfLiteArrayUniquePtr<float>::element_type>,
"TfLiteArrayUniquePtr<float>::element_type should be TfLiteFloatArray");
};
using ArrayTypes = testing::Types<int, float>;
TYPED_TEST_SUITE(TfLiteArrayTest, ArrayTypes);
TYPED_TEST(TfLiteArrayTest, BuildArrayWithSize) {
constexpr int size = 3;
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray<TypeParam>(size);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(size));
std::fill_n(array->data, size, static_cast<TypeParam>(1));
}
TYPED_TEST(TfLiteArrayTest, BuildFromDynamicArray) {
constexpr int size = 4;
constexpr TypeParam values[size] = {1, 2, 3, 4};
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray(size, values);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(size));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
TYPED_TEST(TfLiteArrayTest, BuildFromCArray) {
TypeParam values[] = {1, 2, 3, 4};
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray(values);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(sizeof(values) / sizeof(TypeParam)));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
TYPED_TEST(TfLiteArrayTest, BuildFromVector) {
std::vector<TypeParam> values = {1, 2, 3, 4};
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray(values);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(values.size()));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
TYPED_TEST(TfLiteArrayTest, BuildFromVectorForceType) {
using DifferentType =
std::conditional_t<std::is_same_v<TypeParam, int>, float, int>;
std::vector<DifferentType> values = {1, 2, 3, 4};
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray<TypeParam>(values);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(values.size()));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
TYPED_TEST(TfLiteArrayTest, BuildFromSpan) {
std::vector<TypeParam> values = {1, 2, 3, 4};
TfLiteArrayUniquePtr<TypeParam> array =
BuildTfLiteArray(absl::Span<const TypeParam>(values));
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(values.size()));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
TYPED_TEST(TfLiteArrayTest, BuildFromInitializerList) {
std::initializer_list<TypeParam> values{1, 2, 3, 4};
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray(values);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(values.size()));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
TYPED_TEST(TfLiteArrayTest, BuildUsingSingleElementInitializerList) {
constexpr TypeParam value = 42;
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray({value});
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(1));
EXPECT_THAT(array->data[0], Eq(value));
}
TYPED_TEST(TfLiteArrayTest, BuildFromTfLiteArray) {
std::initializer_list<TypeParam> values{1, 2, 3, 4};
const auto ref = BuildTfLiteArray(values);
TfLiteArrayUniquePtr<TypeParam> array = BuildTfLiteArray(*ref);
ASSERT_NE(array, nullptr);
EXPECT_THAT(array->size, Eq(values.size()));
EXPECT_THAT(GetSpan(*array), ElementsAreArray(values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/array.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/array_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc9125ea-a219-4e03-801b-8bfe86092d36 | cpp | tensorflow/tensorflow | offset_buffer | tensorflow/compiler/mlir/lite/offset_buffer.h | tensorflow/compiler/mlir/lite/offset_buffer_test.cc | #ifndef TENSORFLOW_COMPILER_MLIR_LITE_OFFSET_BUFFER_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_OFFSET_BUFFER_H_
#include <cstdint>
namespace tflite {
inline bool IsValidBufferOffset(const int64_t offset) { return offset > 1; }
}
#endif | #include "tensorflow/compiler/mlir/lite/offset_buffer.h"
#include "tensorflow/core/platform/test.h"
namespace tflite {
namespace {
TEST(OffsetBufferTest, IsValidBufferOffsetTrueGreaterThan1) {
EXPECT_TRUE(IsValidBufferOffset(2));
}
TEST(OffsetBufferTest, IsValidBufferOffsetFalseForLessThanOrEqualTo1) {
EXPECT_FALSE(IsValidBufferOffset(1));
EXPECT_FALSE(IsValidBufferOffset(0));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/offset_buffer.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/offset_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fc21e004-d9cf-4df2-88e5-7f58d4352930 | cpp | tensorflow/tensorflow | tile_assignment | third_party/xla/xla/hlo/ir/tile_assignment.cc | third_party/xla/xla/tests/tile_assignment_test.cc | #include "xla/hlo/ir/tile_assignment.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/util.h"
namespace xla {
namespace {
void CanonicalizeIotaDims(absl::Span<int64_t>& dims, absl::Span<int>& perm) {
DCHECK_EQ(dims.size(), perm.size());
if (dims.size() <= 1) {
return;
}
absl::InlinedVector<int, 6> old_to_new_dims(dims.size());
while (true) {
bool changed = false;
int new_ndims = 0;
for (int i = 0; i < dims.size(); ++i) {
if (dims[i] == 1) {
old_to_new_dims[i] = -1;
} else {
old_to_new_dims[i] = new_ndims;
++new_ndims;
}
}
if (new_ndims != dims.size()) {
for (int i = 0, new_idx = 0; i < dims.size(); ++i) {
int new_dim = old_to_new_dims[i];
if (new_dim >= 0) {
dims[new_dim] = dims[i];
}
int new_perm_dim = old_to_new_dims[perm[i]];
if (new_perm_dim >= 0) {
perm[new_idx] = new_perm_dim;
++new_idx;
DCHECK_LE(new_idx, new_ndims);
}
}
perm = perm.subspan(0, new_ndims);
dims = dims.subspan(0, new_ndims);
}
for (int i = 1, base = 0, n = dims.size(); i < n; ++i) {
const int base_dim = perm[base];
const int dim = perm[i];
if (base_dim + (i - base) == dim) {
dims[base_dim] *= dims[dim];
dims[dim] = 1;
changed = true;
} else {
base = i;
}
}
if (!changed) {
break;
}
}
}
enum class TransposeKind {
kNoop,
kReshape,
kTranspose,
};
TransposeKind GetTransposeKind(absl::Span<const int64_t> dims,
absl::Span<const int> perm) {
TransposeKind kind = TransposeKind::kNoop;
int prev_non_one_dim = -1;
for (int i = 0; i < perm.size(); ++i) {
const auto& d = perm[i];
if (dims[d] == 1) {
if (d != i && dims[i] != 1) kind = TransposeKind::kReshape;
continue;
}
if (d <= prev_non_one_dim) return TransposeKind::kTranspose;
prev_non_one_dim = d;
}
return kind;
}
std::pair<absl::InlinedVector<int64_t, 6>, absl::InlinedVector<int, 6>>
FullyDecanonicalize(absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm) {
absl::InlinedVector<int64_t, 6> new_reshape_dims;
absl::InlinedVector<int, 6> old_to_new_dims(reshape_dims.size() + 1);
for (int i = 0, n = reshape_dims.size(); i < n; ++i) {
int64_t dim_size = reshape_dims[i];
while (dim_size % 2 == 0) {
new_reshape_dims.push_back(2);
dim_size /= 2;
}
for (int i = 3; i * i <= dim_size; i += 2) {
while (dim_size % i == 0) {
new_reshape_dims.push_back(i);
dim_size /= i;
}
}
if (dim_size > 1) {
CHECK_GT(dim_size, 2);
new_reshape_dims.push_back(dim_size);
}
old_to_new_dims[i + 1] = new_reshape_dims.size();
}
absl::InlinedVector<int, 6> new_transpose_perm;
new_transpose_perm.reserve(new_reshape_dims.size());
for (int i = 0; i < transpose_perm.size(); ++i) {
const int old_dim = transpose_perm[i];
for (int j = old_to_new_dims[old_dim], n = old_to_new_dims[old_dim + 1];
j < n; ++j) {
new_transpose_perm.push_back(j);
}
}
return std::make_pair(std::move(new_reshape_dims),
std::move(new_transpose_perm));
}
}
IotaTileAssignment IotaTileAssignment::Create(
absl::Span<const int64_t> dims) {
return IotaTileAssignment(dims, {Product(dims)}, {0});
}
IotaTileAssignment IotaTileAssignment::Create(
absl::Span<const int64_t> dims, absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm) {
absl::InlinedVector<int64_t, 6> canonicalized_dims(reshape_dims.begin(),
reshape_dims.end());
absl::InlinedVector<int, 6> canonicalized_perm(transpose_perm.begin(),
transpose_perm.end());
auto dims_span = absl::MakeSpan(canonicalized_dims);
auto perm_span = absl::MakeSpan(canonicalized_perm);
CanonicalizeIotaDims(dims_span, perm_span);
if (dims_span.empty()) {
canonicalized_dims[0] = 1;
dims_span = absl::MakeSpan(canonicalized_dims.data(), 1);
canonicalized_perm[0] = 0;
perm_span = absl::MakeSpan(canonicalized_perm.data(), 1);
}
return IotaTileAssignment(dims, dims_span, perm_span);
}
Array<int64_t> IotaTileAssignment::ToArray() const {
Array<int64_t> array(reshape_dims());
array.FillIota(0);
array.TransposeDimensions(transpose_perm());
array.Reshape(dims());
return array;
}
IotaTileAssignment::IotaTileAssignment(const IotaTileAssignment& other)
: IotaTileAssignment(other.ndims_, other.reshape_ndims_) {
std::memcpy(storage_.get(), other.storage_.get(), size_bytes());
}
IotaTileAssignment& IotaTileAssignment::operator=(
const IotaTileAssignment& other) {
const int new_size = other.size_bytes();
if (size_bytes() != new_size) {
storage_.reset(new char[new_size]);
}
ndims_ = other.ndims_;
reshape_ndims_ = other.reshape_ndims_;
std::memcpy(storage_.get(), other.storage_.get(), new_size);
return *this;
}
IotaTileAssignment::IotaTileAssignment(absl::Span<const int64_t> dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm)
: IotaTileAssignment(dims.size(), reshape_dims.size()) {
DCHECK_EQ(reshape_dims.size(), transpose_perm.size());
std::memcpy(dims_ptr(), dims.data(), ndims_ * sizeof(int64_t));
DCHECK_EQ(num_elements(), Product(reshape_dims));
std::memcpy(reshape_dims_ptr(), reshape_dims.data(),
reshape_ndims_ * sizeof(int64_t));
std::memcpy(transpose_perm_ptr(), transpose_perm.data(),
reshape_ndims_ * sizeof(int));
}
IotaTileAssignment::IotaTileAssignment(int ndims, int reshape_ndims)
: ndims_(ndims),
reshape_ndims_(reshape_ndims),
storage_(new char[size_bytes()]) {}
std::optional<IotaTileAssignment> IotaTileAssignment::Transpose(
absl::Span<const int> perm) const {
DCHECK_EQ(ndims_, perm.size());
auto dims = this->dims();
const TransposeKind kind = GetTransposeKind(dims, perm);
if (kind == TransposeKind::kNoop) return *this;
absl::InlinedVector<int64_t, 6> new_dims(ndims_);
for (int64_t i = 0; i < ndims_; ++i) {
new_dims[i] = dims[perm[i]];
}
if (kind == TransposeKind::kReshape) {
return IotaTileAssignment::Create(new_dims, reshape_dims(),
transpose_perm());
}
if (reshape_ndims_ == 1) {
return IotaTileAssignment::Create(new_dims, dims, perm);
}
bool is_pure_transpose = true;
absl::InlinedVector<int64_t, 6> non_one_dims;
absl::InlinedVector<int, 6> one_to_non_one(ndims_);
non_one_dims.reserve(ndims_);
auto reshape_dims = this->reshape_dims();
auto transpose_perm = this->transpose_perm();
for (int i = 0; i < ndims_; ++i) {
const int64_t dim = dims[i];
if (dim == 1) {
one_to_non_one[i] = -1;
continue;
}
if (non_one_dims.size() >= reshape_ndims_ ||
reshape_dims[transpose_perm[non_one_dims.size()]] != dim) {
is_pure_transpose = false;
}
one_to_non_one[i] = non_one_dims.size();
non_one_dims.push_back(dims[i]);
}
if (is_pure_transpose) {
CHECK_EQ(reshape_ndims_, non_one_dims.size());
absl::InlinedVector<int, 6> new_perm;
new_perm.reserve(non_one_dims.size());
for (int i = 0; i < ndims_; ++i) {
if (dims[perm[i]] == 1) continue;
new_perm.push_back(transpose_perm[one_to_non_one[perm[i]]]);
}
CHECK_EQ(reshape_ndims_, new_perm.size());
return IotaTileAssignment::Create(new_dims, reshape_dims, new_perm);
}
auto [decanonicalized_reshape_dims, decanonicalized_transpose_perm] =
FullyDecanonicalize(reshape_dims, transpose_perm);
CHECK_LE(non_one_dims.size(), decanonicalized_reshape_dims.size());
absl::InlinedVector<absl::InlinedVector<int, 2>, 6> grouped_reshape_dims(
non_one_dims.size());
int transpose_perm_idx = 0;
for (int i = 0, n = non_one_dims.size(),
dn = decanonicalized_reshape_dims.size();
i < n && transpose_perm_idx < dn; ++i) {
int reshape_dim_idx = decanonicalized_transpose_perm[transpose_perm_idx];
int64_t cand = decanonicalized_reshape_dims[reshape_dim_idx];
int64_t target = non_one_dims[i];
while (target % cand == 0) {
target /= cand;
grouped_reshape_dims[i].push_back(reshape_dim_idx);
if (++transpose_perm_idx >= dn) {
break;
}
reshape_dim_idx = decanonicalized_transpose_perm[transpose_perm_idx];
cand = decanonicalized_reshape_dims[reshape_dim_idx];
}
if (target != 1) {
return std::nullopt;
}
}
absl::InlinedVector<int, 6> flattened_transpose_perm;
flattened_transpose_perm.reserve(reshape_ndims_);
for (int i = 0; i < perm.size(); ++i) {
const int dim = perm[i];
if (one_to_non_one[dim] < 0) {
continue;
}
auto& group = grouped_reshape_dims[one_to_non_one[dim]];
flattened_transpose_perm.insert(flattened_transpose_perm.end(),
group.begin(), group.end());
}
CHECK_EQ(flattened_transpose_perm.size(),
decanonicalized_transpose_perm.size());
return IotaTileAssignment::Create(new_dims, decanonicalized_reshape_dims,
flattened_transpose_perm);
}
void IotaTileAssignment::Print(Printer* printer) const {
printer->Append("[");
AppendJoin(printer, dims(), ",");
printer->Append("]<=[");
AppendJoin(printer, reshape_dims(), ",");
printer->Append("]");
if (reshape_ndims_ > 1) {
printer->Append("T(");
AppendJoin(printer, transpose_perm(), ",");
printer->Append(")");
}
}
std::string IotaTileAssignment::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
// Computes the device id at `index` without materializing the full array.
int64_t IotaTileAssignment::value_at(absl::Span<const int64_t> index) const {
  DCHECK_EQ(index.size(), ndims_);
  // Linearize `index` over the logical tile dimensions (row-major).
  int64_t linear_index = index[0];
  auto dims = this->dims();
  for (int64_t i = 1; i < ndims_; ++i) {
    linear_index *= dims[i];
    linear_index += index[i];
  }
  // Delinearize the linear index into the *transposed* reshape space: walk
  // the transpose permutation from minor to major, recovering one coordinate
  // per reshape dimension.
  auto reshape_dims = this->reshape_dims();
  auto transpose_perm = this->transpose_perm();
  absl::InlinedVector<int64_t, 6> reshape_index(reshape_ndims_);
  for (int64_t i = reshape_ndims_ - 1; i >= 0; --i) {
    int dim = transpose_perm[i];
    int dim_size = reshape_dims[dim];
    reshape_index[dim] = linear_index % dim_size;
    linear_index /= dim_size;
  }
  // Re-linearize the untransposed reshape coordinates; since the source of
  // the assignment is an iota, this linear offset is the device id itself.
  int64_t value = reshape_index[0];
  for (int64_t i = 1; i < reshape_ndims_; ++i) {
    value *= reshape_dims[i];
    value += reshape_index[i];
  }
  return value;
}
bool TileAssignment::operator==(const TileAssignment& other) const {
  // When both sides carry the compact iota form, compare those directly;
  // otherwise fall back to comparing fully materialized arrays.
  const bool both_iota = iota_.has_value() && other.iota_.has_value();
  if (both_iota) {
    return *iota_ == *other.iota_;
  }
  return array() == other.array();
}
int64_t TileAssignment::operator()(absl::Span<const int64_t> indexes) const {
  // Prefer the dense array when it has already been materialized; otherwise
  // evaluate the iota form on the fly.
  if (array_ != nullptr) {
    return (*array_)(indexes);
  }
  return iota_->value_at(indexes);
}
absl::Span<const int64_t> TileAssignment::dimensions() const {
  // Each accessor prefers the materialized array and otherwise reads the
  // equivalent property off the compact iota representation.
  if (array_ != nullptr) {
    return array_->dimensions();
  }
  return iota_->dims();
}
int64_t TileAssignment::num_dimensions() const {
  if (array_ != nullptr) {
    return array_->num_dimensions();
  }
  return iota_->ndims();
}
int64_t TileAssignment::dim(int64_t n) const {
  if (array_ != nullptr) {
    return array_->dim(n);
  }
  return iota_->dim(n);
}
int64_t TileAssignment::num_elements() const {
  if (array_ != nullptr) {
    return array_->num_elements();
  }
  return iota_->num_elements();
}
int64_t TileAssignment::first() const {
  // An iota assignment always begins at device 0.
  if (array_ != nullptr) {
    return *array_->begin();
  }
  return 0;
}
// Invokes `f` for every (index, device) pair. Iteration requires the dense
// form, so the array is materialized lazily first.
void TileAssignment::Each(
    absl::FunctionRef<void(absl::Span<const int64_t>, int64_t)> f) const {
  MaybeMaterializeFullArray();
  array_->Each(f);
}
// Status-returning variant of Each(); stops and returns on the first error.
// Also forces materialization of the dense array.
absl::Status TileAssignment::EachStatus(
    absl::FunctionRef<absl::Status(absl::Span<const int64_t>, int64_t)> f)
    const {
  MaybeMaterializeFullArray();
  return array_->EachStatus(f);
}
[[nodiscard]] TileAssignment TileAssignment::Reshape(
    absl::Span<const int64_t> new_dimensions) const {
  // An iota assignment reshapes for free: only the logical dimensions change,
  // the underlying device permutation stays the same.
  if (iota_.has_value()) {
    CHECK_EQ(Product(new_dimensions), iota_->num_elements());
    return TileAssignment(
        IotaTileAssignment(new_dimensions, iota_->reshape_dims(),
                           iota_->transpose_perm()),
        /*shared_array=*/nullptr);
  }
  // Otherwise clone the dense array and reshape the copy in place.
  auto new_array = std::make_shared<Array<int64_t>>(*array_);
  new_array->Reshape(new_dimensions);
  return TileAssignment(std::move(new_array));
}
// Returns a transposed copy of this assignment according to `perm`.
[[nodiscard]] TileAssignment TileAssignment::Transpose(
    absl::Span<const int> perm) const {
  const TransposeKind kind = GetTransposeKind(dimensions(), perm);
  // GetTransposeKind reports a no-op permutation: nothing to do.
  if (kind == TransposeKind::kNoop) {
    return *this;
  }
  if (iota_) {
    // Try to stay in the compact iota form. IotaTileAssignment::Transpose
    // returns nullopt when the permutation cannot be expressed that way.
    auto transposed = iota_->Transpose(perm);
    if (transposed) {
      return TileAssignment(std::move(*transposed));
    }
  }
  // Fall back to transposing a materialized clone of the full device array.
  auto cloned_array = shared_array_clone();
  cloned_array->TransposeDimensions(perm);
  return TileAssignment(std::move(cloned_array));
}
void TileAssignment::Print(Printer* printer) const {
if (iota_) {
printer->Append("devices=");
iota_->Print(printer);
} else {
printer->Append("devices=[");
AppendJoin(printer, array().dimensions(), ",");
printer->Append("]");
AppendJoin(printer, array(), ",");
}
}
std::string TileAssignment::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
bool TileAssignment::UsesDevice(int64_t device) const {
  // An iota assignment contains exactly the devices [0, num_elements), so a
  // range check suffices; otherwise scan the materialized array.
  if (iota_.has_value()) {
    return device < iota_->num_elements();
  }
  return absl::c_linear_search(array(), device);
}
// Returns the dense device array, materializing it from the iota form on
// first use.
const Array<int64_t>& TileAssignment::array() const {
  MaybeMaterializeFullArray();
  return *array_;
}
// Shared-ownership view of the (lazily materialized) dense array.
const std::shared_ptr<const Array<int64_t>>& TileAssignment::shared_array()
    const {
  MaybeMaterializeFullArray();
  return shared_array_;
}
// Deep copy of the dense array, suitable for in-place mutation by callers
// (e.g. Transpose()).
std::shared_ptr<Array<int64_t>> TileAssignment::shared_array_clone() const {
  MaybeMaterializeFullArray();
  return std::make_shared<Array<int64_t>>(*array_);
}
// Lazily expands the compact iota representation into a dense array; no-op
// when the array already exists.
// NOTE(review): this const method assigns to shared_array_/array_, so those
// members are presumably declared mutable — confirm in the header.
void TileAssignment::MaybeMaterializeFullArray() const {
  if (array_ == nullptr) {
    DCHECK(shared_array_ == nullptr);
    DCHECK(iota_.has_value());
    auto full = std::make_shared<Array<int64_t>>(iota_->ToArray());
    shared_array_ = std::move(full);
    array_ = shared_array_.get();
  }
}
} | #include "xla/hlo/ir/tile_assignment.h"
#include <memory>
#include <vector>
#include "absl/hash/hash.h"
#include "xla/array3d.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
std::vector<int64_t> ToVectorUsingEach(const TileAssignment& tile) {
std::vector<int64_t> result;
result.reserve(tile.num_elements());
tile.Each([&](absl::Span<const int64_t> index, int64_t device) {
result.push_back(device);
});
return result;
}
// Verifies that a default-constructed assignment is replicated: a single
// dimension of size zero.
TEST(TileAssignmentTest, Replicated) {
  TileAssignment tile;
  EXPECT_EQ(tile.num_dimensions(), 1);
  EXPECT_EQ(tile.dim(0), 0);
}
// Verifies maximal sharding: a single element holding the chosen device id.
TEST(TileAssignmentTest, Maximal) {
  TileAssignment tile(5);
  EXPECT_EQ(tile.num_dimensions(), 1);
  EXPECT_EQ(tile.dim(0), 1);
  EXPECT_EQ(tile(0), 5);
  EXPECT_EQ(tile({0}), 5);
  EXPECT_FALSE(tile.iota());
  EXPECT_TRUE(tile.UsesDevice(5));
  EXPECT_EQ(tile.first(), 5);
  EXPECT_FALSE(tile.UsesDevice(0));
  EXPECT_THAT(ToVectorUsingEach(tile), ElementsAre(5));
}
// Verifies that an explicit (V1) array and the iota (V2) form describing the
// same device permutation compare and hash as equal.
TEST(TileAssignmentTest, V1V2Equivalence) {
  Array3D<int64_t> array(
      {{{0, 8, 4, 12}, {1, 9, 5, 13}}, {{2, 10, 6, 14}, {3, 11, 7, 15}}});
  TileAssignment v1(std::make_shared<const Array<int64_t>>(array));
  TileAssignment v2({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
  EXPECT_EQ(v1, v2);
  EXPECT_EQ(v2, v1);
  EXPECT_EQ(v1.first(), 0);
  EXPECT_EQ(v2.first(), 0);
  EXPECT_NE(v1.iota().has_value(), v2.iota().has_value());
  EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
// Verifies copy construction preserves equality, the iota form, and the hash.
TEST(TileAssignmentTest, CopyConstruction) {
  TileAssignment tile({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
  TileAssignment copied(tile);
  EXPECT_EQ(tile, copied);
  EXPECT_EQ(tile.iota().has_value(), copied.iota().has_value());
  EXPECT_EQ(absl::HashOf(tile), absl::HashOf(copied));
}
// Verifies copy assignment preserves equality, the iota form, and the hash.
TEST(TileAssignmentTest, CopyAssignment) {
  TileAssignment tile({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
  TileAssignment copied = tile;
  EXPECT_EQ(tile, copied);
  EXPECT_EQ(tile.iota().has_value(), copied.iota().has_value());
  EXPECT_EQ(absl::HashOf(tile), absl::HashOf(copied));
}
// Parameterized fixture: when the parameter is true, each test first converts
// the iota (V2) tile assignment into an explicit (V1) array, so both code
// paths are exercised with identical expectations.
class FormattedTileAssignmentTest : public ::testing::TestWithParam<bool> {
 protected:
  bool ShouldConvertToV1() { return GetParam(); }
};
// Verifies formatting, indexing, and enumeration of a plain iota tile.
TEST_P(FormattedTileAssignmentTest, TrivialIotaTile) {
  TileAssignment tile({4, 4, 2});
  EXPECT_EQ(tile.ToString(), "devices=[4,4,2]<=[32]");
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  EXPECT_EQ(tile, TileAssignment({4, 4, 2}));
  EXPECT_EQ(tile.num_dimensions(), 3);
  EXPECT_EQ(tile.dim(0), 4);
  EXPECT_EQ(tile.dim(1), 4);
  EXPECT_EQ(tile.dim(2), 2);
  EXPECT_EQ(tile(0, 0, 0), 0);
  EXPECT_EQ(tile({3, 2, 1}), 29);
  EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(tile.UsesDevice(0));
  EXPECT_TRUE(tile.UsesDevice(31));
  EXPECT_FALSE(tile.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(tile),
      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                  18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
// Verifies an iota tile with a non-trivial transpose permutation.
TEST_P(FormattedTileAssignmentTest, TransposedIotaTile) {
  TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
  EXPECT_EQ(tile.ToString(), "devices=[4,4,2]<=[2,4,4]T(2,1,0)");
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  EXPECT_EQ(tile, TileAssignment({4, 4, 2}, {2, 4, 4}, {2, 1, 0}));
  EXPECT_EQ(tile.num_dimensions(), 3);
  EXPECT_EQ(tile.dim(0), 4);
  EXPECT_EQ(tile.dim(1), 4);
  EXPECT_EQ(tile.dim(2), 2);
  EXPECT_EQ(tile(0, 0, 0), 0);
  EXPECT_EQ(tile({3, 2, 1}), 27);
  EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(tile.UsesDevice(0));
  EXPECT_TRUE(tile.UsesDevice(31));
  EXPECT_FALSE(tile.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(tile),
      ElementsAre(0, 16, 4, 20, 8, 24, 12, 28, 1, 17, 5, 21, 9, 25, 13, 29, 2,
                  18, 6, 22, 10, 26, 14, 30, 3, 19, 7, 23, 11, 27, 15, 31));
}
// Verifies that a non-canonical reshape/transpose is canonicalized on
// construction ([2,4,4]T(1,2,0) collapses to [2,16]T(1,0)).
TEST_P(FormattedTileAssignmentTest, NonCanonicalTransposedIotaTile) {
  TileAssignment tile({4, 8}, {2, 4, 4}, {1, 2, 0});
  EXPECT_EQ(tile.ToString(), "devices=[4,8]<=[2,16]T(1,0)");
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  EXPECT_EQ(tile, TileAssignment({4, 8}, {2, 16}, {1, 0}));
  EXPECT_EQ(tile.num_dimensions(), 2);
  EXPECT_EQ(tile.dim(0), 4);
  EXPECT_EQ(tile.dim(1), 8);
  EXPECT_EQ(tile(0, 0), 0);
  EXPECT_EQ(tile({3, 2}), 13);
  EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(tile.UsesDevice(0));
  EXPECT_TRUE(tile.UsesDevice(31));
  EXPECT_FALSE(tile.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(tile),
      ElementsAre(0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24,
                  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31));
}
// Verifies Reshape() on a plain iota tile: device order is unchanged, only
// the logical dimensions differ.
TEST_P(FormattedTileAssignmentTest, ReshapeTrivalIotaTile) {
  TileAssignment tile({4, 4, 2});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment reshaped = tile.Reshape({2, 8, 2});
  EXPECT_NE(reshaped, tile);
  EXPECT_EQ(reshaped, TileAssignment({2, 8, 2}));
  EXPECT_EQ(reshaped.num_dimensions(), 3);
  EXPECT_EQ(reshaped.dim(0), 2);
  EXPECT_EQ(reshaped.dim(1), 8);
  EXPECT_EQ(reshaped.dim(2), 2);
  EXPECT_EQ(reshaped(0, 0, 0), 0);
  EXPECT_EQ(reshaped({1, 3, 1}), 23);
  EXPECT_EQ(reshaped.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(reshaped.UsesDevice(0));
  EXPECT_TRUE(reshaped.UsesDevice(31));
  EXPECT_FALSE(reshaped.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(reshaped),
      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                  18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
// Verifies Reshape() on a transposed iota tile: the reshape_dims/perm of the
// compact form are preserved while the logical dimensions change.
TEST_P(FormattedTileAssignmentTest, ReshapeTransposedIotaTile) {
  TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment reshaped = tile.Reshape({2, 2, 4, 2});
  EXPECT_NE(reshaped, tile);
  EXPECT_EQ(reshaped, TileAssignment({2, 2, 4, 2}, {2, 4, 4}, {2, 1, 0}));
  EXPECT_EQ(reshaped.num_dimensions(), 4);
  EXPECT_EQ(reshaped.dim(0), 2);
  EXPECT_EQ(reshaped.dim(1), 2);
  EXPECT_EQ(reshaped.dim(2), 4);
  EXPECT_EQ(reshaped.dim(3), 2);
  EXPECT_EQ(reshaped(0, 0, 0, 0), 0);
  EXPECT_EQ(reshaped({1, 1, 2, 1}), 27);
  EXPECT_EQ(reshaped.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(reshaped.UsesDevice(0));
  EXPECT_TRUE(reshaped.UsesDevice(31));
  EXPECT_FALSE(reshaped.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(reshaped),
      ElementsAre(0, 16, 4, 20, 8, 24, 12, 28, 1, 17, 5, 21, 9, 25, 13, 29, 2,
                  18, 6, 22, 10, 26, 14, 30, 3, 19, 7, 23, 11, 27, 15, 31));
}
// Verifies Transpose() on a plain iota tile stays in the compact form.
TEST_P(FormattedTileAssignmentTest, TransposeTrivalIotaTile) {
  TileAssignment tile({4, 4, 2});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({2, 0, 1});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed, TileAssignment({2, 4, 4}, {16, 2}, {1, 0}));
  EXPECT_EQ(xposed.num_dimensions(), 3);
  EXPECT_EQ(xposed.dim(0), 2);
  EXPECT_EQ(xposed.dim(1), 4);
  EXPECT_EQ(xposed.dim(2), 4);
  EXPECT_EQ(xposed(0, 0, 0), 0);
  EXPECT_EQ(xposed({1, 3, 1}), 27);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(31));
  EXPECT_FALSE(xposed.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1,
                  3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31));
}
// Verifies Transpose() composed on top of an already-transposed iota tile.
TEST_P(FormattedTileAssignmentTest, TransposeTransposedIotaTile) {
  TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({0, 2, 1});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed, TileAssignment({4, 2, 4}, {8, 4}, {1, 0}));
  EXPECT_EQ(xposed.num_dimensions(), 3);
  EXPECT_EQ(xposed.dim(0), 4);
  EXPECT_EQ(xposed.dim(1), 2);
  EXPECT_EQ(xposed.dim(2), 4);
  EXPECT_EQ(xposed(0, 0, 0), 0);
  EXPECT_EQ(xposed({3, 0, 3}), 15);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(31));
  EXPECT_FALSE(xposed.UsesDevice(32));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(0, 4, 8, 12, 16, 20, 24, 28, 1, 5, 9, 13, 17, 21, 25, 29, 2,
                  6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 31));
}
// Verifies that permutations that only move size-1 (degenerate) dimensions
// are handled; the result collapses back to a plain iota.
TEST_P(FormattedTileAssignmentTest, TransposeIotaTileWithDegernateDims) {
  TileAssignment tile({4, 4, 1}, {4, 4}, {1, 0});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({1, 2, 0});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed, TileAssignment({4, 1, 4}));
  EXPECT_EQ(xposed.num_dimensions(), 3);
  EXPECT_EQ(xposed.dim(0), 4);
  EXPECT_EQ(xposed.dim(1), 1);
  EXPECT_EQ(xposed.dim(2), 4);
  EXPECT_EQ(xposed(0, 0, 0), 0);
  EXPECT_EQ(xposed({2, 0, 3}), 11);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(15));
  EXPECT_FALSE(xposed.UsesDevice(16));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
}
// Verifies a transpose that requires splitting a canonicalized reshape
// dimension to remain expressible as an iota tile.
TEST_P(FormattedTileAssignmentTest,
       TransposeIotaTileSplittingCanonicalizedReshapeDims) {
  TileAssignment tile({8, 2, 16}, {16, 16}, {1, 0});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({0, 2, 1});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed, TileAssignment({8, 16, 2}, {16, 8, 2}, {1, 0, 2}));
  EXPECT_EQ(xposed.num_dimensions(), 3);
  EXPECT_EQ(xposed.dim(0), 8);
  EXPECT_EQ(xposed.dim(1), 16);
  EXPECT_EQ(xposed.dim(2), 2);
  EXPECT_EQ(xposed(0, 0, 0), 0);
  EXPECT_EQ(xposed({2, 7, 1}), 117);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(255));
  EXPECT_FALSE(xposed.UsesDevice(256));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(
          0, 1, 16, 17, 32, 33, 48, 49, 64, 65, 80, 81, 96, 97, 112, 113, 128,
          129, 144, 145, 160, 161, 176, 177, 192, 193, 208, 209, 224, 225, 240,
          241, 2, 3, 18, 19, 34, 35, 50, 51, 66, 67, 82, 83, 98, 99, 114, 115,
          130, 131, 146, 147, 162, 163, 178, 179, 194, 195, 210, 211, 226, 227,
          242, 243, 4, 5, 20, 21, 36, 37, 52, 53, 68, 69, 84, 85, 100, 101, 116,
          117, 132, 133, 148, 149, 164, 165, 180, 181, 196, 197, 212, 213, 228,
          229, 244, 245, 6, 7, 22, 23, 38, 39, 54, 55, 70, 71, 86, 87, 102, 103,
          118, 119, 134, 135, 150, 151, 166, 167, 182, 183, 198, 199, 214, 215,
          230, 231, 246, 247, 8, 9, 24, 25, 40, 41, 56, 57, 72, 73, 88, 89, 104,
          105, 120, 121, 136, 137, 152, 153, 168, 169, 184, 185, 200, 201, 216,
          217, 232, 233, 248, 249, 10, 11, 26, 27, 42, 43, 58, 59, 74, 75, 90,
          91, 106, 107, 122, 123, 138, 139, 154, 155, 170, 171, 186, 187, 202,
          203, 218, 219, 234, 235, 250, 251, 12, 13, 28, 29, 44, 45, 60, 61, 76,
          77, 92, 93, 108, 109, 124, 125, 140, 141, 156, 157, 172, 173, 188,
          189, 204, 205, 220, 221, 236, 237, 252, 253, 14, 15, 30, 31, 46, 47,
          62, 63, 78, 79, 94, 95, 110, 111, 126, 127, 142, 143, 158, 159, 174,
          175, 190, 191, 206, 207, 222, 223, 238, 239, 254, 255));
}
// Verifies a transpose that splits both reshape dimensions and tile
// dimensions (non-power-of-two sizes).
TEST_P(FormattedTileAssignmentTest,
       TransposeIotaTileSplittingBothCanonicalizedReshapeDimsAndTileDims) {
  TileAssignment tile({14, 3, 5}, {6, 5, 7}, {2, 0, 1});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({1, 0, 2});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed, TileAssignment({3, 14, 5}, {2, 3, 5, 7}, {1, 3, 0, 2}));
  EXPECT_EQ(xposed.num_dimensions(), 3);
  EXPECT_EQ(xposed.dim(0), 3);
  EXPECT_EQ(xposed.dim(1), 14);
  EXPECT_EQ(xposed.dim(2), 5);
  EXPECT_EQ(xposed(0, 0, 0), 0);
  EXPECT_EQ(xposed({2, 11, 3}), 201);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(209));
  EXPECT_FALSE(xposed.UsesDevice(210));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(
          0, 7, 14, 21, 28, 105, 112, 119, 126, 133, 1, 8, 15, 22, 29, 106, 113,
          120, 127, 134, 2, 9, 16, 23, 30, 107, 114, 121, 128, 135, 3, 10, 17,
          24, 31, 108, 115, 122, 129, 136, 4, 11, 18, 25, 32, 109, 116, 123,
          130, 137, 5, 12, 19, 26, 33, 110, 117, 124, 131, 138, 6, 13, 20, 27,
          34, 111, 118, 125, 132, 139, 35, 42, 49, 56, 63, 140, 147, 154, 161,
          168, 36, 43, 50, 57, 64, 141, 148, 155, 162, 169, 37, 44, 51, 58, 65,
          142, 149, 156, 163, 170, 38, 45, 52, 59, 66, 143, 150, 157, 164, 171,
          39, 46, 53, 60, 67, 144, 151, 158, 165, 172, 40, 47, 54, 61, 68, 145,
          152, 159, 166, 173, 41, 48, 55, 62, 69, 146, 153, 160, 167, 174, 70,
          77, 84, 91, 98, 175, 182, 189, 196, 203, 71, 78, 85, 92, 99, 176, 183,
          190, 197, 204, 72, 79, 86, 93, 100, 177, 184, 191, 198, 205, 73, 80,
          87, 94, 101, 178, 185, 192, 199, 206, 74, 81, 88, 95, 102, 179, 186,
          193, 200, 207, 75, 82, 89, 96, 103, 180, 187, 194, 201, 208, 76, 83,
          90, 97, 104, 181, 188, 195, 202, 209));
}
// Verifies a transpose that groups adjacent canonicalized reshape dims.
TEST_P(FormattedTileAssignmentTest,
       TransposeIotaTileGroupingCanonicalizedReshapeDims) {
  TileAssignment tile({1, 4, 16}, {4, 4, 4}, {1, 0, 2});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({2, 0, 1});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed, TileAssignment({16, 1, 4}, {4, 4, 4}, {0, 2, 1}));
  EXPECT_EQ(xposed.num_dimensions(), 3);
  EXPECT_EQ(xposed.dim(0), 16);
  EXPECT_EQ(xposed.dim(1), 1);
  EXPECT_EQ(xposed.dim(2), 4);
  EXPECT_EQ(xposed(0, 0, 0), 0);
  EXPECT_EQ(xposed({7, 0, 3}), 31);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(63));
  EXPECT_FALSE(xposed.UsesDevice(64));
  EXPECT_THAT(ToVectorUsingEach(xposed),
              ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
                          16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19,
                          23, 27, 31, 32, 36, 40, 44, 33, 37, 41, 45, 34, 38,
                          42, 46, 35, 39, 43, 47, 48, 52, 56, 60, 49, 53, 57,
                          61, 50, 54, 58, 62, 51, 55, 59, 63));
}
// Verifies the identity permutation returns an equal assignment.
TEST_P(FormattedTileAssignmentTest, TransposeNoopIotaTile) {
  TileAssignment tile({4, 4}, {4, 4}, {1, 0});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({0, 1});
  EXPECT_EQ(xposed, tile);
  EXPECT_EQ(xposed.num_dimensions(), 2);
  EXPECT_EQ(xposed.dim(0), 4);
  EXPECT_EQ(xposed.dim(1), 4);
  EXPECT_EQ(xposed(0, 0), 0);
  EXPECT_EQ(xposed({2, 3}), 14);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(15));
  EXPECT_FALSE(xposed.UsesDevice(16));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15));
}
// Verifies a permutation that only shuffles size-1 dimensions is effectively
// a no-op on the device order (though the dimension list changes).
TEST_P(FormattedTileAssignmentTest, TransposeNoopIotaTileWithDegernateDims) {
  TileAssignment tile({1, 4, 1, 1, 4, 1}, {4, 4}, {1, 0});
  if (ShouldConvertToV1()) {
    tile = TileAssignment(tile.shared_array());
  }
  TileAssignment xposed = tile.Transpose({1, 5, 0, 4, 3, 2});
  EXPECT_NE(xposed, tile);
  EXPECT_EQ(xposed.num_dimensions(), 6);
  EXPECT_EQ(xposed.dim(0), 4);
  EXPECT_EQ(xposed.dim(1), 1);
  EXPECT_EQ(xposed.dim(2), 1);
  EXPECT_EQ(xposed.dim(3), 4);
  EXPECT_EQ(xposed.dim(4), 1);
  EXPECT_EQ(xposed.dim(5), 1);
  EXPECT_EQ(xposed(0, 0, 0, 0, 0, 0), 0);
  EXPECT_EQ(xposed({2, 0, 0, 3, 0, 0}), 14);
  EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
  EXPECT_TRUE(xposed.UsesDevice(0));
  EXPECT_TRUE(xposed.UsesDevice(15));
  EXPECT_FALSE(xposed.UsesDevice(16));
  EXPECT_THAT(
      ToVectorUsingEach(xposed),
      ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15));
}
// Runs every parameterized test with both the V2 (iota) and V1 (array) forms.
INSTANTIATE_TEST_SUITE_P(All, FormattedTileAssignmentTest, ::testing::Bool());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/tile_assignment.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/tile_assignment_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cbc1f255-a4f1-468b-93fc-5e815aab1aa2 | cpp | tensorflow/tensorflow | spectrogram_op | tensorflow/core/kernels/spectrogram_op.cc | tensorflow/core/kernels/spectrogram_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/spectrogram.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class AudioSpectrogramOp : public OpKernel {
public:
explicit AudioSpectrogramOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, context->GetAttr("window_size", &window_size_));
OP_REQUIRES_OK(context, context->GetAttr("stride", &stride_));
OP_REQUIRES_OK(context,
context->GetAttr("magnitude_squared", &magnitude_squared_));
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
OP_REQUIRES(context, input.dims() == 2,
errors::InvalidArgument("input must be 2-dimensional",
input.shape().DebugString()));
Spectrogram spectrogram;
OP_REQUIRES(context, spectrogram.Initialize(window_size_, stride_),
errors::InvalidArgument(
"Spectrogram initialization failed for window size ",
window_size_, " and stride ", stride_));
const auto input_as_matrix = input.matrix<float>();
const int64_t sample_count = input.dim_size(0);
const int64_t channel_count = input.dim_size(1);
const int64_t output_width = spectrogram.output_frequency_channels();
const int64_t length_minus_window = (sample_count - window_size_);
int64_t output_height;
if (length_minus_window < 0) {
output_height = 0;
} else {
output_height = 1 + (length_minus_window / stride_);
}
const int64_t output_slices = channel_count;
Tensor* output_tensor = nullptr;
OP_REQUIRES_OK(
context,
context->allocate_output(
0, TensorShape({output_slices, output_height, output_width}),
&output_tensor));
auto output_flat = output_tensor->flat<float>().data();
std::vector<float> input_for_channel(sample_count);
for (int64_t channel = 0; channel < channel_count; ++channel) {
OP_REQUIRES(context, spectrogram.Reset(),
errors::InvalidArgument("Failed to Reset()"));
float* output_slice =
output_flat + (channel * output_height * output_width);
for (int i = 0; i < sample_count; ++i) {
input_for_channel[i] = input_as_matrix(i, channel);
}
std::vector<std::vector<float>> spectrogram_output;
OP_REQUIRES(context,
spectrogram.ComputeSquaredMagnitudeSpectrogram(
input_for_channel, &spectrogram_output),
errors::InvalidArgument("Spectrogram compute failed"));
OP_REQUIRES(context, (spectrogram_output.size() == output_height),
errors::InvalidArgument(
"Spectrogram size calculation failed: Expected height ",
output_height, " but got ", spectrogram_output.size()));
OP_REQUIRES(context,
spectrogram_output.empty() ||
(spectrogram_output[0].size() == output_width),
errors::InvalidArgument(
"Spectrogram size calculation failed: Expected width ",
output_width, " but got ", spectrogram_output[0].size()));
for (int row_index = 0; row_index < output_height; ++row_index) {
const std::vector<float>& spectrogram_row =
spectrogram_output[row_index];
DCHECK_EQ(spectrogram_row.size(), output_width);
float* output_row = output_slice + (row_index * output_width);
if (magnitude_squared_) {
for (int i = 0; i < output_width; ++i) {
output_row[i] = spectrogram_row[i];
}
} else {
for (int i = 0; i < output_width; ++i) {
output_row[i] = sqrtf(spectrogram_row[i]);
}
}
}
}
}
private:
int32 window_size_;
int32 stride_;
bool magnitude_squared_;
};
REGISTER_KERNEL_BUILDER(Name("AudioSpectrogram").Device(DEVICE_CPU),
AudioSpectrogramOp);
} | #define EIGEN_USE_THREADS
#include <functional>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/audio_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace ops {
namespace {
// Verifies magnitudes for one full-length window over a simple square wave.
TEST(SpectrogramOpTest, SimpleTest) {
  Scope root = Scope::NewRootScope();
  Tensor audio_tensor(DT_FLOAT, TensorShape({8, 1}));
  test::FillValues<float>(&audio_tensor,
                          {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f});
  Output audio_const_op = Const(root.WithOpName("audio_const_op"),
                                Input::Initializer(audio_tensor));
  AudioSpectrogram spectrogram_op =
      AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, 8, 1);
  TF_ASSERT_OK(root.status());
  ClientSession session(root);
  std::vector<Tensor> outputs;
  TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
                           {spectrogram_op.spectrogram}, &outputs));
  const Tensor& spectrogram_tensor = outputs[0];
  EXPECT_EQ(3, spectrogram_tensor.dims());
  EXPECT_EQ(5, spectrogram_tensor.dim_size(2));
  EXPECT_EQ(1, spectrogram_tensor.dim_size(1));
  EXPECT_EQ(1, spectrogram_tensor.dim_size(0));
  test::ExpectTensorNear<float>(
      spectrogram_tensor,
      test::AsTensor<float>({0, 1, 2, 1, 0}, TensorShape({1, 1, 5})), 1e-3);
}
// Verifies the magnitude_squared attribute squares the output values.
TEST(SpectrogramOpTest, SquaredTest) {
  Scope root = Scope::NewRootScope();
  Tensor audio_tensor(DT_FLOAT, TensorShape({8, 1}));
  test::FillValues<float>(&audio_tensor,
                          {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f});
  Output audio_const_op = Const(root.WithOpName("audio_const_op"),
                                Input::Initializer(audio_tensor));
  AudioSpectrogram spectrogram_op =
      AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op, 8, 1,
                       AudioSpectrogram::Attrs().MagnitudeSquared(true));
  TF_ASSERT_OK(root.status());
  ClientSession session(root);
  std::vector<Tensor> outputs;
  TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
                           {spectrogram_op.spectrogram}, &outputs));
  const Tensor& spectrogram_tensor = outputs[0];
  EXPECT_EQ(3, spectrogram_tensor.dims());
  EXPECT_EQ(5, spectrogram_tensor.dim_size(2));
  EXPECT_EQ(1, spectrogram_tensor.dim_size(1));
  EXPECT_EQ(1, spectrogram_tensor.dim_size(0));
  test::ExpectTensorNear<float>(
      spectrogram_tensor,
      test::AsTensor<float>({0, 1, 4, 1, 0}, TensorShape({1, 1, 5})), 1e-3);
}
// Verifies each channel of a multi-channel input produces its own slice.
TEST(SpectrogramOpTest, MultichannelTest) {
  Scope root = Scope::NewRootScope();
  const int audio_size = 8;
  const int channel_size = 2;
  Tensor audio_tensor(DT_FLOAT, TensorShape({audio_size, channel_size}));
  test::FillValues<float>(
      &audio_tensor, {-1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, -1.0f,
                      -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f});
  Output audio_const_op = Const(root.WithOpName("audio_const_op"),
                                Input::Initializer(audio_tensor));
  AudioSpectrogram spectrogram_op =
      AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op,
                       audio_size, channel_size);
  TF_ASSERT_OK(root.status());
  ClientSession session(root);
  std::vector<Tensor> outputs;
  TF_EXPECT_OK(session.Run(ClientSession::FeedType(),
                           {spectrogram_op.spectrogram}, &outputs));
  const Tensor& spectrogram_tensor = outputs[0];
  EXPECT_EQ(3, spectrogram_tensor.dims());
  EXPECT_EQ(5, spectrogram_tensor.dim_size(2));
  EXPECT_EQ(1, spectrogram_tensor.dim_size(1));
  EXPECT_EQ(channel_size, spectrogram_tensor.dim_size(0));
  for (int channel = 0; channel < channel_size; channel++) {
    test::ExpectTensorNear<float>(
        spectrogram_tensor.SubSlice(channel),
        test::AsTensor<float>({0, 1, 2, 1, 0}, TensorShape({1, 5})), 1e-3);
  }
}
// Verifies that a too-small window size fails op shape inference/validation.
TEST(SpectrogramOpTest, InvalidWindowSize) {
  Scope root = Scope::NewRootScope();
  const int audio_size = 8;
  const int channel_size = 2;
  Tensor audio_tensor(DT_FLOAT, TensorShape({audio_size, channel_size}));
  test::FillValues<float>(
      &audio_tensor, {-1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, -1.0f,
                      -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f});
  Output audio_const_op = Const(root.WithOpName("audio_const_op"),
                                Input::Initializer(audio_tensor));
  AudioSpectrogram spectrogram_op =
      AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op,
                       1, 1);
  EXPECT_THAT(root.status(),
              tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
                                     ::testing::ContainsRegex("window size")));
}
// Verifies that a zero stride is rejected with an INVALID_ARGUMENT error.
TEST(SpectrogramOpTest, InvalidStride) {
  Scope root = Scope::NewRootScope();
  const int audio_size = 8;
  const int channel_size = 2;
  Tensor audio_tensor(DT_FLOAT, TensorShape({audio_size, channel_size}));
  test::FillValues<float>(
      &audio_tensor, {-1.0f, -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, -1.0f,
                      -1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f});
  Output audio_const_op = Const(root.WithOpName("audio_const_op"),
                                Input::Initializer(audio_tensor));
  AudioSpectrogram spectrogram_op =
      AudioSpectrogram(root.WithOpName("spectrogram_op"), audio_const_op,
                       2, 0);
  EXPECT_THAT(root.status(),
              tsl::testing::StatusIs(tsl::error::Code::INVALID_ARGUMENT,
                                     ::testing::ContainsRegex("stride")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/spectrogram_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/spectrogram_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ce8c34f-84df-4adc-a5d2-a1671a470374 | cpp | tensorflow/tensorflow | collective_pipeliner | third_party/xla/xla/service/collective_pipeliner.cc | third_party/xla/xla/service/collective_pipeliner_test.cc | #include "xla/service/collective_pipeliner.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/numeric/int128.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instruction_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/constant_value.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/service/value_range.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Marker custom-call target names attached by a pipelining step so a later
// run of the pass can recognize instructions it already transformed (see
// GetLevelFromCustomCall below for kInsertedByPreviousStep).
const char* const CollectivePipeliner::kInsertedByPreviousStep =
    "InsertedByPreviousStep";
const char* const CollectivePipeliner::kSunkByPreviousStep =
    "SunkByPreviousStep";
namespace {
// Maps an original instruction to its clone in the transformed computation.
using InstructionMap =
    absl::flat_hash_map<const HloInstruction*, HloInstruction*>;
// Pairs of (tuple index, instruction); presumably describes loop-variant
// while-loop parameters — confirm against later uses in this file.
using LoopVariantParameterInfo =
    std::vector<std::pair<int64_t, HloInstruction*>>;
// Mirrors `original`'s control predecessors onto `new_instr`, but only for
// predecessors that have an entry (i.e. were themselves cloned) in
// `cloned_map`.
absl::Status UpdateControlDependencies(HloInstruction* original,
                                       HloInstruction* new_instr,
                                       const InstructionMap& cloned_map) {
  for (HloInstruction* predecessor : original->control_predecessors()) {
    auto cloned_it = cloned_map.find(predecessor);
    if (cloned_it != cloned_map.end()) {
      TF_RETURN_IF_ERROR(cloned_it->second->AddControlDependencyTo(new_instr));
    }
  }
  return absl::OkStatus();
}
// Returns true iff the index operand at position `index` is the only
// non-constant index operand of `dyn_update`.
bool AllIndicesConstantsExceptOne(
    const HloDynamicUpdateSliceInstruction* dyn_update, int64_t index) {
  // The distinguished index must itself be non-constant.
  if (dyn_update->operand(index)->IsConstant()) {
    return false;
  }
  // Every other index operand must be a constant.
  for (int64_t i = dyn_update->first_index_operand_number();
       i < dyn_update->operand_count(); ++i) {
    if (i != index && !dyn_update->operand(i)->IsConstant()) {
      return false;
    }
  }
  return true;
}
// Returns the single dimension updated with a non-constant index, provided
// every other index operand is the constant 0. Returns nullopt when more than
// one index is non-constant or when any constant index is non-zero.
std::optional<int> GetSlicedDimension(
    const HloDynamicUpdateSliceInstruction* dyn_update) {
  std::optional<int> sliced_dim;
  for (int64_t i = dyn_update->first_index_operand_number();
       i < dyn_update->operand_count(); ++i) {
    const HloInstruction* idx = dyn_update->operand(i);
    if (!idx->IsConstant()) {
      if (sliced_dim.has_value()) {
        // A second non-constant index: not a simple slice along one dim.
        return std::nullopt;
      }
      sliced_dim = i - dyn_update->first_index_operand_number();
      continue;
    }
    // Constant indices must all be zero for the update to be a pure slice.
    if (Cast<HloConstantInstruction>(idx)->literal().GetFirstInteger() != 0) {
      return std::nullopt;
    }
  }
  return sliced_dim;
}
// Approximates "index is monotonic in the induction variable" by checking
// that the symbolically derived range of `index` is non-empty and linear.
bool CheckIndexIsMonotonic(
    const HloInstruction* index,
    const absl::flat_hash_map<const HloInstruction*, Range>& induction_map) {
  const Range range = RecursivelyIdentifyRange(index, induction_map);
  VLOG(5) << "Range for: " << index->ToString() << " " << range.ToString();
  if (range.IsEmpty()) {
    return false;
  }
  return range.IsLinear();
}
// Checks that the loop-parameter GTE `gte` is used only by the
// dynamic-update-slice `dus`, and that the update's varying index operand (at
// `sliced_index`) is exactly `dus_idx`. Any other use pattern disqualifies
// the buffer from being pipelined.
bool CheckParameterUsageIsCompatible(const HloInstruction* gte,
                                     const HloInstruction* dus,
                                     const HloInstruction* dus_idx,
                                     int64_t sliced_index) {
  for (auto* user : gte->users()) {
    // Every user must be the one dynamic-update-slice feeding the output.
    if (dus != user) {
      VLOG(5) << "CheckParameterUsageIsCompatible(): User not a dynamic slice "
                 "or the dynamic-update-slice for the output."
              << user->ToString();
      return false;
    }
    // NOTE(review): `user` is the dynamic-update-slice, but is cast to the
    // sibling HloDynamicSliceInstruction type here; this appears to rely on
    // first_index_operand_number() being a virtual of their common base —
    // confirm, and consider HloDynamicIndexInstruction instead.
    if (user->operand(static_cast<HloDynamicSliceInstruction*>(user)
                          ->first_index_operand_number() +
                      sliced_index) != dus_idx) {
      VLOG(5) << "CheckParameterUsageIsCompatible(): Idx is not the same as "
                 "dynamic-update-slice() "
              << user->ToString();
      return false;
    }
  }
  return true;
}
// If `instr` is the marker custom-call inserted by a previous pipelining
// step, returns the pipelining level stored in its constant operand 1;
// otherwise returns std::nullopt.
std::optional<int64_t> GetLevelFromCustomCall(const HloInstruction* instr) {
  if (instr->IsCustomCall(CollectivePipeliner::kInsertedByPreviousStep)) {
    const auto* level_constant =
        Cast<HloConstantInstruction>(instr->operand(1));
    return level_constant->literal().GetFirstInteger();
  }
  return std::nullopt;
}
// For a dynamic-slice `instr`, collects (in post-order) the full producer
// chains of all of its scalar index operands. Returns std::nullopt if any
// such chain reaches a parameter or a side-effecting instruction, i.e. if
// the indices are not compile-time-fixed expressions.
std::optional<std::vector<HloInstruction*>>
CollectDynamicSliceIndicesIfConstant(HloInstruction* instr) {
  CHECK_EQ(instr->opcode(), HloOpcode::kDynamicSlice);
  std::vector<HloInstruction*> indices;
  HloDynamicSliceInstruction* dyn_slice =
      Cast<HloDynamicSliceInstruction>(instr);
  for (int64_t i = dyn_slice->first_index_operand_number();
       i < instr->operand_count(); ++i) {
    HloInstruction* operand = dyn_slice->mutable_operand(i);
    // Dynamic-slice index operands are scalars.
    CHECK_EQ(operand->shape().dimensions_size(), 0);
    // Iterative DFS; the pair's int is the next operand index to visit, so an
    // instruction is emitted only after all of its operands (post-order).
    std::vector<std::pair<HloInstruction*, int>> stack(
        1, std::make_pair(operand, 0));
    absl::flat_hash_set<HloInstruction*> visited;
    while (!stack.empty()) {
      auto& [curr_instr, operand_idx] = stack.back();
      if (operand_idx == curr_instr->operand_count()) {
        indices.push_back(curr_instr);
        stack.pop_back();
        continue;
      }
      HloInstruction* next_operand = curr_instr->mutable_operand(operand_idx++);
      // Parameters or side effects mean the index is not a constant chain.
      if (next_operand->opcode() == HloOpcode::kParameter ||
          next_operand->HasSideEffect()) {
        return std::nullopt;
      }
      if (visited.insert(next_operand).second) {
        stack.push_back(std::make_pair(next_operand, 0));
      }
    }
  }
  return indices;
}
// Returns true for the fixed-width integer types a loop induction variable
// may have for this pass to analyze the loop.
bool IsSupportedLoopIndexType(PrimitiveType type) {
  return type == PrimitiveType::S8 || type == PrimitiveType::S16 ||
         type == PrimitiveType::S32 || type == PrimitiveType::S64 ||
         type == PrimitiveType::U8 || type == PrimitiveType::U16 ||
         type == PrimitiveType::U32 || type == PrimitiveType::U64;
}
// Creates a scalar literal holding `value` with the element type of `shape`.
// Only integral element types are supported; returns std::nullopt otherwise.
// CHECK-fails if `value` does not fit in the target integer type.
std::optional<Literal> CreateLiteralOfShape(const Shape& shape, int64_t value) {
  return primitive_util::PrimitiveTypeSwitch<std::optional<Literal>>(
      [&](auto kType) -> std::optional<Literal> {
        if constexpr (primitive_util::IsIntegralType(kType)) {
          using NativeT = typename primitive_util::NativeTypeOf<kType>;
          // Widen both sides to int128 so the bounds comparison is exact for
          // every supported integer width.
          CHECK_LE(value, static_cast<absl::int128>(
                              std::numeric_limits<NativeT>::max()));
          CHECK_GE(value, static_cast<absl::int128>(
                              std::numeric_limits<NativeT>::min()));
          return LiteralUtil::CreateR0(static_cast<NativeT>(value));
        }
        return std::nullopt;
      },
      shape.element_type());
}
// Collects "simple" extra dependencies of `i` that must be cloned along with
// it: constant dynamic-slice index chains, and broadcast(+constant) operands.
// New dependencies are appended to `deps_vector` (deduplicated via
// `deps_set`). Returns false if `i` is a dynamic-slice whose indices are not
// constant chains, in which case `i` cannot be handled.
bool CollectSimpleDependencies(HloInstruction* i,
                               std::vector<HloInstruction*>& deps_vector,
                               absl::flat_hash_set<HloInstruction*>& deps_set) {
  if (i->opcode() == HloOpcode::kDynamicSlice) {
    auto indices = CollectDynamicSliceIndicesIfConstant(i);
    if (!indices.has_value()) {
      return false;
    }
    deps_vector.insert(deps_vector.end(), indices->begin(), indices->end());
    deps_set.insert(indices->begin(), indices->end());
    return true;
  }
  for (HloInstruction* op : i->mutable_operands()) {
    absl::InlinedVector<HloInstruction*, 4> to_add;
    if (op->opcode() == HloOpcode::kBroadcast) {
      if (deps_set.insert(op).second) {
        to_add.push_back(op);
        op = op->mutable_operand(0);
        if (op->opcode() == HloOpcode::kConstant) {
          if (deps_set.insert(op).second) {
            to_add.push_back(op);
          }
        }
      }
    }
    // Reverse order so the constant precedes the broadcast that uses it.
    deps_vector.insert(deps_vector.end(), to_add.rbegin(), to_add.rend());
  }
  return true;
}
// Walks the user tree of `instr` (a collective candidate) and decides whether
// its value is eventually stored into the loop output through
// dynamic-update-slice(s) with only "acceptable" formatting ops in between.
// Returns the dynamic-update-slices found and, in a second pass, the
// formatting ops between `instr` and those stores (with their simple
// dependencies). Returns a pair of empty vectors when the pattern is not
// supported.
std::pair<std::vector<HloDynamicUpdateSliceInstruction*>,
          std::vector<HloInstruction*>>
CheckStoreIntoSliceIsCompatible(HloInstruction* instr,
                                const HloComputation* while_body,
                                int64_t level_to_operate_on,
                                bool multi_uses_pipelining,
                                HloPredicate acceptable_formatting,
                                bool multi_dyn_updates = false) {
  std::pair<std::vector<HloDynamicUpdateSliceInstruction*>,
            std::vector<HloInstruction*>>
      empty_pair{{}, {}};
  if ((!multi_uses_pipelining && instr->user_count() != 1) ||
      instr->operand_count() != 1 || instr->HasControlDependencies()) {
    return empty_pair;
  }
  absl::flat_hash_set<HloInstruction*> added_instructions;
  HloInstruction* folded_instr = instr;
  std::vector<HloInstruction*> formatting_ops;
  absl::flat_hash_set<HloInstruction*> formatting_set;
  // Predicate: ops we are willing to treat as "formatting" between the
  // collective and the final store.
  auto is_acceptable_user = [&](HloInstruction* i) {
    if (i->HasControlDependencies() || !acceptable_formatting(i)) {
      return false;
    }
    // Reduces are allowed only when they don't shrink the data meaningfully
    // (same element count, or the input is small anyway).
    if (i->opcode() == HloOpcode::kReduce &&
        (ShapeUtil::ElementsIn(i->shape()) ==
             ShapeUtil::ElementsIn(instr->operand(0)->shape()) ||
         ShapeUtil::ElementsIn(instr->operand(0)->shape()) < 1024)) {
      return true;
    }
    return HloPredicateIsOp<HloOpcode::kSlice, HloOpcode::kDynamicSlice,
                            HloOpcode::kPad, HloOpcode::kCollectivePermute,
                            HloOpcode::kConvert, HloOpcode::kReshape,
                            HloOpcode::kAllReduce, HloOpcode::kTranspose,
                            HloOpcode::kBroadcast, HloOpcode::kBitcast>(i) ||
           (multi_uses_pipelining && i->IsElementwise()) ||
           i->IsCustomCall(CollectivePipeliner::kInsertedByPreviousStep) ||
           i->IsCustomCall(CollectivePipeliner::kSunkByPreviousStep);
  };
  // Predicate: is `i` the dynamic-update-slice that stores into the loop
  // output for the level currently being processed?
  auto is_final_slice_insertion = [&](HloInstruction* i) {
    HloDynamicUpdateSliceInstruction* dyn_update =
        DynCast<HloDynamicUpdateSliceInstruction>(i);
    if (dyn_update == nullptr || dyn_update->user_count() != 1) {
      return false;
    }
    if (level_to_operate_on == 0) {
      if (dyn_update->users()[0] == while_body->root_instruction()) {
        return true;
      }
      return false;
    }
    // For deeper levels, match the level recorded by the marker custom-call
    // feeding one of the index operands.
    for (int64_t i = dyn_update->first_index_operand_number();
         i < dyn_update->operand_count(); ++i) {
      if (auto level = GetLevelFromCustomCall(dyn_update->operand(i))) {
        if (*level == level_to_operate_on) {
          return true;
        }
        return false;
      }
    }
    return false;
  };
  absl::flat_hash_set<HloInstruction*> final_slice_set;
  std::vector<HloDynamicUpdateSliceInstruction*> final_slice_insertions;
  std::vector<std::pair<HloInstruction*, int>> stack;
  stack.push_back(std::make_pair(folded_instr, 0));
  // First DFS over users: validate every user and find the final
  // dynamic-update-slice(s).
  while (!stack.empty()) {
    auto& data = stack.back();
    HloInstruction* inst = data.first;
    if (data.second == inst->user_count()) {
      stack.pop_back();
      continue;
    }
    HloInstruction* next_user = inst->users()[data.second++];
    if (is_final_slice_insertion(next_user)) {
      // The store must consume `inst` as its update operand (operand 1).
      if (next_user->user_count() != 1 || next_user->operand(1) != inst) {
        return empty_pair;
      }
      if (final_slice_set.contains(next_user)) {
        continue;
      }
      if (!multi_dyn_updates && !final_slice_insertions.empty()) {
        return empty_pair;
      }
      final_slice_insertions.push_back(
          Cast<HloDynamicUpdateSliceInstruction>(next_user));
      final_slice_set.insert(next_user);
      continue;
    }
    if (!is_acceptable_user(next_user)) {
      return empty_pair;
    }
    if (added_instructions.insert(next_user).second) {
      stack.push_back(std::make_pair(next_user, 0));
    }
  }
  if (final_slice_insertions.empty()) {
    return empty_pair;
  }
  // Second DFS: collect the formatting ops (and their simple dependencies)
  // between `instr` and the stores, in visitation order.
  stack.push_back(std::make_pair(folded_instr, 0));
  added_instructions.clear();
  while (!stack.empty()) {
    auto& data = stack.back();
    HloInstruction* instr = data.first;
    if (data.second == 0 && instr != folded_instr) {
      if (!CollectSimpleDependencies(instr, formatting_ops, formatting_set)) {
        return empty_pair;
      }
      if (formatting_set.insert(instr).second) {
        formatting_ops.push_back(instr);
      }
    }
    if (data.second == instr->user_count()) {
      stack.pop_back();
      continue;
    }
    HloInstruction* next_user = instr->users()[data.second++];
    if (is_final_slice_insertion(next_user)) {
      continue;
    }
    if (added_instructions.insert(next_user).second) {
      stack.push_back(std::make_pair(next_user, 0));
    }
  }
  return std::make_pair(final_slice_insertions, formatting_ops);
}
// Returns true if `instr` is the loop induction variable: a get-tuple-element
// of the loop parameter at tuple index `loop_iteration_tuple_idx`.
bool IsLoopIterator(const HloInstruction* instr,
                    int64_t loop_iteration_tuple_idx) {
  const bool is_gte_of_parameter =
      instr->opcode() == HloOpcode::kGetTupleElement &&
      instr->operand(0)->opcode() == HloOpcode::kParameter;
  return is_gte_of_parameter &&
         instr->tuple_index() == loop_iteration_tuple_idx;
}
// Returns, in first-seen order, the operands of `ops` that are neither in
// `ops` themselves nor in `source_ops` — i.e. the external values the
// formatting ops depend on and that must be carried along when pipelining.
std::vector<HloInstruction*> CollectDependenciesToPipeline(
    absl::Span<const HloInstruction* const> source_ops,
    absl::Span<HloInstruction* const> ops) {
  absl::flat_hash_set<const HloInstruction*> seen(ops.begin(), ops.end());
  seen.insert(source_ops.begin(), source_ops.end());
  std::vector<HloInstruction*> external_operands;
  for (const HloInstruction* op : ops) {
    for (HloInstruction* operand : op->operands()) {
      // insert() returns {it, inserted}; only newly seen operands count.
      if (seen.insert(operand).second) {
        external_operands.push_back(operand);
      }
    }
  }
  return external_operands;
}
// Collects, in post-order, the producer chain of `instr` that can be moved
// out of the loop body together with it. The chain may not depend on
// disallowed loop-variant parameters, and every chain member must either have
// all of its users inside the chain or be safely duplicable (scalar,
// loop-invariant, or explicitly allowed). Returns std::nullopt if the chain
// cannot be extracted.
std::optional<std::vector<HloInstruction*>> CollectIndependentOperandChain(
    HloInstruction* instr, int64_t loop_iter,
    const absl::flat_hash_set<const HloInstruction*>& loop_invariant_params,
    HloPredicate should_allow_loop_variant_parameter_in_chain,
    const absl::flat_hash_set<const HloInstruction*>&
        loop_invariant_instructions,
    bool should_add_loop_invariant_op_in_chain) {
  std::vector<HloInstruction*> chain;
  absl::flat_hash_set<const HloInstruction*> visited_set({instr});
  std::vector<std::pair<HloInstruction*, int>> stack(1, {instr, 0});
  // A GTE of the loop parameter that is neither the induction variable nor a
  // known loop-invariant parameter is a loop-variant parameter input.
  auto is_loop_variant_parameter_input =
      [&loop_invariant_params, loop_iter](const HloInstruction* instr) {
        if (instr->opcode() != HloOpcode::kGetTupleElement ||
            instr->operand(0)->opcode() != HloOpcode::kParameter) {
          return false;
        }
        return !IsLoopIterator(instr, loop_iter) &&
               !loop_invariant_params.count(instr);
      };
  // Iterative DFS; an instruction is appended to `chain` only after all of
  // its operands were visited (post-order), excluding `instr` itself.
  while (!stack.empty()) {
    auto& curr = stack.back();
    if (curr.second == curr.first->operand_count()) {
      if (curr.first != instr) {
        chain.push_back(curr.first);
      }
      stack.pop_back();
      continue;
    }
    HloInstruction* curr_operand = curr.first->mutable_operand(curr.second++);
    if (curr_operand->opcode() == HloOpcode::kParameter) {
      continue;
    }
    if (is_loop_variant_parameter_input(curr_operand) &&
        !should_allow_loop_variant_parameter_in_chain(curr_operand)) {
      return std::nullopt;
    }
    if (visited_set.insert(curr_operand).second) {
      stack.emplace_back(curr_operand, 0);
    }
  }
  // Validate that each chain member can be duplicated/moved safely.
  for (auto* chain_instr : chain) {
    // After-all tokens carry no data and are always fine.
    if (chain_instr->opcode() == HloOpcode::kAfterAll) {
      continue;
    }
    // Moving a recv-done would change communication semantics.
    if (chain_instr->opcode() == HloOpcode::kRecvDone) {
      return std::nullopt;
    }
    const bool all_users_in_chain = absl::c_all_of(
        chain_instr->users(), [&visited_set](const HloInstruction* u) {
          return visited_set.contains(u);
        });
    const bool is_scalar_shaped =
        ShapeUtil::IsEffectiveScalar(chain_instr->shape());
    if (!all_users_in_chain) {
      // Instructions with users outside the chain may only be duplicated when
      // they are cheap (scalar), loop invariant, or explicitly allowed.
      bool allow_loop_variant_parameter_in_chain =
          (chain_instr->opcode() != HloOpcode::kGetTupleElement ||
           chain_instr->operand(0)->opcode() != HloOpcode::kParameter ||
           !should_allow_loop_variant_parameter_in_chain(chain_instr));
      bool add_loop_invariant_op_in_chain =
          (should_add_loop_invariant_op_in_chain &&
           loop_invariant_instructions.contains(chain_instr));
      if ((!loop_invariant_params.contains(chain_instr) && !is_scalar_shaped &&
           allow_loop_variant_parameter_in_chain) &&
          !add_loop_invariant_op_in_chain) {
        return std::nullopt;
      }
    }
  }
  // std::move is needed here: the vector is converted into the optional
  // return type, so NRVO does not apply.
  return std::move(chain);
}
// Entry point for backward pipelining: returns the movable producer chain of
// `instr`, or std::nullopt when `instr` has control dependencies that are not
// explicitly allowed.
std::optional<std::vector<HloInstruction*>> CollectChainsToPushBackwards(
    HloInstruction* instr, int64_t loop_iter, const HloComputation* while_body,
    int64_t level_to_operate_on,
    const absl::flat_hash_set<const HloInstruction*>& loop_invariant_params,
    HloPredicate should_allow_loop_variant_parameter_in_chain,
    bool should_allow_control_dependencies,
    const absl::flat_hash_set<const HloInstruction*>&
        loop_invariant_instructions,
    bool should_add_loop_invariant_op_in_chain) {
  const bool blocked_by_control_deps =
      instr->HasControlDependencies() && !should_allow_control_dependencies;
  if (blocked_by_control_deps) {
    return std::nullopt;
  }
  return CollectIndependentOperandChain(
      instr, loop_iter, loop_invariant_params,
      should_allow_loop_variant_parameter_in_chain, loop_invariant_instructions,
      should_add_loop_invariant_op_in_chain);
}
// Follows the (single-user) chain of dynamic-update-slices starting at `dus`
// until it reaches `root_instr`, and returns the unique operand index of the
// root tuple the chain feeds. Returns std::nullopt when a link has multiple
// users or the root consumes the final update at more than one position.
std::optional<int64_t> FindOutputIndexForDynamicUpdateSlice(
    const HloInstruction* dus, const HloInstruction* root_instr) {
  std::optional<int64_t> output_idx;
  while (dus->opcode() == HloOpcode::kDynamicUpdateSlice) {
    if (dus->user_count() != 1) {
      output_idx = std::nullopt;
      break;
    }
    if (dus->users()[0] == root_instr) {
      auto indices = root_instr->OperandIndices(dus);
      // The output position must be unambiguous.
      if (indices.size() != 1) {
        output_idx = std::nullopt;
        break;
      }
      output_idx = indices[0];
      break;
    }
    dus = Cast<HloDynamicUpdateSliceInstruction>(dus->users()[0]);
  }
  return output_idx;
}
// Translates `operands` through `clone_map`. Operands without a mapping are
// kept unchanged, which is only legal when `allow_unmapped` is set;
// otherwise the missing mapping is a CHECK failure.
std::vector<HloInstruction*> MapNewOperands(
    absl::Span<HloInstruction* const> operands, const InstructionMap& clone_map,
    bool allow_unmapped = false) {
  std::vector<HloInstruction*> new_operands;
  new_operands.reserve(operands.size());
  for (HloInstruction* operand : operands) {
    const auto mapped = clone_map.find(operand);
    CHECK(mapped != clone_map.end() || allow_unmapped)
        << operand->ToString() << " not present in map";
    new_operands.push_back(mapped != clone_map.end() ? mapped->second
                                                     : operand);
  }
  return new_operands;
}
// One unit of work for the pipeliner: a group of collectives together with
// the stores and formatting ops that must move with them.
struct WhileMoveInfo {
  // Collectives (e.g. all-reduces) to pipeline as a group.
  std::vector<HloInstruction*> collectives_to_move;
  // The dynamic-update-slices that store the results into the loop output;
  // parallel to output_indices.
  std::vector<HloDynamicUpdateSliceInstruction*> dynamic_update_slices;
  // Formatting ops between the collectives and the stores, in program order.
  std::vector<HloInstruction*> formatting_ops;
  // Dimension along which the dynamic-update-slices insert per iteration.
  int64_t sliced_idx;
  // Root-tuple operand index fed by each dynamic-update-slice.
  std::vector<int64_t> output_indices;
};
// Debug-prints a WhileMoveInfo, pairing each dynamic-update-slice with its
// output index.
std::string ToString(const WhileMoveInfo& move_info) {
  // The two vectors are parallel by construction.
  CHECK_EQ(move_info.dynamic_update_slices.size(),
           move_info.output_indices.size());
  std::vector<std::pair<decltype(move_info.dynamic_update_slices)::value_type,
                        decltype(move_info.output_indices)::value_type>>
      zip_result;
  zip_result.reserve(move_info.dynamic_update_slices.size());
  for (int64_t i = 0; i < move_info.dynamic_update_slices.size(); ++i) {
    zip_result.push_back(std::make_pair(move_info.dynamic_update_slices[i],
                                        move_info.output_indices[i]));
  }
  return absl::StrFormat(
      "\tCollectives:\n\t\t%s\n\tDynamicUpdateSlices:\n\t\t%s\n\tFormatting "
      "ops:\n\t\t%s\n\tSliced index: %d",
      absl::StrJoin(move_info.collectives_to_move, ",\n\t\t",
                    [](std::string* out, HloInstruction* instr) {
                      absl::StrAppend(out, instr->name());
                    }),
      absl::StrJoin(zip_result, ",\n\t\t",
                    [](std::string* out, const auto& item) {
                      absl::StrAppend(
                          out, absl::StrFormat("%s (%d)", item.first->name(),
                                               item.second));
                    }),
      absl::StrJoin(move_info.formatting_ops, ",\n\t\t",
                    [](std::string* out, HloInstruction* instr) {
                      absl::StrAppend(out, instr->name());
                    }),
      move_info.sliced_idx);
}
// Rewrites the channel id of a cloned channel instruction so clones do not
// collide with originals: *-done ops copy the id from their start op, other
// channel ops get a fresh id from `next_channel_id`.
void UpdateInstructionChannelId(HloInstruction* cloned_instr,
                                int64_t& next_channel_id) {
  // Non-host-transfer send/recv keep their channel id untouched.
  if (const auto* send_recv_instr =
          DynCast<HloSendRecvInstruction>(cloned_instr)) {
    if (!send_recv_instr->is_host_transfer()) {
      return;
    }
  }
  if (auto* channel_instr = DynCast<HloChannelInstruction>(cloned_instr)) {
    if (channel_instr->opcode() == HloOpcode::kSendDone ||
        channel_instr->opcode() == HloOpcode::kRecvDone) {
      auto* operand = channel_instr->operand(0);
      CHECK(operand->opcode() == HloOpcode::kSend ||
            operand->opcode() == HloOpcode::kRecv);
      // Done op must share the channel of its matching start op.
      channel_instr->set_channel_id(
          Cast<HloChannelInstruction>(operand)->channel_id());
      return;
    }
    if (channel_instr->channel_id()) {
      channel_instr->set_channel_id(next_channel_id++);
    }
  }
}
// Clones the backward chain (formatting ops followed by the collective) of
// `move_info` into `target_computation`, wiring operands through `clone_map`
// and refreshing channel ids. Loop-iterator GTEs and already-cloned ops are
// skipped (their mapping is assumed to be in `clone_map`). If
// `loop_variant_parameter_info` is provided, records the tuple indices of
// loop-variant parameter GTEs encountered in the chain. Returns the last
// instruction cloned (the collective).
template <typename Comp>
absl::StatusOr<HloInstruction*> CloneBackwardChain(
    Comp& target_computation, const WhileMoveInfo& move_info,
    InstructionMap& clone_map, int64_t loop_iter_idx, int64_t& next_channel_id,
    LoopVariantParameterInfo* loop_variant_parameter_info = nullptr) {
  // formatting_ops are in dependency order, so operands are cloned before
  // their users; the collective itself is cloned last.
  std::vector<HloInstruction*> to_clone(move_info.formatting_ops.begin(),
                                        move_info.formatting_ops.end());
  to_clone.push_back(move_info.collectives_to_move[0]);
  HloInstruction* last_cloned = nullptr;
  for (auto* chain_op : to_clone) {
    if (IsLoopIterator(chain_op, loop_iter_idx) ||
        clone_map.contains(chain_op)) {
      continue;
    }
    auto new_operands = MapNewOperands(chain_op->operands(), clone_map);
    HloInstruction* cloned = target_computation.AddInstruction(
        chain_op->CloneWithNewOperands(chain_op->shape(), new_operands));
    TF_RETURN_IF_ERROR(UpdateControlDependencies(chain_op, cloned, clone_map));
    UpdateInstructionChannelId(cloned, next_channel_id);
    clone_map[chain_op] = cloned;
    last_cloned = cloned;
    if (loop_variant_parameter_info != nullptr &&
        chain_op->opcode() == HloOpcode::kGetTupleElement &&
        chain_op->operand(0)->opcode() == HloOpcode::kParameter &&
        chain_op->tuple_index() != loop_iter_idx) {
      loop_variant_parameter_info->push_back(
          std::make_pair(chain_op->tuple_index(), cloned));
    }
  }
  CHECK_NE(last_cloned, nullptr);
  return last_cloned;
}
// Analyzes a single while loop: derives its trip count/start/increment,
// finds the collectives that can be pipelined (per direction), and keeps the
// bookkeeping (move_infos_, dus indices, loop-invariant sets) the transform
// phase consumes.
class WhileLoopAnalysis {
 public:
  explicit WhileLoopAnalysis(
      HloInstruction* while_instr, int64_t max_pipelining_per_loop,
      bool pipeline_use_tree, bool process_different_sized_options,
      TuplePointsToAnalysis* tuple_points_to_analysis, CallGraph* call_graph,
      std::optional<ConstantValue> known_start = std::nullopt)
      : while_(while_instr),
        loop_start_(known_start),
        max_pipelining_per_loop_(max_pipelining_per_loop),
        tuple_points_to_analysis_(tuple_points_to_analysis),
        call_graph_(call_graph),
        pipeline_use_tree_(pipeline_use_tree),
        process_different_sized_options_(process_different_sized_options) {}
  // Accessors for the statistics computed by ComputeLoopStatistics(); empty
  // until that call succeeds.
  std::optional<ConstantValue> GetLoopIterationCount() const;
  std::optional<ConstantValue> GetLoopStart() const;
  std::optional<ConstantValue> GetLoopIncrement() const;
  const std::vector<WhileMoveInfo>& GetMoveInfos() const;
  std::optional<int64_t> GetLoopIterationIdx() const {
    return loop_iteration_idx_;
  }
  // Index assigned to a dynamic-update-slice; CHECK-fails if unknown.
  int64_t GetDUSIndex(const HloInstruction* dus) const;
  const absl::flat_hash_map<HloInstruction*, int64_t>& GetDUSIndices() const {
    return dus_index_map_;
  }
  int64_t GetUniqueDUSIndices() const { return dus_index_map_.size(); }
  int64_t GetMaxPipeliningPerLoop() const { return max_pipelining_per_loop_; }
  // Parses the loop bounds; returns false when the loop is not statically
  // analyzable (unknown trip count, unsupported index type, ...).
  bool ComputeLoopStatistics();
  // Validates a single dynamic-update-slice store for pipelining; returns
  // (sliced dimension, output tuple index) on success.
  std::optional<std::pair<int64_t, int64_t>> IsSupportedDynamicUpdateSlice(
      const HloDynamicUpdateSliceInstruction* dyn_update,
      const HloInstruction* instr,
      const std::vector<HloInstruction*>& formatting_ops,
      CollectivePipeliner::PipeliningDirection direction,
      int64_t level_to_operate_on,
      const absl::flat_hash_map<int64_t, int64_t>& parameter_gtes_count,
      const absl::flat_hash_map<const HloInstruction*, Range>& index_ranges)
      const;
  // Merging helpers: fold a newly found collective (and possibly existing
  // move_infos_ entries) into a single entry.
  void MergeIntoExistingCollectivesForward(
      HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
      std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
      std::vector<int64_t> indices_to_merge,
      absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order);
  void MergeIntoExistingCollectivesForwardSink(
      HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
      std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
      int64_t sliced_idx, std::vector<int64_t> output_indices,
      std::vector<int64_t> indices_to_merge,
      absl::flat_hash_map<const HloInstruction*, int64_t>
          index_per_dyn_update_slice,
      absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order);
  void MergeIntoExistingCollectives(
      HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
      std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
      int64_t sliced_idx, std::vector<int64_t> output_indices,
      std::vector<int64_t> indices_to_merge,
      absl::flat_hash_map<const HloInstruction*, int64_t>
          index_per_dyn_update_slice,
      absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order,
      CollectivePipeliner::PipeliningDirection direction);
  // Populates move_infos_ with the collectives to pipeline for the given
  // direction/level; clears any previous result.
  void CollectCollectivesToMove(
      int64_t level_to_operate_on,
      CollectivePipeliner::PipeliningDirection direction,
      HloPredicate should_process, HloPredicate acceptable_formatting,
      HloPredicate should_allow_loop_variant_parameter_in_chain =
          HloPredicateFalse,
      bool should_allow_control_dependencies = false,
      bool should_add_loop_invariant_op_in_chain = false);
  HloInstruction* while_loop_instruction() const { return while_; }
  // Computes invariant_loop_instructions_ from invariant_loop_parameters_.
  void ExtractLoopInvariantOps();

 private:
  HloInstruction* while_;
  // Loop statistics; populated by ComputeLoopStatistics().
  std::optional<ConstantValue> loop_iteration_count_;
  std::optional<ConstantValue> loop_increment_;
  std::optional<ConstantValue> loop_start_;
  std::optional<ConstantValue> loop_bound_;
  // Tuple index of the induction variable.
  std::optional<int64_t> loop_iteration_idx_;
  std::vector<WhileMoveInfo> move_infos_;
  absl::flat_hash_map<HloInstruction*, int64_t> dus_index_map_;
  absl::flat_hash_set<const HloInstruction*> invariant_loop_parameters_;
  absl::flat_hash_set<const HloInstruction*> invariant_loop_instructions_;
  int64_t max_pipelining_per_loop_;
  // Not owned; provided by the pass driver.
  TuplePointsToAnalysis* tuple_points_to_analysis_;
  CallGraph* call_graph_;
  bool pipeline_use_tree_;
  bool process_different_sized_options_;
};
// Returns the index registered for `dus`; the dus must already be present in
// dus_index_map_ (CHECK-fails otherwise).
int64_t WhileLoopAnalysis::GetDUSIndex(const HloInstruction* dus) const {
  const auto entry = dus_index_map_.find(dus);
  CHECK(entry != dus_index_map_.end());
  return entry->second;
}
// Marks as loop invariant every body instruction that is a constant or whose
// operands are all already known invariant (instructions or parameters).
void WhileLoopAnalysis::ExtractLoopInvariantOps() {
  // Post-order guarantees operands are classified before their users.
  for (HloInstruction* inst :
       while_->while_body()->MakeInstructionPostOrder()) {
    if (inst->opcode() == HloOpcode::kConstant) {
      // Constants are trivially invariant.
      invariant_loop_instructions_.insert(inst);
      continue;
    }
    if (invariant_loop_instructions_.contains(inst)) {
      continue;
    }
    const bool all_operands_invariant = absl::c_all_of(
        inst->operands(), [this](const HloInstruction* operand) {
          return invariant_loop_instructions_.contains(operand) ||
                 invariant_loop_parameters_.contains(operand);
        });
    if (all_operands_invariant) {
      invariant_loop_instructions_.insert(inst);
    }
  }
}
// Pattern-matches the while loop and derives start/bound/increment and the
// trip count as ConstantValues of the induction variable's width/signedness.
// Also seeds the loop-invariant parameter/instruction sets. Returns false if
// the loop cannot be statically analyzed. Idempotent: a second call returns
// true immediately.
bool WhileLoopAnalysis::ComputeLoopStatistics() {
  // Already computed.
  if (loop_iteration_count_) {
    return true;
  }
  std::optional<ParsedWhileLoop> parsed_loop = PatternMatchParseWhileLoop(
      while_, {tuple_points_to_analysis_, call_graph_});
  if (!parsed_loop || !parsed_loop->static_while_loop) {
    return false;
  }
  if (!IsSupportedLoopIndexType(
          while_->shape()
              .tuple_shapes(parsed_loop->static_while_loop->induction_var_index)
              .element_type())) {
    return false;
  }
  const HloInstruction* loop_root = while_->while_body()->root_instruction();
  const int64_t bitwidth = primitive_util::BitWidth(
      loop_root->operand(parsed_loop->static_while_loop->induction_var_index)
          ->shape()
          .element_type());
  const bool is_signed = primitive_util::IsSignedIntegralType(
      loop_root->operand(parsed_loop->static_while_loop->induction_var_index)
          ->shape()
          .element_type());
  const ConstantValue bound =
      is_signed ? ConstantValue::GetSigned(
                      parsed_loop->static_while_loop->loop_bound, bitwidth)
                : ConstantValue::GetUnsigned(
                      parsed_loop->static_while_loop->loop_bound, bitwidth);
  const ConstantValue increment =
      is_signed ? ConstantValue::GetSigned(
                      parsed_loop->static_while_loop->step_size, bitwidth)
                : ConstantValue::GetUnsigned(
                      parsed_loop->static_while_loop->step_size, bitwidth);
  loop_start_ =
      is_signed ? ConstantValue::GetSigned(
                      parsed_loop->static_while_loop->induction_var_init_value,
                      bitwidth)
                : ConstantValue::GetUnsigned(
                      parsed_loop->static_while_loop->induction_var_init_value,
                      bitwidth);
  // trip count = ceil((bound - start) / increment), computed as
  // floor-div plus one when there is a remainder.
  auto iteration_range = bound.sub(*loop_start_);
  auto iter_count = iteration_range.div(increment);
  loop_iteration_count_ =
      iteration_range.mod(increment).gt(
          ConstantValue::GetZero(increment.GetBitwidth(), increment.IsSigned()))
          ? iter_count.add(ConstantValue::GetOne(increment.GetBitwidth(),
                                                 increment.IsSigned()))
          : iter_count;
  // Overflow guard: the rounded-up count must not be smaller than the
  // floor-div count.
  if (loop_iteration_count_->lt(iter_count)) {
    return false;
  }
  loop_bound_ = bound;
  loop_increment_ = increment;
  loop_iteration_idx_ = parsed_loop->static_while_loop->induction_var_index;
  VLOG(1) << "Bound: " << loop_bound_->ToString()
          << " Start: " << loop_start_->ToString()
          << " Increment: " << loop_increment_->ToString();
  // A root operand that is gte(param, i) at position i passes tuple element i
  // through unchanged, i.e. that parameter is loop invariant.
  if (loop_root->opcode() == HloOpcode::kTuple) {
    for (int i = 0; i < loop_root->operand_count(); ++i) {
      if (loop_root->operand(i)->opcode() != HloOpcode::kGetTupleElement) {
        continue;
      }
      if (i != loop_root->operand(i)->tuple_index()) {
        continue;
      }
      invariant_loop_parameters_.insert(loop_root->operand(i));
    }
  }
  ExtractLoopInvariantOps();
  return true;
}
// Validates that `dyn_update` (storing the result of collective `instr`,
// reached through `formatting_ops`) matches the store pattern the pipeliner
// can handle: a unique sliced dimension, compatible shapes, a store into a
// uniquely-identified loop output, and (at level 0) a monotonic index into a
// single-GTE parameter buffer. Returns (sliced dimension, output tuple
// index), or std::nullopt with a VLOG(5) explanation of why it was rejected.
std::optional<std::pair<int64_t, int64_t>>
WhileLoopAnalysis::IsSupportedDynamicUpdateSlice(
    const HloDynamicUpdateSliceInstruction* dyn_update,
    const HloInstruction* instr,
    const std::vector<HloInstruction*>& formatting_ops,
    CollectivePipeliner::PipeliningDirection direction,
    int64_t level_to_operate_on,
    const absl::flat_hash_map<int64_t, int64_t>& parameter_gtes_count,
    const absl::flat_hash_map<const HloInstruction*, Range>& index_ranges)
    const {
  HloComputation* while_body = while_->while_body();
  const HloInstruction* loop_parameter =
      while_body->parameter_instructions()[0];
  std::optional<int64_t> sliced_dim = GetSlicedDimension(dyn_update);
  if (!sliced_dim.has_value()) {
    VLOG(5) << "Skipping " << instr->name()
            << " because couldn't find sliced dimension";
    return std::nullopt;
  }
  // For forward-sink the accumulation buffer must be indexed by the iteration
  // number on dimension 0, one slice per iteration.
  if (direction == CollectivePipeliner::PipeliningDirection::kForwardSink &&
      (*sliced_dim != 0 || dyn_update->shape().dimensions(0) !=
                               loop_iteration_count_->GetUnsignedValue())) {
    VLOG(5) << "Skipping " << instr->name()
            << " because number of iteration of the loop doesn't match "
               "slices being inserted or slice dim is not 0. slice_dim = "
            << *sliced_dim
            << " loop count = " << loop_iteration_count_->GetUnsignedValue();
    return std::nullopt;
  }
  if (!process_different_sized_options_) {
    if (!formatting_ops.empty()) {
      if (instr->operand(0)->shape() != formatting_ops.back()->shape()) {
        VLOG(5) << "Skipping " << instr->name()
                << " because operand and last formatting op don't have the "
                   "same shape";
        return std::nullopt;
      }
      // Scalar external dependencies of the formatting ops disqualify the
      // chain when size-changing patterns are not being processed.
      auto dependencies_to_pipeline = CollectDependenciesToPipeline(
          absl::MakeConstSpan({instr}), absl::MakeConstSpan(formatting_ops));
      bool skip_because_not_same_size = false;
      for (auto* dependency : dependencies_to_pipeline) {
        if (ShapeUtil::IsEffectiveScalar(dependency->shape())) {
          skip_because_not_same_size = true;
          break;
        }
      }
      if (skip_because_not_same_size) {
        VLOG(5)
            << "Skipping " << instr->name()
            << " because formatting ops do not have the expected shapes/sizes";
        return std::nullopt;
      }
    } else if (instr->operand(0)->shape() != instr->shape()) {
      VLOG(5) << "Skipping " << instr->name()
              << " because instr does not have the same shape as its operand";
      return std::nullopt;
    }
  }
  const HloInstruction* to_insert_into = dyn_update->operand(0);
  // At level 0 the destination buffer must come straight from the loop
  // parameter.
  if (level_to_operate_on == 0 &&
      (to_insert_into->opcode() != HloOpcode::kGetTupleElement ||
       to_insert_into->operand(0) != loop_parameter)) {
    VLOG(5) << "Skipping " << instr->name()
            << " because slice to insert into is not a GTE from input "
               "parameter "
            << to_insert_into->ToString();
    return std::nullopt;
  }
  if (level_to_operate_on == 0) {
    if (to_insert_into->opcode() == HloOpcode::kGetTupleElement) {
      // If the parameter tuple element is read through multiple GTEs we
      // cannot reason about the buffer's uses.
      if (parameter_gtes_count.at(to_insert_into->tuple_index()) != 1) {
        VLOG(5) << "Skipping " << instr->name()
                << " because there are multiple parameter GTEs for this slice";
        return std::nullopt;
      }
    }
    const HloInstruction* dyn_update_idx = dyn_update->operand(
        dyn_update->first_index_operand_number() + *sliced_dim);
    if (level_to_operate_on == 0 &&
        !CheckParameterUsageIsCompatible(to_insert_into, dyn_update,
                                         dyn_update_idx, *sliced_dim)) {
      VLOG(5) << "Skipping " << instr->name()
              << " because parameter usage doesn't follow the expected pattern";
      return std::nullopt;
    }
    if (!AllIndicesConstantsExceptOne(
            dyn_update,
            dyn_update->first_index_operand_number() + *sliced_dim)) {
      VLOG(5) << "Skipping " << instr->name()
              << " because update slicing doesn't match expectation";
      return std::nullopt;
    }
    if (!CheckIndexIsMonotonic(dyn_update_idx, index_ranges)) {
      VLOG(5) << "Skipping " << instr->name()
              << " because update index is not monotonic";
      return std::nullopt;
    }
  }
  std::optional<int64_t> output_idx = FindOutputIndexForDynamicUpdateSlice(
      dyn_update, while_body->root_instruction());
  if (!output_idx.has_value()) {
    VLOG(5) << "Skipping " << instr->name()
            << " because couldn't find unique output index for insertion";
    return std::nullopt;
  }
  return std::make_pair(*sliced_dim, *output_idx);
}
// Forward-direction merge: folds `instr` and its formatting ops into the one
// existing move_infos_ entry that shares the same dynamic-update-slice.
// In the forward direction an entry holds exactly one collective and one
// dynamic-update-slice, hence the CHECKs below.
void WhileLoopAnalysis::MergeIntoExistingCollectivesForward(
    HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
    std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
    std::vector<int64_t> indices_to_merge,
    absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order) {
  CHECK_EQ(indices_to_merge.size(), 1);
  CHECK_EQ(dyn_updates.size(), 1);
  int64_t target_idx = indices_to_merge[0];
  CHECK_EQ(move_infos_[target_idx].dynamic_update_slices.size(), 1);
  CHECK_EQ(move_infos_[target_idx].collectives_to_move.size(), 1);
  HloDynamicUpdateSliceInstruction* dyn_update = dyn_updates[0];
  CHECK_EQ(move_infos_[target_idx].dynamic_update_slices[0], dyn_update)
      << "Not the same dynamic-update-slice for converging entry";
  absl::flat_hash_set<const HloInstruction*> existing_entry_instrs(
      move_infos_[target_idx].formatting_ops.begin(),
      move_infos_[target_idx].formatting_ops.end());
  existing_entry_instrs.insert(move_infos_[target_idx].collectives_to_move[0]);
  // `instr` already belongs to the entry; nothing new to add.
  if (existing_entry_instrs.count(instr)) {
    return;
  }
  // The collective being merged becomes a formatting op of the target entry.
  move_infos_[target_idx].formatting_ops.push_back(instr);
  for (auto* op : formatting_ops) {
    if (!existing_entry_instrs.count(op)) {
      move_infos_[target_idx].formatting_ops.push_back(op);
    }
  }
  // Restore program order of the combined formatting ops.
  absl::c_sort(move_infos_[target_idx].formatting_ops,
               [&](const HloInstruction* a, const HloInstruction* b) {
                 return instruction_order[a] < instruction_order[b];
               });
}
// Forward-sink merge: folds `instr` (with its formatting ops and stores) and
// every move_infos_ entry listed in `indices_to_merge` into the entry with
// the smallest index, deduplicating ops and keeping formatting ops in
// program order. The other merged entries are removed from move_infos_.
void WhileLoopAnalysis::MergeIntoExistingCollectivesForwardSink(
    HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
    std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
    int64_t sliced_idx, std::vector<int64_t> output_indices,
    std::vector<int64_t> indices_to_merge,
    absl::flat_hash_map<const HloInstruction*, int64_t>
        index_per_dyn_update_slice,
    absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order) {
  CHECK(!indices_to_merge.empty());
  // Merge into the smallest index: since it is the minimum, erasing the
  // other entries below can never shift its position.
  const int64_t target_idx = *absl::c_min_element(indices_to_merge);
  absl::flat_hash_set<const HloInstruction*> existing_formatting_ops(
      move_infos_[target_idx].formatting_ops.begin(),
      move_infos_[target_idx].formatting_ops.end());
  absl::flat_hash_set<const HloInstruction*> existing_collectives_to_move(
      move_infos_[target_idx].collectives_to_move.begin(),
      move_infos_[target_idx].collectives_to_move.end());
  absl::flat_hash_set<const HloInstruction*> existing_dyn_updates(
      move_infos_[target_idx].dynamic_update_slices.begin(),
      move_infos_[target_idx].dynamic_update_slices.end());
  // Appends the given collectives/formatting ops/stores to the target entry,
  // skipping anything the target already contained before merging started.
  auto merge_entry_to_target =
      [&](std::vector<HloInstruction*> collectives_to_merge,
          std::vector<HloInstruction*>& formatting_ops_to_merge,
          std::vector<HloDynamicUpdateSliceInstruction*>& dyn_updates_to_merge,
          int64_t sliced_idx_to_merge,
          std::vector<int64_t>& output_indices_to_merge) {
        for (HloInstruction* op : collectives_to_merge) {
          if (!existing_collectives_to_move.count(op)) {
            move_infos_[target_idx].collectives_to_move.push_back(op);
          }
        }
        for (HloInstruction* op : formatting_ops_to_merge) {
          if (!existing_formatting_ops.count(op)) {
            move_infos_[target_idx].formatting_ops.push_back(op);
          }
        }
        for (int64_t i = 0; i < dyn_updates_to_merge.size(); ++i) {
          HloDynamicUpdateSliceInstruction* dyn_update =
              dyn_updates_to_merge[i];
          // NOTE(review): index_per_dyn_update_slice is a by-value parameter,
          // so this update never reaches the caller — confirm whether it
          // should be passed by reference.
          index_per_dyn_update_slice[dyn_update] = target_idx;
          if (!existing_dyn_updates.count(dyn_update)) {
            move_infos_[target_idx].dynamic_update_slices.push_back(dyn_update);
            // All merged entries must slice along the same dimension.
            CHECK_EQ(sliced_idx_to_merge, move_infos_[target_idx].sliced_idx);
            move_infos_[target_idx].output_indices.push_back(
                output_indices_to_merge[i]);
          }
        }
      };
  // Merge all listed entries (in the given order) and then the new
  // collective itself. Erasure is deferred so the indices in
  // indices_to_merge stay valid while merging.
  for (int64_t idx : indices_to_merge) {
    if (idx == target_idx) {
      continue;
    }
    merge_entry_to_target(
        move_infos_[idx].collectives_to_move, move_infos_[idx].formatting_ops,
        move_infos_[idx].dynamic_update_slices, move_infos_[idx].sliced_idx,
        move_infos_[idx].output_indices);
  }
  merge_entry_to_target({instr}, formatting_ops, dyn_updates, sliced_idx,
                        output_indices);
  // Erase merged-away entries in descending index order. Erasing while
  // iterating in ascending order (as previously done) shifts the positions
  // of the entries still to be erased and removes the wrong ones whenever
  // more than one entry is merged.
  absl::c_sort(indices_to_merge, [](int64_t a, int64_t b) { return a > b; });
  for (int64_t idx : indices_to_merge) {
    if (idx != target_idx) {
      move_infos_.erase(move_infos_.begin() + idx);
    }
  }
  // Restore program order of the combined formatting ops.
  absl::c_sort(move_infos_[target_idx].formatting_ops,
               [&](const HloInstruction* a, const HloInstruction* b) {
                 return instruction_order[a] < instruction_order[b];
               });
}
// Dispatches to the direction-specific merge routine that folds a newly
// discovered collective (plus its formatting ops and dynamic-update-slices)
// into already-recorded move_infos_ entries. Backward pipelining never merges
// into existing collectives, so reaching it here is a hard error.
void WhileLoopAnalysis::MergeIntoExistingCollectives(
    HloInstruction* instr, std::vector<HloInstruction*> formatting_ops,
    std::vector<HloDynamicUpdateSliceInstruction*> dyn_updates,
    int64_t sliced_idx, std::vector<int64_t> output_indices,
    std::vector<int64_t> indices_to_merge,
    absl::flat_hash_map<const HloInstruction*, int64_t>
        index_per_dyn_update_slice,
    absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order,
    CollectivePipeliner::PipeliningDirection direction) {
  switch (direction) {
    case CollectivePipeliner::PipeliningDirection::kForwardSink:
      MergeIntoExistingCollectivesForwardSink(
          instr, formatting_ops, dyn_updates, sliced_idx, output_indices,
          indices_to_merge, index_per_dyn_update_slice, instruction_order);
      return;
    case CollectivePipeliner::PipeliningDirection::kForward:
      MergeIntoExistingCollectivesForward(instr, formatting_ops, dyn_updates,
                                          indices_to_merge, instruction_order);
      return;
    default:
      CHECK(false) << "Backward pipelining is not supported in "
                      "MergeIntoExistingCollectives ";
  }
}
// Scans the while body for collectives eligible for pipelining in the given
// `direction` and records them (with their formatting ops, dynamic-update
// slices, sliced index and output indices) in move_infos_. Bails out early if
// the loop parameter or the while instruction itself has any non-GTE users,
// because the transformation relies on pure tuple-element plumbing. Stops
// collecting once max_pipelining_per_loop_ entries have been gathered.
void WhileLoopAnalysis::CollectCollectivesToMove(
    int64_t level_to_operate_on,
    CollectivePipeliner::PipeliningDirection direction,
    HloPredicate should_process, HloPredicate acceptable_formatting,
    HloPredicate should_allow_loop_variant_parameter_in_chain,
    bool should_allow_control_dependencies,
    bool should_add_loop_invariant_op_in_chain) {
  move_infos_.clear();
  HloComputation* while_body = while_->while_body();
  const HloInstruction* loop_parameter =
      while_body->parameter_instructions()[0];
  // Only proceed when the loop parameter is consumed exclusively through
  // get-tuple-element instructions.
  if (absl::c_any_of(loop_parameter->users(), [](const HloInstruction* instr) {
        return instr->opcode() != HloOpcode::kGetTupleElement;
      })) {
    return;
  }
  // Same restriction for consumers of the while loop's result.
  if (absl::c_any_of(while_->users(), [](const HloInstruction* instr) {
        return instr->opcode() != HloOpcode::kGetTupleElement;
      })) {
    return;
  }
  // Count how many GTEs read each tuple index of the parameter.
  absl::flat_hash_map<int64_t, int64_t> parameter_gtes_count;
  for (auto* user : loop_parameter->users()) {
    CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement);
    ++parameter_gtes_count[user->tuple_index()];
  }
  absl::flat_hash_map<const HloInstruction*, Range> index_ranges;
  absl::flat_hash_map<const HloInstruction*, int64_t>
      index_per_dyn_update_slice;
  std::optional<Range> index_range;
  // If the loop bound is known, compute the [start, start + (count-1)*incr]
  // range taken by the induction variable.
  if (loop_bound_) {
    index_range = Range{*loop_start_,
                        loop_start_->add(loop_iteration_count_
                                             ->sub(ConstantValue::GetOne(
                                                 loop_start_->GetBitwidth(),
                                                 loop_start_->IsSigned()))
                                             .mul(*loop_increment_)),
                        true};
  }
  int64_t count = 0;
  // Assign a post-order position to every instruction; used later to keep
  // formatting ops sorted in a valid execution order.
  absl::flat_hash_map<const HloInstruction*, int64_t> instruction_order;
  std::vector<HloInstruction*> instructions_post_order =
      while_body->MakeInstructionPostOrder();
  for (auto* instr : instructions_post_order) {
    if (instr->opcode() == HloOpcode::kGetTupleElement) {
      // Tuple index 0 is the induction variable; attach its value range.
      if (index_range && instr->tuple_index() == 0) {
        index_ranges.insert({instr, *index_range});
      }
    }
    instruction_order[instr] = count++;
  }
  for (auto* instr : instructions_post_order) {
    // Forward pipelining only handles unary ops that preserve rank.
    if (direction == CollectivePipeliner::PipeliningDirection::kForward &&
        (instr->operand_count() != 1 ||
         instr->shape().dimensions_size() !=
             instr->operand(0)->shape().dimensions_size())) {
      continue;
    }
    if (!should_process(instr)) {
      continue;
    }
    if (direction == CollectivePipeliner::PipeliningDirection::kForward ||
        direction == CollectivePipeliner::PipeliningDirection::kForwardSink) {
      // Find the dynamic-update-slices this collective feeds (through
      // acceptable formatting ops) into the loop output.
      auto [dyn_updates, formatting_ops] = CheckStoreIntoSliceIsCompatible(
          instr, while_body, level_to_operate_on, pipeline_use_tree_,
          acceptable_formatting,
          direction ==
              CollectivePipeliner::PipeliningDirection::kForwardSink);
      if (dyn_updates.empty()) {
        VLOG(5)
            << "Skipping " << instr->name()
            << " because storing into slice is not compatible with pipelining";
        continue;
      }
      // Plain forward pipelining supports exactly one DUS per collective.
      CHECK(direction != CollectivePipeliner::PipeliningDirection::kForward ||
            dyn_updates.size() == 1);
      int64_t sliced_idx = -1;
      std::vector<int64_t> output_indices;
      bool skip_instr = false;
      bool not_first_dyn_update = false;
      for (HloDynamicUpdateSliceInstruction* dyn_update : dyn_updates) {
        std::optional<std::pair<int64_t, int64_t>> maybe_dus_info =
            IsSupportedDynamicUpdateSlice(dyn_update, instr, formatting_ops,
                                          direction, level_to_operate_on,
                                          parameter_gtes_count, index_ranges);
        if (!maybe_dus_info.has_value()) {
          VLOG(5) << "Skipping " << instr->name() << " because "
                  << dyn_update->name() << " is not supported";
          skip_instr = true;
          break;
        }
        output_indices.push_back(maybe_dus_info->second);
        if (not_first_dyn_update) {
          // All DUSes must write distinct buffers but share the sliced index.
          CHECK_NE(dyn_update->operand(0), dyn_updates[0]->operand(0));
          CHECK_EQ(sliced_idx, maybe_dus_info->first);
        } else {
          sliced_idx = maybe_dus_info->first;
        }
        not_first_dyn_update = true;
      }
      if (skip_instr) {
        continue;
      }
      CHECK_NE(sliced_idx, -1);
      // Collect move_infos_ entries that already own one of these DUSes; the
      // new collective must be merged with them rather than added standalone.
      std::vector<int64_t> indices_to_merge;
      for (HloDynamicUpdateSliceInstruction* dyn_update : dyn_updates) {
        if (index_per_dyn_update_slice.find(dyn_update) !=
            index_per_dyn_update_slice.end()) {
          int64_t index = index_per_dyn_update_slice[dyn_update];
          if (!absl::c_linear_search(indices_to_merge, index)) {
            indices_to_merge.push_back(index);
          }
        }
      }
      if (!indices_to_merge.empty()) {
        MergeIntoExistingCollectives(
            instr, formatting_ops, dyn_updates, sliced_idx, output_indices,
            indices_to_merge, index_per_dyn_update_slice, instruction_order,
            direction);
      } else {
        // Fresh entry: keep formatting ops in program order and record which
        // move_infos_ slot each DUS belongs to.
        absl::c_sort(formatting_ops,
                     [&](const HloInstruction* a, const HloInstruction* b) {
                       return instruction_order[a] < instruction_order[b];
                     });
        for (HloDynamicUpdateSliceInstruction* dyn_update : dyn_updates) {
          index_per_dyn_update_slice[dyn_update] = move_infos_.size();
        }
        move_infos_.push_back({{instr},
                               dyn_updates,
                               std::move(formatting_ops),
                               sliced_idx,
                               std::move(output_indices)});
      }
    } else {
      CHECK_EQ(direction, CollectivePipeliner::PipeliningDirection::kBackward);
      // Backward direction: find the chain of ops feeding the collective that
      // can be pushed out of the loop.
      auto chain_collected = CollectChainsToPushBackwards(
          instr, *loop_iteration_idx_, while_body, level_to_operate_on,
          invariant_loop_parameters_,
          should_allow_loop_variant_parameter_in_chain,
          should_allow_control_dependencies, invariant_loop_instructions_,
          should_add_loop_invariant_op_in_chain);
      if (!chain_collected.has_value()) {
        VLOG(5) << "Skipping " << instr->name()
                << " because didn't find compatible slice of parameter";
        continue;
      }
      move_infos_.push_back(
          WhileMoveInfo{{instr}, {}, std::move(*chain_collected), {}, {}});
    }
    if (move_infos_.size() >= max_pipelining_per_loop_) {
      break;
    }
  }
  if (direction != CollectivePipeliner::PipeliningDirection::kForward) {
    return;
  }
  // Forward only: assign a unique tuple position to every distinct DUS index
  // operand so it can be threaded through the rewritten loop.
  dus_index_map_.clear();
  for (auto& to_move : move_infos_) {
    CHECK_EQ(to_move.dynamic_update_slices.size(), 1);
    HloInstruction* dus_index =
        to_move.dynamic_update_slices[0]->mutable_operand(
            to_move.dynamic_update_slices[0]->first_index_operand_number() +
            to_move.sliced_idx);
    auto it = dus_index_map_.find(dus_index);
    int64_t dus_index_tuple_position = dus_index_map_.size();
    if (it != dus_index_map_.end()) {
      dus_index_tuple_position = it->second;
    } else {
      dus_index_map_[dus_index] = dus_index_tuple_position;
    }
  }
}
// Returns the inferred loop trip count, or nullopt if it wasn't computed.
std::optional<ConstantValue> WhileLoopAnalysis::GetLoopIterationCount() const {
  return loop_iteration_count_;
}
// Returns the induction variable's starting value, or nullopt if unknown.
std::optional<ConstantValue> WhileLoopAnalysis::GetLoopStart() const {
  return loop_start_;
}
// Returns the induction variable's per-iteration increment, or nullopt.
std::optional<ConstantValue> WhileLoopAnalysis::GetLoopIncrement() const {
  return loop_increment_;
}
// Returns the collected pipelining candidates (see CollectCollectivesToMove).
const std::vector<WhileMoveInfo>& WhileLoopAnalysis::GetMoveInfos() const {
  return move_infos_;
}
// Returns true if `instr` is loop invariant: it has no side effects, is not a
// parameter, and every transitive operand is itself invariant. Results are
// memoized in `invariant_cache`, which is filled in for all visited
// instructions as the explicit DFS below runs.
bool IsLoopInvariant(
    const HloInstruction* instr,
    absl::flat_hash_map<const HloInstruction*, bool>& invariant_cache) {
  auto it = invariant_cache.find(instr);
  if (it != invariant_cache.end()) {
    return it->second;
  }
  // Explicit DFS stack of (instruction, index of next operand to visit) —
  // avoids recursion depth issues on large graphs.
  std::vector<std::pair<const HloInstruction*, int>> stack(
      1, std::make_pair(instr, 0));
  while (!stack.empty()) {
    auto& current = stack.back();
    // Optimistically mark invariant; downgraded below when disproved.
    invariant_cache[std::get<0>(current)] = true;
    if (std::get<0>(current)->HasSideEffect() ||
        std::get<0>(current)->opcode() == HloOpcode::kParameter) {
      // Side-effecting ops and loop parameters are never invariant.
      invariant_cache[std::get<0>(current)] = false;
      stack.pop_back();
      continue;
    }
    if (std::get<0>(current)->operands().empty()) {
      // Operand-free instructions (e.g. constants) are invariant.
      invariant_cache[std::get<0>(current)] = true;
      stack.pop_back();
      continue;
    }
    if (std::get<1>(current) > 0) {
      // Re-inspect the operand we just finished: if it turned out variant,
      // the current instruction is variant too.
      auto* current_operand =
          std::get<0>(current)->operand(std::get<1>(current) - 1);
      auto cop_it = invariant_cache.find(current_operand);
      CHECK(cop_it != invariant_cache.end())
          << "Entry expected to be populated";
      if (!cop_it->second) {
        invariant_cache[std::get<0>(current)] = false;
        stack.pop_back();
        continue;
      }
    }
    if (std::get<0>(current)->operand_count() == std::get<1>(current)) {
      // All operands visited and invariant; cache entry stays true.
      stack.pop_back();
      continue;
    }
    auto* next_operand = std::get<0>(current)->operand(std::get<1>(current)++);
    auto op_it = invariant_cache.find(next_operand);
    if (op_it == invariant_cache.end()) {
      stack.push_back(std::make_pair(next_operand, 0));
    } else if (!op_it->second) {
      // NOTE(review): this `&=` rewrites the operand's own (already-false)
      // entry — effectively a no-op. The current instruction is instead
      // downgraded on the next loop iteration by the re-check above; confirm
      // whether `invariant_cache[std::get<0>(current)]` was intended here.
      invariant_cache[next_operand] &= op_it->second;
    }
  }
  it = invariant_cache.find(instr);
  CHECK(it != invariant_cache.end())
      << "We should have computed \"instr\" value";
  return it->second;
}
// Returns `base_shape` with the full accumulation dimension — the size of the
// sliced dimension of the first dynamic-update-slice's destination buffer —
// prepended as the new major dimension.
Shape ComputeFullOutputShape(const WhileMoveInfo& move_info,
                             const Shape& base_shape) {
  const HloDynamicUpdateSliceInstruction* first_dus =
      move_info.dynamic_update_slices[0];
  const int64_t full_dim_size =
      first_dus->operand(0)->shape().dimensions()[move_info.sliced_idx];
  return ShapeUtil::PrependMajorDimension(full_dim_size, base_shape);
}
// Adds (and returns) a zero-valued instruction of the given shape and element
// type to `comp`: a scalar constant when `shape` is rank-0, otherwise a
// broadcast of a scalar zero constant.
HloInstruction* CreateZero(HloComputation* comp, const Shape& shape,
                           PrimitiveType ptype) {
  HloInstruction* scalar_zero = comp->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::Zero(ptype)));
  if (shape.dimensions_size() == 0) {
    return scalar_zero;
  }
  return comp->AddInstruction(
      HloInstruction::CreateBroadcast(shape, scalar_zero, {}));
}
}
using Interval = std::pair<int64_t, int64_t>;
using Intervals = std::vector<Interval>;
// Parses a replica-group-style string (e.g. "{{0,1},{2,3}}") into a vector of
// (first, second) interval pairs. Fails if any group does not hold exactly
// two ids.
absl::StatusOr<std::vector<Interval>> ParseVectorOfPairs(
    absl::string_view str) {
  TF_ASSIGN_OR_RETURN(std::vector<ReplicaGroup> replica_groups,
                      ParseReplicaGroupsOnly(str));
  std::vector<Interval> intervals;
  intervals.reserve(replica_groups.size());
  for (const ReplicaGroup& group : replica_groups) {
    TF_RET_CHECK(group.replica_ids_size() == 2);
    intervals.emplace_back(group.replica_ids(0), group.replica_ids(1));
  }
  return intervals;
}
// Rewrites the _xla_send_recv_validation frontend attribute of a
// collective-permute after the loop has been peeled, shifting each per-device
// validity interval to account for the iteration removed from the loop
// (`is_peeled` distinguishes the peeled instance from the remaining loop
// body). Non-collective-permute instructions and instructions without the
// attribute are left untouched.
absl::Status UpdateSendRecvValidation(
    HloInstruction* instruction, bool is_peeled,
    CollectivePipeliner::PipeliningDirection direction,
    const WhileLoopAnalysis& loop_analysis) {
  if (instruction->opcode() != HloOpcode::kCollectivePermute) {
    return absl::OkStatus();
  }
  const auto& frontend_attributes = instruction->frontend_attributes().map();
  if (!frontend_attributes.contains(kSendRecvValidationAttr)) {
    return absl::OkStatus();
  }
  VLOG(3) << "Trip count = "
          << loop_analysis.GetLoopIterationCount()->GetSignedValue();
  VLOG(3) << "Collective permute with _xla_send_recv_validation: "
          << instruction->ToString();
  TF_ASSIGN_OR_RETURN(
      Intervals old_intervals,
      ParseVectorOfPairs(frontend_attributes.at(kSendRecvValidationAttr)));
  Intervals intervals;
  if (direction == CollectivePipeliner::kForward) {
    // Forward pipelining peels the FIRST iteration (iteration 0).
    for (auto [a, b] : old_intervals) {
      if (is_peeled) {
        // Peeled instance runs only iteration 0: valid iff 0 was in [a, b];
        // {1, 0} is an empty (never-valid) interval.
        if (a <= 0 && 0 <= b) {
          intervals.push_back({0, 0});
        } else {
          intervals.push_back({1, 0});
        }
      } else {
        // Remaining loop iterations shift down by one, clamped at 0.
        intervals.push_back(
            {std::max(int64_t{0}, a - 1), std::max(int64_t{0}, b - 1)});
      }
    }
  } else if (direction == CollectivePipeliner::kBackward) {
    // Backward pipelining peels the LAST iteration, so the trip count must be
    // known to identify it.
    auto trip_count_value = loop_analysis.GetLoopIterationCount();
    if (!trip_count_value) {
      return absl::InternalError(
          "Unable to deduce loop trip count in collective pipeliner. This is "
          "required for backward pipelining while fixing the "
          "_xla_send_recv_validation attribute");
    }
    int64_t trip_count = trip_count_value->GetSignedValue();
    int64_t last_iteration = trip_count - 1;
    for (auto [a, b] : old_intervals) {
      if (is_peeled) {
        // Peeled instance runs only the last iteration.
        if (a <= last_iteration && last_iteration <= b) {
          intervals.push_back({0, 0});
        } else {
          intervals.push_back({1, 0});
        }
      } else {
        // Loop keeps iterations [0, last_iteration - 1]; cap upper bounds.
        intervals.push_back({a, std::min(last_iteration - 1, b)});
      }
    }
  }
  hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute(
      instruction, kSendRecvValidationAttr, intervals);
  VLOG(3) << "Updated collective_permute with _xla_send_recv_validation: "
          << instruction->ToString();
  return absl::OkStatus();
}
// Performs the forward pipelining transformation: peels the first iteration of
// the while loop out in front of it, builds a new while loop whose tuple also
// carries the DUS indices and (when needed) the collectives' raw inputs, and
// then rotates each pipelined collective so that it consumes the PREVIOUS
// iteration's data — inside the loop for iterations 1..n-1 and once after the
// loop for the final iteration's data.
absl::Status TransformLoopForward(
    const WhileLoopAnalysis& loop_analysis, bool insert_non_alias_custom_call,
    int64_t level_to_operate_on, bool pipeline_use_tree,
    bool process_different_sized_ops, HloPredicate should_process,
    HloPredicate acceptable_formatting, HloPredicate reuse_output_buffer,
    int64_t& next_channel_id) {
  // Maps instructions in the while body to their peeled (first-iteration)
  // clones in the enclosing computation.
  InstructionMap while_body_to_peeled;
  absl::flat_hash_set<HloInstruction*> to_skip_set;
  absl::flat_hash_map<HloInstruction*, HloInstruction*> formatting_map;
  absl::flat_hash_map<HloInstruction*, int64_t> is_output_instruction;
  // Indices of moves whose collective output can't alias its input buffer, so
  // the raw input must be threaded through the loop tuple separately.
  std::vector<int64_t> moves_requiring_special_output;
  int64_t count = 0;
  for (auto& to_move : loop_analysis.GetMoveInfos()) {
    CHECK_EQ(to_move.dynamic_update_slices.size(), 1);
    to_skip_set.insert(to_move.collectives_to_move.front());
    if (!to_move.formatting_ops.empty()) {
      formatting_map[to_move.formatting_ops.back()] =
          to_move.collectives_to_move.front();
    }
    const Shape& output_shape =
        to_move.formatting_ops.empty()
            ? to_move.collectives_to_move.front()->shape()
            : to_move.formatting_ops.back()->shape();
    if (!reuse_output_buffer(to_move.collectives_to_move.front()) ||
        output_shape !=
            to_move.collectives_to_move.front()->operand(0)->shape()) {
      moves_requiring_special_output.push_back(count);
      to_skip_set.insert(to_move.dynamic_update_slices.front());
    }
    ++count;
  }
  HloInstruction* while_loop = loop_analysis.while_loop_instruction();
  HloComputation* while_body = while_loop->while_body();
  CHECK_EQ(while_body->parameter_instructions().size(), 1)
      << "Expected only one parameter";
  HloInstruction* loop_parameter = while_body->parameter_instructions()[0];
  HloInstruction* loop_init = while_loop->mutable_operand(0);
  const int64_t initial_inputs = loop_init->operand_count();
  // Seed the peel map: the parameter and its GTEs map to the init operands.
  while_body_to_peeled[loop_parameter] = loop_init;
  for (auto* user : loop_parameter->users()) {
    CHECK_EQ(user->opcode(), HloOpcode::kGetTupleElement)
        << "Expected only get-tuple-elements as users";
    while_body_to_peeled[user] =
        loop_init->mutable_operand(user->tuple_index());
  }
  CHECK_EQ(while_body->root_instruction()->opcode(), HloOpcode::kTuple);
  for (int i = 0; i < while_body->root_instruction()->operand_count(); ++i) {
    is_output_instruction[while_body->root_instruction()->mutable_operand(i)] =
        i;
  }
  // Compute the layout of the new, wider loop tuple: original operands, then
  // one slot per unique DUS index, then one slot per special-output move, and
  // finally (appended below) one slot per extra pipelined dependency.
  HloComputation* loop_computation = while_loop->parent();
  std::vector<HloInstruction*> new_init_operands;
  std::vector<Shape> new_parameter_shapes;
  std::vector<HloInstruction*> new_root_operands;
  const int64_t operands_indices_count =
      loop_init->operand_count() + loop_analysis.GetUniqueDUSIndices();
  const int64_t new_loop_tuple_operand_count =
      operands_indices_count + moves_requiring_special_output.size();
  new_parameter_shapes.resize(new_loop_tuple_operand_count);
  new_root_operands.resize(new_loop_tuple_operand_count);
  new_init_operands.resize(new_loop_tuple_operand_count);
  for (int i = 0; i < loop_parameter->shape().tuple_shapes().size(); ++i) {
    new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
    new_root_operands[i] = while_body->root_instruction()->mutable_operand(i);
    new_init_operands[i] = loop_init->mutable_operand(i);
  }
  // Peel the first iteration: clone the body into the enclosing computation,
  // skipping the pipelined collectives/formatting ops (their inputs pass
  // through unchanged) — they will be executed one iteration later instead.
  for (auto* instr : while_body->MakeInstructionPostOrder()) {
    if (instr == loop_parameter) {
      continue;
    }
    if (ContainsKey(to_skip_set, instr)) {
      auto it = while_body_to_peeled.find(instr->operand(0));
      CHECK(it != while_body_to_peeled.end());
      HloInstruction* passthrough_operand = it->second;
      while_body_to_peeled[instr] = passthrough_operand;
      continue;
    }
    auto formatting_it = formatting_map.find(instr);
    if (formatting_it != formatting_map.end()) {
      auto it = while_body_to_peeled.find(formatting_it->second);
      CHECK(it != while_body_to_peeled.end());
      HloInstruction* passthrough_operand = it->second;
      while_body_to_peeled[instr] = passthrough_operand;
      continue;
    }
    std::vector<HloInstruction*> new_operands =
        MapNewOperands(instr->operands(), while_body_to_peeled);
    HloInstruction* cloned_instr = loop_computation->AddInstruction(
        instr->CloneWithNewOperands(instr->shape(), new_operands));
    TF_RETURN_IF_ERROR(
        UpdateControlDependencies(instr, cloned_instr, while_body_to_peeled));
    UpdateInstructionChannelId(cloned_instr, next_channel_id);
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        cloned_instr, true, CollectivePipeliner::PipeliningDirection::kForward,
        loop_analysis));
    while_body_to_peeled[instr] = cloned_instr;
    auto output_it = is_output_instruction.find(instr);
    if (output_it != is_output_instruction.end()) {
      new_init_operands[output_it->second] = cloned_instr;
    }
  }
  // Thread each unique DUS index value through the loop tuple.
  for (auto& dus : loop_analysis.GetDUSIndices()) {
    new_parameter_shapes[dus.second + initial_inputs] = dus.first->shape();
    new_root_operands[dus.second + initial_inputs] = dus.first;
    new_init_operands[dus.second + initial_inputs] =
        while_body_to_peeled[dus.first];
  }
  // Thread the raw collective inputs for special-output moves.
  absl::flat_hash_map<int64_t, int64_t> moves_requiring_special_output_to_idx;
  for (int i = 0; i < moves_requiring_special_output.size(); ++i) {
    HloInstruction* collective =
        loop_analysis.GetMoveInfos()[moves_requiring_special_output[i]]
            .collectives_to_move.front();
    moves_requiring_special_output_to_idx[moves_requiring_special_output[i]] =
        operands_indices_count + i;
    new_parameter_shapes[operands_indices_count + i] =
        collective->operand(0)->shape();
    new_root_operands[operands_indices_count + i] =
        collective->mutable_operand(0);
    new_init_operands[operands_indices_count + i] =
        while_body_to_peeled[collective->mutable_operand(0)];
  }
  // Thread any additional values the formatting ops depend on.
  for (auto& move_info : loop_analysis.GetMoveInfos()) {
    auto pipelined_instrs = CollectDependenciesToPipeline(
        absl::MakeConstSpan(move_info.collectives_to_move),
        absl::MakeSpan(move_info.formatting_ops));
    for (auto* pipelined : pipelined_instrs) {
      is_output_instruction[pipelined] = new_init_operands.size();
      new_parameter_shapes.push_back(pipelined->shape());
      new_root_operands.push_back(pipelined);
      new_init_operands.push_back(while_body_to_peeled[pipelined]);
    }
  }
  // Clone condition and body against the widened tuple shape.
  Shape loop_state_shape = ShapeUtil::MakeTupleShape(new_parameter_shapes);
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  InstructionMap pipelined_values_map_inloop;
  InstructionMap pipelined_values_map_outloop;
  replacements[loop_parameter] = HloInstruction::CreateParameter(
      0, ShapeUtil::MakeTupleShape(new_parameter_shapes), "loop_peel_param");
  replacements[while_loop->while_condition()->parameter_instructions()[0]] =
      HloInstruction::CreateParameter(
          0, ShapeUtil::MakeTupleShape(new_parameter_shapes),
          "loop_peel_cond_param");
  replacements[while_body->root_instruction()] =
      HloInstruction::CreateTuple(new_root_operands);
  HloComputation* new_while_condition =
      loop_computation->parent()->AddEmbeddedComputation(
          while_loop->while_condition()->CloneWithReplacements(&replacements));
  HloComputation* new_while_body =
      loop_computation->parent()->AddEmbeddedComputation(
          while_body->CloneWithReplacements(&replacements));
  for (HloInstruction* instruction : new_while_body->instructions()) {
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        instruction, false, CollectivePipeliner::PipeliningDirection::kForward,
        loop_analysis));
  }
  HloInstruction* new_init = loop_computation->AddInstruction(
      HloInstruction::CreateTuple(new_init_operands));
  while_body_to_peeled[while_body->root_instruction()] = new_init;
  TF_RETURN_IF_ERROR(UpdateControlDependencies(while_body->root_instruction(),
                                               new_init, while_body_to_peeled));
  HloInstruction* new_while_loop =
      loop_computation->AddInstruction(HloInstruction::CreateWhile(
          loop_state_shape, new_while_condition, new_while_body, new_init));
  TF_RETURN_IF_ERROR(
      while_loop->ReplaceAllUsesWithDifferentShape(new_while_loop));
  TF_RETURN_IF_ERROR(
      loop_computation->RemoveInstructionAndUnusedOperands(while_loop));
  // Re-analyze the new loop (which now starts one increment later) so the
  // move infos refer to the cloned instructions.
  WhileLoopAnalysis new_loop_analysis(
      new_while_loop, loop_analysis.GetMaxPipeliningPerLoop(),
      pipeline_use_tree, process_different_sized_ops,
      nullptr,
      nullptr,
      loop_analysis.GetLoopStart()->add(*loop_analysis.GetLoopIncrement()));
  new_loop_analysis.ComputeLoopStatistics();
  new_loop_analysis.CollectCollectivesToMove(
      level_to_operate_on, CollectivePipeliner::PipeliningDirection::kForward,
      should_process, acceptable_formatting);
  CHECK_EQ(new_loop_analysis.GetMoveInfos().size(),
           loop_analysis.GetMoveInfos().size());
  // Hook up loads for the extra pipelined-dependency tuple slots, both inside
  // the new body and after the loop.
  for (int64_t i = new_loop_tuple_operand_count;
       i < new_parameter_shapes.size(); ++i) {
    HloInstruction* pipelined_value_load_inloop =
        new_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
            new_while_body->parameter_instruction(0), i));
    HloInstruction* pipelined_value_load_outloop =
        loop_computation->AddInstruction(
            HloInstruction::CreateGetTupleElement(new_while_loop, i));
    pipelined_values_map_inloop[new_while_body->root_instruction()->operand(
        i)] = pipelined_value_load_inloop;
    pipelined_values_map_outloop[new_while_body->root_instruction()->operand(
        i)] = pipelined_value_load_outloop;
  }
  // Writes `to_insert` into `base` at `dus_index` along `index_position`,
  // with zeros for all other index operands.
  auto insert_slice = [](HloInstruction* to_insert, int64_t index_position,
                         int64_t num_indices, HloInstruction* dus_index,
                         HloInstruction* base) {
    HloComputation* computation = to_insert->parent();
    HloInstruction* zero =
        computation->AddInstruction(HloInstruction::CreateConstant(
            LiteralUtil::Zero(dus_index->shape().element_type())));
    std::vector<HloInstruction*> indices(num_indices, zero);
    indices[index_position] = dus_index;
    return computation->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
        base->shape(), base, to_insert, indices));
  };
  // Runs the pipelined collective (optionally tagged with a marker custom
  // call so later passes don't re-pipeline it) and replays its formatting ops
  // on `stacked_data`.
  auto process_slice =
      [&next_channel_id, insert_non_alias_custom_call, level_to_operate_on](
          HloInstruction* stacked_data,
          const InstructionMap& pipelined_values_map,
          const WhileMoveInfo& move_info) -> absl::StatusOr<HloInstruction*> {
    HloInstruction* processed = stacked_data->parent()->AddInstruction(
        move_info.collectives_to_move.front()->CloneWithNewOperands(
            move_info.collectives_to_move.front()->shape(), {stacked_data}));
    UpdateInstructionChannelId(processed, next_channel_id);
    if (insert_non_alias_custom_call) {
      HloInstruction* level =
          stacked_data->parent()->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0(level_to_operate_on + 1)));
      processed = stacked_data->parent()->AddInstruction(
          HloInstruction::CreateCustomCall(
              processed->shape(), {processed, level},
              CollectivePipeliner::kInsertedByPreviousStep));
    }
    InstructionMap cloned_map = pipelined_values_map;
    cloned_map[move_info.collectives_to_move.front()] = processed;
    for (auto* formatting_op : move_info.formatting_ops) {
      auto new_operands = MapNewOperands(formatting_op->operands(), cloned_map);
      processed = stacked_data->parent()->AddInstruction(
          formatting_op->CloneWithNewOperands(formatting_op->shape(),
                                              new_operands));
      cloned_map[formatting_op] = processed;
    }
    return processed;
  };
  // Slices one iteration's chunk out of `data_to_slice` (when shapes differ),
  // processes it through the collective + formatting ops, and writes the
  // result back into `stacked_data` at `dus_index`.
  auto extract_and_process_slice =
      [&process_slice](
          HloInstruction* stacked_data, HloInstruction* data_to_slice,
          const WhileMoveInfo& move_info,
          const InstructionMap& pipelined_values_map,
          HloInstruction* dus_index) -> absl::StatusOr<HloInstruction*> {
    HloComputation* computation = stacked_data->parent();
    const Shape& slice_target_shape =
        move_info.collectives_to_move.front()->operand(0)->shape();
    HloInstruction* sliced_data = data_to_slice;
    HloDynamicUpdateSliceInstruction* dyn_update =
        move_info.dynamic_update_slices.front();
    PrimitiveType element_type =
        dyn_update
            ->operand(dyn_update->first_index_operand_number() +
                      move_info.sliced_idx)
            ->shape()
            .element_type();
    HloInstruction* zero = computation->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::Zero(element_type)));
    std::vector<HloInstruction*> indices(
        dyn_update->operand_count() - dyn_update->first_index_operand_number(),
        zero);
    indices[move_info.sliced_idx] = dus_index;
    if (slice_target_shape != data_to_slice->shape()) {
      absl::InlinedVector<int64_t, 4> dynamic_slice_sizes;
      dynamic_slice_sizes.reserve(slice_target_shape.dimensions_size());
      for (int i = 0; i < slice_target_shape.dimensions_size(); ++i) {
        dynamic_slice_sizes.push_back(slice_target_shape.dimensions(i));
      }
      sliced_data =
          computation->AddInstruction(HloInstruction::CreateDynamicSlice(
              slice_target_shape, data_to_slice, indices, dynamic_slice_sizes));
    }
    TF_ASSIGN_OR_RETURN(
        sliced_data,
        process_slice(sliced_data, pipelined_values_map, move_info));
    return computation->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
        dyn_update->shape(), stacked_data, sliced_data, indices));
  };
  // Rotate each move: in the loop body process the previous iteration's data,
  // and after the loop process the last iteration's data left in the tuple.
  for (int i = 0; i < new_loop_analysis.GetMoveInfos().size(); ++i) {
    auto& move_info = new_loop_analysis.GetMoveInfos()[i];
    HloDynamicUpdateSliceInstruction* dyn_update =
        move_info.dynamic_update_slices.front();
    std::vector<HloInstruction*> loop_output_to_replace;
    HloInstruction* parameter_instr =
        new_while_body->parameter_instructions()[0];
    for (auto* user : new_while_loop->users()) {
      if (user->tuple_index() != move_info.output_indices[0]) {
        continue;
      }
      loop_output_to_replace.push_back(user);
    }
    const HloInstruction* dus_index_curr_iteration = dyn_update->operand(
        dyn_update->first_index_operand_number() + move_info.sliced_idx);
    // Previous iteration's DUS index lives in the extra tuple slots.
    const int64_t offset_for_index =
        new_loop_analysis.GetDUSIndex(dus_index_curr_iteration) +
        initial_inputs;
    Shape index_shape = dus_index_curr_iteration->shape();
    HloInstruction* input_dus_idx =
        new_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
            index_shape, parameter_instr, offset_for_index));
    if (insert_non_alias_custom_call) {
      HloInstruction* level =
          new_while_body->AddInstruction(HloInstruction::CreateConstant(
              LiteralUtil::CreateR0(level_to_operate_on + 1)));
      input_dus_idx =
          new_while_body->AddInstruction(HloInstruction::CreateCustomCall(
              index_shape, {input_dus_idx, level},
              CollectivePipeliner::kInsertedByPreviousStep));
    }
    HloInstruction* output_dus_idx =
        loop_computation->AddInstruction(HloInstruction::CreateGetTupleElement(
            index_shape, new_while_loop, offset_for_index));
    HloInstruction* input_stacked_data = dyn_update->mutable_operand(0);
    HloInstruction* output_stacked_data =
        loop_computation->AddInstruction(HloInstruction::CreateGetTupleElement(
            dyn_update->shape(), new_while_loop, move_info.output_indices[0]));
    HloInstruction* input_data_to_slice = input_stacked_data;
    HloInstruction* output_data_to_slice = output_stacked_data;
    auto it = moves_requiring_special_output_to_idx.find(i);
    if (it != moves_requiring_special_output_to_idx.end()) {
      // Special-output moves read the raw collective input threaded through
      // the loop tuple instead of the accumulation buffer.
      input_data_to_slice =
          new_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
              move_info.collectives_to_move.front()->operand(0)->shape(),
              parameter_instr, it->second));
      output_data_to_slice = loop_computation->AddInstruction(
          HloInstruction::CreateGetTupleElement(
              move_info.collectives_to_move.front()->operand(0)->shape(),
              new_while_loop, it->second));
    }
    TF_ASSIGN_OR_RETURN(input_stacked_data,
                        extract_and_process_slice(
                            input_stacked_data, input_data_to_slice, move_info,
                            pipelined_values_map_inloop, input_dus_idx));
    TF_ASSIGN_OR_RETURN(
        output_stacked_data,
        extract_and_process_slice(output_stacked_data, output_data_to_slice,
                                  move_info, pipelined_values_map_outloop,
                                  output_dus_idx));
    auto replace_instructions_with =
        [](absl::Span<HloInstruction*> to_replace_instrs,
           HloInstruction* new_instr) {
          for (auto* to_replace : to_replace_instrs) {
            HloComputation* computation = to_replace->parent();
            TF_RETURN_IF_ERROR(to_replace->ReplaceAllUsesWith(new_instr));
            TF_RETURN_IF_ERROR(
                computation->RemoveInstructionAndUnusedOperands(to_replace));
          }
          return absl::OkStatus();
        };
    auto* new_peeled_dus = input_stacked_data;
    if (it == moves_requiring_special_output_to_idx.end()) {
      // Store the current iteration's (unprocessed) collective input into the
      // buffer so the next iteration can pick it up.
      new_peeled_dus = insert_slice(
          move_info.collectives_to_move.front()->mutable_operand(0),
          move_info.sliced_idx,
          dyn_update->operand_count() -
              dyn_update->first_index_operand_number(),
          dyn_update->mutable_operand(dyn_update->first_index_operand_number() +
                                      move_info.sliced_idx),
          input_stacked_data);
    }
    TF_RETURN_IF_ERROR(dyn_update->ReplaceAllUsesWith(new_peeled_dus));
    TF_RETURN_IF_ERROR(
        new_while_body->RemoveInstructionAndUnusedOperands(dyn_update));
    TF_RETURN_IF_ERROR(replace_instructions_with(
        absl::MakeSpan(loop_output_to_replace), output_stacked_data));
  }
  TF_RETURN_IF_ERROR(loop_computation->parent()->RemoveUnusedComputations());
  return absl::OkStatus();
}
absl::Status TransformLoopForwardSink(const WhileLoopAnalysis& loop_analysis,
bool insert_non_alias_custom_call,
int64_t level_to_operate_on,
bool pipeline_use_tree,
bool process_different_sized_ops,
HloPredicate should_process,
int64_t& next_channel_id) {
absl::flat_hash_map<HloInstruction*, int64_t> is_output_instruction;
absl::flat_hash_map<const HloInstruction*, bool> invariant_cache;
HloInstruction* while_loop = loop_analysis.while_loop_instruction();
HloComputation* while_body = while_loop->while_body();
CHECK_EQ(while_body->parameter_instructions().size(), 1)
<< "Expected only one parameter";
HloInstruction* loop_parameter = while_body->parameter_instructions()[0];
HloInstruction* loop_init = while_loop->mutable_operand(0);
for (HloInstruction* inst : while_body->root_instruction()->operands()) {
if (inst->opcode() == HloOpcode::kDynamicUpdateSlice &&
inst->operand(1)->IsCustomCall(
CollectivePipeliner::kSunkByPreviousStep)) {
HloInstruction* cc = inst->mutable_operand(1);
TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(1, cc->mutable_operand(0)));
TF_RETURN_IF_ERROR(cc->parent()->RemoveInstruction(cc));
}
}
CHECK_EQ(while_body->root_instruction()->opcode(), HloOpcode::kTuple);
for (int i = 0; i < while_body->root_instruction()->operand_count(); ++i) {
is_output_instruction[while_body->root_instruction()->mutable_operand(i)] =
i;
}
HloComputation* loop_computation = while_loop->parent();
HloComputation* body_computation = while_loop->while_body();
std::vector<HloInstruction*> new_init_operands;
std::vector<Shape> new_parameter_shapes;
std::vector<HloInstruction*> new_root_operands;
absl::flat_hash_set<int64_t> indices_to_insert;
const int64_t operands_indices_count = loop_init->operand_count();
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
new_parameter_shapes.resize(operands_indices_count);
new_root_operands.resize(operands_indices_count);
new_init_operands.resize(operands_indices_count);
absl::flat_hash_set<int64_t> original_to_move_indices;
VLOG(1) << "Initial size for " << body_computation->name() << ": "
<< operands_indices_count;
absl::flat_hash_map<HloInstruction*, int64_t> collective_to_new_tuple_index;
for (auto& to_move : loop_analysis.GetMoveInfos()) {
for (HloInstruction* collective : to_move.collectives_to_move) {
Shape shape =
ComputeFullOutputShape(to_move, collective->operand(0)->shape());
new_init_operands.push_back(
CreateZero(loop_computation, shape, shape.element_type()));
new_parameter_shapes.push_back(shape);
collective_to_new_tuple_index[collective] = new_root_operands.size();
indices_to_insert.insert(new_root_operands.size());
new_root_operands.push_back(collective->mutable_operand(0));
}
CHECK_EQ(to_move.dynamic_update_slices.size(),
to_move.output_indices.size());
for (int64_t i = 0; i < to_move.dynamic_update_slices.size(); ++i) {
int64_t output_idx = to_move.output_indices[i];
original_to_move_indices.insert(output_idx);
}
}
for (int i = 0; i < loop_parameter->shape().tuple_shapes().size(); ++i) {
if (original_to_move_indices.contains(i)) {
new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
new_init_operands[i] = loop_init->mutable_operand(i);
continue;
}
new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
new_init_operands[i] = loop_init->mutable_operand(i);
new_root_operands[i] = while_body->root_instruction()->mutable_operand(i);
}
VLOG(1) << "Size of " << body_computation->name()
<< " after adding collectives: " << new_root_operands.size();
absl::flat_hash_set<HloInstruction*> added_pipelined;
for (auto& move_info : loop_analysis.GetMoveInfos()) {
auto pipelined_instrs = CollectDependenciesToPipeline(
absl::MakeSpan(move_info.collectives_to_move),
absl::MakeSpan(move_info.formatting_ops));
for (auto* pipelined : pipelined_instrs) {
if (pipelined->opcode() == HloOpcode::kConstant) {
continue;
}
if (added_pipelined.contains(pipelined)) {
continue;
}
const bool is_loop_invariant =
IsLoopInvariant(pipelined, invariant_cache);
is_output_instruction[pipelined] = new_init_operands.size();
if (is_loop_invariant) {
new_parameter_shapes.push_back(pipelined->shape());
new_init_operands.push_back(
CreateZero(loop_computation, pipelined->shape(),
pipelined->shape().element_type()));
new_root_operands.push_back(pipelined);
added_pipelined.insert(pipelined);
continue;
}
Shape expanded_shape =
ComputeFullOutputShape(move_info, pipelined->shape());
new_parameter_shapes.push_back(expanded_shape);
new_init_operands.push_back(CreateZero(loop_computation, expanded_shape,
expanded_shape.element_type()));
Shape extra_trivial_dim_shape =
ShapeUtil::PrependMajorDimension(1, pipelined->shape());
HloInstruction* reshaped = body_computation->AddInstruction(
HloInstruction::CreateReshape(extra_trivial_dim_shape, pipelined));
Shape index_shape =
move_info.dynamic_update_slices.front()->index_shapes()[0];
std::vector<HloInstruction*> indices(
expanded_shape.dimensions_size(),
CreateZero(body_computation, index_shape,
index_shape.element_type()));
indices[0] = move_info.dynamic_update_slices.front()->index_operands()[0];
HloInstruction* input =
body_computation->AddInstruction(HloInstruction::CreateCustomCall(
expanded_shape,
{body_computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0((int32_t)new_root_operands.size())))},
"PlaceHolder"));
reshaped = body_computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(expanded_shape, input,
reshaped, indices));
new_root_operands.push_back(reshaped);
added_pipelined.insert(pipelined);
}
}
VLOG(1) << "Size of " << body_computation->name()
<< " after adding dependencies: " << new_root_operands.size();
std::unique_ptr<HloInstruction> new_parameter =
HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes),
absl::StrCat("sink_", loop_parameter->name()));
for (auto& to_move : loop_analysis.GetMoveInfos()) {
for (HloInstruction* collective : to_move.collectives_to_move) {
int64_t new_tuple_index = collective_to_new_tuple_index[collective];
HloInstruction* collective_operand = collective->mutable_operand(0);
HloInstruction* to_insert =
body_computation->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::PrependMajorDimension(1, collective_operand->shape()),
collective_operand));
Shape expanded_shape =
ComputeFullOutputShape(to_move, collective_operand->shape());
HloInstruction* input =
body_computation->AddInstruction(HloInstruction::CreateCustomCall(
expanded_shape,
{body_computation->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0((int32_t)new_tuple_index)))},
"PlaceHolder"));
HloDynamicUpdateSliceInstruction* dyn_update =
to_move.dynamic_update_slices[0];
std::vector<HloInstruction*> indices(
expanded_shape.dimensions_size(),
CreateZero(body_computation, dyn_update->index_shapes()[0],
dyn_update->index_shapes()[0].element_type()));
indices[0] = dyn_update->index_operands()[0];
to_insert = body_computation->AddInstruction(
HloInstruction::CreateDynamicUpdateSlice(expanded_shape, input,
to_insert, indices));
new_root_operands[new_tuple_index] = to_insert;
}
}
for (auto* p_user : body_computation->parameter_instructions()[0]->users()) {
CHECK_EQ(p_user->opcode(), HloOpcode::kGetTupleElement);
const int64_t tuple_idx = p_user->tuple_index();
if (!original_to_move_indices.contains(tuple_idx)) {
continue;
}
replacements[p_user] =
HloInstruction::CreateGetTupleElement(new_parameter.get(), tuple_idx);
std::vector<HloInstruction*> stack(p_user->users().begin(),
p_user->users().end());
new_root_operands[tuple_idx] = replacements[p_user].get();
while (!stack.empty()) {
auto* u = stack.back();
stack.pop_back();
replacements[u] = nullptr;
for (auto* user : u->users()) {
if (user == body_computation->root_instruction()) {
continue;
}
stack.push_back(user);
}
}
}
std::unique_ptr<HloInstruction> new_root_instr =
HloInstruction::CreateTuple(new_root_operands);
replacements[body_computation->parameter_instruction(0)] =
std::move(new_parameter);
replacements[body_computation->root_instruction()] =
std::move(new_root_instr);
replacements[while_loop->while_condition()->parameter_instruction(0)] =
HloInstruction::CreateParameter(
0, ShapeUtil::MakeTupleShape(new_parameter_shapes),
absl::StrCat(
"sink_",
while_loop->while_condition()->parameter_instruction(0)->name()));
HloInstruction* new_init = loop_computation->AddInstruction(
HloInstruction::CreateTuple(new_init_operands));
HloComputation* cloned_body =
body_computation->parent()->AddEmbeddedComputation(
body_computation->CloneWithReplacements(&replacements));
HloComputation* cloned_cond =
body_computation->parent()->AddEmbeddedComputation(
while_loop->while_condition()->CloneWithReplacements(&replacements));
for (int64_t i = 0; i < cloned_body->root_instruction()->operand_count();
++i) {
HloInstruction* output =
cloned_body->root_instruction()->mutable_operand(i);
if (output->opcode() != HloOpcode::kDynamicUpdateSlice) {
continue;
}
if (!output->operand(0)->IsCustomCall("PlaceHolder")) {
continue;
}
auto idx = Cast<HloConstantInstruction>(output->operand(0)->operand(0))
->literal()
.GetFirstInteger();
auto* new_param =
cloned_body->AddInstruction(HloInstruction::CreateGetTupleElement(
output->shape(), cloned_body->parameter_instruction(0), *idx));
HloInstruction* old_operand_param = output->mutable_operand(0);
TF_RETURN_IF_ERROR(output->ReplaceOperandWith(0, new_param));
TF_RETURN_IF_ERROR(
old_operand_param->parent()->RemoveInstruction(old_operand_param));
if (insert_non_alias_custom_call && indices_to_insert.contains(i)) {
auto* old_operand = output->mutable_operand(1);
auto* custom_call =
cloned_body->AddInstruction(HloInstruction::CreateCustomCall(
old_operand->shape(), {old_operand},
CollectivePipeliner::kSunkByPreviousStep));
TF_RETURN_IF_ERROR(output->ReplaceOperandWith(1, custom_call));
}
}
HloInstruction* new_while =
loop_computation->AddInstruction(HloInstruction::CreateWhile(
new_init->shape(), cloned_cond, cloned_body, new_init));
std::vector<HloInstruction*> new_output_tuple;
new_output_tuple.resize(operands_indices_count, nullptr);
InstructionMap pipelined_map;
for (auto& to_move : loop_analysis.GetMoveInfos()) {
for (int64_t i = 0; i < to_move.collectives_to_move.size(); ++i) {
HloInstruction* collective = to_move.collectives_to_move[i];
int64_t gte_index = collective_to_new_tuple_index[collective];
HloInstruction* to_sink = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while, gte_index));
pipelined_map[collective->mutable_operand(0)] = to_sink;
}
const int64_t new_dim_limit =
to_move.dynamic_update_slices[0]->shape().dimensions(0);
auto pipelined_instrs = CollectDependenciesToPipeline(
absl::MakeSpan(to_move.collectives_to_move),
absl::MakeSpan(to_move.formatting_ops));
for (auto* original_pipelined : pipelined_instrs) {
if (original_pipelined->opcode() == HloOpcode::kConstant) {
continue;
}
const bool is_loop_invariant =
IsLoopInvariant(original_pipelined, invariant_cache);
CHECK(is_output_instruction.contains(original_pipelined));
int64_t pipelined_idx = is_output_instruction[original_pipelined];
HloInstruction* pipelined = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while, pipelined_idx));
if (is_loop_invariant) {
Shape full_shape = ComputeFullOutputShape(to_move, pipelined->shape());
absl::InlinedVector<int64_t, 4> operand_dims;
operand_dims.resize(pipelined->shape().dimensions_size());
absl::c_iota(operand_dims, 1);
HloInstruction* broadcasted =
loop_computation->AddInstruction(HloInstruction::CreateBroadcast(
full_shape, pipelined, operand_dims));
pipelined_map[original_pipelined] = broadcasted;
} else {
pipelined_map[original_pipelined] = pipelined;
}
}
for (HloInstruction* collective : to_move.collectives_to_move) {
HloInstruction* pipelined_instr_cloned =
loop_computation->AddInstruction(collective->CloneWithNewOperands(
ComputeFullOutputShape(to_move, collective->shape()),
{pipelined_map[collective->mutable_operand(0)]}));
UpdateInstructionChannelId(pipelined_instr_cloned, next_channel_id);
pipelined_map[collective] = pipelined_instr_cloned;
}
absl::flat_hash_set<HloInstruction*> to_add_batch_set;
auto collect_operands = [&pipelined_map, &to_add_batch_set,
loop_computation,
&to_move](HloInstruction* instr) {
std::vector<HloInstruction*> operands;
for (auto* operand : instr->mutable_operands()) {
if (operand->opcode() == HloOpcode::kConstant) {
HloInstruction* cloned_constant = loop_computation->AddInstruction(
operand->CloneWithNewOperands(operand->shape(), {}));
if (!to_add_batch_set.contains(instr)) {
operands.push_back(cloned_constant);
continue;
}
Shape full_shape =
ComputeFullOutputShape(to_move, cloned_constant->shape());
absl::InlinedVector<int64_t, 4> operand_dims;
operand_dims.resize(cloned_constant->shape().dimensions_size());
absl::c_iota(operand_dims, 1);
HloInstruction* broadcasted =
loop_computation->AddInstruction(HloInstruction::CreateBroadcast(
full_shape, cloned_constant, operand_dims));
operands.push_back(broadcasted);
continue;
}
auto it = pipelined_map.find(operand);
CHECK(it != pipelined_map.end());
operands.push_back(it->second);
}
return operands;
};
for (auto* current : to_move.formatting_ops) {
if (IsLoopInvariant(current, invariant_cache)) {
continue;
}
to_add_batch_set.insert(current);
}
for (HloInstruction* formatting_op : to_move.formatting_ops) {
if (pipelined_map.contains(formatting_op)) {
continue;
}
if (!to_add_batch_set.contains(formatting_op) &&
formatting_op->opcode() != HloOpcode::kBroadcast) {
HloInstruction* cloned_not_to_batch = loop_computation->AddInstruction(
formatting_op->CloneWithNewOperands(
formatting_op->shape(), collect_operands(formatting_op)));
UpdateInstructionChannelId(cloned_not_to_batch, next_channel_id);
pipelined_map[formatting_op] = cloned_not_to_batch;
continue;
}
if (formatting_op->IsElementwise() ||
formatting_op->opcode() == HloOpcode::kReshape ||
formatting_op->opcode() == HloOpcode::kAllReduce ||
formatting_op->opcode() == HloOpcode::kConvert ||
formatting_op->opcode() == HloOpcode::kCollectivePermute) {
HloInstruction* cloned_elementwise = loop_computation->AddInstruction(
formatting_op->CloneWithNewOperands(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collect_operands(formatting_op)));
pipelined_map[formatting_op] = cloned_elementwise;
continue;
}
if (formatting_op->opcode() == HloOpcode::kReduce) {
auto operands = collect_operands(formatting_op);
std::vector<int64_t> dimensions(formatting_op->dimensions().begin(),
formatting_op->dimensions().end());
for (auto& dim : dimensions) {
++dim;
}
if (operands[1]->opcode() == HloOpcode::kBroadcast) {
CHECK(operands[1]->operand(0)->opcode() == HloOpcode::kConstant);
operands[1] = operands[1]->mutable_operand(0);
}
HloInstruction* expanded_reduce =
loop_computation->AddInstruction(HloInstruction::CreateReduce(
ComputeFullOutputShape(to_move, formatting_op->shape()),
operands[0], operands[1], dimensions,
formatting_op->to_apply()));
pipelined_map[formatting_op] = expanded_reduce;
continue;
}
if (formatting_op->opcode() == HloOpcode::kBroadcast) {
auto operands = collect_operands(formatting_op);
std::vector<int64_t> dimensions(1, 0);
for (const int64_t dim : formatting_op->dimensions()) {
dimensions.push_back(dim + 1);
}
if (operands[0]->shape().dimensions_size() == 0) {
dimensions.clear();
}
HloInstruction* expanded_broadcast =
loop_computation->AddInstruction(HloInstruction::CreateBroadcast(
ComputeFullOutputShape(to_move, formatting_op->shape()),
operands[0], dimensions));
pipelined_map[formatting_op] = expanded_broadcast;
continue;
}
if (formatting_op->opcode() == HloOpcode::kSlice) {
std::vector<int64_t> slice_start = formatting_op->slice_starts();
std::vector<int64_t> slice_limits = formatting_op->slice_limits();
std::vector<int64_t> slice_strides = formatting_op->slice_strides();
slice_start.insert(slice_start.begin(), 0);
slice_limits.insert(slice_limits.begin(), new_dim_limit);
slice_strides.insert(slice_strides.begin(), 1);
HloInstruction* expanded_slice =
loop_computation->AddInstruction(HloInstruction::CreateSlice(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collect_operands(formatting_op)[0], slice_start, slice_limits,
slice_strides));
pipelined_map[formatting_op] = expanded_slice;
continue;
}
if (formatting_op->opcode() == HloOpcode::kDynamicSlice) {
std::vector<int64_t> dynamic_slice_sizes =
formatting_op->dynamic_slice_sizes();
dynamic_slice_sizes.insert(dynamic_slice_sizes.begin(), new_dim_limit);
HloDynamicSliceInstruction* dynslice =
Cast<HloDynamicSliceInstruction>(formatting_op);
HloInstruction* zero = loop_computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(
formatting_op->operand(dynslice->first_index_operand_number())
->shape()
.element_type())));
std::vector<HloInstruction*> indices(1, zero);
auto collected_operands = collect_operands(formatting_op);
indices.insert(indices.end(), std::next(collected_operands.begin()),
collected_operands.end());
HloInstruction* expanded_dynslice =
loop_computation->AddInstruction(HloInstruction::CreateDynamicSlice(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collected_operands[0], indices, dynamic_slice_sizes));
pipelined_map[formatting_op] = expanded_dynslice;
continue;
}
if (formatting_op->opcode() == HloOpcode::kPad) {
HloPadInstruction* pad_instruction =
Cast<HloPadInstruction>(formatting_op);
PaddingConfig p_config = pad_instruction->padding_config();
PaddingConfig new_p_config;
new_p_config.add_dimensions();
for (auto& dim : p_config.dimensions()) {
auto* new_dim = new_p_config.add_dimensions();
*new_dim = dim;
}
auto new_operands = collect_operands(formatting_op);
HloInstruction* expanded_pad =
loop_computation->AddInstruction(HloInstruction::CreatePad(
ComputeFullOutputShape(to_move, formatting_op->shape()),
new_operands[0], new_operands[1], new_p_config));
pipelined_map[formatting_op] = expanded_pad;
continue;
}
if (formatting_op->opcode() == HloOpcode::kTranspose) {
HloTransposeInstruction* transpose_instruction =
Cast<HloTransposeInstruction>(formatting_op);
std::vector<int64_t> new_dims(
transpose_instruction->dimensions().begin(),
transpose_instruction->dimensions().end());
new_dims.insert(new_dims.begin(), 0);
for (int64_t& dim : new_dims) {
++dim;
}
HloInstruction* expanded_transpose =
loop_computation->AddInstruction(HloInstruction::CreateTranspose(
ComputeFullOutputShape(to_move, formatting_op->shape()),
collect_operands(formatting_op)[0], new_dims));
pipelined_map[formatting_op] = expanded_transpose;
continue;
}
CHECK(false) << "Unsupported instruction " << formatting_op->ToString();
}
for (int64_t i = 0; i < to_move.output_indices.size(); ++i) {
HloDynamicUpdateSliceInstruction* d_update =
to_move.dynamic_update_slices[i];
HloInstruction* inserted_operand = d_update->mutable_operand(1);
CHECK(pipelined_map.contains(inserted_operand))
<< "Expected to be processed";
HloInstruction* expanded_inserted = pipelined_map[inserted_operand];
if (!ShapeUtil::Compatible(expanded_inserted->shape(),
d_update->shape())) {
expanded_inserted =
loop_computation->AddInstruction(HloInstruction::CreateReshape(
d_update->shape(), expanded_inserted));
}
new_output_tuple[to_move.output_indices[i]] = expanded_inserted;
}
}
for (int64_t i = 0; i < operands_indices_count; ++i) {
if (new_output_tuple[i] != nullptr) {
continue;
}
new_output_tuple[i] = loop_computation->AddInstruction(
HloInstruction::CreateGetTupleElement(new_while, i));
}
HloInstruction* new_tuple = loop_computation->AddInstruction(
HloInstruction::CreateTuple(new_output_tuple));
TF_RETURN_IF_ERROR(while_loop->ReplaceAllUsesWithDifferentShape(new_tuple));
TF_RETURN_IF_ERROR(
loop_computation->RemoveInstructionAndUnusedOperands(while_loop));
TF_RETURN_IF_ERROR(loop_computation->parent()->RemoveUnusedComputations());
return absl::OkStatus();
}
// Rewrites `loop_analysis.while_loop_instruction()` for *backward* collective
// pipelining: each selected collective (exactly one per move-info, enforced
// below) is peeled for the first iteration in front of the loop, and inside
// the new loop body each iteration recomputes the chain for the *next*
// iteration, carrying the result through freshly appended while-tuple
// elements. One extra tuple element carries the "next iteration" index used
// by those rotated chains. The original loop body is re-cloned after the new
// while to materialize the final iteration's outputs, then the old while is
// removed. Returns an error status if any of the graph rewrites fail.
static absl::Status TransformLoopBackward(
    const WhileLoopAnalysis& loop_analysis, bool insert_non_alias_custom_call,
    int64_t level_to_operate_on, bool process_different_sized_ops,
    HloPredicate should_process, HloPredicate acceptable_formatting,
    CollectivePipeliner::HloPostprocessor postprocess_peeled,
    CollectivePipeliner::HloPostprocessor postprocess_rotated,
    int64_t& next_channel_id) {
  // Maps instructions in the while body to the peeled copies in front of the
  // loop, collectives to their move-info index, and body outputs to their
  // tuple position.
  absl::flat_hash_map<HloInstruction*, HloInstruction*> while_body_to_peeled;
  absl::flat_hash_map<HloInstruction*, int64_t> collective_to_move_map;
  absl::flat_hash_set<HloInstruction*> is_pipelined_instruction;
  absl::flat_hash_map<HloInstruction*, int64_t> is_output_instruction;
  // Side-effecting operands that feed only a pipelined collective; these are
  // skipped when re-cloning the body so they are not duplicated.
  absl::flat_hash_set<const HloInstruction*> sideeffect_unused_instructions;
  int64_t count = 0;
  // Index every collective to move. Backward pipelining supports exactly one
  // collective per move-info.
  for (auto& to_move : loop_analysis.GetMoveInfos()) {
    CHECK_EQ(to_move.collectives_to_move.size(), 1);
    HloInstruction* instr = to_move.collectives_to_move[0];
    collective_to_move_map[instr] = count;
    is_pipelined_instruction.insert(instr);
    is_pipelined_instruction.insert(to_move.formatting_ops.begin(),
                                    to_move.formatting_ops.end());
    ++count;
    // Like broadcasts. NOTE(review): the single-operand + single-user check
    // presumably targets ops such as send/recv-like producers consumed only
    // by the pipelined collective — confirm against CloneBackwardChain.
    if (instr->operand_count() == 1) {
      const HloInstruction* opnd = instr->operand(0);
      if (opnd->HasSideEffect() && opnd->user_count() == 1) {
        sideeffect_unused_instructions.insert(opnd);
      }
    }
  }
  HloInstruction* while_loop = loop_analysis.while_loop_instruction();
  HloComputation* while_body = while_loop->while_body();
  CHECK_EQ(while_body->parameter_instructions().size(), 1)
      << "Expected only one parameter";
  HloInstruction* loop_parameter = while_body->parameter_instructions()[0];
  HloInstruction* loop_initial_iteration_idx =
      while_loop->mutable_operand(0)->mutable_operand(
          *loop_analysis.GetLoopIterationIdx());
  while_body_to_peeled[loop_parameter] = while_loop;
  CHECK_EQ(while_body->root_instruction()->opcode(), HloOpcode::kTuple);
  // Record which body instruction produces each output tuple element.
  for (int i = 0; i < while_body->root_instruction()->operand_count(); ++i) {
    is_output_instruction[while_body->root_instruction()->mutable_operand(i)] =
        i;
  }
  // New loop shape = original tuple elements + one element per pipelined
  // collective + one trailing element for the shifted iteration index.
  std::vector<HloInstruction*> new_init_operands;
  std::vector<Shape> new_parameter_shapes;
  std::vector<HloInstruction*> new_root_operands;
  const int64_t operands_indices_count =
      while_loop->shape().tuple_shapes_size() +
      loop_analysis.GetMoveInfos().size() + 1;
  new_parameter_shapes.resize(operands_indices_count);
  new_root_operands.resize(operands_indices_count);
  new_init_operands.resize(operands_indices_count);
  // Carry the original tuple elements through unchanged.
  for (int i = 0; i < loop_parameter->shape().tuple_shapes_size(); ++i) {
    new_parameter_shapes[i] = loop_parameter->shape().tuple_shapes(i);
    new_root_operands[i] = while_body->root_instruction()->mutable_operand(i);
    new_init_operands[i] = while_loop->mutable_operand(0)->mutable_operand(i);
  }
  // Map for the peeled (pre-loop) clone of each collective's backward chain:
  // the body parameter maps to the loop init, and the loop iterator maps to
  // the initial iteration index.
  InstructionMap chain_clone_map;
  chain_clone_map[loop_parameter] = while_loop->mutable_operand(0);
  for (auto* u : loop_parameter->users()) {
    if (IsLoopIterator(u, *loop_analysis.GetLoopIterationIdx())) {
      chain_clone_map[u] = loop_initial_iteration_idx;
    }
  }
  // Peel iteration 0 of each collective's chain in front of the loop; its
  // result becomes the init value of the corresponding new tuple element.
  for (int i = 0; i < loop_analysis.GetMoveInfos().size(); ++i) {
    const int64_t idx = i + loop_parameter->shape().tuple_shapes_size();
    new_parameter_shapes[idx] =
        loop_analysis.GetMoveInfos()[i].collectives_to_move[0]->shape();
    new_root_operands[idx] =
        loop_analysis.GetMoveInfos()[i].collectives_to_move[0];
    TF_ASSIGN_OR_RETURN(
        new_init_operands[idx],
        CloneBackwardChain(*while_loop->parent(),
                           loop_analysis.GetMoveInfos()[i], chain_clone_map,
                           *loop_analysis.GetLoopIterationIdx(),
                           next_channel_id));
    if (postprocess_peeled.has_value()) {
      TF_RETURN_IF_ERROR(postprocess_peeled.value()(new_init_operands[idx]));
    }
  }
  // The rotated chains inside the body compute values for the *next*
  // iteration, so the extra index element starts at start + increment.
  ConstantValue next_loop_iteration =
      loop_analysis.GetLoopStart()->add(*loop_analysis.GetLoopIncrement());
  const Shape& loop_index_shape =
      while_loop->shape().tuple_shapes(*loop_analysis.GetLoopIterationIdx());
  HloInstruction* next_iteration_idx = while_loop->parent()->AddInstruction(
      HloInstruction::CreateConstant(*CreateLiteralOfShape(
          loop_index_shape, next_loop_iteration.GetSignedValue())));
  new_parameter_shapes.back() = loop_parameter->shape().tuple_shapes(
      *loop_analysis.GetLoopIterationIdx());
  new_init_operands.back() = next_iteration_idx;
  // Build the new while body from scratch.
  auto body_builder = HloComputation::Builder(while_body->name());
  HloInstruction* new_loop_param =
      body_builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeTupleShape(new_parameter_shapes), "param"));
  // The rotated chains read the shifted index stored in the last element.
  HloInstruction* loop_iterator_for_pipelined_instrs =
      body_builder.AddInstruction(HloInstruction::CreateGetTupleElement(
          new_loop_param, new_init_operands.size() - 1));
  InstructionMap while_body_replacement_map;
  while_body_replacement_map[loop_parameter] = new_loop_param;
  InstructionMap collective_to_move_clone_map;
  collective_to_move_clone_map[loop_parameter] = new_loop_param;
  for (auto* u : loop_parameter->users()) {
    if (IsLoopIterator(u, *loop_analysis.GetLoopIterationIdx())) {
      collective_to_move_clone_map[u] = loop_iterator_for_pipelined_instrs;
    }
  }
  // Clone the old body into the new one in post-order. Pipelined collectives
  // are replaced by (a) a GTE reading last iteration's value for their users
  // and (b) a rotated chain computing next iteration's value for the root.
  LoopVariantParameterInfo loop_variant_parameter_info;
  for (auto* instr : while_body->MakeInstructionPostOrder()) {
    if (instr == loop_parameter || instr == while_body->root_instruction() ||
        sideeffect_unused_instructions.contains(instr)) {
      continue;
    }
    HloInstruction* cloned_instr = nullptr;
    auto it = collective_to_move_map.find(instr);
    if (it != collective_to_move_map.end()) {
      TF_ASSIGN_OR_RETURN(
          cloned_instr,
          CloneBackwardChain(body_builder,
                             loop_analysis.GetMoveInfos()[it->second],
                             collective_to_move_clone_map,
                             *loop_analysis.GetLoopIterationIdx(),
                             next_channel_id, &loop_variant_parameter_info));
      if (postprocess_rotated.has_value()) {
        TF_RETURN_IF_ERROR(postprocess_rotated.value()(cloned_instr));
      }
    } else {
      auto new_operands =
          MapNewOperands(instr->operands(), while_body_replacement_map);
      cloned_instr = body_builder.AddInstruction(
          instr->CloneWithNewOperands(instr->shape(), new_operands));
      TF_RETURN_IF_ERROR(UpdateControlDependencies(instr, cloned_instr,
                                                   while_body_replacement_map));
      UpdateInstructionChannelId(cloned_instr, next_channel_id);
    }
    if (it != collective_to_move_map.end()) {
      const int64_t tuple_idx =
          while_loop->shape().tuple_shapes_size() + it->second;
      HloInstruction* pipelined_value = body_builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(new_loop_param, tuple_idx));
      while_body_replacement_map[instr] = pipelined_value;
      new_root_operands[tuple_idx] = cloned_instr;
      continue;
    }
    while_body_replacement_map[instr] = cloned_instr;
  }
  // Loop-variant parameters referenced by the rotated chains must read the
  // value produced in the current (new-body) iteration, not the stale GTE.
  for (const auto& [idx, value] : loop_variant_parameter_info) {
    auto it = while_body_replacement_map.find(new_root_operands[idx]);
    CHECK(it != while_body_replacement_map.end())
        << new_root_operands[idx]->ToString() << " not present in map";
    TF_RETURN_IF_ERROR(value->ReplaceAllUsesWith(it->second));
  }
  // Advance the shifted iteration index by one increment each trip.
  new_root_operands.back() =
      body_builder.AddInstruction(HloInstruction::CreateBinary(
          loop_index_shape, HloOpcode::kAdd,
          while_body_replacement_map
              [new_root_operands[*loop_analysis.GetLoopIterationIdx()]],
          body_builder.AddInstruction(
              HloInstruction::CreateConstant(*CreateLiteralOfShape(
                  loop_index_shape, next_loop_iteration.GetSignedValue())))));
  HloInstruction* new_loop_root =
      body_builder.AddInstruction(HloInstruction::CreateTuple(
          MapNewOperands(new_root_operands, while_body_replacement_map,
                         true)));
  while_body_replacement_map[while_body->root_instruction()] = new_loop_root;
  HloComputation* new_while_body =
      while_loop->GetModule()->AddEmbeddedComputation(
          body_builder.Build(new_loop_root));
  TF_RETURN_IF_ERROR(UpdateControlDependencies(while_body->root_instruction(),
                                               new_loop_root,
                                               while_body_replacement_map));
  for (HloInstruction* instruction : new_while_body->instructions()) {
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        instruction, false, CollectivePipeliner::PipeliningDirection::kBackward,
        loop_analysis));
  }
  // Build the new loop condition: iterate while the index has not reached
  // start + (trip_count - 1) * increment, i.e. one fewer body execution than
  // the original loop (the last iteration runs peeled, after the while).
  auto cond_builder =
      HloComputation::Builder(while_loop->while_condition()->name());
  HloInstruction* new_cond_param =
      cond_builder.AddInstruction(HloInstruction::CreateParameter(
          0, ShapeUtil::MakeTupleShape(new_parameter_shapes), "cond_param"));
  HloInstruction* loop_bound = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(*CreateLiteralOfShape(
          loop_initial_iteration_idx->shape(),
          loop_analysis.GetLoopStart()
              ->add(loop_analysis.GetLoopIterationCount()
                        ->sub(ConstantValue::GetOne(
                            loop_analysis.GetLoopStart()->GetBitwidth(),
                            loop_analysis.GetLoopStart()->IsSigned()))
                        .mul(*loop_analysis.GetLoopIncrement()))
              .GetSignedValue())));
  // Comparison direction follows the sign of the loop increment.
  ComparisonDirection cd =
      loop_analysis.GetLoopIncrement()->GetSignedValue() > 0
          ? ComparisonDirection::kLt
          : ComparisonDirection::kGt;
  HloInstruction* loop_iterator =
      cond_builder.AddInstruction(HloInstruction::CreateGetTupleElement(
          new_cond_param, *loop_analysis.GetLoopIterationIdx()));
  HloInstruction* comparison =
      cond_builder.AddInstruction(HloInstruction::CreateCompare(
          while_loop->while_condition()->root_instruction()->shape(),
          loop_iterator, loop_bound, cd));
  HloComputation* new_while_condition =
      while_loop->GetModule()->AddEmbeddedComputation(
          cond_builder.Build(comparison));
  HloInstruction* new_loop_init = while_loop->parent()->AddInstruction(
      HloInstruction::CreateTuple(new_init_operands));
  TF_RETURN_IF_ERROR(UpdateControlDependencies(while_body->root_instruction(),
                                               new_loop_init, chain_clone_map));
  HloInstruction* new_while_loop =
      while_loop->parent()->AddInstruction(HloInstruction::CreateWhile(
          new_while_body->root_instruction()->shape(), new_while_condition,
          new_while_body, new_loop_init));
  // Re-clone the original body after the new while to compute the final
  // iteration's outputs (pipelined collectives read their carried value
  // straight from the new while's tuple).
  while_body_replacement_map.clear();
  while_body_replacement_map[loop_parameter] = new_while_loop;
  std::vector<HloInstruction*> output_tuple_instructions(
      while_loop->shape().tuple_shapes_size(), nullptr);
  for (auto* instr : while_body->MakeInstructionPostOrder()) {
    if (instr == loop_parameter || instr == while_body->root_instruction() ||
        sideeffect_unused_instructions.contains(instr)) {
      continue;
    }
    auto instruction_is_output_it = is_output_instruction.find(instr);
    auto it = collective_to_move_map.find(instr);
    if (it != collective_to_move_map.end()) {
      const int64_t tuple_idx =
          while_loop->shape().tuple_shapes_size() + it->second;
      HloInstruction* pipelined_value = while_loop->parent()->AddInstruction(
          HloInstruction::CreateGetTupleElement(new_while_loop, tuple_idx));
      while_body_replacement_map[instr] = pipelined_value;
      if (instruction_is_output_it != is_output_instruction.end()) {
        output_tuple_instructions[instruction_is_output_it->second] =
            pipelined_value;
      }
      continue;
    }
    auto new_operands =
        MapNewOperands(instr->operands(), while_body_replacement_map);
    HloInstruction* cloned_instr = while_loop->parent()->AddInstruction(
        instr->CloneWithNewOperands(instr->shape(), new_operands));
    TF_RETURN_IF_ERROR(UpdateControlDependencies(instr, cloned_instr,
                                                 while_body_replacement_map));
    UpdateInstructionChannelId(cloned_instr, next_channel_id);
    TF_RETURN_IF_ERROR(UpdateSendRecvValidation(
        cloned_instr, true, CollectivePipeliner::PipeliningDirection::kBackward,
        loop_analysis));
    while_body_replacement_map[instr] = cloned_instr;
    if (instruction_is_output_it != is_output_instruction.end()) {
      output_tuple_instructions[instruction_is_output_it->second] =
          cloned_instr;
    }
  }
  // Splice the new loop + peeled last iteration in place of the old while,
  // then clean up the now-dead original loop and computations.
  HloInstruction* final_loop_output = while_loop->parent()->AddInstruction(
      HloInstruction::CreateTuple(output_tuple_instructions));
  HloComputation* loop_computation = while_loop->parent();
  TF_RETURN_IF_ERROR(
      while_loop->ReplaceAllUsesWithDifferentShape(final_loop_output));
  TF_RETURN_IF_ERROR(
      loop_computation->RemoveInstructionAndUnusedOperands(while_loop));
  TF_RETURN_IF_ERROR(loop_computation->parent()->RemoveUnusedComputations());
  return absl::OkStatus();
}
// Runs a single pass of the pipeliner over every while loop in `module`.
// Phase 1 gathers candidate loops (those containing at least one instruction
// accepted by `config_.should_process` and with a known, positive trip
// count); phase 2 collects and applies the move in the configured direction;
// phase 3 (last run only) strips the kInsertedByPreviousStep marker custom
// calls. Returns true iff the module was modified.
absl::StatusOr<bool> CollectivePipeliner::RunPipeliner(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Module-wide analyses shared by every per-loop analysis below.
  TF_ASSIGN_OR_RETURN(
      std::unique_ptr<TuplePointsToAnalysis> tuple_points_to_analysis,
      TuplePointsToAnalysis::Run(module));
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  // Phase 1: find pipelinable while loops.
  std::vector<std::pair<HloInstruction*, std::unique_ptr<WhileLoopAnalysis>>>
      pipelinable_loops;
  for (HloComputation* comp : module->MakeComputationPostOrder()) {
    for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
      if (instr->opcode() != HloOpcode::kWhile) {
        continue;
      }
      // Skip loops whose body contains nothing we would process.
      if (absl::c_none_of(instr->while_body()->instructions(),
                          config_.should_process)) {
        continue;
      }
      VLOG(1) << "Pipelinable while: " << instr->name();
      auto analysis = std::make_unique<WhileLoopAnalysis>(
          instr, config_.max_pipelining_per_loop, config_.pipeline_use_tree,
          config_.process_different_sized_ops, tuple_points_to_analysis.get(),
          call_graph.get());
      analysis->ComputeLoopStatistics();
      // Only loops with a statically known, positive trip count qualify.
      const bool known_positive_trip_count =
          analysis->GetLoopIterationCount() &&
          analysis->GetLoopIterationCount()->GetUnsignedValue() > 0;
      if (known_positive_trip_count) {
        pipelinable_loops.emplace_back(instr, std::move(analysis));
      }
    }
  }
  // Phase 2: transform each candidate loop in the configured direction.
  bool changed = false;
  int64_t transformed_loops = 0;
  int64_t transformed_instructions = 0;
  int64_t next_channel_id = hlo_query::NextChannelId(*module);
  VLOG(1) << "Pipelining on direction: "
          << GetPipelineDirectionString(config_.pipelining_direction);
  for (auto& [while_instr, analysis] : pipelinable_loops) {
    VLOG(1) << "While iterations: "
            << analysis->GetLoopIterationCount()->ToString();
    analysis->CollectCollectivesToMove(
        config_.level_to_operate_on, config_.pipelining_direction,
        config_.should_process, config_.acceptable_formatting,
        config_.should_allow_loop_variant_parameter_in_chain,
        config_.should_allow_control_dependencies,
        config_.should_add_loop_invariant_op_in_chain);
    if (analysis->GetMoveInfos().empty()) {
      continue;
    }
    transformed_instructions += analysis->GetMoveInfos().size();
    VLOG(1) << "Found Collectives to optimize";
    if (VLOG_IS_ON(1)) {
      int64_t id = 0;
      for (auto& to_move : analysis->GetMoveInfos()) {
        VLOG(1) << "MoveInfo #" << id++ << "\n" << ToString(to_move);
      }
    }
    switch (config_.pipelining_direction) {
      case PipeliningDirection::kForward:
        // Forward pipelining requires reusing the pipelined op's buffer.
        CHECK(config_.reuse_pipelined_op_buffer);
        TF_RETURN_IF_ERROR(TransformLoopForward(
            *analysis, !config_.last_run, config_.level_to_operate_on,
            config_.pipeline_use_tree, config_.process_different_sized_ops,
            config_.should_process, config_.acceptable_formatting,
            config_.reuse_pipelined_op_buffer, next_channel_id));
        break;
      case PipeliningDirection::kForwardSink:
        TF_RETURN_IF_ERROR(TransformLoopForwardSink(
            *analysis, !config_.last_run, config_.level_to_operate_on,
            config_.pipeline_use_tree, config_.process_different_sized_ops,
            config_.should_process, next_channel_id));
        break;
      default:
        // Anything else must be backward pipelining.
        CHECK_EQ(config_.pipelining_direction, PipeliningDirection::kBackward);
        TF_RETURN_IF_ERROR(TransformLoopBackward(
            *analysis, !config_.last_run, config_.level_to_operate_on,
            config_.process_different_sized_ops, config_.should_process,
            config_.acceptable_formatting,
            config_.postprocess_backward_peeled_op,
            config_.postprocess_backward_rotated_op, next_channel_id));
        break;
    }
    ++transformed_loops;
    changed = true;
  }
  // Phase 3: on the final run, drop the marker custom calls inserted by
  // earlier pipelining steps and forward their operand to all users.
  if (config_.last_run) {
    std::vector<HloInstruction*> marker_calls;
    for (HloComputation* comp : module->MakeComputationPostOrder()) {
      for (HloInstruction* instr : comp->instructions()) {
        if (!instr->IsCustomCall(
                CollectivePipeliner::kInsertedByPreviousStep)) {
          continue;
        }
        marker_calls.push_back(instr);
        TF_RETURN_IF_ERROR(
            instr->ReplaceAllUsesWith(instr->mutable_operand(0)));
        changed = true;
      }
    }
    for (auto* marker : marker_calls) {
      TF_RETURN_IF_ERROR(
          marker->parent()->RemoveInstructionAndUnusedOperands(marker));
    }
  }
  VLOG(1) << "Transformed loops: " << transformed_loops
          << " and transformed instructions: " << transformed_instructions
          << " for pipelining direction: "
          << GetPipelineDirectionString(config_.pipelining_direction);
  if (changed) {
    // Clean up instructions left dead by the rewrites above.
    TF_RETURN_IF_ERROR(HloDCE().Run(module, execution_threads).status());
  }
  return changed;
}
// Pass entry point. For all directions except forward-sink a single
// RunPipeliner pass suffices; forward-sink is iterated to a fixed point
// because sinking can expose further opportunities. Returns true iff any
// pass modified the module.
absl::StatusOr<bool> CollectivePipeliner::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  // Both predicates are mandatory parts of the configuration.
  CHECK(config_.acceptable_formatting);
  CHECK(config_.should_process);
  if (config_.pipelining_direction != PipeliningDirection::kForwardSink) {
    return RunPipeliner(module, execution_threads);
  }
  // Forward-sink: rerun until a pass reports no change.
  int64_t iteration = 0;
  bool made_progress = true;
  do {
    TF_ASSIGN_OR_RETURN(made_progress,
                        RunPipeliner(module, execution_threads));
    VLOG(1) << "Finished running pipeliner's iteration: " << iteration;
    ++iteration;
  } while (made_progress);
  // The final iteration is always a no-op, so progress was made iff more
  // than one iteration ran.
  return iteration > 1;
}
} | #include "xla/service/collective_pipeliner.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/host_memory_offload_annotations.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace {
using ::testing::_;
namespace op = xla::testing::opcode_matchers;
class CollectivePipelinerTest : public HloTestBase {
public:
CollectivePipelinerTest() {
const int64_t kNumReplicas = 4;
const int64_t kNumComputations = 2;
config_ = GetModuleConfigForTest(kNumReplicas,
kNumComputations);
}
protected:
const HloPredicate IsAllGather = HloPredicateIsOp<HloOpcode::kAllGather>;
HloModuleConfig config_;
};
// Runs CollectivePipeliner (bracketed by HloVerifier passes) over `module`
// with the given knobs and returns whether the module changed.
absl::StatusOr<bool> RunOptimizer(
    HloModule* module, bool last_run, int64_t level_to_operate_on = 0,
    bool pipeline_use_tree = false, bool process_different_sized_ops = true,
    CollectivePipeliner::PipeliningDirection direction =
        CollectivePipeliner::PipeliningDirection::kForward,
    HloPredicate should_process = HloPredicateIsOp<HloOpcode::kAllReduce>,
    HloPredicate acceptable_formatting = HloPredicateTrue,
    HloPredicate reuse_pipelined_op_buffer = HloPredicateTrue,
    HloPredicate should_allow_loop_variant_parameter_in_chain =
        HloPredicateFalse,
    CollectivePipeliner::HloPostprocessor postprocess_backward_peeled =
        std::nullopt,
    CollectivePipeliner::HloPostprocessor postprocess_backward_rotated =
        std::nullopt,
    bool should_add_loop_invariant_op_in_chain = false) {
  // Aggregate initialization: the field order below must match the
  // declaration order of CollectivePipeliner::Config exactly.
  CollectivePipeliner::Config config = {
      level_to_operate_on,
      INT64_MAX,  // presumably an unbounded per-loop pipelining budget
      last_run,
      pipeline_use_tree,
      process_different_sized_ops,
      direction,
      should_process,
      acceptable_formatting,
      reuse_pipelined_op_buffer,
      should_allow_loop_variant_parameter_in_chain,
      false, postprocess_backward_peeled,
      postprocess_backward_rotated, should_add_loop_invariant_op_in_chain};
  HloPassPipeline pipeline("optimizer");
  // Verify before and after so a malformed rewrite fails loudly here
  // instead of in a later pass.
  pipeline.AddPass<HloVerifier>(false, false);
  pipeline.AddPass<CollectivePipeliner>(config);
  pipeline.AddPass<HloVerifier>(false, false);
  return pipeline.Run(module);
}
// Forward pipelining of an all-reduce in a loop whose induction variable
// increments by one: the last iteration's all-reduce is peeled out of the
// while loop, and the remaining ones are rotated so each iteration
// all-reduces the previous iteration's slice.
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOne) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // Entry root: the peeled last-iteration all-reduce is written into the
  // accumulation buffer via a dynamic-update-slice outside the loop.
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
  // Its input slice is indexed by a value carried out of the while loop
  // in a newly-added tuple element (index 3).
  const HloInstruction* sliced = root->operand(1)->operand(0);
  EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
  const HloInstruction* index = sliced->operand(1);
  EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(index->tuple_index(), 3);
  const HloInstruction* while_inst = index->operand(0);
  EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
  // Inside the rotated loop body: the all-reduce now consumes the slice
  // produced by the *previous* iteration (value and index both read from
  // loop-carried tuple elements 1 and 3).
  const HloInstruction* while_root =
      while_inst->while_body()->root_instruction();
  EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
  const HloInstruction* dyn_upd = while_root->operand(1);
  EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
  EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* prev_ar = dyn_upd2->operand(1);
  EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
  const HloInstruction* dyn_slice_top = prev_ar->operand(0);
  EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
  const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
  const HloInstruction* get_tuple_index = dyn_slice_top->operand(1);
  EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(get_tuple_value->tuple_index(), 1);
  EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
// The collective's only user is a pair of bitcasts (a formatting chain):
// pipelining must hoist the all-reduce together with the bitcasts that
// sit between it and the dynamic-update-slice.
TEST_F(CollectivePipelinerTest, BitcastAsUser) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
current-loop-index = s32[] get-tuple-element(param), index=0
output-buffer = bf16[3,8,128] get-tuple-element(param), index=1
input-buffer = bf16[3,8,128] get-tuple-element(param), index=2
constant.1 = s32[] constant(1)
next-loop-index = s32[] add(current-loop-index, constant.1)
constant.0 = s32[] constant(0)
sliced-input-buffer = bf16[1,8,128] dynamic-slice(input-buffer, current-loop-index, constant.0, constant.0), dynamic_slice_sizes={1,8,128}
all-reduce = bf16[1,8,128] all-reduce(sliced-input-buffer), replica_groups={}, to_apply=add, channel_id=1
bitcast.0 = u16[3,8,128] bitcast(all-reduce)
bitcast.1 = bf16[3,8,128] bitcast(bitcast.0)
dynamic-update-slice = bf16[3,8,128] dynamic-update-slice(output-buffer, bitcast.1, current-loop-index, constant.0, constant.0)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(next-loop-index, dynamic-update-slice, input-buffer)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // The peeled update in the entry computation preserves the whole chain:
  // DUS <- bitcast <- bitcast <- all-reduce.
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::Bitcast(), _, _, _));
  const HloInstruction* cast_back = root->operand(1);
  EXPECT_EQ(cast_back->opcode(), HloOpcode::kBitcast);
  const HloInstruction* cast_to = cast_back->operand(0);
  EXPECT_EQ(cast_to->opcode(), HloOpcode::kBitcast);
  const HloInstruction* ar = cast_to->operand(0);
  EXPECT_EQ(ar->opcode(), HloOpcode::kAllReduce);
}
// Pipelining in a loop that also contains a collective-permute carrying a
// _xla_send_recv_validation attribute (forward cycle); the attribute's
// per-pair iteration bounds must be rewritten consistently when the loop
// is peeled/rotated.
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneCollectivePermute) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.5), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,0}},
frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9},{4,10},{5,11},{6,12},{7,13}}"}
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  // The validation attribute is partition-based, so run as 8 partitions /
  // 1 replica rather than the fixture's default config.
  config_.set_num_partitions(8);
  config_.set_replica_count(1);
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // NOTE(review): the FileCheck pattern below is empty. FileCheck fails
  // when given no CHECK directives, so the pattern looks stripped/lost —
  // confirm the intended CHECK lines against upstream.
  EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
// Same as the previous test but with the collective-permute's
// source/target pairs forming a backward cycle (0<-1<-2...<-7), so the
// _xla_send_recv_validation bounds are listed in reverse order.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneCollectivePermuteBackwardCycle) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(14)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
cp = bf16[3,8,128] collective-permute(get-tuple-element.5), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}},
frontend_attributes={_xla_send_recv_validation="{{7,13},{6,12},{5,11},{4,10},{3,9},{2,8},{1,7},{0,6}}"}
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(14)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(13)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  // Partition-based validation attribute: 8 partitions, 1 replica.
  config_.set_num_partitions(8);
  config_.set_replica_count(1);
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // NOTE(review): empty FileCheck pattern — FileCheck errors out with no
  // CHECK directives; the pattern appears stripped. Confirm upstream.
  EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
// When the loop body contains a host-transfer send/send-done pair, the
// peeled copies must receive matching (updated) channel ids so the pair
// stays consistent after pipelining.
TEST_F(CollectivePipelinerTest, UpdateSendRecvChannelIdForHostTransfers) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
after-all = after-all()
send.88 = (s32[], u32[], token[]) send(
    add.232, after-all), channel_id=2, is_host_transfer=true
send-done.88 = token[] send-done(send.88), channel_id=2, is_host_transfer=true
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // Find the peeled copy of the send-done ("send-done.0" is the name the
  // pass gives the cloned instruction in the entry computation).
  auto* entry_comp = module->entry_computation();
  auto* unrolled_send_done = entry_comp->GetInstructionWithName("send-done.0");
  ASSERT_THAT(unrolled_send_done, ::testing::NotNull());
  auto* unrolled_send = unrolled_send_done->operand(0);
  auto channel_id = [](const HloInstruction* instr) {
    return DynCast<HloChannelInstruction>(instr)->channel_id();
  };
  // The cloned send and send-done must still share one channel id.
  EXPECT_EQ(channel_id(unrolled_send), channel_id(unrolled_send_done));
}
// Same loop as TransformIncrementIndexByOne, but with
// reuse_pipelined_op_buffer returning false: the pass must thread an
// extra buffer through the loop instead of reusing the pipelined op's
// buffer, growing the while tuple from 4 to 5 elements.
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNoReuse) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // acceptable_formatting accepts everything; reuse_pipelined_op_buffer
  // rejects everything, forcing the extra loop-carried buffer.
  EXPECT_TRUE(RunOptimizer(
                  module.get(), /*last_run=*/true, 0,
                  /*pipeline_use_tree=*/false,
                  /*process_different_sized_ops=*/true,
                  CollectivePipeliner::PipeliningDirection::kForward,
                  HloPredicateIsOp<HloOpcode::kAllReduce>,
                  /*acceptable_formatting=*/
                  [](const HloInstruction* i) { return true; },
                  /*reuse_pipelined_op_buffer=*/
                  [](const HloInstruction* i) { return false; })
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  HloInstruction* while_instr =
      FindInstruction(module.get(), HloOpcode::kWhile);
  // 3 original elements + index element + the non-reused buffer = 5.
  EXPECT_EQ(while_instr->shape().tuple_shapes_size(), 5);
}
// Same forward-pipelining pattern, but the sliced/updated dimension is
// dimension 1 instead of dimension 0; the loop-carried index must be
// used in operand slot 2 of the slice/update ops.
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNotFirstIdx) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[8,3,128], bf16[8,3,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[8,3,128], bf16[8,3,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[8,3,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[8,3,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[8,1,128] dynamic-slice(get-tuple-element.5, constant.2561, select.1348, constant.2561), dynamic_slice_sizes={8,1,128}
mul = bf16[8,1,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[8,1,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[8,3,128] dynamic-update-slice(get-tuple-element.395, ar.1, constant.2561, select.1348, constant.2561)
ROOT tuple = (s32[], bf16[8,3,128], bf16[8,3,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[8,3,128] parameter(0)
tuple = (s32[], bf16[8,3,128], bf16[8,3,128]) tuple(c0, p0, p0)
while = (s32[], bf16[8,3,128], bf16[8,3,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[8,3,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // Peeled all-reduce feeds the entry-level dynamic-update-slice.
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _))
  const HloInstruction* sliced = root->operand(1)->operand(0);
  EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
  // Dimension 1 is the sliced dimension, so the loop-carried index
  // (tuple element 3) appears as operand 2, not operand 1.
  const HloInstruction* index = sliced->operand(2);
  EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(index->tuple_index(), 3);
  const HloInstruction* while_inst = index->operand(0);
  EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
  const HloInstruction* while_root =
      while_inst->while_body()->root_instruction();
  EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
  const HloInstruction* dyn_upd = while_root->operand(1);
  EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
  EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* prev_ar = dyn_upd2->operand(1);
  EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
  const HloInstruction* dyn_slice_top = prev_ar->operand(0);
  EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
  const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
  const HloInstruction* get_tuple_index = dyn_slice_top->operand(2);
  EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(get_tuple_value->tuple_index(), 1);
  EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
// The induction variable steps by two rather than one; pipelining must
// still succeed, with the same peeled-then-rotated structure as the
// increment-by-one case.
TEST_F(CollectivePipelinerTest, TransformIncrementByTwo) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
  // Peeled last-iteration all-reduce writes into the buffer at the entry.
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_THAT(root, op::DynamicUpdateSlice(_, op::AllReduce(), _, _, _));
  const HloInstruction* sliced = root->operand(1)->operand(0);
  EXPECT_EQ(sliced->opcode(), HloOpcode::kDynamicSlice);
  // Index comes from the new loop-carried tuple element (index 3).
  const HloInstruction* index = sliced->operand(1);
  EXPECT_EQ(index->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(index->tuple_index(), 3);
  const HloInstruction* while_inst = index->operand(0);
  EXPECT_EQ(while_inst->opcode(), HloOpcode::kWhile);
  const HloInstruction* while_root =
      while_inst->while_body()->root_instruction();
  EXPECT_EQ(while_root->opcode(), HloOpcode::kTuple);
  const HloInstruction* dyn_upd = while_root->operand(1);
  EXPECT_EQ(dyn_upd->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* dyn_upd2 = dyn_upd->operand(0);
  EXPECT_EQ(dyn_upd2->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* prev_ar = dyn_upd2->operand(1);
  EXPECT_EQ(prev_ar->opcode(), HloOpcode::kAllReduce);
  const HloInstruction* dyn_slice_top = prev_ar->operand(0);
  EXPECT_EQ(dyn_slice_top->opcode(), HloOpcode::kDynamicSlice);
  const HloInstruction* get_tuple_value = dyn_slice_top->operand(0);
  const HloInstruction* get_tuple_index = dyn_slice_top->operand(1);
  EXPECT_EQ(get_tuple_value->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(get_tuple_index->opcode(), HloOpcode::kGetTupleElement);
  EXPECT_EQ(get_tuple_value->tuple_index(), 1);
  EXPECT_EQ(get_tuple_index->tuple_index(), 3);
}
// Negative case: the loop starts at -1 and runs 5 iterations over a
// 3-element buffer, so the select-based index wraps; the pass cannot
// prove monotonicity and must leave the module unchanged.
TEST_F(CollectivePipelinerTest, NoTransformCantProveIndexDoesntWrap) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(4)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-1)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // No transformation expected.
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true).value());
  XLA_VLOG_LINES(1, module->ToString());
}
// The loop counts from -3 up to 0. With last_run=false the pass wraps the
// peeled all-reduce in the kInsertedByPreviousStep custom-call marker
// (removed by a later last_run invocation — see the pass's cleanup code).
TEST_F(CollectivePipelinerTest, TransformNegativeIndexIterationToZero) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // Not the last run: the sentinel custom-call must remain in the graph.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false).value());
  XLA_VLOG_LINES(1, module->ToString());
  auto* root = module->entry_computation()->root_instruction();
  // Peeled all-reduce, wrapped in the marker custom-call, updates the
  // buffer produced by the while loop.
  EXPECT_THAT(root, op::DynamicUpdateSlice(
                        _,
                        op::CustomCall(op::AllReduce(op::DynamicSlice(
                                           op::GetTupleElement(op::While()),
                                           op::GetTupleElement(),
                                           op::Constant(), op::Constant())),
                                       op::Constant()),
                        op::GetTupleElement(), op::Constant(), op::Constant()));
}
// Negative test: the accumulation buffer (get-tuple-element.395) escapes the
// loop body through an extra tuple element (dynamic-slice.911 at output
// index 2), so moving the all-reduce out of the loop would change what that
// escaped value observes. The pipeliner must decline to transform.
TEST_F(CollectivePipelinerTest, EscapedInputNoTransform) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.911 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-slice.911, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,8,128] parameter(0)
cc = bf16[] constant(0)
c1 = bf16[1,8,128] broadcast(cc), dimensions={}
c2 = bf16[3,8,128] broadcast(cc), dimensions={}
tuple = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) tuple(c0, p0, c1, c2)
while = (s32[], bf16[3,8,128], bf16[1,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
XLA_VLOG_LINES(1, module->ToString());
// The pass must report that it made no change.
EXPECT_FALSE(RunOptimizer(module.get(), true).value());
}
// Forward pipelining where the target collective is an all-gather (selected
// via the IsAllGather predicate) that consumes a reduce-scatter result. Only
// the all-gather should be hoisted out of the loop; the reduce-scatter stays
// inside.
TEST_F(CollectivePipelinerTest, TransformWithAg) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
rs.1 = bf16[1,1,128] reduce-scatter(mul), replica_groups={}, to_apply=add, channel_id=1, dimensions={1}
ag.1 = bf16[1,8,128] all-gather(rs.1), replica_groups={}, channel_id=2, dimensions={1}
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ag.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,8,128] parameter(0)
cc = bf16[] constant(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Forward direction, predicate restricted to all-gathers; the other bool
// flags presumably toggle last-run / size handling — confirm against the
// RunOptimizer helper declared earlier in this file.
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
false,
true,
CollectivePipeliner::PipeliningDirection::kForward,
IsAllGather)
.value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
// The peeled all-gather now operates on the loop output in the entry.
EXPECT_THAT(root, op::DynamicUpdateSlice(
_, op::AllGather(op::GetTupleElement(op::While())),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
// Same as TransformWithAg but with formatting ops around the collective: the
// [1,9,128] value is padded to [1,16,128] before the reduce-scatter and
// sliced back to [1,9,128] after the all-gather. The pipeliner must carry the
// pad/slice formatting along when hoisting the all-gather.
TEST_F(CollectivePipelinerTest, TransformWithAgWithFormatting) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,9,128], bf16[3,9,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,9,128], bf16[3,9,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,9,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,9,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,9,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,9,128}
mul = bf16[1,9,128] multiply(dynamic-slice.99, dynamic-slice.99)
cpd = bf16[] constant(0)
%pd = bf16[1,16,128] pad(mul, cpd), padding=0_0x0_7x0_0
rs.1 = bf16[1,2,128] reduce-scatter(pd), replica_groups={}, to_apply=add, channel_id=1, dimensions={1}
ag.1 = bf16[1,16,128] all-gather(rs.1), replica_groups={}, channel_id=2, dimensions={1}
slc = bf16[1,9,128] slice(ag.1), slice={[0:1], [0:9], [0:128]}
dynamic-update-slice.35 = bf16[3,9,128] dynamic-update-slice(get-tuple-element.395, slc, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,9,128], bf16[3,9,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-3)
p0 = bf16[3,9,128] parameter(0)
cc = bf16[] constant(0)
tuple = (s32[], bf16[3,9,128], bf16[3,9,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,9,128], bf16[3,9,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,9,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
false,
true,
CollectivePipeliner::PipeliningDirection::kForward,
IsAllGather)
.value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
// The hoisted all-gather keeps its trailing slice (the "formatting" op).
EXPECT_THAT(root,
op::DynamicUpdateSlice(
_, op::Slice(op::AllGather(op::GetTupleElement(op::While()))),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
// Two-stage run: the first, non-final invocation (second arg false) pipelines
// the all-gather and is expected to insert the pass's bookkeeping custom-call;
// a second invocation at level 1 then finalizes. The loop here runs 8
// iterations (counter from -8 up to 0) and indexes directly with the
// induction variable rather than the select-based wraparound index.
TEST_F(CollectivePipelinerTest, TransformWithAgInsertCustomCall) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(0)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
constant.2561 = s32[] constant(0)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, get-tuple-element.394, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
rs.1 = bf16[1,1,128] reduce-scatter(mul), replica_groups={}, to_apply=add, channel_id=1, dimensions={1}
ag.1 = bf16[1,8,128] all-gather(rs.1), replica_groups={}, channel_id=2, dimensions={1}
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ag.1, get-tuple-element.394, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(-8)
p0 = bf16[3,8,128] parameter(0)
cc = bf16[] constant(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), false, 0,
false,
true,
CollectivePipeliner::PipeliningDirection::kForward,
IsAllGather)
.value());
XLA_VLOG_LINES(1, module->ToString());
// Second pass at level 1; its return value is intentionally ignored — only
// the final structure is asserted below.
RunOptimizer(module.get(), true, 1).value();
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, op::DynamicUpdateSlice(
_, op::AllGather(op::GetTupleElement(op::While())),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
// Input module is already the result of one peeling step: the body contains
// the "InsertedByPreviousStep" custom-call carrying the previous iteration's
// index, and the entry computation holds the peeled first iteration. Running
// at level 1 should push the remaining all-gather out over the loop as well.
TEST_F(CollectivePipelinerTest, PushAgOver) {
constexpr absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(bf16[3,8,128]{2,1,0})->bf16[3,8,128]{2,1,0}}
%add (lhs: bf16[], rhs: bf16[]) -> bf16[] {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %lhs, bf16[] %rhs)
}
%while_body.clone (loop_peel_param: (s32[], bf16[3,8,128], s32[])) -> (s32[], bf16[3,8,128], s32[]) {
%loop_peel_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%get-tuple-element.2 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=0
%constant.7 = s32[] constant(1)
%add.4 = s32[] add(s32[] %get-tuple-element.2, s32[] %constant.7)
%get-tuple-element.3 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=1
%get-tuple-element.4 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=2
%constant.12 = s64[] constant(1)
%custom-call = s32[] custom-call(s32[] %get-tuple-element.4, s64[] %constant.12), custom_call_target="InsertedByPreviousStep"
%constant.13 = s32[] constant(0)
%constant.10 = s32[] constant(0)
%dynamic-slice.2 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13), dynamic_slice_sizes={1,8,128}
%ar.2 = bf16[1,1,128]{2,1,0} reduce-scatter(bf16[1,8,128]{2,1,0} %dynamic-slice.2), channel_id=2, replica_groups={}, to_apply=%add, dimensions={1}
%ag.2 = bf16[1,8,128]{2,1,0} all-gather(bf16[1,1,128]{2,1,0} %ar.2), channel_id=32, replica_groups={}, dimensions={1}
%dynamic-update-slice.2 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, bf16[1,8,128]{2,1,0} %ag.2, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13)
%dynamic-slice.1 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %get-tuple-element.2, s32[] %constant.10, s32[] %constant.10), dynamic_slice_sizes={1,8,128}
%mul.2 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.1, bf16[1,8,128]{2,1,0} %dynamic-slice.1)
%constant.15 = s32[] constant(0)
%dynamic-update-slice.4 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %dynamic-update-slice.2, bf16[1,8,128]{2,1,0} %mul.2, s32[] %get-tuple-element.2, s32[] %constant.15, s32[] %constant.15)
ROOT %tuple.3 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.4, bf16[3,8,128]{2,1,0} %dynamic-update-slice.4, s32[] %get-tuple-element.2)
}
%while_cond.clone (loop_peel_cond_param: (s32[], bf16[3,8,128], s32[])) -> pred[] {
%loop_peel_cond_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%gte.1 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_cond_param), index=0
%constant.6 = s32[] constant(0)
ROOT %cmp.1 = pred[] compare(s32[] %gte.1, s32[] %constant.6), direction=LT
}
ENTRY %entry (p0: bf16[3,8,128]) -> bf16[3,8,128] {
%c0 = s32[] constant(-3)
%p0 = bf16[3,8,128]{2,1,0} parameter(0)
%tuple.1 = (s32[], bf16[3,8,128]{2,1,0}) tuple(s32[] %c0, bf16[3,8,128]{2,1,0} %p0)
%get-tuple-element.0 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=0
%constant.0 = s32[] constant(1)
%constant.4 = s32[] constant(0)
%add.1 = s32[] add(s32[] %get-tuple-element.0, s32[] %constant.0)
%get-tuple-element.1 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=1
%dynamic-slice.0 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4), dynamic_slice_sizes={1,8,128}
%mul.1 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.0, bf16[1,8,128]{2,1,0} %dynamic-slice.0)
%dynamic-update-slice.0 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, bf16[1,8,128]{2,1,0} %mul.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4)
%tuple.4 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.1, bf16[3,8,128]{2,1,0} %dynamic-update-slice.0, s32[] %get-tuple-element.0)
%while.1 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) while((s32[], bf16[3,8,128]{2,1,0}, s32[]) %tuple.4), condition=%while_cond.clone, body=%while_body.clone
%get-tuple-element.6 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=1
%get-tuple-element.5 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=2
%constant.14 = s32[] constant(0)
%dynamic-slice.3 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14), dynamic_slice_sizes={1,8,128}
%ar.3 = bf16[1,8,128]{2,1,0} all-reduce(bf16[1,8,128]{2,1,0} %dynamic-slice.3), channel_id=3, replica_groups={}, to_apply=%add
ROOT %dynamic-update-slice.3 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, bf16[1,8,128]{2,1,0} %ar.3, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true, 1,
false,
true,
CollectivePipeliner::PipeliningDirection::kForward,
IsAllGather)
.value());
XLA_VLOG_LINES(1, module->ToString());
auto* root = module->entry_computation()->root_instruction();
// After the second pipelining step the entry holds both the pushed-out
// all-gather and the pre-existing peeled all-reduce.
EXPECT_THAT(
root,
op::DynamicUpdateSlice(
op::DynamicUpdateSlice(_, op::AllGather(), _, _, _),
op::AllReduce(op::DynamicSlice(op::DynamicUpdateSlice(), _, _, _)),
op::GetTupleElement(), op::Constant(), op::Constant()));
}
// Same already-peeled module as PushAgOver, but the optimizer is invoked with
// the differently-sized-ops handling disabled (and non-final run). Since the
// all-gather changes the operand size (bf16[1,1,128] -> bf16[1,8,128]), the
// pass must decline to pipeline it.
TEST_F(CollectivePipelinerTest, NoPushAgOverBecauseDifferentSize) {
constexpr absl::string_view hlo_string = R"(
HloModule module, entry_computation_layout={(bf16[3,8,128]{2,1,0})->bf16[3,8,128]{2,1,0}}
%add (lhs: bf16[], rhs: bf16[]) -> bf16[] {
%lhs = bf16[] parameter(0)
%rhs = bf16[] parameter(1)
ROOT %add = bf16[] add(bf16[] %lhs, bf16[] %rhs)
}
%while_body.clone (loop_peel_param: (s32[], bf16[3,8,128], s32[])) -> (s32[], bf16[3,8,128], s32[]) {
%loop_peel_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%get-tuple-element.2 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=0
%constant.7 = s32[] constant(1)
%add.4 = s32[] add(s32[] %get-tuple-element.2, s32[] %constant.7)
%get-tuple-element.3 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=1
%get-tuple-element.4 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_param), index=2
%constant.12 = s64[] constant(1)
%custom-call = s32[] custom-call(s32[] %get-tuple-element.4, s64[] %constant.12), custom_call_target="InsertedByPreviousStep"
%constant.13 = s32[] constant(0)
%constant.10 = s32[] constant(0)
%dynamic-slice.2 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13), dynamic_slice_sizes={1,8,128}
%ar.2 = bf16[1,1,128]{2,1,0} reduce-scatter(bf16[1,8,128]{2,1,0} %dynamic-slice.2), channel_id=2, replica_groups={}, to_apply=%add, dimensions={1}
%ag.2 = bf16[1,8,128]{2,1,0} all-gather(bf16[1,1,128]{2,1,0} %ar.2), channel_id=32, replica_groups={}, dimensions={1}
%dynamic-update-slice.2 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, bf16[1,8,128]{2,1,0} %ag.2, s32[] %custom-call, s32[] %constant.13, s32[] %constant.13)
%dynamic-slice.1 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.3, s32[] %get-tuple-element.2, s32[] %constant.10, s32[] %constant.10), dynamic_slice_sizes={1,8,128}
%mul.2 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.1, bf16[1,8,128]{2,1,0} %dynamic-slice.1)
%constant.15 = s32[] constant(0)
%dynamic-update-slice.4 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %dynamic-update-slice.2, bf16[1,8,128]{2,1,0} %mul.2, s32[] %get-tuple-element.2, s32[] %constant.15, s32[] %constant.15)
ROOT %tuple.3 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.4, bf16[3,8,128]{2,1,0} %dynamic-update-slice.4, s32[] %get-tuple-element.2)
}
%while_cond.clone (loop_peel_cond_param: (s32[], bf16[3,8,128], s32[])) -> pred[] {
%loop_peel_cond_param = (s32[], bf16[3,8,128]{2,1,0}, s32[]) parameter(0)
%gte.1 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %loop_peel_cond_param), index=0
%constant.6 = s32[] constant(0)
ROOT %cmp.1 = pred[] compare(s32[] %gte.1, s32[] %constant.6), direction=LT
}
ENTRY %entry (p0: bf16[3,8,128]) -> bf16[3,8,128] {
%c0 = s32[] constant(-3)
%p0 = bf16[3,8,128]{2,1,0} parameter(0)
%tuple.1 = (s32[], bf16[3,8,128]{2,1,0}) tuple(s32[] %c0, bf16[3,8,128]{2,1,0} %p0)
%get-tuple-element.0 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=0
%constant.0 = s32[] constant(1)
%constant.4 = s32[] constant(0)
%add.1 = s32[] add(s32[] %get-tuple-element.0, s32[] %constant.0)
%get-tuple-element.1 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}) %tuple.1), index=1
%dynamic-slice.0 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4), dynamic_slice_sizes={1,8,128}
%mul.1 = bf16[1,8,128]{2,1,0} multiply(bf16[1,8,128]{2,1,0} %dynamic-slice.0, bf16[1,8,128]{2,1,0} %dynamic-slice.0)
%dynamic-update-slice.0 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.1, bf16[1,8,128]{2,1,0} %mul.1, s32[] %get-tuple-element.0, s32[] %constant.4, s32[] %constant.4)
%tuple.4 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) tuple(s32[] %add.1, bf16[3,8,128]{2,1,0} %dynamic-update-slice.0, s32[] %get-tuple-element.0)
%while.1 = (s32[], bf16[3,8,128]{2,1,0}, s32[]) while((s32[], bf16[3,8,128]{2,1,0}, s32[]) %tuple.4), condition=%while_cond.clone, body=%while_body.clone
%get-tuple-element.6 = bf16[3,8,128]{2,1,0} get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=1
%get-tuple-element.5 = s32[] get-tuple-element((s32[], bf16[3,8,128]{2,1,0}, s32[]) %while.1), index=2
%constant.14 = s32[] constant(0)
%dynamic-slice.3 = bf16[1,8,128]{2,1,0} dynamic-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14), dynamic_slice_sizes={1,8,128}
%ar.3 = bf16[1,8,128]{2,1,0} all-reduce(bf16[1,8,128]{2,1,0} %dynamic-slice.3), channel_id=3, replica_groups={}, to_apply=%add
ROOT %dynamic-update-slice.3 = bf16[3,8,128]{2,1,0} dynamic-update-slice(bf16[3,8,128]{2,1,0} %get-tuple-element.6, bf16[1,8,128]{2,1,0} %ar.3, s32[] %get-tuple-element.5, s32[] %constant.14, s32[] %constant.14)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Identical setup to PushAgOver except the size-changing-collective flag is
// false here, which should block the transformation.
EXPECT_FALSE(RunOptimizer(module.get(), false, 1,
false,
false,
CollectivePipeliner::PipeliningDirection::kForward,
IsAllGather)
.value());
XLA_VLOG_LINES(1, module->ToString());
}
// Forward pipelining with a stride-2 induction variable and a formatting op
// after the collective: the all-reduce produces [1,16,128] which is then
// dynamic-sliced down to [1,8,128] before the accumulating
// dynamic-update-slice. The slice must travel with the hoisted all-reduce.
TEST_F(CollectivePipelinerTest, TransformIncrementByTwoFormat) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,16,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,16,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,16,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,16,128] dynamic-slice(get-tuple-element.396, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,16,128}
mul = bf16[1,16,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,16,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
ds.1 = bf16[1,8,128] dynamic-slice(ar.1, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ds.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,16,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.396)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,16,128] parameter(0)
c1 = bf16[] constant(0)
b1 = bf16[3,8,128] broadcast(c1), dimensions={}
tuple = (s32[], bf16[3,8,128], bf16[3,16,128]) tuple(c0, b1, p0)
while = (s32[], bf16[3,8,128], bf16[3,16,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
// The peeled all-reduce in the entry keeps its trailing dynamic-slice.
EXPECT_THAT(
root,
op::DynamicUpdateSlice(
_, op::DynamicSlice(op::AllReduce(op::GetTupleElement()), _, _, _), _,
_, _));
}
// Variant of TransformIncrementByTwoFormat where the post-collective
// formatting is a reshape -> transpose -> reshape chain instead of a slice.
// The whole chain must be carried along when the all-reduce is hoisted out of
// the stride-2 loop.
TEST_F(CollectivePipelinerTest, TransformIncrementByTwoFormatTranspose) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,16,128], bf16[3,16,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,16,128], bf16[3,16,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,16,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,16,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(2)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,16,128] dynamic-slice(get-tuple-element.396, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,16,128}
mul = bf16[1,16,128] multiply(dynamic-slice.99, dynamic-slice.99)
reshape.1 = bf16[2,16,64] reshape(mul)
ar.1 = bf16[2,16,64] all-reduce(reshape.1), replica_groups={}, to_apply=add, channel_id=1
transpose.1 = bf16[64,2,16] transpose(ar.1), dimensions={2,0,1}
reshape.2 = bf16[1,16,128] reshape(transpose.1)
dynamic-update-slice.35 = bf16[3,16,128] dynamic-update-slice(get-tuple-element.395, reshape.2, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,16,128], bf16[3,16,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.396)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,16,128] parameter(0)
c1 = bf16[] constant(0)
b1 = bf16[3,16,128] broadcast(c1), dimensions={}
tuple.1 = (s32[], bf16[3,16,128], bf16[3,16,128]) tuple(c0, b1, p0)
while = (s32[], bf16[3,16,128], bf16[3,16,128]) while(tuple.1), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,16,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true).value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
// Hoisted all-reduce keeps its transpose+reshape formatting chain.
EXPECT_THAT(
root,
op::DynamicUpdateSlice(
_, op::Reshape(op::Transpose(op::AllReduce(op::GetTupleElement()))),
_, _, _));
}
// Backward pipelining (kBackward): the all-gather `ag`, which only depends on
// a loop-invariant input slice, should be hoisted so it overlaps earlier
// iterations. The HLO carries explicit control dependencies (on `a` and on
// the body ROOT); the test verifies those dependencies survive the rewrite —
// both on the tuple feeding the transformed while and on the new loop body's
// root.
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneBackwards) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
a = bf16[1,2,128] add(r, r), control-predecessors={constant.2559}
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value()
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
false,
false,
CollectivePipeliner::PipeliningDirection::kBackward,
IsAllGather)
.value());
XLA_VLOG_LINES(1, module->ToString());
// Backward pipelining must not duplicate the loop: still exactly one while.
const int64_t while_count = absl::c_count_if(
module->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return HloPredicateIsOp<HloOpcode::kWhile>(instruction);
});
EXPECT_EQ(while_count, 1);
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
// The hoisted computation's control dependency (on the `add` clone) must be
// preserved on the tuple feeding the new while...
const HloInstruction* tuple = while_instr->operand(0);
EXPECT_TRUE(tuple->HasControlDependencies())
EXPECT_EQ(tuple->control_predecessors().size(), 1);
const HloInstruction* add_instr = tuple->control_predecessors()[0];
EXPECT_EQ(add_instr->opcode(), HloOpcode::kAdd);
// ...and on the rewritten loop body's root as well.
const HloComputation* comp = while_instr->while_body();
const HloInstruction* root_loop = comp->root_instruction();
EXPECT_TRUE(root_loop->HasControlDependencies());
EXPECT_EQ(root_loop->control_predecessors().size(), 1);
const HloInstruction* add_instr_loop = root_loop->control_predecessors()[0];
EXPECT_EQ(add_instr_loop->opcode(), HloOpcode::kAdd);
}
// Backward pipelining where the chain to hoist contains TWO ops matched by
// the predicate: a "MoveToDevice" offloading custom-call feeding the
// all-gather. Both must be cloned for the peeled iteration, so the test
// checks for two custom-call clones and two all-gather clones by name, in
// addition to the control-dependency checks shared with the previous test.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneBackwardsWithTwoDependentClones) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
constant.2561 = s32[] constant(0)
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
r = bf16[1,2,128] reshape(dynamic-slice.k)
custom-call = bf16[1,2,128] custom-call(r), custom_call_target="MoveToDevice"
a = bf16[1,2,128] add(custom-call, custom-call), control-predecessors={constant.2559}
ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,1,2,128] parameter(1)
tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Predicate matches both the all-gather and host-offloading custom-calls so
// the whole chain is eligible for backward pipelining.
auto is_all_gather_or_offloading = [](const HloInstruction* instruction) {
return instruction->opcode() == HloOpcode::kAllGather ||
instruction->IsCustomCall(
host_memory_offload_annotations::kMoveToDeviceCustomCallTarget);
};
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
false,
false,
CollectivePipeliner::PipeliningDirection::kBackward,
is_all_gather_or_offloading)
.value());
XLA_VLOG_LINES(1, module->ToString());
const int64_t while_count = absl::c_count_if(
module->entry_computation()->instructions(),
[](const HloInstruction* instruction) {
return HloPredicateIsOp<HloOpcode::kWhile>(instruction);
});
EXPECT_EQ(while_count, 1);
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
// Control dependencies must be preserved on the tuple feeding the while...
const HloInstruction* tuple = while_instr->operand(0);
EXPECT_TRUE(tuple->HasControlDependencies());
EXPECT_EQ(tuple->control_predecessors().size(), 1);
const HloInstruction* add_instr = tuple->control_predecessors()[0];
EXPECT_EQ(add_instr->opcode(), HloOpcode::kAdd);
// ...and on the rewritten loop body's root.
const HloComputation* comp = while_instr->while_body();
const HloInstruction* root_loop = comp->root_instruction();
EXPECT_TRUE(root_loop->HasControlDependencies());
EXPECT_EQ(root_loop->control_predecessors().size(), 1);
const HloInstruction* add_instr_loop = root_loop->control_predecessors()[0];
EXPECT_EQ(add_instr_loop->opcode(), HloOpcode::kAdd);
// Both the offloading custom-call and the all-gather were cloned (suffixes
// .1/.2 are the pipeliner's clone names).
EXPECT_NE(FindInstruction(module.get(), "custom-call.1"), nullptr);
EXPECT_NE(FindInstruction(module.get(), "custom-call.2"), nullptr);
EXPECT_NE(FindInstruction(module.get(), "ag.1"), nullptr);
EXPECT_NE(FindInstruction(module.get(), "ag.2"), nullptr);
}
// Backward-pipelines the all-gather in a loop that also contains a
// collective-permute carrying an _xla_send_recv_validation frontend
// attribute over a forward cycle ({0,1},...,{7,0}); pipelining peels one
// iteration, so the validation intervals must remain consistent afterwards.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneBackwardsCollectivePermute) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(14)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
  cp = bf16[3,8,128] collective-permute(get-tuple-element.395), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,0}},
    frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9},{4,10},{5,11},{6,12},{7,13}}"}
  constant.2561 = s32[] constant(0)
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(14)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(13)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
  r = bf16[1,2,128] reshape(dynamic-slice.k)
  a = bf16[1,2,128] add(r, r), control-predecessors={constant.2559}
  ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(cp, ar.1, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  p1 = bf16[3,1,2,128] parameter(1)
  tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
  while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  config_.set_num_partitions(8);
  config_.set_replica_count(4);
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // Pipeline the all-gather backwards; only same-sized ops, single level.
  EXPECT_TRUE(
      RunOptimizer(
          module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
          /*pipeline_use_tree=*/false,
          /*process_different_sized_ops=*/false,
          CollectivePipeliner::PipeliningDirection::kBackward,
          IsAllGather)
          .value());
  XLA_VLOG_LINES(1, module->ToString());
  // NOTE(review): the FileCheck pattern below is empty, so this assertion
  // does not constrain the transformed module; the CHECK directives appear
  // to be missing — confirm against the intended expectations.
  EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
// Same as the test above but the collective-permute forms a *backward*
// cycle ({0,7},{1,0},...,{7,6}) with correspondingly reversed
// _xla_send_recv_validation intervals; backward pipelining of the
// all-gather must still succeed and keep the validation data consistent.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneBackwardsCollectivePermuteBackwardCycle) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(14)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
  cp = bf16[3,8,128] collective-permute(get-tuple-element.395), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}},
    frontend_attributes={_xla_send_recv_validation="{{7,13},{6,12},{5,11},{4,10},{3,9},{2,8},{1,7},{0,6}}"}
  constant.2561 = s32[] constant(0)
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(14)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(13)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
  r = bf16[1,2,128] reshape(dynamic-slice.k)
  a = bf16[1,2,128] add(r, r), control-predecessors={constant.2559}
  ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(cp, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=2
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(cp, ar.1, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k), control-predecessors={a}
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  p1 = bf16[3,1,2,128] parameter(1)
  tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
  while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  config_.set_num_partitions(8);
  config_.set_replica_count(4);
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // Pipeline the all-gather backwards; only same-sized ops, single level.
  EXPECT_TRUE(
      RunOptimizer(
          module.get(), /*last_run=*/true, /*level_to_operate_on=*/0,
          /*pipeline_use_tree=*/false,
          /*process_different_sized_ops=*/false,
          CollectivePipeliner::PipeliningDirection::kBackward,
          IsAllGather)
          .value());
  XLA_VLOG_LINES(1, module->ToString());
  // NOTE(review): the FileCheck pattern below is empty, so this assertion
  // does not constrain the transformed module; the CHECK directives appear
  // to be missing — confirm against the intended expectations.
  EXPECT_TRUE(*RunFileCheck(module->ToString(), R"(
)"));
}
// Negative test for backward pipelining: the loop overwrites the buffer
// that feeds the all-gather chain (tuple element 2 is replaced by a fresh
// broadcast `b` each iteration instead of being passed through), so the
// chain is not loop-invariant and the pipeliner must refuse to transform.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneBackwardsModifyOut) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
  constant.2561 = s32[] constant(0)
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
  r = bf16[1,2,128] reshape(dynamic-slice.k)
  a = bf16[1,2,128] add(r, r)
  ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
  constant.10 = bf16[] constant(0)
  b = bf16[3,1,2,128] broadcast(constant.10), dimensions={}
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, b)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  p1 = bf16[3,1,2,128] parameter(1)
  tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
  while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // The optimizer must report that no change was made.
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
                            /*level_to_operate_on=*/0,
                            /*pipeline_use_tree=*/false,
                            /*process_different_sized_ops=*/false,
                            CollectivePipeliner::PipeliningDirection::kBackward,
                            IsAllGather)
                   .value());
  XLA_VLOG_LINES(1, module->ToString());
}
// Runs the pipeliner twice on the same loop: first backwards to pipeline
// the all-gather feeding the multiply, then forwards (as the last run) to
// pipeline the all-reduce accumulated through the dynamic-update-slice.
// Both invocations must report a change.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneBackwardsPlusForward) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
  get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=3
  constant.2561 = s32[] constant(0)
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
  r = bf16[1,2,128] reshape(dynamic-slice.k)
  a = bf16[1,2,128] add(r, r)
  ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k, get-tuple-element.5)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  p1 = bf16[3,1,2,128] parameter(1)
  tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) tuple(c0, p0, p1, p0)
  while = (s32[], bf16[3,8,128], bf16[3,1,2,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // First pass: backward-pipeline the all-gather (not the last run, so
  // intermediate artifacts may remain for the forward pass to consume).
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           IsAllGather)
                  .value())
  XLA_VLOG_LINES(1, module->ToString());
  // Second pass: forward-pipeline the all-reduce as the last run.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
}
// Same backward-then-forward two-pass pipelining as the previous test, but
// the accumulated buffer is f32 while the collective chain computes in
// bf16, so the chain includes convert ops (cvt.0 before the multiply and
// cvt.1 after the all-reduce) that the pipeliner must handle.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneBackwardsPlusForwardConvertOutput) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = f32[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
  get-tuple-element.5 = f32[3,8,128] get-tuple-element(param), index=3
  constant.2561 = s32[] constant(0)
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(get-tuple-element.k, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
  r = bf16[1,2,128] reshape(dynamic-slice.k)
  a = bf16[1,2,128] add(r, r)
  ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
  dynamic-slice.99 = f32[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  cvt.0 = bf16[1,8,128] convert(dynamic-slice.99)
  mul = bf16[1,8,128] multiply(cvt.0, ag)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  cvt.1 = f32[1,8,128] convert(ar.1)
  dynamic-update-slice.35 = f32[3,8,128] dynamic-update-slice(get-tuple-element.395, cvt.1, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k, get-tuple-element.5)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = f32[3,8,128] parameter(0)
  p1 = bf16[3,1,2,128] parameter(1)
  tuple = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) tuple(c0, p0, p1, p0)
  while = (s32[], f32[3,8,128], bf16[3,1,2,128], f32[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = f32[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // First pass: backward-pipeline the all-gather.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kBackward,
                           IsAllGather)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  // Second pass: forward-pipeline the all-reduce as the last run.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/false,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
}
// Forward pipelining with pipeline_use_tree enabled: the all-reduce result
// feeds a tree of elementwise ops (mul2, mul3, mul4) with multiple uses of
// ar.1 before reaching the dynamic-update-slice; the whole use tree must be
// pipelined along with the collective.
TEST_F(CollectivePipelinerTest, MultiUsesElementwise) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  constant.2561 = s32[] constant(0)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
  c2 = bf16[] constant(2.0)
  bc = bf16[1,8,128] broadcast(c2)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  mul2 = bf16[1,8,128] multiply(ar.1, bc)
  mul3 = bf16[1,8,128] multiply(mul2, ar.1)
  mul4 = bf16[1,8,128] multiply(mul3, mul)
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
  while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // Forward pipelining with the use-tree option on.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
}
// Like MultiUsesElementwise, but the formatting ops consuming the
// all-reduce include two distinct negates of the same value that later
// merge (negate.1 * negate.2); exercises correct ordering/deduplication of
// formatting ops when the use tree is pipelined forward.
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseSortFormattingOps) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  constant.2561 = s32[] constant(0)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
  c2 = bf16[] constant(2.0)
  bc = bf16[1,8,128] broadcast(c2)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  negate.1 = bf16[1,8,128] negate(ar.1)
  negate.2 = bf16[1,8,128] negate(ar.1)
  add = bf16[1,8,128] multiply(negate.1, negate.2)
  mul3 = bf16[1,8,128] multiply(add, add)
  mul4 = bf16[1,8,128] multiply(mul3, mul)
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
  while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // Forward pipelining with the use-tree option on.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
}
// Forward pipelining where the all-reduce result is combined elementwise
// with a value computed inside the loop (mul2 = ar.1 * mul) before the
// dynamic-update-slice; the single elementwise user must be pipelined with
// the collective when pipeline_use_tree is enabled.
TEST_F(CollectivePipelinerTest, ElementWiseUser) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  constant.2561 = s32[] constant(0)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  mul2 = bf16[1,8,128] multiply(ar.1, mul)
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul2, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
  while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // Forward pipelining with the use-tree option on.
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::PipeliningDirection::kForward)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
}
// Forward-sink pipelining (kForwardSink) of the all-reduce: after the
// transformation, the new while body's root must keep the control
// dependency that the original root had (rebased onto the corresponding
// select instruction in the new body).
TEST_F(CollectivePipelinerTest, TransformIncrementIndexByOneNotFirstIdxSink) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  constant.2561 = s32[] constant(0)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  %c = bf16[] custom-call(), custom_call_target="Boh"
  %b = bf16[1,8,128] broadcast(c), dimensions={}
  %a = bf16[1,8,128] add(ar.1, b)
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, a, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35), control-predecessors={select.1348}
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
  while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/true,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  const HloInstruction* while_instr =
      FindInstruction(module.get(), HloOpcode::kWhile);
  const HloComputation* comp = while_instr->while_body();
  const HloInstruction* root_loop = comp->root_instruction();
  // The original root carried control-predecessors={select.1348}; the
  // transformed loop root must still have exactly one control predecessor,
  // and it must be the select.
  EXPECT_TRUE(root_loop->HasControlDependencies());
  EXPECT_EQ(root_loop->control_predecessors().size(), 1);
  const HloInstruction* select_instr_loop =
      root_loop->control_predecessors()[0];
  EXPECT_EQ(select_instr_loop->opcode(), HloOpcode::kSelect);
}
// Forward-sink pipelining with last_run=false: the all-reduce must be sunk
// out of the loop, now operating on the full accumulated buffer (leading
// dimension 3 instead of the per-iteration 1). Verified by walking the
// operand chain from the entry root down to the sunk all-reduce.
TEST_F(CollectivePipelinerTest,
       TransformIncrementIndexByOneNotFirstIdxSinkCustomCall) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  constant.2561 = s32[] constant(0)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  %c = bf16[] custom-call(), custom_call_target="Boh"
  %b = bf16[1,8,128] broadcast(c), dimensions={}
  %a = bf16[1,8,128] add(ar.1, b)
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, a, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
  while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value()
  EXPECT_TRUE(RunOptimizer(module.get(), /*last_run=*/false,
                           /*level_to_operate_on=*/0,
                           /*pipeline_use_tree=*/true,
                           /*process_different_sized_ops=*/true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  // Walk from the entry root to the sunk all-reduce; it now consumes the
  // whole accumulation buffer, so its leading dimension is 3.
  const HloInstruction* all_reduce = module->entry_computation()
                                         ->root_instruction()
                                         ->operand(0)
                                         ->operand(1)
                                         ->operand(0)
                                         ->operand(0);
  EXPECT_EQ(all_reduce->opcode(), HloOpcode::kAllReduce);
  EXPECT_EQ(all_reduce->shape().dimensions(0), 3);
}
// Negative test for backward pipelining: the chain feeding the all-gather
// goes through a recv/recv-done pair, and send/recv ops cannot be hoisted
// out of the loop, so the pipeliner must refuse to transform.
TEST_F(CollectivePipelinerTest, NotTransformAllGatherWithRecvInChainBackwards) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
  lhs = bf16[] parameter(0)
  rhs = bf16[] parameter(1)
  ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  gte = s32[] get-tuple-element(param), index=0
  constant.1 = s32[] constant(3)
  ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
  param = (s32[], bf16[3,8,128], bf16[3,1,2,128]) parameter(0)
  get-tuple-element.394 = s32[] get-tuple-element(param), index=0
  get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
  get-tuple-element.k = bf16[3,1,2,128] get-tuple-element(param), index=2
  constant.2561 = s32[] constant(0)
  constant.2557 = s32[] constant(1)
  add.230 = s32[] add(get-tuple-element.394, constant.2557)
  constant.2559 = s32[] constant(3)
  subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
  constant.2560 = s32[] constant(-1)
  add.231 = s32[] add(subtract.139, constant.2560)
  compare.747 = pred[] compare(add.231, constant.2561), direction=LT
  constant.2562 = s32[] constant(2)
  add.232 = s32[] add(subtract.139, constant.2562)
  select.1348 = s32[] select(compare.747, add.232, add.231)
  after-all = token[] after-all()
  recv = (bf16[1,1,2,128], u32[], token[]) recv(after-all), channel_id=2, frontend_attributes={
    _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}"
  }
  send = (bf16[1,1,2,128], u32[], token[]) send(get-tuple-element.k, after-all), channel_id=2, frontend_attributes={
    _xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}"
  }
  send-done = token[] send-done(send), channel_id=2
  recv-done = (bf16[1,1,2,128], token[]) recv-done(recv), channel_id=2
  recv-data = bf16[1,1,2,128] get-tuple-element(recv-done), index=0
  dynamic-slice.k = bf16[1,1,2,128] dynamic-slice(recv-data, select.1348, constant.2561, constant.2561, constant.2561), dynamic_slice_sizes={1,1,2,128}
  r = bf16[1,2,128] reshape(dynamic-slice.k)
  a = bf16[1,2,128] add(r, r)
  ag = bf16[1,8,128] all-gather(a), dimensions={1}, replica_groups={}
  dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.395, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
  mul = bf16[1,8,128] multiply(dynamic-slice.99, ag)
  ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
  dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, ar.1, select.1348, constant.2561, constant.2561)
  ROOT tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.k)
}
ENTRY entry {
  c0 = s32[] constant(0)
  p0 = bf16[3,8,128] parameter(0)
  p1 = bf16[3,1,2,128] parameter(1)
  tuple = (s32[], bf16[3,8,128], bf16[3,1,2,128]) tuple(c0, p0, p1)
  while = (s32[], bf16[3,8,128], bf16[3,1,2,128]) while(tuple), condition=while_cond, body=while_body
  ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // The optimizer must report that no change was made.
  EXPECT_FALSE(RunOptimizer(module.get(), /*last_run=*/true,
                            /*level_to_operate_on=*/0,
                            /*pipeline_use_tree=*/false,
                            /*process_different_sized_ops=*/false,
                            CollectivePipeliner::PipeliningDirection::kBackward,
                            IsAllGather)
                   .value());
}
// Pipelines a non-host-transfer recv-done (and the matching send/send-done
// chain) backward out of a 25-iteration while loop, then checks that the
// peeled and rotated copies ("recv.1"/"recv.2", "send.1"/"send.2") all share
// one channel id.
TEST_F(CollectivePipelinerTest, TransformRecvSendBackwards) {
constexpr absl::string_view hlo_string = R"(
HloModule module
cond {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
ub = u32[] constant(25)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], f32[1, 1024, 1024]) parameter(0)
count = get-tuple-element(%param), index=0
p = get-tuple-element(%param), index=1
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}",
_xla_send_recv_pipeline="0"
}
send = (f32[1, 1024, 1024], u32[], token[]) send(p, after-all), channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{0, 1}, {1, 2}, {2, 3}, {3, 4}}",
_xla_send_recv_pipeline="0"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
replica = u32[] replica-id()
c10 = u32[] constant(10)
sum = u32[] add(replica, c10)
sum2 = u32[] add(sum, count)
conv = f32[] convert(sum2)
b = f32[1, 1024, 1024] add(p, recv-data)
c = f32[1, 1024, 1024] multiply(b, b)
d = f32[1, 1024, 1024] tan(c)
s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}
send-done = token[] send-done(send), channel_id=1, frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init)
while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=body, condition=cond, backend_config="{\"known_trip_count\":{\"n\":\"25\"}}"
ROOT result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1
}
)";
// Select device-to-device recv-done ops whose single user is not the loop
// body's root (i.e. the received data feeds real work inside the body).
auto should_pipeline = [](const HloInstruction* instruction) {
if (!HloPredicateIsOp<HloOpcode::kRecvDone>(instruction)) return false;
const HloRecvDoneInstruction* recv_done =
dynamic_cast<const HloRecvDoneInstruction*>(instruction);
if (recv_done->is_host_transfer()) return false;
return (recv_done->user_count() == 1 && recv_done->parent() != nullptr &&
recv_done->users()[0] != recv_done->parent()->root_instruction());
};
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
false,
false,
CollectivePipeliner::PipeliningDirection::kBackward,
should_pipeline)
.value());
XLA_VLOG_LINES(10, module->ToString());
// Both the peeled (".1") and rotated (".2") recv/send copies must exist and
// keep using the same channel id as each other.
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
EXPECT_EQ(recv1->channel_id(), send1->channel_id());
}
// Backward-pipelines a send/recv pair whose send operand is a loop-variant
// tuple element, allowed via should_allow_loop_variant_parameter. Also
// exercises the peeled/rotated postprocessing hooks: the peeled copies get
// _xla_other_attr="1" and the rotated in-loop copies get _xla_other_attr="2".
TEST_F(CollectivePipelinerTest,
       TransformRecvSendBackwardsWithLoopVariantParameter) {
constexpr absl::string_view hlo_string = R"(
HloModule module
cond {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
ub = u32[] constant(2)
ROOT result = pred[] compare(count, ub), direction=LT
}
body {
param = (u32[], u32[2]) parameter(0)
count = get-tuple-element(param), index=0
send-data = get-tuple-element(param), index=1
after-all.0 = token[] after-all()
recv.0 = (u32[2], u32[], token[]) recv(after-all.0), channel_id=1,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_other_attr="0"
}
after-all.0.s = token[] after-all()
send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.s),
channel_id=1, frontend_attributes={
_xla_send_recv_source_target_pairs="{{3,0}}",
_xla_other_attr="0"
}
recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
recv-data = u32[2] get-tuple-element(recv-done.0), index=0
c1 = u32[] constant(1)
new_count = u32[] add(count, c1)
r = u32[2] broadcast(c1), dimensions={}
s = u32[2] add(r, recv-data)
send-done.0 = token[] send-done(send.0), channel_id=1,
frontend_attributes={
_xla_send_recv_pipeline="0"
}
ROOT result = (u32[], u32[2]) tuple(new_count, s)
}
ENTRY test_computation {
c0 = u32[] constant(0)
c1 = u32[] constant(1)
r = u32[] replica-id()
a = u32[] add(c1, r)
init = u32[2] broadcast(a), dimensions={}
while_init = (u32[], u32[2]) tuple(c0, init)
while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond
ROOT result = u32[2] get-tuple-element(while_result), index=1
})";
// Pipeline send/recv ops whose single user is not the loop body root.
auto should_pipeline = [](const HloInstruction* instr) {
if (!HloPredicateIsOp<HloOpcode::kRecv>(instr) &&
!HloPredicateIsOp<HloOpcode::kSend>(instr))
return false;
const HloSendRecvInstruction* send_recv =
dynamic_cast<const HloSendRecvInstruction*>(instr);
return (send_recv->user_count() == 1 && send_recv->parent() != nullptr &&
send_recv->users()[0] != send_recv->parent()->root_instruction());
};
// Permit pipelining even though the send data is a loop-variant parameter
// (a GTE of the while parameter).
auto should_allow_loop_variant_parameter = [](const HloInstruction* instr) {
CHECK(instr->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(0)->opcode() == HloOpcode::kParameter);
return true;
};
const char* kAttr = "_xla_other_attr";
// Tag the copies produced by peeling vs. rotation so the assertions below
// can tell them apart by frontend attribute value.
auto postprocess_peeled = [&](HloInstruction* instr) {
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kAttr] = "1";
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
};
auto postprocess_rotated = [&](HloInstruction* instr) {
xla::FrontendAttributes attributes = instr->frontend_attributes();
(*attributes.mutable_map())[kAttr] = "2";
instr->set_frontend_attributes(attributes);
return absl::OkStatus();
};
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
false,
false,
CollectivePipeliner::PipeliningDirection::kBackward,
should_pipeline,
HloPredicateTrue,
HloPredicateTrue,
should_allow_loop_variant_parameter,
postprocess_peeled, postprocess_rotated)
.value());
XLA_VLOG_LINES(10, module->ToString());
auto while_op = FindInstruction(module.get(), "while");
EXPECT_EQ(while_op->opcode(), HloOpcode::kWhile);
// Pipelining grows the loop state from 2 to 5 tuple elements.
EXPECT_EQ(while_op->shape().tuple_shapes().size(), 5);
auto recv1 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.1"));
EXPECT_NE(recv1, nullptr);
auto recv2 =
DynCast<HloRecvInstruction>(FindInstruction(module.get(), "recv.2"));
EXPECT_NE(recv2, nullptr);
EXPECT_EQ(recv1->channel_id(), recv2->channel_id());
auto send1 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.1"));
EXPECT_NE(send1, nullptr);
auto send2 =
DynCast<HloSendInstruction>(FindInstruction(module.get(), "send.2"));
EXPECT_NE(send2, nullptr);
EXPECT_EQ(send1->channel_id(), send2->channel_id());
EXPECT_EQ(recv1->channel_id(), send1->channel_id());
// Source/target pairs must survive unchanged; the ".1" copies are peeled
// ("1") and the ".2" copies are rotated ("2").
const char* kSourceTarget = "_xla_send_recv_source_target_pairs={{3,0}}";
const char* kPeeledAttr = "_xla_other_attr=\"1\"";
const char* kRotatedAttr = "_xla_other_attr=\"2\"";
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kSourceTarget));
EXPECT_THAT(send1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(recv1->ToString(), ::testing::HasSubstr(kPeeledAttr));
EXPECT_THAT(send2->ToString(), ::testing::HasSubstr(kRotatedAttr));
EXPECT_THAT(recv2->ToString(), ::testing::HasSubstr(kRotatedAttr))
;
}
// Forward-pipelines two all-reduces of the same value (ar.1, ar.2) whose
// results are merged back together through a chain of elementwise multiplies
// before the dynamic-update-slice. Only asserts the pass reports a change.
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseMerge) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
ar.2 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.2)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Forward direction with multi-use/multi-collective pipelining enabled.
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
true,
true,
CollectivePipeliner::PipeliningDirection::kForward)
.value());
XLA_VLOG_LINES(1, module->ToString());
}
// Like MultiUsesElementwiseMerge, but ar.2 consumes ar.1 directly (chained
// collectives) and control-predecessor edges reference ar.1 from both mul2
// and the body root. Only asserts the pass reports a change.
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseFeedTwo) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
ar.2 = bf16[1,8,128] all-reduce(ar.1), replica_groups={}, to_apply=add, channel_id=1
mul2 = bf16[1,8,128] multiply(ar.1, bc), control-predecessors={ar.1}
mul3 = bf16[1,8,128] multiply(mul2, ar.2)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5), control-predecessors={ar.1}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Forward direction with multi-use/multi-collective pipelining enabled.
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
true,
true,
CollectivePipeliner::PipeliningDirection::kForward)
.value());
XLA_VLOG_LINES(1, module->ToString());
}
// Variant of MultiUsesElementwiseFeedTwo where each chained all-reduce
// result goes through a reduce (red.1/red.2 over the leading broadcast dim)
// before the elementwise merge. Only asserts the pass reports a change.
TEST_F(CollectivePipelinerTest, MultiUsesElementwiseFeedTwoWithReduce) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.2 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.5 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.5, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
bm = bf16[1,1,8,128] broadcast(mul), dimensions={1,2,3}
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
ar.1 = bf16[1,1,8,128] all-reduce(bm), replica_groups={}, to_apply=add, channel_id=1
ar.2 = bf16[1,1,8,128] all-reduce(ar.1), replica_groups={}, to_apply=add, channel_id=2
red.1 = bf16[1,8,128] reduce(ar.1, c2), to_apply=add.1, dimensions={0}
red.2 = bf16[1,8,128] reduce(ar.2, c2), to_apply=add.2, dimensions={0}
mul2 = bf16[1,8,128] multiply(red.1, bc), control-predecessors={ar.1}
mul3 = bf16[1,8,128] multiply(mul2, red.2)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.5), control-predecessors={ar.1}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Forward direction with multi-use/multi-collective pipelining enabled.
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
true,
true,
CollectivePipeliner::PipeliningDirection::kForward)
.value());
XLA_VLOG_LINES(1, module->ToString());
}
// Forward-pipelines a reduce-scatter (followed by transpose/reshape
// formatting ops into a dynamic-update-slice) and then runs HloVerifier on
// the result, checking the transformed module is still well-formed.
TEST_F(CollectivePipelinerTest, PipelinedReduceScatterCanPassVerifier) {
constexpr absl::string_view hlo_string = R"(
HloModule module
to_apply0 {
Arg_0.732 = bf16[] parameter(0)
Arg_1.733 = bf16[] parameter(1)
ROOT add.734 = bf16[] add(Arg_0.732, Arg_1.733)
}
body {
p2 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) parameter(0)
gte2 = bf16[3,4096,4096]{2,1,0} get-tuple-element(p2), index=1
gte3 = bf16[10,512,3,4096]{3,2,1,0} get-tuple-element(p2), index=2
c2 = s32[] constant(9)
gte4 = s32[] get-tuple-element(p2), index=0
sub0 = s32[] subtract(c2, gte4)
c3 = s32[] constant(0)
comp1 = pred[] compare(sub0, c3), direction=LT
c4 = s32[] constant(19)
sub2 = s32[] subtract(c4, gte4)
sel0 = s32[] select(comp1, sub2, sub0)
rsp0 = bf16[3,4096,4096]{2,1,0} reshape(gte2)
rs0 = bf16[3,4096,512]{2,1,0} reduce-scatter(rsp0), channel_id=75, replica_groups={{0,1,2,3}}, dimensions={2}, to_apply=to_apply0
tran0 = bf16[512,3,4096]{0,2,1} transpose(rs0), dimensions={2,0,1}
rsp1 = bf16[1,512,3,4096]{3,2,1,0} reshape(tran0)
dus0 = bf16[10,512,3,4096]{3,2,1,0} dynamic-update-slice(gte3, rsp1, sel0, c3, c3, c3)
c5 = s32[] constant(1)
add0 = s32[] add(gte4, c5)
ROOT t1 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) tuple(add0, rsp0, dus0)
}
condition {
cond_p1 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) parameter(0)
gte1 = s32[] get-tuple-element(cond_p1), index=0
c1 = s32[] constant(9)
ROOT comp0 = pred[] compare(gte1, c1), direction=LT
}
ENTRY main.3813_spmd {
p0 = bf16[3,4096,4096]{2,1,0} parameter(0)
p1 = bf16[10,512,3,4096]{3,2,1,0} parameter(1)
c0 = s32[] constant(0)
t0 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) tuple(c0, p0, p1)
w0 = (s32[], bf16[3,4096,4096]{2,1,0}, bf16[10,512,3,4096]{3,2,1,0}) while(t0), condition=condition, body=body
ROOT gte0 = bf16[3,4096,4096]{2,1,0} get-tuple-element(w0), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Pipeline only reduce-scatter ops, forward direction.
EXPECT_TRUE(RunOptimizer(module.get(), true, 0,
true,
true,
CollectivePipeliner::PipeliningDirection::kForward,
HloPredicateIsOp<HloOpcode::kReduceScatter>)
.value());
XLA_VLOG_LINES(1, module->ToString());
// The transformed module must pass HLO verification (layout-sensitive
// checks disabled, see the two bool arguments).
HloVerifier verifier(false,
true);
ASSERT_IS_OK(verifier.Run(module.get()).status());
}
// The all-gather's operand (broadcast.3593) is loop-invariant but has a
// second consumer (add.2) inside the loop. With the final "true" flag the
// backward pipeliner is allowed to clone such multi-consumer invariant ops
// into the chain, so pipelining succeeds; with "false" it must bail out.
TEST_F(CollectivePipelinerTest,
       PipelineBackwardIncludeInvariantMultiConsumerInChain) {
constexpr absl::string_view hlo_string = R"(
HloModule module
while_cond {
param = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[1,8,2048,32768]{3,2,1,0} get-tuple-element(param), index=1
get-tuple-element.397 = bf16[1,8,2048,32768]{3,2,1,0} get-tuple-element(param), index=2
constant.1 = bf16[] constant(2)
broadcast.3593 = bf16[1,8,2048,32768]{3,2,1,0} broadcast(constant.1), dimensions={}
add.2 = bf16[1,8,2048,32768]{3,2,1,0} add(broadcast.3593, get-tuple-element.395)
all-gather.1 = bf16[1,64,2048,32768]{3,2,1,0} all-gather(broadcast.3593), channel_id=1, dimensions={1}, replica_groups={}
slice.2 = bf16[1,8,2048,32768]{3,2,1,0} slice(all-gather.1), slice={[0:1], [8:16], [0:2048], [0:32768]}
constant.2 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2)
ROOT tuple = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) tuple(add.230, add.2, slice.2)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[1,8,2048,32768]{3,2,1,0} parameter(0)
p1 = bf16[1,8,2048,32768]{3,2,1,0} parameter(1)
tuple = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) tuple(c0, p0, p1)
while = (s32[], bf16[1,8,2048,32768]{3,2,1,0}, bf16[1,8,2048,32768]{3,2,1,0}) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[1,8,2048,32768]{3,2,1,0} get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// With multi-consumer invariant ops allowed in the chain (last arg true),
// the all-gather is pipelined into the while's new tuple element.
EXPECT_TRUE(
RunOptimizer(
module.get(), true, 0,
false,
false,
CollectivePipeliner::PipeliningDirection::kBackward,
IsAllGather,
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue,
std::nullopt,
std::nullopt,
true)
.value());
XLA_VLOG_LINES(1, module->ToString());
HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
EXPECT_THAT(while_instr, op::While(op::Tuple(_, _, _, op::AllGather(), _)));
HloInstruction* root = while_instr->while_body()->root_instruction();
EXPECT_THAT(root, op::Tuple(_, _, _, op::AllGather(), _));
// Control: with the flag off (last arg false) the same input must not be
// transformed.
auto ref_module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_FALSE(
RunOptimizer(
ref_module.get(), true, 0,
false,
false,
CollectivePipeliner::PipeliningDirection::kBackward,
IsAllGather,
HloPredicateTrue,
HloPredicateTrue,
HloPredicateTrue,
std::nullopt,
std::nullopt,
false)
.value());
}
// Forward-sink pipelining where the all-reduce is followed by a broadcast
// (a formatting op) and a reduce before the dynamic-update-slice. Checks the
// all-reduce is sunk out of the loop: some user of the while feeds an
// all-reduce in the entry computation.
TEST_F(CollectivePipelinerTest, BroadcastAsFormattingOp) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
EXPECT_TRUE(RunOptimizer(module.get(), true,
0,
true,
true,
CollectivePipeliner::kForwardSink)
.value());
XLA_VLOG_LINES(1, module->ToString());
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
// Sinking adds users to the while; one of them must (transitively, one hop
// away) feed the sunk all-reduce.
EXPECT_GE(while_instr->users().size(), 2);
EXPECT_TRUE(
absl::c_any_of(while_instr->users(), [](const HloInstruction* user) {
return absl::c_any_of(
user->users(), [](const HloInstruction* user_user) {
return user_user->opcode() == HloOpcode::kAllReduce;
});
}));
}
// Forward-sink with two dependent all-reduces in the loop body (ar.2 depends
// on ar.1 through a broadcast/reduce). After sinking, both all-reduces must
// sit in the entry computation: a BFS up the operand graph from the root
// finds ar-then-ar, and the inner one consumes a GTE of the while.
TEST_F(CollectivePipelinerTest, ForwardSinkDependentPipelineableCollectives) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
ar.2 = bf16[1,8,128] all-reduce(reduce), replica_groups={}, to_apply=add, channel_id=2
c1 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c1)
mul1 = bf16[1,8,128] multiply(ar.2, bc)
mul3 = bf16[1,8,128] multiply(mul1, ar.2)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul3, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_use_spmd_partitioning(true);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Pipeline all-reduces; every non-all-reduce op counts as a formatting op.
EXPECT_TRUE(
RunOptimizer(
module.get(), true,
0,
true,
true,
CollectivePipeliner::kForwardSink,
HloPredicateIsOp<HloOpcode::kAllReduce>,
HloPredicateIsNotOp<HloOpcode::kAllReduce>)
.value());
XLA_VLOG_LINES(1, module->ToString());
// BFS upward through operands from `i` to the nearest all-reduce, or
// nullptr if none is reachable.
std::function<const HloInstruction*(const HloInstruction*)> find_all_reduce =
[&](const HloInstruction* i) -> const HloInstruction* {
std::queue<const HloInstruction*> queue;
queue.push(i);
absl::flat_hash_set<HloInstruction*> visited;
while (!queue.empty()) {
const HloInstruction* curr_inst = queue.front();
queue.pop();
for (HloInstruction* operand : curr_inst->operands()) {
if (operand->opcode() == HloOpcode::kAllReduce) {
return operand;
}
if (visited.insert(operand).second) {
queue.push(operand);
}
}
}
return nullptr;
};
// Both sunk all-reduces are found in the entry computation, chained, with
// the inner one fed directly by a GTE of the while.
const HloInstruction* all_reduce1 =
find_all_reduce(module->entry_computation()->root_instruction());
EXPECT_NE(all_reduce1, nullptr);
const HloInstruction* all_reduce2 = find_all_reduce(all_reduce1);
EXPECT_NE(all_reduce2, nullptr);
EXPECT_THAT(all_reduce2, op::AllReduce(op::GetTupleElement(op::While())));
}
// Same scenario as ForwardSinkDependentPipelineableCollectives, but with
// last_run=false: in addition to the sunk all-reduce chain, the while body
// must keep a "SunkByPreviousStep" custom-call as the value written by the
// root's dynamic-update-slice (a marker for a later pipeliner run).
TEST_F(CollectivePipelinerTest,
       ForwardSinkDependentPipelineableCollectivesNotLastRun) {
constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
ar.2 = bf16[1,8,128] all-reduce(reduce), replica_groups={}, to_apply=add, channel_id=2
c1 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c1)
mul1 = bf16[1,8,128] multiply(ar.2, bc)
mul3 = bf16[1,8,128] multiply(mul1, ar.2)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul3, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
config_.set_use_spmd_partitioning(true);
auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
// Note: first RunOptimizer argument is last_run=false here.
EXPECT_TRUE(
RunOptimizer(
module.get(), false,
0,
true,
true,
CollectivePipeliner::kForwardSink,
HloPredicateIsOp<HloOpcode::kAllReduce>,
HloPredicateIsNotOp<HloOpcode::kAllReduce>)
.value())
;
XLA_VLOG_LINES(1, module->ToString());
// BFS upward through operands from `i` to the nearest all-reduce, or
// nullptr if none is reachable.
std::function<const HloInstruction*(const HloInstruction*)> find_all_reduce =
[&](const HloInstruction* i) -> const HloInstruction* {
std::queue<const HloInstruction*> queue;
queue.push(i);
absl::flat_hash_set<HloInstruction*> visited;
while (!queue.empty()) {
const HloInstruction* curr_inst = queue.front();
queue.pop();
for (HloInstruction* operand : curr_inst->operands()) {
if (operand->opcode() == HloOpcode::kAllReduce) {
return operand;
}
if (visited.insert(operand).second) {
queue.push(operand);
}
}
}
return nullptr;
};
const HloInstruction* all_reduce1 =
find_all_reduce(module->entry_computation()->root_instruction());
EXPECT_NE(all_reduce1, nullptr);
const HloInstruction* all_reduce2 = find_all_reduce(all_reduce1);
EXPECT_NE(all_reduce2, nullptr);
EXPECT_THAT(all_reduce2, op::AllReduce(op::GetTupleElement(op::While())));
// Because this is not the last run, the loop body's dynamic-update-slice
// writes a placeholder custom-call instead of the real computed value.
const HloInstruction* while_instr =
FindInstruction(module.get(), HloOpcode::kWhile);
CHECK_NE(while_instr, nullptr);
const HloInstruction* dynamic_update_slice =
while_instr->while_body()->root_instruction()->operands().back();
CHECK_EQ(dynamic_update_slice->opcode(), HloOpcode::kDynamicUpdateSlice);
const HloInstruction* custom_call = dynamic_update_slice->operand(1);
CHECK(custom_call->IsCustomCall("SunkByPreviousStep"));
}
// The accumulation buffer written by the dynamic-update-slice is
// bf16[5,8,128], but the while loop runs 3 iterations (induction variable
// starts at 0, increments by 1, loop condition is LT 3). Because the buffer's
// first dimension (5) does not match the trip count (3), the forward-sink
// transformation must decline to rewrite the loop.
TEST_F(CollectivePipelinerTest, ForwardSinkFirstDimNotMatchingLoopCount) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[5,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[5,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[5,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
c = bf16[] custom-call(), custom_call_target="Boh"
b = bf16[1,8,128] broadcast(c), dimensions={}
a = bf16[1,8,128] add(ar.1, b)
dynamic-update-slice.35 = bf16[5,8,128] dynamic-update-slice(get-tuple-element.395, a, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[5,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35), control-predecessors={select.1348}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[5,8,128] parameter(0)
p1 = bf16[3,8,128] parameter(1)
tuple = (s32[], bf16[5,8,128], bf16[3,8,128]) tuple(c0, p0, p1)
while = (s32[], bf16[5,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[5,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // The optimizer must report that it made no change.
  EXPECT_FALSE(RunOptimizer(module.get(), true,
                            0,
                            true,
                            true,
                            CollectivePipeliner::kForwardSink)
                   .value());
}
// Here the dynamic-update-slice writes with the loop-varying index
// (select.1348) in the *second* dimension (operands are
// (constant.2561, select.1348, constant.2561)), not in the major dimension.
// Forward-sink only supports sliding along the first dimension, so the pass
// must decline to rewrite the loop.
TEST_F(CollectivePipelinerTest, ForwardSinkNotFirstDim) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
%c = bf16[] custom-call(), custom_call_target="Boh"
%b = bf16[1,8,128] broadcast(c), dimensions={}
%a = bf16[1,8,128] add(ar.1, b)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, a, constant.2561, select.1348, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, get-tuple-element.35), control-predecessors={select.1348}
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // The optimizer must report that it made no change.
  EXPECT_FALSE(RunOptimizer(module.get(), true,
                            0,
                            true,
                            true,
                            CollectivePipeliner::kForwardSink)
                   .value());
}
// A single all-reduce (ar.1) feeds two distinct dynamic-update-slices into
// two different accumulation buffers. The forward-sink pass should still move
// the all-reduce out of the loop: afterwards an all-reduce must consume a
// get-tuple-element of the while loop (checked via the two-level user walk),
// and the new entry-computation root tuple must contain exactly two reshapes
// (one per sunk dynamic-update-slice chain).
TEST_F(CollectivePipelinerTest, CollectiveWithMultipleDUS) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), true,
                           0,
                           true,
                           true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  // The sunk all-reduce now lives outside the loop, reachable as a
  // grand-user of the while instruction.
  const HloInstruction* while_instr =
      FindInstruction(module.get(), HloOpcode::kWhile);
  EXPECT_TRUE(
      absl::c_any_of(while_instr->users(), [](const HloInstruction* user) {
        return absl::c_any_of(
            user->users(), [](const HloInstruction* user_user) {
              return user_user->opcode() == HloOpcode::kAllReduce;
            });
      }));
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kGetTupleElement);
  const HloInstruction* new_tuple =
      module->entry_computation()->root_instruction()->operand(0);
  EXPECT_EQ(new_tuple->opcode(), HloOpcode::kTuple);
  // One reshape per sunk dynamic-update-slice chain.
  EXPECT_EQ(absl::c_count_if(new_tuple->operands(),
                             [](const HloInstruction* operand) {
                               return operand->opcode() == HloOpcode::kReshape;
                             }),
            2);
}
// Same module as CollectiveWithMultipleDUS, but run with last_run=false.
// In addition to the checks from the last-run variant, the loop body must
// retain a dynamic-update-slice whose update operand is the
// "SunkByPreviousStep" custom-call marker, which later pipelining steps use
// to identify already-sunk values.
TEST_F(CollectivePipelinerTest, CollectiveWithMultipleDUSNotLastRun) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), false,
                           0,
                           true,
                           true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString())
  const HloInstruction* while_instr =
      FindInstruction(module.get(), HloOpcode::kWhile);
  CHECK_NE(while_instr, nullptr);
  // The sunk all-reduce must be reachable as a grand-user of the while.
  EXPECT_TRUE(
      absl::c_any_of(while_instr->users(), [](const HloInstruction* user) {
        return absl::c_any_of(
            user->users(), [](const HloInstruction* user_user) {
              return user_user->opcode() == HloOpcode::kAllReduce;
            });
      }));
  EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(),
            HloOpcode::kGetTupleElement);
  const HloInstruction* new_tuple =
      module->entry_computation()->root_instruction()->operand(0);
  EXPECT_EQ(new_tuple->opcode(), HloOpcode::kTuple);
  // One reshape per sunk dynamic-update-slice chain.
  EXPECT_EQ(absl::c_count_if(new_tuple->operands(),
                             [](const HloInstruction* operand) {
                               return operand->opcode() == HloOpcode::kReshape;
                             }),
            2);
  // With last_run=false the loop keeps a DUS whose update is the
  // "SunkByPreviousStep" marker custom-call.
  const HloInstruction* dynamic_update_slice =
      while_instr->while_body()->root_instruction()->operand(4);
  CHECK_EQ(dynamic_update_slice->opcode(), HloOpcode::kDynamicUpdateSlice);
  const HloInstruction* custom_call = dynamic_update_slice->operand(1);
  CHECK(custom_call->IsCustomCall("SunkByPreviousStep"));
}
// Both dynamic-update-slices write into the *same* buffer
// (get-tuple-element.395). Sinking the all-reduce would require disentangling
// two updates of one accumulation buffer, so the forward-sink pass must
// decline to rewrite the loop.
TEST_F(CollectivePipelinerTest, CollectiveWithMultipleDUSSameBuffer) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=2
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, mul4, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  // The optimizer must report that it made no change.
  EXPECT_FALSE(RunOptimizer(module.get(), true,
                            0,
                            true,
                            true,
                            CollectivePipeliner::kForwardSink)
                   .value());
}
// Two all-reduces (ar.1, ar.2), each feeding two dynamic-update-slices into
// four distinct buffers; ar.2 also consumes ar.1 (via `divide`). After
// forward-sinking, the entry root must be a get-tuple-element of a new tuple
// whose elements are the untouched while outputs plus one reshape per sunk
// formatting chain (Reduce, Multiply, Divide, Abs).
TEST_F(CollectivePipelinerTest, MergeTwoCollectivesEachWithTwoDUS) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.397 = bf16[3,8,128] get-tuple-element(param), index=3
get-tuple-element.398 = bf16[3,8,128] get-tuple-element(param), index=4
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=5
get-tuple-element.36 = bf16[3,8,128] get-tuple-element(param), index=6
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
dynamic-slice.100 = bf16[1,8,128] dynamic-slice(get-tuple-element.36, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul.1 = bf16[1,8,128] multiply(dynamic-slice.100, dynamic-slice.99)
ar.2 = bf16[1,8,128] all-reduce(mul.1), replica_groups={}, to_apply=add, channel_id=1
divide = bf16[1,8,128] divide(ar.1, ar.2)
dynamic-update-slice.37 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.397, divide, select.1348, constant.2561, constant.2561)
mul.2 = bf16[1,8,128] multiply(ar.2, ar.2)
abs = bf16[1,8,128] abs(mul.2)
dynamic-update-slice.38 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.398, abs, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, dynamic-update-slice.37, dynamic-update-slice.38, get-tuple-element.35, get-tuple-element.36)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,8,128] parameter(1)
p2 = bf16[3,8,128] parameter(2)
p3 = bf16[3,8,128] parameter(3)
p4 = bf16[3,8,128] parameter(4)
p5 = bf16[3,8,128] parameter(5)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p1, p2, p3, p4, p5)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), true,
                           0,
                           true,
                           true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  // The sunk formatting ops appear reshaped in the new root tuple, in the
  // same order as the original DUS outputs.
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::GetTupleElement(op::Tuple(
                  op::GetTupleElement(op::While()), op::Reshape(op::Reduce()),
                  op::Reshape(op::Multiply()), op::Reshape(op::Divide()),
                  op::Reshape(op::Abs()), op::GetTupleElement(op::While()),
                  op::GetTupleElement(op::While()))));
}
// Same module as MergeTwoCollectivesEachWithTwoDUS, but run with
// last_run=false. The root-tuple structure is the same, and in addition the
// loop body must keep, at tuple operands 7 and 8, dynamic-update-slices whose
// update operand is the "SunkByPreviousStep" marker custom-call.
TEST_F(CollectivePipelinerTest, MergeTwoCollectivesEachWithTwoDUSNotLastRun) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.397 = bf16[3,8,128] get-tuple-element(param), index=3
get-tuple-element.398 = bf16[3,8,128] get-tuple-element(param), index=4
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=5
get-tuple-element.36 = bf16[3,8,128] get-tuple-element(param), index=6
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
b.1 = bf16[1,8,128,32] broadcast(ar.1), dimensions={0,1,2}
constant = bf16[] constant(0)
reduce = bf16[1,8,128] reduce(b.1, constant), dimensions={3}, to_apply=add.1
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, reduce, select.1348, constant.2561, constant.2561)
c2 = bf16[] constant(2.0)
bc = bf16[1,8,128] broadcast(c2)
mul2 = bf16[1,8,128] multiply(ar.1, bc)
mul3 = bf16[1,8,128] multiply(mul2, ar.1)
mul4 = bf16[1,8,128] multiply(mul3, mul)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, mul4, select.1348, constant.2561, constant.2561)
dynamic-slice.100 = bf16[1,8,128] dynamic-slice(get-tuple-element.36, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul.1 = bf16[1,8,128] multiply(dynamic-slice.100, dynamic-slice.99)
ar.2 = bf16[1,8,128] all-reduce(mul.1), replica_groups={}, to_apply=add, channel_id=1
divide = bf16[1,8,128] divide(ar.1, ar.2)
dynamic-update-slice.37 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.397, divide, select.1348, constant.2561, constant.2561)
mul.2 = bf16[1,8,128] multiply(ar.2, ar.2)
abs = bf16[1,8,128] abs(mul.2)
dynamic-update-slice.38 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.398, abs, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, dynamic-update-slice.37, dynamic-update-slice.38, get-tuple-element.35, get-tuple-element.36)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
p1 = bf16[3,8,128] parameter(1)
p2 = bf16[3,8,128] parameter(2)
p3 = bf16[3,8,128] parameter(3)
p4 = bf16[3,8,128] parameter(4)
p5 = bf16[3,8,128] parameter(5)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p1, p2, p3, p4, p5)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), false,
                           0,
                           true,
                           true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::GetTupleElement(op::Tuple(
                  op::GetTupleElement(op::While()), op::Reshape(op::Reduce()),
                  op::Reshape(op::Multiply()), op::Reshape(op::Divide()),
                  op::Reshape(op::Abs()), op::GetTupleElement(op::While()),
                  op::GetTupleElement(op::While()))));
  // Matches a dynamic-update-slice whose update operand is the
  // "SunkByPreviousStep" marker custom-call.
  std::function<bool(const HloInstruction*)> is_dus_with_custom_call =
      [&](const HloInstruction* inst) -> bool {
    if (inst->opcode() != HloOpcode::kDynamicUpdateSlice) {
      return false;
    }
    return inst->operand(1)->IsCustomCall("SunkByPreviousStep");
  };
  const HloInstruction* while_instr =
      FindInstruction(module.get(), HloOpcode::kWhile);
  CHECK_NE(while_instr, nullptr);
  // One marker DUS per merged collective chain.
  CHECK(is_dus_with_custom_call(
      while_instr->while_body()->root_instruction()->operand(7)));
  CHECK(is_dus_with_custom_call(
      while_instr->while_body()->root_instruction()->operand(8)));
}
// Two all-reduces (ar.1, ar.2) share the same formatting broadcast
// (`broadcast`). After forward-sinking, the entry computation must contain
// exactly one broadcast fed by a get-tuple-element of the while loop — the
// shared broadcast must not be duplicated per sunk collective.
TEST_F(CollectivePipelinerTest, NoRedundantBroadcastsInFormattingOps) {
  constexpr absl::string_view hlo_string = R"(
HloModule module
add {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
add.1 {
lhs = bf16[] parameter(0)
rhs = bf16[] parameter(1)
ROOT add = bf16[] add(lhs, rhs)
}
while_cond {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
gte = s32[] get-tuple-element(param), index=0
constant.1 = s32[] constant(3)
ROOT cmp = pred[] compare(gte, constant.1), direction=LT
}
while_body {
param = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) parameter(0)
get-tuple-element.394 = s32[] get-tuple-element(param), index=0
get-tuple-element.395 = bf16[3,8,128] get-tuple-element(param), index=1
get-tuple-element.396 = bf16[3,8,128] get-tuple-element(param), index=2
get-tuple-element.35 = bf16[3,8,128] get-tuple-element(param), index=3
constant.2557 = s32[] constant(1)
add.230 = s32[] add(get-tuple-element.394, constant.2557)
constant.2559 = s32[] constant(3)
subtract.139 = s32[] subtract(constant.2559, get-tuple-element.394)
constant.2560 = s32[] constant(-1)
add.231 = s32[] add(subtract.139, constant.2560)
constant.2561 = s32[] constant(0)
compare.747 = pred[] compare(add.231, constant.2561), direction=LT
constant.2562 = s32[] constant(2)
add.232 = s32[] add(subtract.139, constant.2562)
select.1348 = s32[] select(compare.747, add.232, add.231)
dynamic-slice.99 = bf16[1,8,128] dynamic-slice(get-tuple-element.35, select.1348, constant.2561, constant.2561), dynamic_slice_sizes={1,8,128}
mul = bf16[1,8,128] multiply(dynamic-slice.99, dynamic-slice.99)
ar.1 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add, channel_id=1
convert = bf16[] convert(add.232)
broadcast = bf16[1,8,128] broadcast(convert)
add.1 = bf16[1,8,128] add(ar.1, broadcast)
dynamic-update-slice.35 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.395, add.1, select.1348, constant.2561, constant.2561)
ar.2 = bf16[1,8,128] all-reduce(mul), replica_groups={}, to_apply=add.1, channel_id=2
add.2 = bf16[1,8,128] add(ar.2, broadcast)
dynamic-update-slice.36 = bf16[3,8,128] dynamic-update-slice(get-tuple-element.396, add.2, select.1348, constant.2561, constant.2561)
ROOT tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(add.230, dynamic-update-slice.35, dynamic-update-slice.36, get-tuple-element.35)
}
ENTRY entry {
c0 = s32[] constant(0)
p0 = bf16[3,8,128] parameter(0)
tuple = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) tuple(c0, p0, p0, p0)
while = (s32[], bf16[3,8,128], bf16[3,8,128], bf16[3,8,128]) while(tuple), condition=while_cond, body=while_body
ROOT gte1 = bf16[3,8,128] get-tuple-element(while), index=1
}
)";
  auto module = ParseAndReturnUnverifiedModule(hlo_string, config_).value();
  EXPECT_TRUE(RunOptimizer(module.get(), true,
                           0,
                           true,
                           true,
                           CollectivePipeliner::kForwardSink)
                  .value());
  XLA_VLOG_LINES(1, module->ToString());
  // Exactly one broadcast of a while-loop output may exist in the entry
  // computation; a count of 2 would mean the shared broadcast was cloned.
  EXPECT_EQ(absl::c_count_if(module->entry_computation()->instructions(),
                             [](const HloInstruction* instr) {
                               return instr->opcode() ==
                                          HloOpcode::kBroadcast &&
                                      instr->operand(0)->opcode() ==
                                          HloOpcode::kGetTupleElement &&
                                      instr->operand(0)->operand(0)->opcode() ==
                                          HloOpcode::kWhile;
                             }),
            1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_pipeliner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/collective_pipeliner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8323374f-8b1c-43c0-bf25-98a044b762e4 | cpp | tensorflow/tensorflow | ifrt_restore_tensor_registry | tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.cc | tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc | #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "tensorflow/core/framework/tensor.h"
namespace tensorflow {
namespace ifrt_serving {
absl::Status IfrtRestoreTensorRegistry::TryRegister(
    absl::string_view name, RestoredTensorInfo restored_tensor_info) {
  absl::MutexLock lock(&mutex_);
  // operator[] creates an empty slot on first use; a slot that already holds
  // a valid future means this variable was registered earlier.
  RestoredTensorInfo& entry = restored_tensors_[name];
  if (entry.tensor_future.IsValid()) {
    return absl::AlreadyExistsError(
        absl::StrCat("Variable '", name, "' already registered."));
  }
  entry = std::move(restored_tensor_info);
  return absl::OkStatus();
}
xla::ifrt::Future<tensorflow::Tensor>
IfrtRestoreTensorRegistry::GetRestoredTensor(absl::string_view name) const {
  absl::MutexLock lock(&mutex_);
  // Return the stored future when the variable is known; otherwise a future
  // that immediately resolves to a NotFound error.
  const auto it = restored_tensors_.find(name);
  if (it != restored_tensors_.end()) {
    return it->second.tensor_future;
  }
  return xla::ifrt::Future<tensorflow::Tensor>(
      absl::NotFoundError(absl::StrCat("Variable '", name, "' not found.")));
}
absl::Status IfrtRestoreTensorRegistry::SetUsedByHost(absl::string_view name) {
  absl::MutexLock lock(&mutex_);
  // Mark the tensor as host-consumed so Freeze() keeps it alive.
  const auto entry = restored_tensors_.find(name);
  if (entry != restored_tensors_.end()) {
    entry->second.used_by_host = true;
    return absl::OkStatus();
  }
  return absl::NotFoundError(absl::StrCat("Variable '", name, "' not found."));
}
// Releases every restored tensor that no host-side consumer asked for by
// replacing its future with one that resolves to an Unavailable error. After
// this call, only tensors flagged via SetUsedByHost() remain retrievable.
void IfrtRestoreTensorRegistry::Freeze() {
  absl::MutexLock lock(&mutex_);
  // Fix: error message previously read "already release." (grammatical typo).
  xla::ifrt::Future<tensorflow::Tensor> release_tensor_future(
      absl::UnavailableError("Tensor is already released."));
  for (auto& [name, info] : restored_tensors_) {
    if (!info.used_by_host) {
      // Dropping the original future releases the underlying tensor storage
      // once no other holder remains.
      info.tensor_future = release_tensor_future;
    }
  }
}
absl::StatusOr<DtypeAndShape> IfrtRestoreTensorRegistry::GetDtypeAndShape(
    absl::string_view name) const {
  absl::MutexLock lock(&mutex_);
  // Metadata is available even before the tensor future resolves.
  const auto entry = restored_tensors_.find(name);
  if (entry != restored_tensors_.end()) {
    return entry->second.dtype_and_shape;
  }
  return absl::NotFoundError(absl::StrCat("Variable '", name, "' not found."));
}
}
} | #include "tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "xla/python/ifrt/future.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
using tsl::testing::IsOk;
using tsl::testing::StatusIs;
namespace tensorflow {
namespace ifrt_serving {
namespace {
TEST(IfrtRestoreTensorRegistryTest, RetrieveNonRegisteredTensorFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest,
RetrieveNonRegisteredTensorDTypeAndShapeFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.GetDtypeAndShape("input_tensor_1"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest, SetNonExistedTensorAsUsedByHostFails) {
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"),
StatusIs(absl::StatusCode::kNotFound));
}
TEST(IfrtRestoreTensorRegistryTest, RegisteredExistedTensorFails) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info),
IsOk());
promise.Set(input_tensor);
EXPECT_THAT(registry.TryRegister("input_tensor_2", restored_tensor_info),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(IfrtRestoreTensorRegistryTest, SetTensorAsUsedByHost) {
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
EXPECT_THAT(registry.SetUsedByHost("input_tensor_1"), IsOk());
}
TEST(IfrtRestoreTensorRegistryTest, RegisteredTensorCanBeRetrieved) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
promise.Set(input_tensor);
TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved,
registry.GetRestoredTensor("input_tensor_1").Await());
test::ExpectEqual(retrieved, input_tensor);
TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape,
registry.GetDtypeAndShape("input_tensor_1"));
EXPECT_TRUE(
dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2})));
EXPECT_EQ(dtype_and_shape.dtype, DT_INT32);
}
TEST(IfrtRestoreTensorRegistryTest,
RegisteredTensorDTypeAndShapeCanBeRetrieved) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future = xla::ifrt::Future<tensorflow::Tensor>(promise);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future};
IfrtRestoreTensorRegistry registry;
EXPECT_THAT(registry.TryRegister("input_tensor_1", restored_tensor_info),
IsOk());
TF_ASSERT_OK_AND_ASSIGN(DtypeAndShape dtype_and_shape,
registry.GetDtypeAndShape("input_tensor_1"));
EXPECT_TRUE(
dtype_and_shape.shape.IsSameSize(tensorflow::TensorShape({2, 2})));
EXPECT_EQ(dtype_and_shape.dtype, DT_INT32);
}
TEST(IfrtRestoreTensorRegistryTest, FeezeTensorRegistry) {
auto input_tensor =
test::AsTensor<int32_t>({1, 2, 3, 4}, tensorflow::TensorShape({2, 2}));
auto promise1 = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future1 = xla::ifrt::Future<tensorflow::Tensor>(promise1);
auto promise2 = xla::ifrt::Future<tensorflow::Tensor>::CreatePromise();
auto future2 = xla::ifrt::Future<tensorflow::Tensor>(promise2);
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info1 = {
.used_by_host = false,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future1};
IfrtRestoreTensorRegistry::RestoredTensorInfo restored_tensor_info2 = {
.used_by_host = true,
.dtype_and_shape =
{
.dtype = DT_INT32,
.shape = tensorflow::TensorShape({2, 2}),
},
.tensor_future = future2};
IfrtRestoreTensorRegistry registry;
TF_ASSERT_OK(registry.TryRegister("input_tensor_1", restored_tensor_info1));
TF_ASSERT_OK(registry.TryRegister("input_tensor_2", restored_tensor_info2));
promise1.Set(input_tensor);
promise2.Set(input_tensor);
registry.Freeze();
EXPECT_THAT(registry.GetRestoredTensor("input_tensor_1").Await(),
StatusIs(absl::StatusCode::kUnavailable));
TF_ASSERT_OK_AND_ASSIGN(tensorflow::Tensor retrieved,
registry.GetRestoredTensor("input_tensor_2").Await());
test::ExpectEqual(retrieved, input_tensor);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/ifrt/ifrt_restore_tensor_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
718a66b1-78f4-4b2a-a875-0a7f0a41e8b3 | cpp | google/cel-cpp | unknown_set | base/internal/unknown_set.cc | eval/public/unknown_set_test.cc | #include "base/internal/unknown_set.h"
#include "absl/base/no_destructor.h"
namespace cel::base_internal {
const AttributeSet& EmptyAttributeSet() {
static const absl::NoDestructor<AttributeSet> empty_attribute_set;
return *empty_attribute_set;
}
const FunctionResultSet& EmptyFunctionResultSet() {
static const absl::NoDestructor<FunctionResultSet> empty_function_result_set;
return *empty_function_result_set;
}
} | #include "eval/public/unknown_set.h"
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/arena.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_function.h"
#include "eval/public/unknown_attribute_set.h"
#include "eval/public/unknown_function_result_set.h"
#include "internal/testing.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using ::google::protobuf::Arena;
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
UnknownFunctionResultSet MakeFunctionResult(Arena* arena, int64_t id) {
CelFunctionDescriptor desc("OneInt", false, {CelValue::Type::kInt64});
return UnknownFunctionResultSet(UnknownFunctionResult(desc, 0));
}
UnknownAttributeSet MakeAttribute(Arena* arena, int64_t id) {
std::vector<CelAttributeQualifier> attr_trail{
CreateCelAttributeQualifier(CelValue::CreateInt64(id))};
return UnknownAttributeSet({CelAttribute("x", std::move(attr_trail))});
}
MATCHER_P(UnknownAttributeIs, id, "") {
const CelAttribute& attr = arg;
if (attr.qualifier_path().size() != 1) {
return false;
}
auto maybe_qualifier = attr.qualifier_path()[0].GetInt64Key();
if (!maybe_qualifier.has_value()) {
return false;
}
return maybe_qualifier.value() == id;
}
TEST(UnknownSet, AttributesMerge) {
Arena arena;
UnknownSet a(MakeAttribute(&arena, 1));
UnknownSet b(MakeAttribute(&arena, 2));
UnknownSet c(MakeAttribute(&arena, 2));
UnknownSet d(a, b);
UnknownSet e(c, d);
EXPECT_THAT(
d.unknown_attributes(),
UnorderedElementsAre(UnknownAttributeIs(1), UnknownAttributeIs(2)));
EXPECT_THAT(
e.unknown_attributes(),
UnorderedElementsAre(UnknownAttributeIs(1), UnknownAttributeIs(2)));
}
TEST(UnknownSet, DefaultEmpty) {
UnknownSet empty_set;
EXPECT_THAT(empty_set.unknown_attributes(), IsEmpty());
EXPECT_THAT(empty_set.unknown_function_results(), IsEmpty());
}
TEST(UnknownSet, MixedMerges) {
Arena arena;
UnknownSet a(MakeAttribute(&arena, 1), MakeFunctionResult(&arena, 1));
UnknownSet b(MakeFunctionResult(&arena, 2));
UnknownSet c(MakeAttribute(&arena, 2));
UnknownSet d(a, b);
UnknownSet e(c, d);
EXPECT_THAT(d.unknown_attributes(),
UnorderedElementsAre(UnknownAttributeIs(1)));
EXPECT_THAT(
e.unknown_attributes(),
UnorderedElementsAre(UnknownAttributeIs(1), UnknownAttributeIs(2)));
}
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/base/internal/unknown_set.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/unknown_set_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
a07ea66a-4414-454d-a9cf-f85639ffbeb2 | cpp | tensorflow/tensorflow | topk_specializer | third_party/xla/xla/service/gpu/transforms/topk_specializer.cc | third_party/xla/xla/service/gpu/transforms/topk_specializer_test.cc | #include "xla/service/gpu/transforms/topk_specializer.h"
#include <stddef.h>
#include <initializer_list>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
absl::StatusOr<HloInstruction*> SmallBufferOptimization(
HloCustomCallInstruction* topk) {
Shape data_shape = topk->operand(0)->shape();
auto supported_dtypes = {F32, BF16};
if (!absl::c_linear_search(supported_dtypes, data_shape.element_type())) {
return InvalidArgument(
"Invalid Dtype: %s",
primitive_util::LowercasePrimitiveTypeName(data_shape.element_type()));
}
if (data_shape.dimensions_size() > 2) {
return InvalidArgument("Invalid input dimensions: %s",
data_shape.ToString());
}
bool has_batch = data_shape.dimensions_size() == 2;
constexpr size_t max_k = 16;
constexpr size_t min_n = 1024;
size_t n = data_shape.dimensions(has_batch ? 1 : 0);
size_t k = topk->shape().tuple_shapes(0).dimensions(has_batch ? 1 : 0);
if (k > max_k) {
return InvalidArgument("k too large (%d), must be <= %d", k, max_k);
}
if (n < min_n) {
return InvalidArgument("Input too small (n=%d, min_n=%d)", n, min_n);
}
HloComputation* comp = topk->parent();
HloInstruction* new_topk =
comp->AddInstruction(HloInstruction::CreateCustomCall(
topk->shape(), topk->operands(),
topk->to_apply(), "__gpu$TopK",
"", CustomCallApiVersion::API_VERSION_TYPED_FFI));
return TupleUtil::ExtractPrefix(new_topk, 2);
}
class SpecializeTopkVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleCustomCall(HloInstruction* inst) override {
HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst);
if (topk == nullptr || topk->custom_call_target() != "TopK") {
return absl::OkStatus();
}
TF_RET_CHECK(topk->operand_count() == 1);
if (auto small_topk = SmallBufferOptimization(topk); small_topk.ok()) {
return ReplaceInstruction(topk, *small_topk);
} else {
VLOG(2) << "Small TopK optimization doesn't match: "
<< small_topk.status();
}
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> TopkSpecializer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return SpecializeTopkVisitor().RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/transforms/topk_specializer.h"
#include <stddef.h>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_interface.h"
#include "xla/service/platform_util.h"
#include "xla/service/topk_rewriter.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::Combine;
using ::testing::Values;
using ParameterizedInterface =
::testing::WithParamInterface<std::tuple<int, int, int, std::string_view>>;
class TopkTest : public HloTestBase, public ParameterizedInterface {
public:
TopkTest()
: HloTestBase(*PlatformUtil::GetPlatform("gpu"),
*PlatformUtil::GetPlatform("gpu"), true, true, {}) {}
protected:
absl::StatusOr<std::unique_ptr<HloModule>> TopkHlo(int n, int k,
int batch_size,
std::string_view dtype) {
return ParseAndReturnVerifiedModule(absl::Substitute(
R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
}
ENTRY top_k {
%arg = $3[$2,$0] parameter(0)
ROOT %result = ($3[$2,$1], s32[$2,$1]) custom-call(%arg), custom_call_target="TopK", to_apply=%compare
}
)",
n, k, batch_size, dtype));
}
};
class GeneralizeTopkVisitor : public DfsHloRewriteVisitor {
public:
absl::Status HandleCustomCall(HloInstruction* inst) override {
HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst);
if (topk == nullptr || topk->custom_call_target() != "__gpu$TopK") {
return absl::OkStatus();
}
HloComputation* comp = topk->parent();
auto original_shape = ShapeUtil::SliceTuple(topk->shape(), 0, 2);
HloInstruction* original_topk =
comp->AddInstruction(HloInstruction::CreateCustomCall(
original_shape, topk->operands(), topk->to_apply(), "TopK"));
HloInstruction* new_tuple = topk->users()[0]->users()[0];
return ReplaceInstruction(new_tuple, original_topk);
}
};
class GeneralizeTopk : public HloModulePass {
public:
absl::string_view name() const override { return "generalized-topk"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(HloModule* module,
const absl::flat_hash_set<absl::string_view>&
execution_threads) override {
return GeneralizeTopkVisitor().RunOnModule(module, execution_threads);
}
};
void ToSortAndSlice(HloModule* module) {
TF_ASSERT_OK_AND_ASSIGN(bool changed, GeneralizeTopk().Run(module));
ASSERT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(changed, TopkDecomposer().Run(module));
ASSERT_TRUE(changed);
}
TEST_P(TopkTest, ProducesCorrectResult) {
const auto [n_kb, k, batch_size, dtype] = GetParam();
const size_t n = n_kb * 1024;
TF_ASSERT_OK_AND_ASSIGN(auto topk_module, TopkHlo(n, k, batch_size, dtype));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
gpu::TopkSpecializer().Run(topk_module.get()));
ASSERT_TRUE(changed);
EXPECT_TRUE(
RunAndCompare(std::move(topk_module), std::nullopt, ToSortAndSlice));
}
INSTANTIATE_TEST_SUITE_P(
TopkTests, TopkTest,
Combine(
Values(1, 8, 12, 32),
Values(1, 2, 4, 8, 16, 7, 12),
Values(1, 16, 32, 64, 128),
Values(absl::string_view("f32"), "bf16")),
[](const auto& info) {
return absl::Substitute("n$0KiB_k$1_batch_size$2_$3",
std::get<0>(info.param), std::get<1>(info.param),
std::get<2>(info.param), std::get<3>(info.param));
});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_specializer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_specializer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8027a9a8-1fb7-4aea-ae54-cfe1d96f544a | cpp | tensorflow/tensorflow | owning_vector_ref | tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.cc | tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref_test.cc | #include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <cstddef>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
void OwningVectorRef::Resize(dims_t&& dims) {
dims_ = dims;
num_elements_ = 0;
for (dim_t d : dims_) {
if (d <= 0) {
break;
}
if (num_elements_ == 0) {
num_elements_ = d;
} else {
num_elements_ *= d;
}
}
raw_data_buffer_.resize(num_elements_ * TypeWidth(Type()));
}
const void* OwningVectorRef::Data() const { return raw_data_buffer_.data(); }
void* OwningVectorRef::Data() { return raw_data_buffer_.data(); }
ind_t OwningVectorRef::NumElements() const { return num_elements_; }
size_t OwningVectorRef::Bytes() const {
return NumElements() * TypeWidth(Type());
}
}
} | #include "tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.h"
#include <algorithm>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/ml_adjacent/lib.h"
namespace ml_adj {
namespace data {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
TEST(OwningVectorRefTest, ConstructFloat32) {
OwningVectorRef t(etype_t::f32);
EXPECT_EQ(t.Type(), etype_t::f32);
EXPECT_EQ(t.NumElements(), 0);
EXPECT_EQ(t.Bytes(), 0);
EXPECT_THAT(t.Dims(), IsEmpty());
}
TEST(OwningVectorRefTest, ResizeFromEmptyFloat32) {
OwningVectorRef t(etype_t::f32);
t.Resize({2, 2});
EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({2, 2}));
EXPECT_EQ(t.NumElements(), 4);
ASSERT_EQ(t.Bytes(), 4 * sizeof(float));
float* write_f_start = reinterpret_cast<float*>(t.Data());
float* write_f_end = write_f_start + t.NumElements();
std::fill(write_f_start, write_f_end, 0.5f);
const float* read_f_start = reinterpret_cast<const float*>(t.Data());
for (int i = 0; i < t.NumElements(); ++i) {
EXPECT_EQ(read_f_start[i], 0.5f);
}
}
TEST(OwningVectorRefTest, ResizeDownFloat32) {
OwningVectorRef t(etype_t::f32);
t.Resize({2, 2});
float* write_f_start = reinterpret_cast<float*>(t.Data());
float* write_f_end = write_f_start + t.NumElements();
std::fill(write_f_start, write_f_end, 0.5f);
t.Resize({3});
ASSERT_THAT(t.Dims(), ElementsAreArray<dim_t>({3}));
EXPECT_EQ(t.NumElements(), 3);
ASSERT_EQ(t.Bytes(), 3 * sizeof(float));
const float* read_f_start = reinterpret_cast<const float*>(t.Data());
for (int i = 0; i < t.NumElements(); ++i) {
EXPECT_EQ(read_f_start[i], 0.5f);
}
}
TEST(OwningVectorRefTest, IgnoresDimsForNumElementsAfterFirstNonPositive) {
OwningVectorRef t(etype_t::f32);
t.Resize({3, 0, 0, 2});
EXPECT_EQ(t.Type(), etype_t::f32);
EXPECT_EQ(t.NumElements(), 3);
EXPECT_EQ(t.Bytes(), 3 * sizeof(float));
EXPECT_THAT(t.Dims(), ElementsAreArray<dim_t>({3, 0, 0, 2}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/ml_adjacent/data/owning_vector_ref_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c7a7d7c3-ab9c-464d-ab87-e8ba974ca712 | cpp | tensorflow/tensorflow | indexing_analysis | third_party/xla/xla/service/gpu/model/indexing_analysis.cc | third_party/xla/xla/service/gpu/model/indexing_analysis_test.cc | #include "xla/service/gpu/model/indexing_analysis.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::AffineExpr;
using mlir::AffineMap;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::getAffineSymbolExpr;
using mlir::MLIRContext;
// Builds an indexing result in which each of `count` operands carries a
// single undefined map, signalling that its indexing could not be derived.
HloInstructionIndexing CreateUnknownIndexing(int64_t count = 1) {
  HloInstructionIndexing unknown_indexing;
  unknown_indexing.indexing_maps.assign(
      count, absl::flat_hash_set<IndexingMap>{IndexingMap::GetUndefined()});
  return unknown_indexing;
}
// A runtime variable (RTVar) described in HLO terms: the range of values it
// may take, the instruction producing the value, and an affine map selecting
// which element of that instruction's result is read.
struct HLORTVar {
  // Closed interval of values the variable can assume at runtime.
  Interval feasible_values;
  // Instruction whose result supplies the runtime value.
  const HloInstruction* hlo;
  // `map` maps output indices to the element of `hlo` that is read; it is
  // presumably expected to have one result per dimension of `hlo`'s shape
  // (inferred from its use in OptimizeRTVar -- confirm against callers).
  mlir::AffineMap map;
};
// Two HLORTVars compare equal when all three components match.
bool operator==(const HLORTVar& lhs, const HLORTVar& rhs) {
  if (lhs.feasible_values != rhs.feasible_values) return false;
  if (lhs.hlo != rhs.hlo) return false;
  return lhs.map == rhs.map;
}
// Inequality for HLORTVar, defined as the negation of memberwise equality.
inline bool operator!=(const HLORTVar& lhs, const HLORTVar& rhs) {
  return !(rhs == lhs);
}
// Result of OptimizeRTVar.
struct RTVarOptimizationResult {
  // Affine expression that replaces the original RTVar symbol. If it no
  // longer references that symbol, the runtime variable was folded away
  // entirely (e.g. into a constant or an output-dimension expression).
  AffineExpr remapped_symbol;
  // The (possibly simplified) runtime variable the expression still refers
  // to, when the symbol survives.
  HLORTVar rt_var;
};
// Attempts to fold/simplify a runtime variable by pattern-matching the HLO
// that produces its value. Returns the (possibly rewritten) affine
// expression for the symbol at `symbol_index`, together with the residual
// RTVar. If the returned expression no longer references the symbol, the
// variable was folded away completely.
RTVarOptimizationResult OptimizeRTVar(HLORTVar rt_var, int64_t symbol_index,
                                      MLIRContext* mlir_context) {
  const auto symbol = getAffineSymbolExpr(symbol_index, mlir_context);
  auto result_expr = symbol;
  while (true) {
    // A constant operand read at a statically-known index folds to an affine
    // constant.
    if (auto constant_expr = DynCast<HloConstantInstruction>(rt_var.hlo)) {
      if (rt_var.map.isConstant()) {
        const auto idx = rt_var.map.getConstantResults();
        result_expr = result_expr.replace(
            symbol, getAffineConstantExpr(
                        constant_expr->literal().GetIntegralAsS64(idx).value(),
                        mlir_context));
      }
      return {result_expr, rt_var};
    }
    // iota: the value at an index is that index's coordinate along the iota
    // dimension, so the symbol becomes a purely affine expression.
    if (auto iota_expr = DynCast<HloIotaInstruction>(rt_var.hlo)) {
      auto iota_dimension = iota_expr->iota_dimension();
      CHECK(iota_dimension < rt_var.map.getNumResults());
      return {
          result_expr.replace(symbol, rt_var.map.getResults()[iota_dimension]),
          rt_var};
    }
    // Ops that only rearrange data: their output-to-input indexing can be
    // composed into `rt_var.map`, and the walk continues from their operand.
    auto is_indexing_transformation = [](const HloInstruction* instr) {
      return instr->opcode() == HloOpcode::kBitcast ||
             instr->opcode() == HloOpcode::kBroadcast ||
             instr->opcode() == HloOpcode::kReshape ||
             instr->opcode() == HloOpcode::kReverse ||
             instr->opcode() == HloOpcode::kSlice ||
             instr->opcode() == HloOpcode::kTranspose;
    };
    if (is_indexing_transformation(rt_var.hlo)) {
      auto instr_indexing_map =
          *ComputeOutputToInputIndexing(rt_var.hlo, 0, mlir_context)
               .indexing_maps[0]
               .begin();
      rt_var.hlo = rt_var.hlo->operand(0);
      rt_var.map = instr_indexing_map.GetAffineMap().compose(rt_var.map);
      continue;
    }
    // negate(x): track x instead and negate the symbol in the expression.
    if (rt_var.hlo->opcode() == HloOpcode::kNegate) {
      rt_var.hlo = rt_var.hlo->operand(0);
      result_expr = result_expr.replace(symbol, -symbol);
      continue;
    }
    if (rt_var.hlo->opcode() == HloOpcode::kAdd ||
        rt_var.hlo->opcode() == HloOpcode::kSubtract ||
        rt_var.hlo->opcode() == HloOpcode::kMultiply ||
        rt_var.hlo->opcode() == HloOpcode::kDivide) {
      // NOTE(review): kDivide is lowered to affine floorDiv; HLO integer
      // divide truncates toward zero, which differs from floorDiv for
      // negative operands -- confirm the feasible ranges rule that out.
      const auto apply_op = [&](const AffineExpr& lhs,
                                const AffineExpr& rhs) -> AffineExpr {
        switch (rt_var.hlo->opcode()) {
          case HloOpcode::kAdd:
            return lhs + rhs;
          case HloOpcode::kSubtract:
            return lhs - rhs;
          case HloOpcode::kMultiply:
            return lhs * rhs;
          case HloOpcode::kDivide:
            return lhs.floorDiv(rhs);
          default:
            ABSL_UNREACHABLE();
        }
      };
      // If either side of the binary op folds to a symbol-free expression,
      // fold it into `result_expr` and keep walking the other side.
      auto lhs = OptimizeRTVar(
          HLORTVar{rt_var.feasible_values, rt_var.hlo->operand(0), rt_var.map},
          symbol_index, mlir_context);
      if (!lhs.remapped_symbol.isFunctionOfSymbol(symbol_index)) {
        // The lhs became fully affine: result is op(lhs, symbol); continue
        // the loop with the rhs operand as the remaining RTVar.
        result_expr =
            result_expr.replace(symbol, apply_op(lhs.remapped_symbol, symbol));
        rt_var.hlo = rt_var.hlo->operand(1);
        continue;
      }
      auto rhs = OptimizeRTVar(
          HLORTVar{rt_var.feasible_values, rt_var.hlo->operand(1), rt_var.map},
          symbol_index, mlir_context);
      if (!rhs.remapped_symbol.isFunctionOfSymbol(symbol_index)) {
        // The rhs became fully affine: fold it in, then adopt whatever
        // simplification the lhs walk produced for the surviving symbol.
        result_expr =
            result_expr.replace(symbol, apply_op(symbol, rhs.remapped_symbol));
        result_expr = result_expr.replace(symbol, lhs.remapped_symbol);
        rt_var = lhs.rt_var;
        continue;
      }
    }
    // No further simplification applies.
    return {result_expr, rt_var};
  }
}
std::vector<IndexingMap::Variable> ConvertHLORTVarsToRTVars(
const std::vector<HLORTVar>& hlo_rt_vars) {
std::vector<IndexingMap::Variable> rt_vars;
rt_vars.reserve(hlo_rt_vars.size());
for (const HLORTVar& hlo_rt_var : hlo_rt_vars) {
rt_vars.push_back(IndexingMap::Variable{hlo_rt_var.feasible_values});
}
return rt_vars;
}
// Runs OptimizeRTVar on every runtime variable, substitutes the simplified
// expressions back into `affine_map`, and builds the resulting IndexingMap.
//
// Args:
//   affine_map:  map whose symbols s0..s{n-1} correspond to `hlo_rt_vars`.
//   dim_vars:    dimension variables of the resulting indexing map.
//   hlo_rt_vars: runtime variables to fold/simplify.
IndexingMap FoldRTVarsAndConstructIndexingMap(
    AffineMap affine_map, std::vector<IndexingMap::Variable> dim_vars,
    std::vector<HLORTVar> hlo_rt_vars) {
  if (hlo_rt_vars.empty()) {
    return IndexingMap(affine_map, std::move(dim_vars), {},
                       ConvertHLORTVarsToRTVars(hlo_rt_vars));
  }
  auto* ctx = affine_map.getContext();
  // Use a signed 64-bit index to avoid the signed/unsigned comparison with
  // size() and to match OptimizeRTVar's parameter type.
  for (int64_t symbol_index = 0;
       symbol_index < static_cast<int64_t>(hlo_rt_vars.size());
       ++symbol_index) {
    auto& rt_var = hlo_rt_vars[symbol_index];
    auto rt_var_symbol = getAffineSymbolExpr(symbol_index, ctx);
    RTVarOptimizationResult result = OptimizeRTVar(rt_var, symbol_index, ctx);
    if (result.remapped_symbol != rt_var_symbol) {
      // The optimizer rewrote (or fully folded) the symbol; apply the
      // substitution to the affine map. (Previously a dead
      // llvm::DenseMap<AffineExpr, AffineExpr> local was declared here;
      // it was never used and has been removed.)
      affine_map = affine_map.replace({{rt_var_symbol, result.remapped_symbol}},
                                      affine_map.getNumDims(),
                                      affine_map.getNumSymbols());
    }
    if (result.remapped_symbol.isFunctionOfSymbol(symbol_index)) {
      // The RTVar is still referenced: keep the possibly-simplified value.
      if (rt_var != result.rt_var) {
        rt_var = std::move(result.rt_var);
      }
    }
  }
  return IndexingMap(affine_map, std::move(dim_vars), {},
                     ConvertHLORTVarsToRTVars(hlo_rt_vars));
}
// For an elementwise op every operand is indexed exactly like the output.
// Rank-0 operands of a non-scalar op (implicitly broadcast scalars) get a
// map with no results instead of the identity.
HloInstructionIndexing ComputeOutputToInputCwiseOpIndexing(
    const HloInstruction* instr, MLIRContext* mlir_context) {
  IndexingMap identity_map = CreateIdentityMap(instr->shape(), mlir_context);
  IndexingMap unit_map(
      mlir::AffineMap::get(identity_map.GetAffineMap().getNumDims(),
                           /*symbolCount=*/0, mlir_context),
      identity_map.GetDimVars(), /*range_vars=*/{}, /*rt_vars=*/{});

  HloInstructionIndexing instr_indexing;
  instr_indexing.indexing_maps.resize(instr->operand_count());
  for (const auto [operand_id, operand] : llvm::enumerate(instr->operands())) {
    const bool is_broadcast_scalar =
        operand->shape().rank() == 0 && instr->shape().rank() > 0;
    instr_indexing.indexing_maps[operand_id].insert(
        is_broadcast_scalar ? unit_map : identity_map);
  }
  return instr_indexing;
}
// An elementwise op reads each input at exactly the output index, so the
// input-to-output map is the identity over the op's own shape.
HloInstructionIndexing ComputeInputToOutputCwiseOpIndexing(
    const HloInstruction* instr, MLIRContext* mlir_context) {
  return HloInstructionIndexing::FromIndexingMaps(
      {CreateIdentityMap(instr->shape(), mlir_context)});
}
// For broadcast, each input dimension corresponds to the output dimension
// listed in bcast->dimensions(); the output-to-input map simply selects
// those output coordinates.
HloInstructionIndexing ComputeOutputToInputBroadcastOpIndexing(
    const HloBroadcastInstruction* bcast, MLIRContext* mlir_context) {
  auto output_dims = bcast->shape().dimensions();

  std::vector<AffineExpr> input_exprs;
  input_exprs.reserve(bcast->dimensions().size());
  for (int64_t preserved_output_dim : bcast->dimensions()) {
    input_exprs.push_back(getAffineDimExpr(preserved_output_dim, mlir_context));
  }
  IndexingMap bcast_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_dims.size(), /*symbolCount=*/0, input_exprs,
                     mlir_context),
      output_dims, /*symbol_upper_bounds=*/{});
  return HloInstructionIndexing::FromIndexingMaps({bcast_map});
}
// Input-to-output indexing of a broadcast: output dims carried over from the
// input map to the corresponding input dim; dims introduced by the broadcast
// are unconstrained and become symbols spanning their full size.
HloInstructionIndexing ComputeInputToOutputBroadcastOpIndexing(
    const HloBroadcastInstruction* bcast, MLIRContext* mlir_context) {
  absl::Span<const int64_t> bcast_dims = bcast->dimensions();
  const Shape& input_shape = bcast->operand(0)->shape();
  const Shape& output_shape = bcast->shape();

  // Sizes of the output dimensions that do not exist in the input; one
  // symbol is created per such dimension.
  std::vector<int64_t> added_dims_sizes;
  std::vector<AffineExpr> exprs;
  exprs.reserve(output_shape.rank());
  for (auto [output_dim_id, output_dim] :
       llvm::enumerate(output_shape.dimensions())) {
    auto it = absl::c_find(bcast_dims, output_dim_id);
    if (it == bcast_dims.end()) {
      // Dimension added by the broadcast: not a function of the input index.
      exprs.push_back(
          getAffineSymbolExpr(added_dims_sizes.size(), mlir_context));
      added_dims_sizes.push_back(output_dim);
    } else {
      // Dimension carried over from the input; its input index is the
      // position of `output_dim_id` inside `bcast_dims`.
      exprs.push_back(
          getAffineDimExpr(it - bcast_dims.begin(), mlir_context));
    }
  }
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(input_shape.rank(), added_dims_sizes.size(), exprs,
                     mlir_context),
      input_shape.dimensions(), added_dims_sizes);
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
// For concatenate, each operand covers a disjoint slice of the output along
// the concatenation dimension. Each operand's map is the identity shifted by
// the operand's offset on the concat dimension, with the concat-dim variable
// restricted to the slice of the output that operand occupies.
HloInstructionIndexing ComputeOutputToInputConcatenateOpIndexing(
    const HloConcatenateInstruction* concat, MLIRContext* mlir_context) {
  const auto& operand_0_dims = concat->operand(0)->shape().dimensions();
  // All operands share the same rank (and the same extents outside the
  // concat dimension), so the identity map and dim vars built from operand 0
  // are reused, mutating only the concat-dim result/bounds per operand.
  mlir::MutableAffineMap affine_map =
      AffineMap::getMultiDimIdentityMap(operand_0_dims.size(), mlir_context);
  std::vector<IndexingMap::Variable> dim_vars =
      DimVarsFromTensorSizes(operand_0_dims);
  HloInstructionIndexing concat_indexing;
  concat_indexing.indexing_maps.resize(concat->operand_count());
  int64_t concat_dim = concat->concatenate_dimension();
  AffineExpr concat_dim_expr = getAffineDimExpr(concat_dim, mlir_context);
  // Running start offset of the current operand within the output.
  int64_t offset = 0;
  for (const auto [operand_id, operand] : llvm::enumerate(concat->operands())) {
    affine_map.setResult(concat_dim, concat_dim_expr - offset);
    int64_t operand_concat_dim = operand->shape().dimensions()[concat_dim];
    // Constrain the concat dimension to the closed interval
    // [offset, offset + operand_concat_dim - 1].
    dim_vars[concat_dim] =
        IndexingMap::Variable{{offset, offset + operand_concat_dim - 1}};
    concat_indexing.indexing_maps[operand_id].insert(
        IndexingMap(affine_map.getAffineMap(), dim_vars,
                    /*range_vars=*/{}, /*rt_vars=*/{}));
    offset += operand_concat_dim;
  }
  return concat_indexing;
}
// Input-to-output indexing for operand `input_id` of a concatenate: the
// identity map with the concat dimension shifted forward by the total size
// of all preceding operands along that dimension.
HloInstructionIndexing ComputeInputToOutputConcatenateOpIndexing(
    const HloConcatenateInstruction* concat, int input_id,
    MLIRContext* mlir_context) {
  int64_t concat_dim = concat->concatenate_dimension();
  // Accumulate the concat-dim sizes of all operands before `input_id`.
  int64_t offset = 0;
  int64_t prev_id = 0;
  while (prev_id < input_id) {
    offset += concat->operand(prev_id)->shape().dimensions()[concat_dim];
    ++prev_id;
  }
  const auto& operand_dims = concat->operand(input_id)->shape().dimensions();
  mlir::MutableAffineMap affine_map =
      AffineMap::getMultiDimIdentityMap(operand_dims.size(), mlir_context);
  affine_map.setResult(concat_dim,
                       getAffineDimExpr(concat_dim, mlir_context) + offset);
  return HloInstructionIndexing::FromIndexingMaps({IndexingMap::FromTensorSizes(
      affine_map.getAffineMap(), operand_dims, {})});
}
// Output-to-input indexing of a fusion: compute grouped indexing for the
// requested fusion root, then transfer each fusion parameter's maps to the
// corresponding operand of the fusion instruction.
HloInstructionIndexing ComputeOutputToInputFusionOpIndexing(
    const HloFusionInstruction* fusion, int output_id,
    MLIRContext* mlir_context) {
  auto fusion_adaptor = HloFusionAdaptor::ForInstruction(fusion);
  auto grouped_indexing_maps = ComputeGroupedOutputToInputIndexing(
      *fusion_adaptor, fusion_adaptor->GetRoots()[output_id], mlir_context);

  HloInstructionIndexing fusion_indexing;
  fusion_indexing.indexing_maps.resize(fusion->operand_count());
  for (int64_t operand_id = 0; operand_id < fusion->operand_count();
       ++operand_id) {
    fusion_indexing.indexing_maps[operand_id] =
        grouped_indexing_maps[fusion->operand(operand_id)];
  }
  return fusion_indexing;
}
// Output-to-input indexing for HLO dot: one indexing map per operand.
// Output dimensions are assigned in the order
// [batch dims, lhs non-contracting dims, rhs non-contracting dims];
// contracting dimensions become symbols shared by both operand maps.
HloInstructionIndexing ComputeOutputToInputDotOpIndexing(
    const HloDotInstruction* dot, MLIRContext* mlir_context) {
  CHECK_NE(dot, nullptr);
  const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers();
  absl::Span<const int64_t> lhs_contracting_dims(
      dim_numbers.lhs_contracting_dimensions());
  absl::Span<const int64_t> rhs_contracting_dims =
      dim_numbers.rhs_contracting_dimensions();
  absl::Span<const int64_t> lhs_batch_dims = dim_numbers.lhs_batch_dimensions();
  absl::Span<const int64_t> rhs_batch_dims = dim_numbers.rhs_batch_dimensions();
  const Shape& lhs_shape = dot->operand(0)->shape();
  const Shape& rhs_shape = dot->operand(1)->shape();
  // One affine expression per operand dimension, indexed by operand dim id.
  SmallVector<AffineExpr> lhs_exprs(lhs_shape.rank());
  SmallVector<AffineExpr> rhs_exprs(rhs_shape.rank());
  int64_t output_dim_id = 0;
  // Batch dimensions come first in the output and map 1:1 to both operands.
  for (auto [lhs_batch_dim, rhs_batch_dim] :
       llvm::zip(lhs_batch_dims, rhs_batch_dims)) {
    AffineExpr output_dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
    lhs_exprs[lhs_batch_dim] = output_dim_expr;
    rhs_exprs[rhs_batch_dim] = output_dim_expr;
    ++output_dim_id;
  }
  // LHS non-contracting dimensions follow the batch dims in the output.
  auto lhs_non_contracting_dims =
      GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_contracting_dims);
  assert(lhs_non_contracting_dims.ok());
  for (int64_t lhs_non_contracting_dim : lhs_non_contracting_dims.value()) {
    lhs_exprs[lhs_non_contracting_dim] =
        getAffineDimExpr(output_dim_id++, mlir_context);
  }
  // RHS non-contracting dimensions come last in the output.
  auto rhs_non_contracting_dims =
      GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_contracting_dims);
  assert(rhs_non_contracting_dims.ok());
  for (int64_t rhs_non_contracting_dim : rhs_non_contracting_dims.value()) {
    rhs_exprs[rhs_non_contracting_dim] =
        getAffineDimExpr(output_dim_id++, mlir_context);
  }
  // Contracting dimensions are reduced over, so they become symbols shared by
  // both operand maps; the symbol range is taken from the LHS extents.
  int64_t input_dim_id = 0;
  std::vector<int64_t> input_dim_sizes;
  input_dim_sizes.reserve(lhs_contracting_dims.size());
  for (auto [lhs_contracting_dim, rhs_contracting_dim] :
       llvm::zip(lhs_contracting_dims, rhs_contracting_dims)) {
    AffineExpr input_dim_expr = getAffineSymbolExpr(input_dim_id, mlir_context);
    lhs_exprs[lhs_contracting_dim] = input_dim_expr;
    rhs_exprs[rhs_contracting_dim] = input_dim_expr;
    ++input_dim_id;
    input_dim_sizes.push_back(lhs_shape.dimensions(lhs_contracting_dim));
  }
  IndexingMap lhs_indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(dot->shape().rank(), input_dim_sizes.size(), lhs_exprs,
                     mlir_context),
      dot->shape().dimensions(), input_dim_sizes);
  IndexingMap rhs_indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(dot->shape().rank(), input_dim_sizes.size(), rhs_exprs,
                     mlir_context),
      dot->shape().dimensions(), input_dim_sizes);
  return HloInstructionIndexing::FromIndexingMaps(
      {lhs_indexing_map, rhs_indexing_map});
}
// Output-to-input indexing for dynamic-slice. The data operand is read at
// output_index + runtime_offset; the offsets come from the scalar index
// operands and are modeled as runtime variables whose feasible range keeps
// the slice in bounds.
HloInstructionIndexing ComputeOutputToInputDynamicSliceOpIndexing(
    const HloDynamicSliceInstruction* dynamic_slice,
    MLIRContext* mlir_context) {
  const Shape& input_shape = dynamic_slice->operand(0)->shape();
  const Shape& output_shape = dynamic_slice->shape();
  int64_t rank = output_shape.rank();
  const int64_t first_index_num = dynamic_slice->first_index_operand_number();
  CHECK(dynamic_slice->operand(first_index_num)->shape().rank() == 0)
      << "b/118437727: Old form, not supported.";
  // Index operands are scalars, so their indexing map has no results.
  AffineMap empty_results_affine_map = AffineMap::get(
      rank, 0, {}, mlir_context);
  IndexingMap start_indices_map = IndexingMap::FromTensorSizes(
      empty_results_affine_map, output_shape.dimensions(), {});
  std::vector<HLORTVar> offsets_rt_vars;
  offsets_rt_vars.reserve(rank);
  std::vector<AffineExpr> exprs;
  exprs.reserve(rank);
  for (auto [dim, slice_size] :
       llvm::enumerate(dynamic_slice->dynamic_slice_sizes())) {
    // input_index[dim] = output_index[dim] + offset_symbol[dim].
    exprs.push_back(getAffineDimExpr(dim, mlir_context) +
                    getAffineSymbolExpr(dim, mlir_context));
    // Offsets are clamped so the slice fits: 0 <= offset <= dim_size - size.
    offsets_rt_vars.push_back(
        HLORTVar{Interval{0, input_shape.dimensions(dim) - slice_size},
                 dynamic_slice->operand(dim + first_index_num),
                 empty_results_affine_map});
  }
  // Every operand except the data operand (front) gets the scalar map.
  std::vector<IndexingMap> indexing_maps(dynamic_slice->operand_count(),
                                         start_indices_map);
  indexing_maps.front() = FoldRTVarsAndConstructIndexingMap(
      AffineMap::get(rank, rank, exprs,
                     mlir_context),
      start_indices_map.GetDimVars(), std::move(offsets_rt_vars));
  return HloInstructionIndexing::FromIndexingMaps(indexing_maps);
}
// Output-to-input indexing for dynamic-update-slice. The output reads the
// original operand with the identity map and the update at
// output_index - runtime_start; start indices are scalar operands modeled as
// runtime variables whose range keeps the update in bounds.
HloInstructionIndexing ComputeOutputToInputDynamicUpdateSliceOpIndexing(
    const HloDynamicUpdateSliceInstruction* dus, MLIRContext* mlir_context) {
  const Shape& update_shape = dus->update()->shape();
  const Shape& output_shape = dus->shape();
  int64_t rank = output_shape.rank();
  // Identity map for the operand being updated (operand 0).
  std::vector<AffineExpr> identity;
  identity.reserve(rank);
  for (int64_t dim = 0; dim < rank; ++dim) {
    identity.push_back(getAffineDimExpr(dim, mlir_context));
  }
  IndexingMap operand_map = IndexingMap::FromTensorSizes(
      AffineMap::get(rank, 0, identity,
                     mlir_context),
      output_shape.dimensions(), {});
  // Start-index operands are scalars: their map has no results.
  AffineMap empty_results_affine_map = AffineMap::get(
      rank, 0, {}, mlir_context);
  IndexingMap start_indices_map = IndexingMap::FromTensorSizes(
      empty_results_affine_map, output_shape.dimensions(), {});
  std::vector<AffineExpr> exprs;
  exprs.reserve(rank);
  std::vector<HLORTVar> rt_vars;
  rt_vars.reserve(rank);
  for (auto [dim, slice_size] : llvm::enumerate(update_shape.dimensions())) {
    // update_index[dim] = output_index[dim] - start_symbol[dim].
    exprs.push_back(getAffineDimExpr(dim, mlir_context) -
                    getAffineSymbolExpr(dim, mlir_context));
    Interval feasible_values{0, output_shape.dimensions(dim) - slice_size};
    // Operands 2.. are the per-dimension scalar start indices.
    rt_vars.push_back(HLORTVar{feasible_values, dus->operand(2 + dim),
                               empty_results_affine_map});
  }
  IndexingMap update_map = FoldRTVarsAndConstructIndexingMap(
      AffineMap::get(rank, rank,
                     exprs, mlir_context),
      operand_map.GetDimVars(), std::move(rt_vars));
  std::vector<IndexingMap> indexing_maps(dus->operand_count(),
                                         start_indices_map);
  indexing_maps[0] = std::move(operand_map);
  indexing_maps[1] = std::move(update_map);
  return HloInstructionIndexing::FromIndexingMaps(indexing_maps);
}
// Output-to-input indexing for a (simplified) gather: one map for the data
// operand and one for the indices operand. Output dim 0 enumerates gathered
// slices; the start indices read from operand 1 are modeled as runtime
// variables clamped so every slice stays in bounds.
HloInstructionIndexing ComputeOutputToInputGatherOpIndexing(
    const HloGatherInstruction* gather, MLIRContext* mlir_context) {
  CHECK(GatherSimplifier::IsSimplifiedGather(gather))
      << "Non-simplified HLO Gather is not supported.";
  const Shape& operand_shape = gather->operand(0)->shape();
  const Shape& indices_shape = gather->operand(1)->shape();
  const GatherDimensionNumbers& dimension_numbers =
      gather->gather_dimension_numbers();
  int64_t index_vector_length =
      indices_shape.dimensions(dimension_numbers.index_vector_dim());
  const Shape& output_shape = gather->shape();
  int64_t output_rank = output_shape.rank();
  // Output dimension 0 selects which row of the indices tensor is used.
  AffineExpr indices_id_dim = getAffineDimExpr(0, mlir_context);
  std::vector<IndexingMap::Variable> dim_vars =
      DimVarsFromTensorSizes(output_shape.dimensions());
  // Indices map: (output dims)[s0] -> (d0, s0) where s0 ranges over the index
  // vector.
  IndexingMap indices_map{
      AffineMap::get(output_rank, 1,
                     {indices_id_dim, getAffineSymbolExpr(0, mlir_context)},
                     mlir_context),
      dim_vars,
      {IndexingMap::Variable{{0, index_vector_length - 1}}},
      {}};
  std::vector<HLORTVar> rt_vars;
  std::vector<AffineExpr> exprs;
  exprs.reserve(operand_shape.rank());
  for (auto [operand_dim_id, slice_size] :
       llvm::enumerate(gather->gather_slice_sizes())) {
    int64_t output_dim_id = dimension_numbers.offset_dims(operand_dim_id);
    exprs.push_back(getAffineDimExpr(output_dim_id, mlir_context));
    // Only the first `index_vector_length` operand dims get a runtime start
    // index added on top of the offset dim.
    if (operand_dim_id >= index_vector_length) continue;
    rt_vars.push_back(HLORTVar{
        Interval{0, operand_shape.dimensions(operand_dim_id) - slice_size},
        gather->operand(1),
        AffineMap::get(output_rank, 0,
                       {indices_id_dim,
                        getAffineConstantExpr(operand_dim_id, mlir_context)},
                       mlir_context)});
    exprs.back() =
        exprs.back() + getAffineSymbolExpr(operand_dim_id, mlir_context);
  }
  IndexingMap operand_map = FoldRTVarsAndConstructIndexingMap(
      AffineMap::get(output_rank,
                     index_vector_length, exprs, mlir_context),
      std::move(dim_vars), std::move(rt_vars));
  return HloInstructionIndexing::FromIndexingMaps({operand_map, indices_map});
}
// Builds the output->input indexing map for a pad, given per-dimension low
// edge padding, high edge padding, and interior padding. The dim-var bounds
// restrict output indices to the region that holds original data, and
// interior padding adds a divisibility constraint so that only the original
// (non-inserted) positions are mapped.
IndexingMap ComputeOutputToInputPadOpIndexingImpl(
    absl::Span<const int64_t> output_dims,
    absl::Span<const int64_t> padding_low,
    absl::Span<const int64_t> padding_high,
    absl::Span<const int64_t> padding_interior, MLIRContext* mlir_context) {
  int64_t output_rank = output_dims.size();
  std::vector<AffineExpr> exprs;
  std::vector<std::pair<AffineExpr, Interval>> constraints;
  std::vector<IndexingMap::Variable> dim_vars;
  exprs.reserve(output_rank);
  constraints.reserve(output_rank);
  int64_t output_dim_id = 0;
  for (const auto [output_dim, pad_low, pad_high, pad_interior] :
       llvm::zip(output_dims, padding_low, padding_high, padding_interior)) {
    AffineExpr dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
    // max/min clamp the bounds for negative edge padding.
    dim_vars.push_back({IndexingMap::Variable{
        std::max(int64_t{0}, pad_low),
        std::min(output_dim - 1, output_dim - 1 - pad_high)}});
    if (pad_interior == 0) {
      exprs.push_back(dim_expr - pad_low);
    } else {
      // With interior padding, only every (pad_interior + 1)-th output
      // position corresponds to an input element.
      exprs.push_back((dim_expr - pad_low).floorDiv(pad_interior + 1));
      constraints.push_back(
          {(dim_expr - pad_low) % (pad_interior + 1), Interval{0, 0}});
    }
    ++output_dim_id;
  }
  return IndexingMap{
      AffineMap::get(output_rank, 0, exprs, mlir_context),
      std::move(dim_vars),
      {},
      {}, absl::MakeSpan(constraints)};
}
// Output-to-input indexing for HLO pad: one map into the padded operand and
// one results-free map for the scalar padding value.
HloInstructionIndexing ComputeOutputToInputPadOpIndexing(
    const HloPadInstruction* pad, MLIRContext* mlir_context) {
  const Shape& output_shape = pad->shape();
  const int64_t rank = output_shape.rank();
  // Split the per-dimension padding config into three parallel arrays.
  SmallVector<int64_t> low, high, interior;
  low.reserve(rank);
  high.reserve(rank);
  interior.reserve(rank);
  for (const auto& dim : pad->padding_config().dimensions()) {
    low.push_back(dim.edge_padding_low());
    high.push_back(dim.edge_padding_high());
    interior.push_back(dim.interior_padding());
  }
  IndexingMap operand_map = ComputeOutputToInputPadOpIndexingImpl(
      output_shape.dimensions(), low, high, interior, mlir_context);
  // The padding value is a scalar read for every output element.
  IndexingMap pad_value_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
      output_shape.dimensions(), {});
  return HloInstructionIndexing::FromIndexingMaps({operand_map, pad_value_map});
}
// Output-to-input indexing for reduce: kept dimensions map to output dims,
// reduced dimensions become symbols. All inputs share the same map; init
// values get a results-free (scalar) map.
HloInstructionIndexing ComputeOutputToInputReduceOpIndexing(
    const HloReduceInstruction* reduce, int output_id,
    MLIRContext* mlir_context) {
  absl::flat_hash_set<int64_t> reduce_dims_ids(reduce->dimensions().begin(),
                                               reduce->dimensions().end());
  const Shape& input_shape = reduce->operand(output_id)->shape();
  // All outputs of a variadic reduce share one shape, so output 0 suffices.
  const Shape& output_shape = GetOutputShape(reduce, 0);
  std::vector<int64_t> parallel_dims_sizes;
  int64_t output_dim_id = 0;
  std::vector<AffineExpr> exprs;
  exprs.reserve(input_shape.rank());
  for (auto [input_dim_id, input_dim] :
       llvm::enumerate(input_shape.dimensions())) {
    if (reduce_dims_ids.contains(input_dim_id)) {
      // Reduced dim -> fresh symbol ranging over the whole input extent.
      exprs.push_back(
          getAffineSymbolExpr(parallel_dims_sizes.size(), mlir_context));
      parallel_dims_sizes.push_back(input_dim);
      continue;
    }
    exprs.push_back(getAffineDimExpr(output_dim_id++, mlir_context));
  }
  IndexingMap inputs_indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_shape.rank(), reduce_dims_ids.size(), exprs,
                     mlir_context),
      output_shape.dimensions(), parallel_dims_sizes);
  IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
      output_shape.dimensions(), {});
  HloInstructionIndexing instr_indexing;
  instr_indexing.indexing_maps.resize(reduce->operand_count());
  // Operands [0, input_count) are inputs; the rest are init values.
  for (int64_t id = 0; id < reduce->input_count(); ++id) {
    instr_indexing.indexing_maps[id].insert(inputs_indexing_map);
  }
  for (int64_t id = reduce->input_count(); id < reduce->operand_count(); ++id) {
    instr_indexing.indexing_maps[id].insert(inits_indexing_map);
  }
  return instr_indexing;
}
// Input-to-output indexing for reduce. For an init-value operand
// (input_id >= input_count) every output point is reachable, so the map is
// all-symbols; for a data input the kept dimensions map through and the
// reduced dimensions are dropped.
HloInstructionIndexing ComputeInputToOutputReduceOpIndexing(
    const HloReduceInstruction* reduce, int input_id,
    MLIRContext* mlir_context) {
  const Shape& output_shape = GetOutputShape(reduce, 0);
  int64_t output_rank = output_shape.rank();
  HloInstructionIndexing instr_indexing;
  int arity = reduce->input_count();
  instr_indexing.indexing_maps.resize(arity);
  if (input_id >= arity) {
    // Init value: () -> (s0, ..., s_{rank-1}) over all output positions.
    std::vector<AffineExpr> inits_exprs;
    inits_exprs.reserve(output_rank);
    for (int sym = 0; sym < output_rank; ++sym) {
      inits_exprs.push_back(getAffineSymbolExpr(sym, mlir_context));
    }
    IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
        AffineMap::get(0, output_rank, inits_exprs,
                       mlir_context),
        {}, output_shape.dimensions());
    for (int64_t id = 0; id < arity; ++id) {
      instr_indexing.indexing_maps[id].insert(inits_indexing_map);
    }
    return instr_indexing;
  }
  const Shape& input_shape = reduce->operand(input_id)->shape();
  std::vector<AffineExpr> inputs_exprs;
  inputs_exprs.reserve(output_rank);
  for (auto [input_dim_id, input_dim] :
       llvm::enumerate(input_shape.dimensions())) {
    // Only non-reduced input dims appear in the output.
    if (!absl::c_linear_search(reduce->dimensions(), input_dim_id)) {
      inputs_exprs.push_back(getAffineDimExpr(input_dim_id, mlir_context));
    }
  }
  IndexingMap inputs_indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(input_shape.rank(), 0, inputs_exprs,
                     mlir_context),
      input_shape.dimensions(), {});
  for (int64_t id = 0; id < arity; ++id) {
    instr_indexing.indexing_maps[id].insert(inputs_indexing_map);
  }
  return instr_indexing;
}
// Builds the output->input indexing map for a windowed op (reduce-window,
// convolution). Base dilation is modeled as interior padding of the input;
// the window walk (stride, window dilation) is composed with the padding map.
IndexingMap ComposeIndexingMapsForWindow(
    absl::Span<const int64_t> input_dimensions,
    absl::Span<const int64_t> output_dimensions, const Window& window,
    MLIRContext* mlir_context) {
  size_t rank = input_dimensions.size();
  SmallVector<int64_t> padding_low, padding_high, padding_interior,
      padded_input_dimensions;
  padding_low.reserve(rank);
  padding_high.reserve(rank);
  padding_interior.reserve(rank);
  padded_input_dimensions.reserve(rank);
  SmallVector<AffineExpr, 4> exprs;
  std::vector<IndexingMap::Variable> dim_vars;
  std::vector<IndexingMap::Variable> range_vars;
  exprs.reserve(rank);
  dim_vars.reserve(rank);
  range_vars.reserve(rank);
  for (const auto& [dim_id, window_config] :
       llvm::enumerate(window.dimensions())) {
    padding_low.push_back(window_config.padding_low());
    padding_high.push_back(window_config.padding_high());
    // base_dilation b inserts (b - 1) elements between input elements, which
    // is exactly interior padding.
    padding_interior.push_back(window_config.base_dilation() - 1);
    padded_input_dimensions.push_back(
        input_dimensions[dim_id] + window_config.padding_low() +
        window_config.padding_high() +
        (input_dimensions[dim_id] - 1) * (window_config.base_dilation() - 1));
    AffineExpr dim_expr = getAffineDimExpr(dim_id, mlir_context);
    AffineExpr symbol_expr = getAffineSymbolExpr(dim_id, mlir_context);
    // padded_index = window_offset * window_dilation + output_index * stride.
    exprs.push_back(symbol_expr * window_config.window_dilation() +
                    window_config.stride() * dim_expr);
    dim_vars.push_back(
        {IndexingMap::Variable{0, output_dimensions[dim_id] - 1}});
    range_vars.push_back({IndexingMap::Variable{0, window_config.size() - 1}});
  }
  IndexingMap padded_input_indexing = ComputeOutputToInputPadOpIndexingImpl(
      padded_input_dimensions, padding_low, padding_high, padding_interior,
      mlir_context);
  IndexingMap input_indexing_no_padding(
      AffineMap::get(rank, rank, exprs, mlir_context), dim_vars, range_vars,
      {});
  // Compose the window walk with the padding map to index the real input.
  IndexingMap result =
      ComposeIndexingMaps(input_indexing_no_padding, padded_input_indexing);
  result.Simplify();
  result.RemoveUnusedSymbols();
  return result;
}
// Output-to-input indexing for reduce-window: all data inputs use the window
// composition map; init values get a results-free scalar map.
HloInstructionIndexing ComputeOutputToInputReduceWindowOpIndexing(
    const HloReduceWindowInstruction* reduce_window, int output_id,
    MLIRContext* mlir_context) {
  const Shape& input_shape = reduce_window->operand(0)->shape();
  const Shape& output_shape = GetOutputShape(reduce_window, 0);
  IndexingMap window_map = ComposeIndexingMapsForWindow(
      input_shape.dimensions(), output_shape.dimensions(),
      reduce_window->window(), mlir_context);
  IndexingMap init_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
      output_shape.dimensions(), {});
  HloInstructionIndexing result;
  const int64_t num_operands = reduce_window->operand_count();
  const int64_t num_inputs = reduce_window->input_count();
  result.indexing_maps.resize(num_operands);
  // Operands [0, num_inputs) are data inputs; the rest are init values.
  for (int64_t id = 0; id < num_operands; ++id) {
    result.indexing_maps[id].insert(id < num_inputs ? window_map : init_map);
  }
  return result;
}
// Output-to-input indexing for convolution: one map for the input (lhs) and
// one for the kernel (rhs). Spatial dims reuse the window composition; the
// kernel's spatial offsets and the input feature become shared symbols, and
// feature/batch group counts adjust the input feature/batch expressions.
HloInstructionIndexing ComputeOutputToInputConvolutionOpIndexing(
    const HloConvolutionInstruction* convolution, MLIRContext* mlir_context) {
  const Shape& input_shape = convolution->operand(0)->shape();
  const Shape& kernel_shape = convolution->operand(1)->shape();
  const Shape& output_shape = convolution->shape();
  const ConvolutionDimensionNumbers& dnums =
      convolution->convolution_dimension_numbers();
  size_t rank = output_shape.rank();
  // Two non-spatial dims: batch and feature.
  size_t spatial_rank = rank - 2;
  std::vector<int64_t> input_spatial_sizes(spatial_rank);
  std::vector<int64_t> kernel_spatial_sizes(spatial_rank);
  std::vector<int64_t> output_spatial_sizes(spatial_rank);
  for (int i = 0; i < spatial_rank; ++i) {
    input_spatial_sizes[i] =
        input_shape.dimensions(dnums.input_spatial_dimensions(i));
    kernel_spatial_sizes[i] =
        kernel_shape.dimensions(dnums.kernel_spatial_dimensions(i));
    output_spatial_sizes[i] =
        output_shape.dimensions(dnums.output_spatial_dimensions(i));
  }
  // Spatial indexing as if dims were [0, spatial_rank); re-mapped below.
  IndexingMap input_spatial_indexing =
      ComposeIndexingMapsForWindow(input_spatial_sizes, output_spatial_sizes,
                                   convolution->window(), mlir_context);
  // Replace the compact spatial dim ids with the real output dim ids.
  std::vector<AffineExpr> replacement_dims(spatial_rank);
  for (int i = 0; i < spatial_rank; ++i) {
    replacement_dims[i] =
        getAffineDimExpr(dnums.output_spatial_dimensions(i), mlir_context);
  }
  std::vector<AffineExpr> input_exprs(rank);
  for (int i = 0; i < spatial_rank; ++i) {
    input_exprs[dnums.input_spatial_dimensions(i)] =
        input_spatial_indexing.GetAffineMap().getResult(i).replaceDims(
            replacement_dims);
  }
  // Constraints (e.g. from padding/dilation) need the same dim renumbering.
  llvm::DenseMap<AffineExpr, Interval> input_constraints;
  for (const auto& [key, val] : input_spatial_indexing.GetConstraints()) {
    input_constraints[key.replaceDims(replacement_dims)] = val;
  }
  // Kernel spatial dims are the window offsets: symbols 0..spatial_rank-1.
  std::vector<AffineExpr> kernel_exprs(rank);
  for (int i = 0; i < spatial_rank; ++i) {
    kernel_exprs[dnums.kernel_spatial_dimensions(i)] =
        getAffineSymbolExpr(i, mlir_context);
  }
  AffineExpr dim_expr =
      getAffineDimExpr(dnums.output_feature_dimension(), mlir_context);
  kernel_exprs[dnums.kernel_output_feature_dimension()] = dim_expr;
  std::vector<IndexingMap::Variable> input_symbols =
      input_spatial_indexing.GetRangeVars();
  std::vector<IndexingMap::Variable> kernel_symbols =
      RangeVarsFromTensorSizes(kernel_spatial_sizes);
  // The reduced input-feature dim is a fresh symbol appended on each side.
  input_exprs[dnums.input_feature_dimension()] =
      getAffineSymbolExpr(input_symbols.size(), mlir_context);
  kernel_exprs[dnums.kernel_input_feature_dimension()] =
      getAffineSymbolExpr(kernel_symbols.size(), mlir_context);
  int64_t input_group_size =
      kernel_shape.dimensions(dnums.kernel_input_feature_dimension());
  Interval input_feature_range{0, input_group_size - 1};
  input_symbols.push_back(IndexingMap::Variable{input_feature_range});
  kernel_symbols.push_back(IndexingMap::Variable{input_feature_range});
  if (convolution->feature_group_count() > 1) {
    // Grouped convolution: offset the input feature by the group selected by
    // the output feature dim.
    AffineExpr& input_feature = input_exprs[dnums.input_feature_dimension()];
    int64_t output_group_size =
        output_shape.dimensions(dnums.output_feature_dimension());
    int64_t feature_group_size =
        output_group_size / convolution->feature_group_count();
    input_feature = dim_expr.floorDiv(feature_group_size) * input_group_size +
                    input_feature;
  }
  AffineExpr batch_dim_expr =
      getAffineDimExpr(dnums.output_batch_dimension(), mlir_context);
  if (convolution->batch_group_count() > 1) {
    // Batch-grouped convolution: the group id becomes one more input symbol.
    int64_t batch_group_size =
        output_shape.dimensions(dnums.output_batch_dimension());
    AffineExpr batch_group_expr =
        getAffineSymbolExpr(input_symbols.size(), mlir_context);
    input_symbols.push_back(
        IndexingMap::Variable{{0, convolution->batch_group_count() - 1}});
    input_exprs[dnums.input_batch_dimension()] =
        batch_group_expr * batch_group_size + batch_dim_expr;
  } else {
    input_exprs[dnums.input_batch_dimension()] = batch_dim_expr;
  }
  IndexingMap inputs_indexing(
      AffineMap::get(rank, input_symbols.size(), input_exprs, mlir_context),
      DimVarsFromTensorSizes(output_shape.dimensions()), input_symbols,
      {}, input_constraints);
  IndexingMap kernel_indexing(
      AffineMap::get(rank, kernel_symbols.size(), kernel_exprs, mlir_context),
      DimVarsFromTensorSizes(output_shape.dimensions()), kernel_symbols,
      {});
  return HloInstructionIndexing::FromIndexingMaps(
      {inputs_indexing, kernel_indexing});
}
// Row-major strides for `dims`: the innermost (last) dimension gets stride 1
// and each outer stride is the product of all inner dimension sizes.
std::vector<int64_t> ComputeStrides(absl::Span<const int64_t> dims) {
  std::vector<int64_t> strides(dims.size(), 1);
  for (int dim = static_cast<int>(dims.size()) - 2; dim >= 0; --dim) {
    strides[dim] = strides[dim + 1] * dims[dim + 1];
  }
  return strides;
}
}
// Folds a multi-dimensional index (as affine expressions) into a single
// linear expression using row-major strides over `dims`.
AffineExpr LinearizeShape(absl::Span<const int64_t> dims,
                          absl::Span<const AffineExpr> dimension_exprs,
                          MLIRContext* mlir_context) {
  const std::vector<int64_t> strides = ComputeStrides(dims);
  AffineExpr linear = getAffineConstantExpr(0, mlir_context);
  const size_t count = std::min(strides.size(), dimension_exprs.size());
  for (size_t dim = 0; dim < count; ++dim) {
    linear = linear + dimension_exprs[dim] * strides[dim];
  }
  return linear;
}
// Splits a linear affine index back into one expression per dimension of
// `dims` via successive floordiv/mod by the row-major strides.
std::vector<AffineExpr> DelinearizeIndex(absl::Span<const int64_t> dims,
                                         AffineExpr linear_index,
                                         MLIRContext* mlir_context) {
  const std::vector<int64_t> strides = ComputeStrides(dims);
  std::vector<AffineExpr> multi_index;
  multi_index.reserve(dims.size());
  AffineExpr rest = linear_index;
  for (size_t dim = 0; dim < strides.size(); ++dim) {
    const int64_t stride = strides[dim];
    multi_index.push_back(rest.floorDiv(stride));
    rest = rest % stride;
  }
  return multi_index;
}
namespace {
// Appends to `*exprs` the input-dimension expressions for one minimal reshape
// group (a maximal set of input/output dims with equal element counts).
// Four cases: pass-through, collapse into one input dim, expand one output
// dim, and the general linearize-then-delinearize case.
void ComputeMinimalReshapeIndexing(
    absl::Span<const int64_t> input_dims, absl::Span<const int64_t> output_dims,
    absl::Span<const AffineExpr> output_dims_exprs,
    std::vector<AffineExpr>* exprs, MLIRContext* mlir_context) {
  const bool single_input_dim = input_dims.size() == 1;
  const bool single_output_dim = output_dims.size() == 1;
  if (single_input_dim && single_output_dim) {
    // One dim on each side: the expression passes through unchanged.
    absl::c_copy(output_dims_exprs, std::back_inserter(*exprs));
    return;
  }
  if (single_input_dim) {
    // Several output dims collapse into one input dim: linearize.
    exprs->push_back(
        LinearizeShape(output_dims, output_dims_exprs, mlir_context));
    return;
  }
  if (single_output_dim) {
    // One output dim expands into several input dims: delinearize.
    auto multi_index =
        DelinearizeIndex(input_dims, output_dims_exprs.front(), mlir_context);
    absl::c_copy(multi_index, std::back_inserter(*exprs));
    return;
  }
  // General case: flatten the output index, then split it by the input shape.
  auto multi_index = DelinearizeIndex(
      input_dims, LinearizeShape(output_dims, output_dims_exprs, mlir_context),
      mlir_context);
  absl::c_copy(multi_index, std::back_inserter(*exprs));
}
// Builds the affine map from `output` indices to `input` indices for a
// reshape. The two shapes are scanned in parallel, greedily grouping
// dimensions into minimal sub-reshapes with equal element counts, and each
// group is handled by ComputeMinimalReshapeIndexing.
// Fix: removed the stray `;` after the function body (empty declaration).
AffineMap ComputeReshapeIndexingMap(const Shape& input, const Shape& output,
                                    MLIRContext* mlir_context) {
  absl::Span<const int64_t> input_dims = input.dimensions();
  absl::Span<const int64_t> output_dims = output.dimensions();
  std::vector<AffineExpr> exprs;
  exprs.reserve(input.rank());
  // Degenerate reshape of an empty tensor: map every input dim to 0.
  if (ShapeUtil::ElementsIn(input) == 0) {
    for (int i = 0; i < input.rank(); ++i) {
      exprs.push_back(getAffineConstantExpr(0, mlir_context));
    }
    return AffineMap::get(output_dims.size(), 0, exprs,
                          mlir_context);
  }
  std::vector<AffineExpr> output_dims_exprs;
  // Running element counts of the current dimension group on each side.
  int64_t input_num_elements = 1;
  int64_t output_num_elements = 1;
  std::vector<int64_t> input_subshape, output_subshape;
  size_t input_dim_id = 0, output_dim_id = 0;
  while (input_dim_id < input.rank() || output_dim_id < output.rank() ||
         !input_subshape.empty()) {
    // Grow whichever side has fewer elements so far (size-1 dims are always
    // absorbed into the current group).
    if (input_dim_id < input.rank() &&
        (input_subshape.empty() || input_num_elements < output_num_elements ||
         input_dims[input_dim_id] == 1)) {
      input_num_elements *= input_dims[input_dim_id];
      input_subshape.push_back(input_dims[input_dim_id]);
      ++input_dim_id;
      continue;
    }
    if (output_dim_id < output.rank() &&
        (output_subshape.empty() || output_num_elements < input_num_elements ||
         output_dims[output_dim_id] == 1)) {
      output_num_elements *= output_dims[output_dim_id];
      output_subshape.push_back(output_dims[output_dim_id]);
      output_dims_exprs.push_back(
          getAffineDimExpr(output_dim_id, mlir_context));
      ++output_dim_id;
      continue;
    }
    // Both sides have equal element counts: emit this group and reset.
    ComputeMinimalReshapeIndexing(input_subshape, output_subshape,
                                  output_dims_exprs, &exprs, mlir_context);
    input_num_elements = 1;
    output_num_elements = 1;
    input_subshape.clear();
    output_subshape.clear();
    output_dims_exprs.clear();
  }
  return AffineMap::get(output_dims.size(), 0, exprs,
                        mlir_context);
}
// Output-to-input indexing of a reshape: maps output indices to operand
// indices and simplifies the resulting map.
HloInstructionIndexing ComputeOutputToInputReshapeOpIndexing(
    const HloReshapeInstruction* reshape, MLIRContext* mlir_context) {
  const auto& input_shape = reshape->operand(0)->shape();
  const auto& output_shape = reshape->shape();
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      ComputeReshapeIndexingMap(input_shape, output_shape, mlir_context),
      output_shape.dimensions(), {});
  indexing_map.Simplify();
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
// Input-to-output indexing of a reshape: the inverse direction, obtained by
// swapping the shapes passed to ComputeReshapeIndexingMap.
HloInstructionIndexing ComputeInputToOutputReshapeOpIndexing(
    const HloReshapeInstruction* reshape, MLIRContext* mlir_context) {
  const auto& input_shape = reshape->operand(0)->shape();
  const auto& output_shape = reshape->shape();
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      ComputeReshapeIndexingMap(output_shape, input_shape, mlir_context),
      input_shape.dimensions(), {});
  indexing_map.Simplify();
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
// Indexing for reverse (identical in both directions): reversed dimensions
// are mirrored (d -> size - 1 - d), all others pass through.
HloInstructionIndexing ComputeReverseOpIndexing(
    const HloReverseInstruction* reverse, MLIRContext* mlir_context) {
  absl::flat_hash_set<int64_t> reversed(reverse->dimensions().begin(),
                                        reverse->dimensions().end());
  auto output_dims = reverse->shape().dimensions();
  std::vector<AffineExpr> exprs;
  exprs.reserve(output_dims.size());
  int64_t dim_id = 0;
  for (int64_t dim_size : output_dims) {
    AffineExpr dim_expr = getAffineDimExpr(dim_id, mlir_context);
    exprs.push_back(reversed.contains(dim_id) ? -dim_expr + dim_size - 1
                                              : dim_expr);
    ++dim_id;
  }
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_dims.size(), 0, exprs,
                     mlir_context),
      output_dims, {});
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
// Output-to-input indexing for slice:
// input_index[d] = output_index[d] * stride[d] + start[d].
HloInstructionIndexing ComputeOutputToInputSliceOpIndexing(
    const HloSliceInstruction* slice, MLIRContext* mlir_context) {
  const int64_t rank = slice->shape().rank();
  std::vector<AffineExpr> exprs;
  exprs.reserve(rank);
  for (int64_t dim = 0; dim < rank; ++dim) {
    exprs.push_back(getAffineDimExpr(dim, mlir_context) *
                        slice->slice_strides()[dim] +
                    slice->slice_starts()[dim]);
  }
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(rank, 0, exprs, mlir_context),
      slice->shape().dimensions(), {});
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
// Input-to-output indexing for slice: the inverse of the output-to-input map,
// valid only for input indices actually covered by the slice. Constraints
// restrict the domain to [start, last] with stride-divisibility.
HloInstructionIndexing ComputeInputToOutputSliceOpIndexing(
    const HloSliceInstruction* slice, MLIRContext* mlir_context) {
  auto output_rank = slice->shape().rank();
  std::vector<AffineExpr> exprs;
  exprs.reserve(output_rank);
  for (int64_t dim = 0; dim < output_rank; ++dim) {
    AffineExpr dim_expr = getAffineDimExpr(dim, mlir_context);
    // output_index[d] = (input_index[d] - start[d]) / stride[d].
    exprs.push_back((dim_expr - slice->slice_starts()[dim])
                        .floorDiv(slice->slice_strides()[dim]));
  }
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      AffineMap::get(output_rank, 0, exprs, mlir_context),
      slice->operand(0)->shape().dimensions(), {});
  for (int64_t dim = 0; dim < output_rank; ++dim) {
    AffineExpr dim_expr = getAffineDimExpr(dim, mlir_context);
    // Input indices must lie within [start, last-sampled] and hit a stride
    // multiple; otherwise the input element has no corresponding output.
    int64_t lb = slice->slice_starts()[dim];
    int64_t ub =
        (slice->shape().dimensions(dim) - 1) * slice->slice_strides()[dim] +
        slice->slice_starts()[dim];
    indexing_map.AddConstraint(dim_expr, {lb, ub});
    indexing_map.AddConstraint((dim_expr - lb) % slice->slice_strides()[dim],
                               {0, 0});
  }
  return HloInstructionIndexing::FromIndexingMaps({std::move(indexing_map)});
}
// Builds the permutation affine map corresponding to `permutation`.
AffineMap ComputeTransposeIndexingMap(absl::Span<const int64_t> permutation,
                                      MLIRContext* mlir_context) {
  std::vector<unsigned> perm;
  perm.reserve(permutation.size());
  for (int64_t dim : permutation) {
    perm.push_back(static_cast<unsigned>(dim));
  }
  return AffineMap::getPermutationMap(perm, mlir_context);
}
// Output-to-input indexing for transpose: the inverse of the transpose
// permutation applied over the output dimensions.
HloInstructionIndexing ComputeOutputToInputTransposeOpIndexing(
    const HloTransposeInstruction* transpose, MLIRContext* mlir_context) {
  AffineMap permutation_map = ComputeTransposeIndexingMap(
      InversePermutation(transpose->dimensions()), mlir_context);
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      permutation_map, transpose->shape().dimensions(), {});
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
// Input-to-output indexing for transpose: the forward permutation applied
// over the operand dimensions.
HloInstructionIndexing ComputeInputToOutputTransposeOpIndexing(
    const HloTransposeInstruction* transpose, MLIRContext* mlir_context) {
  AffineMap permutation_map =
      ComputeTransposeIndexingMap(transpose->dimensions(), mlir_context);
  IndexingMap indexing_map = IndexingMap::FromTensorSizes(
      permutation_map, transpose->operand(0)->shape().dimensions(), {});
  return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
}
// Overload: wraps the raw input dimensions in a default (descending-layout)
// shape using the output's element type, then delegates to the Shape overload.
IndexingMap GetBitcastMap(absl::Span<const int64_t> input_shape,
                          const Shape& output_shape,
                          mlir::MLIRContext* mlir_context) {
  Shape input = ShapeUtil::MakeShapeWithDescendingLayout(
      output_shape.element_type(), input_shape);
  return GetBitcastMap(input, output_shape, mlir_context);
}
// Overload: wraps both dimension spans in default-layout shapes (S8 element
// type on both sides) and delegates to the Shape overload.
IndexingMap GetBitcastMap(absl::Span<const int64_t> input_shape,
                          absl::Span<const int64_t> output_shape,
                          mlir::MLIRContext* mlir_context) {
  Shape input =
      ShapeUtil::MakeShapeWithDescendingLayout(PrimitiveType::S8, input_shape);
  Shape output =
      ShapeUtil::MakeShapeWithDescendingLayout(PrimitiveType::S8, output_shape);
  return GetBitcastMap(input, output, mlir_context);
}
// Builds the indexing map from `input_shape` indices to `output_shape`
// indices for a bitcast, based on ShapeUtil's decomposition into transpose,
// reshape, or transpose-reshape-transpose.
IndexingMap GetBitcastMap(const Shape& input_shape, const Shape& output_shape,
                          MLIRContext* mlir_context) {
  ShapeUtil::BitcastDecomposition decomposed_bitcast =
      ShapeUtil::DecomposeBitcast(input_shape, output_shape);
  if (std::holds_alternative<ShapeUtil::BitcastDecompositionTranspose>(
          decomposed_bitcast)) {
    // Pure transpose: derive its permutation from the two shapes.
    auto permutation = ShapeUtil::DeduceTransposeDimensionsForBitcast(
        input_shape, output_shape);
    CHECK(permutation.has_value())
        << "Failed to deduce permutation for a bitcast.";
    return IndexingMap::FromTensorSizes(
        ComputeTransposeIndexingMap(permutation.value(), mlir_context),
        input_shape.dimensions(), {});
  }
  if (std::holds_alternative<ShapeUtil::BitcastDecompositionReshape>(
          decomposed_bitcast)) {
    // Pure reshape. ComputeReshapeIndexingMap maps output->input, so the
    // shapes are swapped here to get the input->output direction.
    return IndexingMap::FromTensorSizes(
        ComputeReshapeIndexingMap(output_shape, input_shape, mlir_context),
        input_shape.dimensions(), {});
  }
  // General case: transpose1 -> reshape -> transpose2, composed right-to-left.
  auto trt = std::get<ShapeUtil::BitcastDecompositionTrt>(decomposed_bitcast);
  auto transpose_map_1 =
      ComputeTransposeIndexingMap(trt.transpose1_dims, mlir_context);
  auto reshape_map = ComputeReshapeIndexingMap(
      trt.reshape_shape, trt.transpose1_shape, mlir_context);
  auto transpose_map_2 =
      ComputeTransposeIndexingMap(trt.transpose2_dims, mlir_context);
  auto bitcast_map =
      transpose_map_2.compose(reshape_map).compose(transpose_map_1);
  return IndexingMap::FromTensorSizes(bitcast_map, input_shape.dimensions(),
                                      {});
}
namespace {
// Output-to-input indexing for bitcast: the bitcast map from the result shape
// to the operand shape, simplified.
HloInstructionIndexing ComputeOutputToInputBitcastOpIndexing(
    const HloInstruction* bitcast, MLIRContext* mlir_context) {
  IndexingMap map = GetBitcastMap(bitcast->shape(),
                                  bitcast->operand(0)->shape(), mlir_context);
  map.Simplify();
  return HloInstructionIndexing::FromIndexingMaps({map});
}
// Input-to-output indexing for bitcast: the bitcast map from the operand
// shape to the result shape, simplified.
HloInstructionIndexing ComputeInputToOutputBitcastOpIndexing(
    const HloInstruction* bitcast, MLIRContext* mlir_context) {
  IndexingMap map = GetBitcastMap(bitcast->operand(0)->shape(),
                                  bitcast->shape(), mlir_context);
  map.Simplify();
  return HloInstructionIndexing::FromIndexingMaps({map});
}
// Converts a layout's minor-to-major list into major-to-minor order, i.e. the
// permutation used to model the layout as a transpose.
std::vector<int64_t> ToTransposeDimensions(const Layout& l) {
  std::vector<int64_t> out;
  out.reserve(l.minor_to_major().size());
  // Copy in reverse: major dimensions first.
  for (auto it = l.minor_to_major().rbegin(); it != l.minor_to_major().rend();
       ++it) {
    out.push_back(*it);
  }
  return out;
}
}
// Identity indexing map over the given tensor dimensions.
IndexingMap CreateIdentityMap(absl::Span<const int64_t> dimensions,
                              mlir::MLIRContext* mlir_context) {
  AffineMap identity =
      AffineMap::getMultiDimIdentityMap(dimensions.size(), mlir_context);
  return IndexingMap::FromTensorSizes(identity, dimensions, {});
}
// Identity indexing map for `shape`. For a tuple shape this recurses into the
// first tuple element (handling nested tuples).
IndexingMap CreateIdentityMap(const Shape& shape, MLIRContext* mlir_context) {
  if (shape.IsTuple()) {
    return CreateIdentityMap(shape.tuple_shapes(0), mlir_context);
  }
  return CreateIdentityMap(shape.dimensions(), mlir_context);
}
// Delinearizes `linear` over `sizes`, assuming the linear index is in bounds.
// Returns one expression per dimension.
llvm::SmallVector<AffineExpr, 4> DelinearizeInBoundsIndex(
    AffineExpr linear, absl::Span<const int64_t> sizes) {
  llvm::SmallVector<AffineExpr, 4> result;
  result.reserve(sizes.size());
  // A zero-sized dimension means the tensor is empty: every index is 0.
  if (absl::c_linear_search(sizes, 0)) {
    for (int dim = 0; dim < sizes.size(); ++dim) {
      result.push_back(mlir::getAffineConstantExpr(0, linear.getContext()));
    }
    return result;
  }
  auto strides = ComputeStrides(sizes);
  for (auto [size, stride] : llvm::zip(sizes, strides)) {
    result.push_back(linear.floorDiv(stride) % size);
  }
  // For the outermost non-trivial dimension, the `% size` is redundant when
  // the index is in bounds, so drop it to keep the expression simpler.
  for (int dim = 0; dim < sizes.size(); ++dim) {
    if (sizes[dim] > 1) {
      result[dim] = linear.floorDiv(strides[dim]);
      break;
    }
  }
  return result;
}
// Indexing map from physical (layout) order to logical (shape) order of the
// dimensions of `shape`. Scalars get an empty map.
IndexingMap GetIndexingMapFromPhysicalLayoutToLogical(
    const Shape& shape, MLIRContext* mlir_context) {
  if (shape.rank() == 0) {
    return IndexingMap(AffineMap::get(mlir_context),
                       {}, {}, {});
  }
  // The layout is modeled as a transpose; inverting it maps physical indices
  // back to logical ones. Dim bounds come from the physical (descending
  // layout) shape.
  return IndexingMap::FromTensorSizes(
      ComputeTransposeIndexingMap(
          InversePermutation(ToTransposeDimensions(shape.layout())),
          mlir_context),
      ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(shape)
          .dimensions(),
      {});
}
// Maps logical indices of `shape` to its physical (descending-layout) indices.
IndexingMap GetIndexingMapFromLogicalToPhysicalLayout(
    const Shape& shape, MLIRContext* mlir_context) {
  // Rank-0 shapes need no index transformation.
  if (shape.rank() == 0) {
    return IndexingMap(AffineMap::get(mlir_context), {}, {}, {});
  }
  auto transpose_map = ComputeTransposeIndexingMap(
      ToTransposeDimensions(shape.layout()), mlir_context);
  return IndexingMap::FromTensorSizes(transpose_map, shape.dimensions(), {});
}
// Simplifies all indexing maps in-place. Because the maps are stored in sets
// keyed by value, a changed map must be erased and re-inserted. Returns true
// if any map was modified.
bool HloInstructionIndexing::Simplify() {
  bool any_simplified = false;
  for (auto& operand_indexing : indexing_maps) {
    std::vector<IndexingMap> to_remove, to_add;
    // Iterate over copies: `map.Simplify()` mutates the copy, never the set
    // element itself.
    for (IndexingMap map : operand_indexing) {
      to_remove.push_back(map);
      if (map.IsUndefined()) {
        // Undefined maps are kept as-is (erased and re-inserted unchanged).
        to_add.push_back(map);
      } else if (map.Simplify()) {
        map.RemoveUnusedSymbols();
        // BUG FIX: re-insert the simplified map. Without this, a map that was
        // successfully simplified is erased below (its pre-simplification
        // value is in `to_remove`) and then lost entirely.
        to_add.push_back(map);
      } else {
        // Nothing changed; leave the set element untouched.
        to_remove.pop_back();
      }
    }
    for (auto& map : to_remove) {
      operand_indexing.erase(map);
    }
    for (auto& map : to_add) {
      operand_indexing.insert(map);
    }
    any_simplified |= !to_remove.empty();
  }
  return any_simplified;
}
// Wraps one indexing map per operand into an HloInstructionIndexing, keeping
// the operand order of `indexing_maps`.
HloInstructionIndexing HloInstructionIndexing::FromIndexingMaps(
    absl::Span<const IndexingMap> indexing_maps) {
  HloInstructionIndexing result;
  result.indexing_maps.resize(indexing_maps.size());
  int64_t operand_id = 0;
  for (const IndexingMap& map : indexing_maps) {
    result.indexing_maps[operand_id++].insert(map);
  }
  return result;
}
// Renders the indexing via operator<< into a string.
std::string HloInstructionIndexing::ToString() const {
  std::stringstream sstream;
  sstream << *this;
  return sstream.str();
}
// Prints each operand's indexing maps; undefined maps are rendered as
// "unknown indexing". Output bytes are identical to ToString().
std::ostream& operator<<(std::ostream& out,
                         const HloInstructionIndexing& instr_indexing) {
  int64_t operand_id = 0;
  for (const auto& indexing_maps : instr_indexing.indexing_maps) {
    out << "operand id = " << operand_id++ << ' ';
    for (const auto& indexing_map : indexing_maps) {
      if (indexing_map.IsUndefined()) {
        out << "unknown indexing";
        continue;
      }
      out << indexing_map;
    }
  }
  return out;
}
// Returns the shape of output `output_id`: for tuple-shaped instructions the
// corresponding tuple element, otherwise the instruction's own shape.
const Shape& GetOutputShape(const HloInstruction* instr, int64_t output_id) {
  if (instr->shape().IsTuple()) {
    return ShapeUtil::GetSubshape(instr->shape(), {output_id});
  }
  return instr->shape();
}
// Re-keys per-operand-id indexing maps by the producing instruction; maps for
// the same producer (an instruction used as several operands) are merged into
// one set.
GroupedByOpIndexingMap GroupIndexingMapsByProducers(
    const HloInstructionIndexing& indexing, const HloInstruction* instr) {
  GroupedByOpIndexingMap grouped;
  int64_t operand_id = 0;
  for (const auto& maps : indexing.indexing_maps) {
    auto& destination = grouped[instr->operand(operand_id++)];
    destination.insert(maps.begin(), maps.end());
  }
  return grouped;
}
// For every instruction in `fusion_adaptor` reachable (as a producer) from
// `target_instr`, computes the indexing maps from the target's output index
// space into that instruction's output index space, grouped by instruction.
GroupedByOpIndexingMap ComputeGroupedOutputToInputIndexing(
    const HloFusionAdaptor& fusion_adaptor, HloInstructionAdaptor target_instr,
    MLIRContext* ctx) {
  auto initial_map = CreateIdentityMap(target_instr.instruction().shape(), ctx);
  GroupedByOpIndexingMap grouped_indexing_maps;
  // If the target is a fusion-computation parameter, attribute the identity
  // map to the corresponding operand of the enclosing fusion instead.
  if (fusion_adaptor.ContainsInstruction(target_instr)) {
    if (auto parameter_instr =
            DynCast<HloParameterInstruction>(&target_instr.instruction())) {
      auto fusion_instr = parameter_instr->parent()->FusionInstruction();
      auto fusion_operand =
          fusion_instr->operand(parameter_instr->parameter_number());
      grouped_indexing_maps[fusion_operand] = {initial_map};
      return grouped_indexing_maps;
    }
  }
  grouped_indexing_maps[&target_instr.instruction()].insert(initial_map);
  // Walk producers in reverse post-order starting from the target, composing
  // each consumer's accumulated maps with the producer's per-operand maps.
  auto post_order = fusion_adaptor.MakeInstructionPostOrder();
  auto it = std::find(post_order.rbegin(), post_order.rend(), target_instr);
  for (; it != post_order.rend(); ++it) {
    auto producer_indexing = ComputeOutputToInputIndexing(&it->instruction(),
                                                          0, ctx);
    auto consumer_indexing_maps =
        grouped_indexing_maps.find(&it->instruction());
    if (consumer_indexing_maps == grouped_indexing_maps.end()) {
      // Instruction not reachable from the target; nothing to propagate.
      continue;
    }
    // Copy the set before the nested loops: inserting into
    // grouped_indexing_maps below may invalidate the found iterator.
    IndexingMapSet consumer_indexing_maps_copy = consumer_indexing_maps->second;
    for (const auto& [producer_operand_id, producer_operand_indexing] :
         llvm::enumerate(producer_indexing.indexing_maps)) {
      auto producer_operand_adaptor = it->GetOperand(producer_operand_id);
      for (const IndexingMap& producer_map : producer_operand_indexing) {
        for (const IndexingMap& consumer_map : consumer_indexing_maps_copy) {
          // Compose: first apply consumer_map (target -> producer output),
          // then producer_map (producer output -> producer operand).
          auto composed_map = ComposeIndexingMaps(consumer_map, producer_map);
          composed_map.Simplify();
          composed_map.RemoveUnusedSymbols();
          grouped_indexing_maps[&producer_operand_adaptor.instruction()].insert(
              composed_map);
        }
      }
    }
  }
  return grouped_indexing_maps;
}
// Replaces `producer_instr`'s entry in `consumer_indexing` with entries for
// the producer's operands, composing each consumer-side map with the
// producer's output-to-input maps. Always returns true.
bool FuseProducerConsumerOutputToInputIndexing(
    const HloInstruction* producer_instr,
    absl::flat_hash_map<const HloInstruction*, IndexingMapSet>*
        consumer_indexing,
    MLIRContext* mlir_context) {
  auto producer_indexing = ComputeOutputToInputIndexing(
      producer_instr, /*output_id=*/0, mlir_context);
  // Copy the producer's map set: the loop below mutates *consumer_indexing.
  auto consumer_indexing_maps = (*consumer_indexing)[producer_instr];
  for (const auto& [producer_operand_id, producer_operand_indexing] :
       llvm::enumerate(producer_indexing.indexing_maps)) {
    const HloInstruction* producer_operand_instr =
        producer_instr->operand(producer_operand_id);
    for (const IndexingMap& producer_map : producer_operand_indexing) {
      for (const IndexingMap& consumer_map : consumer_indexing_maps) {
        // BUG FIX: compose consumer-first, producer-second, matching the
        // composition in ComputeGroupedOutputToInputIndexing: consumer_map
        // takes the consumer's output index to the producer's output index,
        // and producer_map takes that to the producer-operand index. The
        // previous argument order applied the maps in the wrong sequence.
        (*consumer_indexing)[producer_operand_instr].insert(
            ComposeIndexingMaps(consumer_map, producer_map));
      }
    }
  }
  consumer_indexing->erase(producer_instr);
  return true;
}
// Dispatches to the per-opcode handler that maps indices of output
// `output_id` of `instr` to indices of each operand. Unsupported opcodes get
// "unknown" indexing for every operand. The dispatch order matters: the
// elementwise/kMap check must precede the DynCast chain.
HloInstructionIndexing ComputeOutputToInputIndexing(const HloInstruction* instr,
                                                    int output_id,
                                                    MLIRContext* ctx) {
  if (HloInstruction::IsOpElementwise(instr->opcode()) ||
      instr->opcode() == HloOpcode::kMap) {
    return ComputeOutputToInputCwiseOpIndexing(instr, ctx);
  }
  if (instr->opcode() == HloOpcode::kBitcast) {
    return ComputeOutputToInputBitcastOpIndexing(instr, ctx);
  }
  if (auto broadcast = DynCast<HloBroadcastInstruction>(instr)) {
    return ComputeOutputToInputBroadcastOpIndexing(broadcast, ctx);
  }
  if (auto concat = DynCast<HloConcatenateInstruction>(instr)) {
    return ComputeOutputToInputConcatenateOpIndexing(concat, ctx);
  }
  // Constants have no operands, so there is nothing to index into.
  if (auto constant = DynCast<HloConstantInstruction>(instr)) {
    return HloInstructionIndexing{};
  }
  if (auto dot = DynCast<HloDotInstruction>(instr)) {
    return ComputeOutputToInputDotOpIndexing(dot, ctx);
  }
  if (auto dynamic_slice = DynCast<HloDynamicSliceInstruction>(instr)) {
    return ComputeOutputToInputDynamicSliceOpIndexing(dynamic_slice, ctx);
  }
  if (auto dus = DynCast<HloDynamicUpdateSliceInstruction>(instr)) {
    return ComputeOutputToInputDynamicUpdateSliceOpIndexing(dus, ctx);
  }
  if (auto fusion = DynCast<HloFusionInstruction>(instr)) {
    return ComputeOutputToInputFusionOpIndexing(fusion, output_id, ctx);
  }
  if (auto gather = DynCast<HloGatherInstruction>(instr)) {
    return ComputeOutputToInputGatherOpIndexing(gather, ctx);
  }
  // Iota has no operands, so there is nothing to index into.
  if (auto iota = DynCast<HloIotaInstruction>(instr)) {
    return HloInstructionIndexing{};
  }
  if (auto pad = DynCast<HloPadInstruction>(instr)) {
    return ComputeOutputToInputPadOpIndexing(pad, ctx);
  }
  if (auto reduce = DynCast<HloReduceInstruction>(instr)) {
    return ComputeOutputToInputReduceOpIndexing(reduce, output_id, ctx);
  }
  if (auto reduce_window = DynCast<HloReduceWindowInstruction>(instr)) {
    return ComputeOutputToInputReduceWindowOpIndexing(reduce_window, output_id,
                                                      ctx);
  }
  if (auto convolution = DynCast<HloConvolutionInstruction>(instr)) {
    return ComputeOutputToInputConvolutionOpIndexing(convolution, ctx);
  }
  if (auto reshape = DynCast<HloReshapeInstruction>(instr)) {
    return ComputeOutputToInputReshapeOpIndexing(reshape, ctx);
  }
  // Reverse indexing is its own inverse, so one handler serves both
  // directions.
  if (auto reverse = DynCast<HloReverseInstruction>(instr)) {
    return ComputeReverseOpIndexing(reverse, ctx);
  }
  if (auto slice = DynCast<HloSliceInstruction>(instr)) {
    return ComputeOutputToInputSliceOpIndexing(slice, ctx);
  }
  if (auto transpose = DynCast<HloTransposeInstruction>(instr)) {
    return ComputeOutputToInputTransposeOpIndexing(transpose, ctx);
  }
  // Fallback: one undefined ("unknown") map per operand.
  return CreateUnknownIndexing(instr->operand_count());
}
// Dispatches to the per-opcode handler that maps indices of operand
// `input_id` of `instr` to indices of its output(s). Fewer ops are supported
// in this direction than in ComputeOutputToInputIndexing; unsupported ops get
// "unknown" indexing, one map per output.
HloInstructionIndexing ComputeInputToOutputIndexing(const HloInstruction* instr,
                                                    int input_id,
                                                    MLIRContext* ctx) {
  if (HloInstruction::IsOpElementwise(instr->opcode()) ||
      instr->opcode() == HloOpcode::kMap) {
    return ComputeInputToOutputCwiseOpIndexing(instr, ctx);
  }
  if (instr->opcode() == HloOpcode::kBitcast) {
    return ComputeInputToOutputBitcastOpIndexing(instr, ctx);
  }
  if (auto broadcast = DynCast<HloBroadcastInstruction>(instr)) {
    return ComputeInputToOutputBroadcastOpIndexing(broadcast, ctx);
  }
  if (auto concat = DynCast<HloConcatenateInstruction>(instr)) {
    return ComputeInputToOutputConcatenateOpIndexing(concat, input_id, ctx);
  }
  if (auto reduce = DynCast<HloReduceInstruction>(instr)) {
    return ComputeInputToOutputReduceOpIndexing(reduce, input_id, ctx);
  }
  if (auto reshape = DynCast<HloReshapeInstruction>(instr)) {
    return ComputeInputToOutputReshapeOpIndexing(reshape, ctx);
  }
  // Reverse indexing is its own inverse, so one handler serves both
  // directions.
  if (auto reverse = DynCast<HloReverseInstruction>(instr)) {
    return ComputeReverseOpIndexing(reverse, ctx);
  }
  if (auto transpose = DynCast<HloTransposeInstruction>(instr)) {
    return ComputeInputToOutputTransposeOpIndexing(transpose, ctx);
  }
  if (auto slice = DynCast<HloSliceInstruction>(instr)) {
    return ComputeInputToOutputSliceOpIndexing(slice, ctx);
  }
  // A tuple forwards operand `input_id` to tuple element `input_id`
  // unchanged.
  if (instr->opcode() == HloOpcode::kTuple) {
    return HloInstructionIndexing::FromIndexingMaps(
        {CreateIdentityMap(instr->shape().tuple_shapes(input_id), ctx)});
  }
  // Fallback: one undefined ("unknown") map per output.
  int64_t num_results =
      instr->shape().IsTuple() ? instr->shape().tuple_shapes_size() : 1;
  return CreateUnknownIndexing(num_results);
}
// Composes input-to-output maps along the use chain from `epilogue_parent`
// to `epilogue_root`, producing the map from the parent's output index space
// to the root's output index space. CHECK-fails if no use chain exists.
IndexingMap ComputeEpilogueInputToOutputIndexing(
    HloInstructionAdaptor epilogue_parent, HloInstructionAdaptor epilogue_root,
    MLIRContext* mlir_context) {
  auto chain = HloFindUseChain(epilogue_parent, epilogue_root);
  CHECK(!chain.empty()) << "There is no use chain from parent to root";
  auto root_indexing = CreateIdentityMap(epilogue_parent.shape(), mlir_context);
  for (int i = 1; i < chain.size(); ++i) {
    const auto& producer = chain[i - 1].instruction();
    const auto& user = chain[i].instruction();
    auto user_indexing = ComputeInputToOutputIndexing(
        &user, user.operand_index(&producer), mlir_context);
    // NOTE(review): operator* appears to compose the accumulated map with the
    // user's map (apply accumulated first) — confirm against
    // IndexingMap::operator* documentation.
    root_indexing = root_indexing * *user_indexing.indexing_maps[0].begin();
    root_indexing.Simplify();
    root_indexing.RemoveUnusedSymbols();
  }
  return root_indexing;
}
}
} | #include "xla/service/gpu/model/indexing_analysis.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_map_serialization.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
// gMock matcher for an (operand_id, indexing maps) entry of an instruction's
// indexing: checks the operand id for equality and applies the given matcher
// to the indexing-map set.
MATCHER_P2(MatchInstrIndexing, operand_id, indexing_map_matchers, "") {
  return ExplainMatchResult(Eq(operand_id), arg.operand_id, result_listener) &&
         ExplainMatchResult(indexing_map_matchers, arg.indexing_maps,
                            result_listener);
}
// Test fixture: provides HLO parsing helpers and an MLIRContext.
using IndexingAnalysisTest = IndexingTestBase;
// Verifies that GroupIndexingMapsByProducers re-keys the root's
// output-to-input maps by producing instruction.
TEST_F(IndexingAnalysisTest, FuseProducerConsumerOutputToInputIndexing) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[1000, 1000] parameter(0)
      transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
      ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
    }
  )");
  const HloInstruction* parameter = root->operand(0);
  const HloInstruction* transpose = root->operand(1);
  auto root_indexing = GetOutputToInputIndexing(root);
  auto grouped_by_key = GroupIndexingMapsByProducers(root_indexing, root);
  EXPECT_THAT(
      grouped_by_key,
      UnorderedElementsAre(Pair(parameter, ElementsAre(MatchIndexingMap(R"(
        (d0, d1) -> (d0, d1),
        domain:
        d0 in [0, 999],
        d1 in [0, 999]
      )"))),
                           Pair(transpose, ElementsAre(MatchIndexingMap(R"(
        (d0, d1) -> (d0, d1),
        domain:
        d0 in [0, 999],
        d1 in [0, 999]
      )")))));
}
// Verifies grouped output-to-input indexing for a producer-consumer fusion
// adaptor; the parameter collects both the direct and the transposed map.
TEST_F(IndexingAnalysisTest, ComputeGroupedOutputToInputIndexing) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[1000, 1000] parameter(0)
      transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
      ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
    }
  )");
  const HloInstruction* parameter = root->operand(0);
  const HloInstruction* transpose = root->operand(1);
  auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(transpose, root);
  auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
      *fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_context_);
  EXPECT_THAT(grouped_indexing,
              UnorderedElementsAre(
                  Pair(root, ElementsAre(MatchIndexingMap(R"(
                    (d0, d1) -> (d0, d1),
                    domain:
                    d0 in [0, 999],
                    d1 in [0, 999]
                  )"))),
                  Pair(transpose, ElementsAre(MatchIndexingMap(R"(
                    (d0, d1) -> (d0, d1),
                    domain:
                    d0 in [0, 999],
                    d1 in [0, 999]
                  )"))),
                  Pair(parameter, UnorderedElementsAre(MatchIndexingMap(R"(
                    (d0, d1) -> (d0, d1),
                    domain:
                    d0 in [0, 999],
                    d1 in [0, 999]
                  )"),
                                                       MatchIndexingMap(R"(
                    (d0, d1) -> (d1, d0),
                    domain:
                    d0 in [0, 999],
                    d1 in [0, 999]
                  )")))));
}
// Verifies grouped indexing through a variadic reduce: array inputs get a
// symbol for the reduced dimension, init values map to scalars.
TEST_F(IndexingAnalysisTest,
       ComputeGroupedOutputToInputIndexing_VariadicReduce) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    add {
      param_0 = f32[] parameter(0)
      param_1 = f32[] parameter(1)
      param_2 = f32[] parameter(2)
      param_3 = f32[] parameter(3)
      add.0 = f32[] add(param_0, param_2)
      add.1 = f32[] add(param_1, param_3)
      ROOT t = (f32[], f32[]) tuple(add.0, add.1)
    }
    ENTRY entry_computation {
      param_0.3 = f32[32,40]{1,0} parameter(0)
      param_1.3 = f32[32,40]{1,0} parameter(1)
      param_2.2 = f32[] parameter(2)
      constant = f32[] constant(0)
      ROOT reduce = (f32[32]{0}, f32[32]{0})
        reduce(param_0.3, param_1.3, param_2.2, constant),
        dimensions={1}, to_apply=add
    }
  )");
  auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
  auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
      *fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_context_);
  EXPECT_THAT(grouped_indexing,
              UnorderedElementsAre(
                  Pair(root, ElementsAre(MatchIndexingMap(R"(
                    (d0) -> (d0),
                    domain:
                    d0 in [0, 31]
                  )"))),
                  Pair(root->operand(0), ElementsAre(MatchIndexingMap(R"(
                    (d0)[s0] -> (d0, s0),
                    domain:
                    d0 in [0, 31],
                    s0 in [0, 39]
                  )"))),
                  Pair(root->operand(1), ElementsAre(MatchIndexingMap(R"(
                    (d0)[s0] -> (d0, s0),
                    domain:
                    d0 in [0, 31],
                    s0 in [0, 39]
                  )"))),
                  Pair(root->operand(2), ElementsAre(MatchIndexingMap(R"(
                    (d0) -> (),
                    domain:
                    d0 in [0, 31]
                  )"))),
                  Pair(root->operand(3), ElementsAre(MatchIndexingMap(R"(
                    (d0) -> (),
                    domain:
                    d0 in [0, 31]
                  )")))));
}
// Verifies grouped indexing when the target is an operand adaptor of a
// single-instruction fusion: the map is attributed to the fusion operand.
TEST_F(IndexingAnalysisTest, ComputeGroupedOutputToInputIndexing_SingleOp) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[1000, 1000] parameter(0)
      p1 = f32[1000, 1000] parameter(1)
      exp0 = f32[1000, 1000] exponential(p1)
      ROOT a0 = f32[1000, 1000] add(p0, exp0)
    }
  )");
  HloComputation* entry_computation = root->parent();
  const HloInstruction* exponential =
      entry_computation->GetInstructionWithName("exp0");
  const HloInstruction* parameter =
      entry_computation->GetInstructionWithName("p1");
  auto fusion_adaptor = HloFusionAdaptor::ForInstruction(exponential);
  HloInstructionAdaptor parameter_adaptor =
      fusion_adaptor->GetRoots()[0].GetOperand(0);
  auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
      *fusion_adaptor, parameter_adaptor, &mlir_context_);
  EXPECT_THAT(grouped_indexing, UnorderedElementsAre(Pair(
                                    parameter, ElementsAre(MatchIndexingMap(R"(
                                      (d0, d1) -> (d0, d1),
                                      domain:
                                      d0 in [0, 999],
                                      d1 in [0, 999]
                                    )")))));
}
// Verifies grouped indexing when traversal starts at an interior instruction
// (a broadcast inside the fusion) rather than at the fusion root.
TEST_F(IndexingAnalysisTest,
       ComputeGroupedOutputToInputIndexing_StartNotAtRoot) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    max {
      p0 = f32[] parameter(0)
      p1 = f32[] parameter(1)
      ROOT max = f32[] maximum(p0, p1)
    }
    f {
      p0 = f32[15, 20] parameter(0)
      p0_init = f32[] parameter(1)
      p0_bcast = f32[15, 32, 20, 64] broadcast(p0), dimensions={0, 2}
      ROOT reduce_2 = f32[15, 64] reduce(p0_bcast, p0_init),
        dimensions={1, 2}, to_apply=max
    }
    ENTRY e {
      p0 = f32[15, 20] parameter(0)
      p0_init = f32[] constant(-inf)
      ROOT fusion = f32[15, 64] fusion(p0, p0_init), kind=kLoop, calls=f
    }
  )");
  auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
  auto root_adaptor = fusion_adaptor->GetRoots()[0];
  auto bcast = root_adaptor.GetOperand(0);
  auto parameter_0 = bcast.GetOperand(0);
  auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
      *fusion_adaptor, bcast, &mlir_context_);
  EXPECT_THAT(
      grouped_indexing,
      UnorderedElementsAre(
          Pair(&bcast.instruction(), ElementsAre(MatchIndexingMap(R"(
            (d0, d1, d2, d3) -> (d0, d1, d2, d3),
            domain:
            d0 in [0, 14],
            d1 in [0, 31],
            d2 in [0, 19],
            d3 in [0, 63]
          )"))),
          Pair(&parameter_0.instruction(), ElementsAre(MatchIndexingMap(R"(
            (d0, d1, d2, d3) -> (d0, d2),
            domain:
            d0 in [0, 14],
            d1 in [0, 31],
            d2 in [0, 19],
            d3 in [0, 63]
          )")))));
}
// Verifies physical-layout-aware indexing for an op whose output layout is a
// permutation of the logical order.
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestOutputPermutation) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[10, 20, 30] parameter(0)
      ROOT add0 = f32[10, 20, 30]{1, 0, 2} exponential(p0)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root, /*output_id=*/0,
                                                 /*use_physical_layout=*/true);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d1, d2, d0),
    domain:
    d0 in [0, 29],
    d1 in [0, 9],
    d2 in [0, 19]
  )"));
  auto output_indexing = GetInputToOutputIndexing(root, /*input_id=*/0,
                                                  /*use_physical_layout=*/true);
  EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d2, d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19],
    d2 in [0, 29]
  )"));
}
// Zero-sized copy: both directions simplify to a KNOWN EMPTY indexing.
TEST_F(IndexingAnalysisTest, CopyNothing) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[0, 0]{0,1} parameter(0)
      ROOT copy0 = f32[0, 0]{1,0} copy(p0)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root, /*output_id=*/0);
  input_indexing.Simplify();
  EXPECT_THAT(input_indexing.ToString(),
              MatchIndexingString("operand id = 0 KNOWN EMPTY"));
  auto output_indexing = GetInputToOutputIndexing(root, /*input_id=*/0);
  output_indexing.Simplify();
  EXPECT_THAT(output_indexing.ToString(),
              MatchIndexingString("operand id = 0 KNOWN EMPTY"));
}
// Zero-sized reshape: KNOWN EMPTY, but the affine map keeps its result rank.
TEST_F(IndexingAnalysisTest, ReshapeNothing) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[1,0,0] parameter(0)
      ROOT reshape = f32[0] reshape(p0)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root, /*output_id=*/0);
  input_indexing.Simplify();
  EXPECT_THAT(input_indexing.ToString(),
              MatchIndexingString("operand id = 0 KNOWN EMPTY"));
  auto output_indexing = GetInputToOutputIndexing(root, /*input_id=*/0);
  output_indexing.Simplify();
  EXPECT_THAT(output_indexing.ToString(),
              MatchIndexingString("operand id = 0 KNOWN EMPTY"));
  EXPECT_EQ(
      output_indexing.indexing_maps[0].begin()->GetAffineMap().getNumResults(),
      1);
}
// Physical-layout-aware indexing when the input layout is permuted.
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestInputPermutation) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[10, 20, 30]{1, 0, 2} parameter(0)
      ROOT add0 = f32[10, 20, 30] exponential(p0)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root, /*output_id=*/0,
                                                 /*use_physical_layout=*/true);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d2, d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19],
    d2 in [0, 29]
  )"));
  auto output_indexing = GetInputToOutputIndexing(root, /*input_id=*/0,
                                                  /*use_physical_layout=*/true);
  EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d1, d2, d0),
    domain:
    d0 in [0, 29],
    d1 in [0, 9],
    d2 in [0, 19]
  )"));
}
// When input and output layouts are permuted identically, the physical-layout
// indexing is the identity.
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestInputAndOutputPermutation) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[10, 20, 30]{1, 0, 2} parameter(0)
      ROOT add0 = f32[10, 20, 30]{1, 0, 2} exponential(p0)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root, /*output_id=*/0,
                                                 /*use_physical_layout=*/true);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1, d2),
    domain:
    d0 in [0, 29],
    d1 in [0, 9],
    d2 in [0, 19]
  )"));
  auto output_indexing = GetInputToOutputIndexing(root, /*input_id=*/0,
                                                  /*use_physical_layout=*/true);
  EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1, d2),
    domain:
    d0 in [0, 29],
    d1 in [0, 9],
    d2 in [0, 19]
  )"));
}
// Elementwise binary op: identity maps for both operands and both directions.
TEST_F(IndexingAnalysisTest, ElementwiseOp) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[10, 20] parameter(0)
      p1 = f32[10, 20] parameter(1)
      ROOT add0 = f32[10, 20] add(p0, p1)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
    operand id = 1
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
  )"));
  auto output_indexing_0 = GetInputToOutputIndexing(root, /*input_id=*/0);
  EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
  )"));
  auto output_indexing_1 = GetInputToOutputIndexing(root, /*input_id=*/1);
  EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
  )"));
}
// kMap is treated like an elementwise op: identity maps in both directions.
TEST_F(IndexingAnalysisTest, Map) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    mapper {
      a = f32[] parameter(0)
      b = f32[] parameter(1)
      ROOT add = f32[] add(a, b)
    }
    ENTRY e {
      p0 = f32[10, 20] parameter(0)
      p1 = f32[10, 20] parameter(1)
      ROOT add0 = f32[10, 20] map(%p0, %p1), dimensions={}, to_apply=mapper
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
    operand id = 1
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
  )"));
  auto output_indexing_0 = GetInputToOutputIndexing(root, /*input_id=*/0);
  EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
  )"));
  auto output_indexing_1 = GetInputToOutputIndexing(root, /*input_id=*/1);
  EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19]
  )"));
}
// Bitcast that is effectively a reshape: dimensions are re-linearized.
TEST_F(IndexingAnalysisTest, BitcastIsReshape) {
  auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[4, 32] parameter(0)
      ROOT bitcast = f32[4, 8, 4] bitcast(p0)
    }
  )"));
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1 * 4 + d2),
    domain:
    d0 in [0, 3],
    d1 in [0, 7],
    d2 in [0, 3]
  )"));
}
// Bitcast that is effectively a transpose: dimensions are permuted.
TEST_F(IndexingAnalysisTest, BitcastIsTranspose) {
  auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[3, 12288, 6, 128] parameter(0)
      ROOT bitcast = f32[3, 6, 128, 12288] {2, 1, 3, 0} bitcast(p0)
    }
  )"));
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2, d3) -> (d0, d3, d1, d2),
    domain:
    d0 in [0, 2],
    d1 in [0, 5],
    d2 in [0, 127],
    d3 in [0, 12287]
  )"));
}
// Bitcast decomposable as transpose + reshape + transpose, in both
// directions.
TEST_F(IndexingAnalysisTest, BitcastIsTransposeReshapeTranspose) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[16, 17, 3] parameter(0)
      ROOT bitcast = f32[51, 16] {0, 1} bitcast(p0)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d1, d0 floordiv 3, d0 mod 3),
    domain:
    d0 in [0, 50],
    d1 in [0, 15]
  )"));
  auto output_indexing = GetInputToOutputIndexing(root);
  EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d1 * 3 + d2, d0),
    domain:
    d0 in [0, 15],
    d1 in [0, 16],
    d2 in [0, 2]
  )"));
}
// Broadcast: output-to-input drops the broadcasted dims; input-to-output
// introduces symbols for them.
TEST_F(IndexingAnalysisTest, BroadcastOp) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[20] parameter(0)
      ROOT bc0 = f32[10, 20, 30] broadcast(p0), dimensions={1}
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d1),
    domain:
    d0 in [0, 9],
    d1 in [0, 19],
    d2 in [0, 29]
  )"));
  auto output_indexing = GetInputToOutputIndexing(root);
  EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0)[s0, s1] -> (s0, d0, s1),
    domain:
    d0 in [0, 19],
    s0 in [0, 9],
    s1 in [0, 29]
  )"));
}
// Constants have no operands, so the indexing is empty.
TEST_F(IndexingAnalysisTest, ConstantOp) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      ROOT c1 = bf16[17, 22] constant(1)
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root);
  EXPECT_THAT(input_indexing.ToString(), IsEmpty());
}
// Concatenate: each operand's map shifts the concatenated dimension by the
// accumulated offset, with correspondingly restricted domains.
TEST_F(IndexingAnalysisTest, ConcatenateOp) {
  auto root = ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      p0 = f32[2, 5, 7] parameter(0)
      p1 = f32[2, 11, 7] parameter(1)
      p2 = f32[2, 17, 7] parameter(2)
      ROOT concat = f32[2, 33, 7] concatenate(
        f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
    }
  )");
  auto input_indexing = GetOutputToInputIndexing(root);
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1, d2),
    domain:
    d0 in [0, 1],
    d1 in [0, 4],
    d2 in [0, 6]
    operand id = 1
    (d0, d1, d2) -> (d0, d1 - 5, d2),
    domain:
    d0 in [0, 1],
    d1 in [5, 15],
    d2 in [0, 6]
    operand id = 2
    (d0, d1, d2) -> (d0, d1 - 16, d2),
    domain:
    d0 in [0, 1],
    d1 in [16, 32],
    d2 in [0, 6]
  )"));
  auto output_indexing_0 = GetInputToOutputIndexing(root, /*input_id=*/0);
  EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1, d2),
    domain:
    d0 in [0, 1],
    d1 in [0, 4],
    d2 in [0, 6]
  )"));
  auto output_indexing_1 = GetInputToOutputIndexing(root, /*input_id=*/1);
  EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1 + 5, d2),
    domain:
    d0 in [0, 1],
    d1 in [0, 10],
    d2 in [0, 6]
  )"));
  auto output_indexing_2 = GetInputToOutputIndexing(root, /*input_id=*/2);
  EXPECT_THAT(output_indexing_2.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2) -> (d0, d1 + 16, d2),
    domain:
    d0 in [0, 1],
    d1 in [0, 16],
    d2 in [0, 6]
  )"));
}
// Dynamic-slice: runtime offsets appear as rt symbols with clamped ranges;
// offset operands map to scalars.
TEST_F(IndexingAnalysisTest, DynamicSliceOp) {
  auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      %src = s32[2,2,258] parameter(0)
      %of1 = s32[] parameter(1)
      %of2 = s32[] parameter(2)
      %of3 = s32[] parameter(3)
      ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
        s32[] %of1, s32[] %of2, s32[] %of3),
        dynamic_slice_sizes={1, 2, 32}
    }
  )"));
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2)[rt0, rt1, rt2] -> (d0 + rt0, d1 + rt1, d2 + rt2),
    domain:
    d0 in [0, 0],
    d1 in [0, 1],
    d2 in [0, 31],
    rt0 in [0, 1],
    rt1 in [0, 0],
    rt2 in [0, 226]
    operand id = 1
    (d0, d1, d2) -> (),
    domain:
    d0 in [0, 0],
    d1 in [0, 1],
    d2 in [0, 31]
    operand id = 2
    (d0, d1, d2) -> (),
    domain:
    d0 in [0, 0],
    d1 in [0, 1],
    d2 in [0, 31]
    operand id = 3
    (d0, d1, d2) -> (),
    domain:
    d0 in [0, 0],
    d1 in [0, 1],
    d2 in [0, 31]
  )"));
}
// Dynamic-update-slice: the update operand's map subtracts runtime offsets;
// src maps identically; offsets map to scalars.
TEST_F(IndexingAnalysisTest, DynamicUpdateSliceOp) {
  auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
    HloModule m
    ENTRY e {
      %src = s32[20,30] parameter(0)
      %upd = s32[5,10] parameter(1)
      %of1 = s32[] parameter(2)
      %of2 = s32[] parameter(3)
      ROOT %dus = s32[20,30] dynamic-update-slice(
          s32[20,30] %src, s32[5,10] %upd, s32[] %of1, s32[] %of2)
    }
  )"));
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1) -> (d0, d1),
    domain:
    d0 in [0, 19],
    d1 in [0, 29]
    operand id = 1
    (d0, d1)[rt0, rt1] -> (d0 - rt0, d1 - rt1),
    domain:
    d0 in [0, 19],
    d1 in [0, 29],
    rt0 in [0, 15],
    rt1 in [0, 20]
    operand id = 2
    (d0, d1) -> (),
    domain:
    d0 in [0, 19],
    d1 in [0, 29]
    operand id = 3
    (d0, d1) -> (),
    domain:
    d0 in [0, 19],
    d1 in [0, 29]
  )"));
}
// Loop fusion wrapping one elementwise add: identity maps for both operands.
TEST_F(IndexingAnalysisTest, FusionOpWithSingleBinaryOp) {
  auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
    HloModule m
    f {
      p0 = f32[100] parameter(0)
      p1 = f32[100] parameter(1)
      ROOT a0 = f32[100] add(p0, p1)
    }
    ENTRY e {
      p0 = f32[100] parameter(0)
      p1 = f32[100] parameter(1)
      ROOT fusion = f32[100] fusion(p0, p1), kind=kLoop, calls=f
    }
  )"));
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0) -> (d0),
    domain:
    d0 in [0, 99]
    operand id = 1
    (d0) -> (d0),
    domain:
    d0 in [0, 99]
  )"));
}
// Large fusion containing a dot plus bitcasts/broadcasts/converts: verifies
// the composed indexing for all six fusion operands, including a contraction
// symbol s0 for the dot's reduced dimension.
TEST_F(IndexingAnalysisTest, FusionOpWithDot) {
  auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
    f {
      p0 = s8[3,12288,6,128]{3,2,1,0} parameter(0)
      bitcast1 = s8[3,6,128,12288]{2,1,3,0} bitcast(p0)
      copy1 = s8[3,6,128,12288]{3,2,1,0} copy(bitcast1)
      bitcast2 = s8[2304,12288]{1,0} bitcast(copy1)
      convert1 = bf16[2304,12288]{1,0} convert(bitcast2)
      bitcast3 = bf16[2304,16,768]{2,1,0} bitcast(convert1)
      p3 = bf16[16,12288]{1,0} parameter(3)
      convert2 = f32[16,12288]{1,0} convert(p3)
      p4 = bf16[16,12288]{1,0} parameter(4)
      convert3 = f32[16,12288]{1,0} convert(p4)
      add1 = f32[16,12288]{1,0} add(convert2, convert3)
      p2 = bf16[16]{0} parameter(2)
      convert15 = f32[16]{0} convert(p2)
      rsqrt = f32[16]{0} rsqrt(convert15)
      convert4 = bf16[16]{0} convert(rsqrt)
      bcast1 = bf16[16,12288]{1,0} broadcast(convert4), dimensions={0}
      convert5 = f32[16,12288]{1,0} convert(bcast1)
      multiply1 = f32[16,12288]{1,0} multiply(add1, convert5)
      p1 = bf16[12288]{0} parameter(1)
      convert6 = f32[12288]{0} convert(p1)
      c1 = bf16[] constant(1)
      bcast2 = bf16[12288]{0} broadcast(c1), dimensions={}
      convert7 = f32[12288]{0} convert(bcast2)
      add2 = f32[12288]{0} add(convert6, convert7)
      convert8 = bf16[12288]{0} convert(add2)
      bcast3 = bf16[16,12288]{1,0} broadcast(convert8), dimensions={1}
      convert9 = f32[16,12288]{1,0} convert(bcast3)
      multiply2 = f32[16,12288]{1,0} multiply(multiply1, convert9)
      convert10 = bf16[16,12288]{1,0} convert(multiply2)
      bcast4 = bf16[16,16,768]{2,1,0} bitcast(convert10)
      dot = bf16[16,2304,16]{2,1,0} dot(bitcast3, bcast4),
        lhs_batch_dims={1}, lhs_contracting_dims={2},
        rhs_batch_dims={1}, rhs_contracting_dims={2}
      bcast5 = bf16[16,3,6,128,16]{4,3,2,1,0} bitcast(dot)
      copy2 = bf16[16,3,6,128,16]{3,2,4,1,0} copy(bcast5)
      convert13 = f32[16,3,6,128,16]{3,2,4,1,0} convert(copy2)
      p5 = bf16[3,6,128]{2,1,0} parameter(5)
      bcast6 = bf16[3,6,128,16]{2,1,3,0} broadcast(p5), dimensions={0,1,2}
      convert11 = f32[3,6,128,16]{2,1,3,0} convert(bcast6)
      bcast7 = f32[16,3,6,128,16]{3,2,4,1,0} broadcast(convert11),
        dimensions={1,2,3,4}
      multiply3 = f32[16,3,6,128,16]{3,2,4,1,0} multiply(convert13, bcast7)
      convert12 = bf16[16,3,6,128,16]{3,2,4,1,0} convert(multiply3)
      ROOT bcast8 = bf16[16,16,3,1,6,128]{5,4,1,3,2,0} bitcast(convert12)
    }
    ENTRY e {
      p0 = s8[3,12288,6,128]{3,2,1,0} parameter(0)
      p1 = bf16[12288]{0} parameter(1)
      p2 = bf16[16]{0} parameter(2)
      p3 = bf16[16,12288]{1,0} parameter(3)
      p4 = bf16[16,12288]{1,0} parameter(4)
      p5 = bf16[3,6,128]{2,1,0} parameter(5)
      ROOT fusion = bf16[16,16,3,1,6,128]{5,4,1,3,2,0}
        fusion(p0, p1, p2, p3, p4, p5), kind=kLoop, calls=f
    }
  )"));
  EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
    operand id = 0
    (d0, d1, d2, d3, d4, d5)[s0] -> (d2, d0 * 768 + s0, d4, d5),
    domain:
    d0 in [0, 15],
    d1 in [0, 15],
    d2 in [0, 2],
    d3 in [0, 0],
    d4 in [0, 5],
    d5 in [0, 127],
    s0 in [0, 767]
    operand id = 1
    (d0, d1, d2, d3, d4, d5)[s0] -> (d0 * 768 + s0),
    domain:
    d0 in [0, 15],
    d1 in [0, 15],
    d2 in [0, 2],
    d3 in [0, 0],
    d4 in [0, 5],
    d5 in [0, 127],
    s0 in [0, 767]
    operand id = 2
    (d0, d1, d2, d3, d4, d5) -> (d1),
    domain:
    d0 in [0, 15],
    d1 in [0, 15],
    d2 in [0, 2],
    d3 in [0, 0],
    d4 in [0, 5],
    d5 in [0, 127]
    operand id = 3
    (d0, d1, d2, d3, d4, d5)[s0] -> (d1, d0 * 768 + s0),
    domain:
    d0 in [0, 15],
    d1 in [0, 15],
    d2 in [0, 2],
    d3 in [0, 0],
    d4 in [0, 5],
    d5 in [0, 127],
    s0 in [0, 767]
    operand id = 4
    (d0, d1, d2, d3, d4, d5)[s0] -> (d1, d0 * 768 + s0),
    domain:
    d0 in [0, 15],
    d1 in [0, 15],
    d2 in [0, 2],
    d3 in [0, 0],
    d4 in [0, 5],
    d5 in [0, 127],
    s0 in [0, 767]
    operand id = 5
    (d0, d1, d2, d3, d4, d5) -> (d2, d4, d5),
    domain:
    d0 in [0, 15],
    d1 in [0, 15],
    d2 in [0, 2],
    d3 in [0, 0],
    d4 in [0, 5],
    d5 in [0, 127]
  )"));
}
// Output-to-input indexing for a fused softmax-like computation: the single
// parameter is read both element-wise (divide path) and along the reduced
// last dimension (the reduce paths), so two distinct maps are expected —
// one with a range symbol s0 over the reduced extent, one identity map.
TEST_F(IndexingAnalysisTest, FusionOpWithSoftmax) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
add_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
max_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
softmax {
p0 = f32[2,65,125]{2,1,0} parameter(0)
bitcast0 = f32[65,2,125]{2,1,0} bitcast(p0)
constant_neg_inf_1 = f32[] constant(-inf)
reduce0 = f32[2,65]{1,0} reduce(p0, constant_neg_inf_1),
dimensions={2}, to_apply=max_computation
bitcast1 = f32[130]{0} bitcast(reduce0)
bcast1 = f32[130,125]{1,0} broadcast(bitcast1), dimensions={0}
bitcast2 = f32[65,2,125]{2,1,0} bitcast(bcast1)
subtract0 = f32[65,2,125]{2,1,0} subtract(bitcast0, bitcast2)
exponential0 = f32[65,2,125]{2,1,0} exponential(subtract0)
bitcast3 = f32[65,2,125]{2,1,0} bitcast(p0)
reduce1 = f32[2,65]{1,0} reduce(p0, constant_neg_inf_1),
dimensions={2}, to_apply=max_computation
bitcast4 = f32[130]{0} bitcast(reduce1)
bcast2 = f32[130,125]{1,0} broadcast(bitcast4), dimensions={0}
bitcast5 = f32[65,2,125]{2,1,0} bitcast(bcast2)
subtract1 = f32[65,2,125]{2,1,0} subtract(bitcast3, bitcast5)
exponential1 = f32[65,2,125]{2,1,0} exponential(subtract1)
constant_zero_1 = f32[] constant(0)
reduce2 = f32[65,2]{1,0} reduce(exponential1, constant_zero_1),
dimensions={2}, to_apply=add_computation
bitcast6 = f32[130]{0} bitcast(reduce2)
bcast3 = f32[130,125]{1,0} broadcast(bitcast6), dimensions={0}
bitcast7 = f32[65,2,125]{2,1,0} bitcast(bcast3)
divide = f32[65,2,125]{2,1,0} divide(exponential0, bitcast7)
ROOT bitcast8 = f32[2,65,125]{2,1,0} bitcast(divide)
}
ENTRY e {
p0 = f32[2,65,125]{2,1,0} parameter(0)
ROOT fusion = f32[2,65,125]{2,1,0}
fusion(p0), kind=kLoop, calls=softmax
}
)"));
// Two maps for the one operand: reduction access (s0 ranges over the 125
// reduced elements) and the direct element-wise access.
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(UnorderedElementsAre(MatchIndexingMap(R"(
(d0, d1, d2)[s0] -> (d0, d1, s0),
domain:
d0 in [0, 1],
d1 in [0, 64],
d2 in [0, 124],
s0 in [0, 124]
)"),
MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 1],
d1 in [0, 64],
d2 in [0, 124]
)"))));
}
// A fusion adding a tensor to its own transpose: the one parameter gets two
// indexing maps — the identity (d0, d1) and the swapped (d1, d0).
TEST_F(IndexingAnalysisTest, FusionOpTensorPlusTransposedTensor) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[1000, 1000] parameter(0)
transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
}
ENTRY e {
p0 = f32[1000,1000] parameter(0)
ROOT fusion = f32[1000,1000] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(UnorderedElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"),
MatchIndexingMap(R"(
(d0, d1) -> (d1, d0),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"))));
}
// Chained overlapping slices of an add: each of the two parameters ends up
// read at three distinct offsets (d0, d0 + 1, d0 + 2), verifying that the
// analysis enumerates all access paths instead of collapsing them.
TEST_F(IndexingAnalysisTest, FusionExponentialDuplication) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule test_module
fused_computation {
p0 = f32[4] parameter(0)
p1 = f32[4] parameter(1)
add0 = f32[4] add(p0, p1)
slice1.0 = f32[3] slice(add0), slice={[0:3]}
slice1.1 = f32[3] slice(add0), slice={[1:4]}
add1 = f32[3]{0} add(slice1.0, slice1.1)
slice2.0 = f32[2] slice(add1), slice={[0:2]}
slice2.1 = f32[2] slice(add1), slice={[1:3]}
ROOT add2 = f32[2] add(slice2.0, slice2.1)
}
ENTRY entry_computation {
p0 = f32[4] parameter(0)
p1 = f32[4] parameter(1)
ROOT fusion = f32[2] fusion(p0, p1), kind=kLoop,
calls=fused_computation
})"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(UnorderedElementsAre(MatchIndexingMap(R"(
(d0) -> (d0 + 1),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0 + 2),
domain:
d0 in [0, 1]
)")),
UnorderedElementsAre(MatchIndexingMap(R"(
(d0) -> (d0 + 2),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0 + 1),
domain:
d0 in [0, 1]
)"),
MatchIndexingMap(R"(
(d0) -> (d0),
domain:
d0 in [0, 1]
)"))));
}
// Gather indexing: the operand access uses runtime variables rt0/rt1 (the
// dynamic start indices read from the indices tensor), bounded by the
// operand extents minus the slice sizes; the indices operand itself is read
// along the index_vector_dim via symbol s0.
TEST_F(IndexingAnalysisTest, GatherOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY main {
operand = f32[33,76,70] parameter(0)
indices = s32[1806,2] parameter(1)
ROOT r = f32[1806,7,8,4] gather(operand, indices), offset_dims={1,2,3},
collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[rt0, rt1] -> (d1 + rt0, d2 + rt1, d3),
domain:
d0 in [0, 1805],
d1 in [0, 6],
d2 in [0, 7],
d3 in [0, 3],
rt0 in [0, 26],
rt1 in [0, 68]
operand id = 1
(d0, d1, d2, d3)[s0] -> (d0, s0),
domain:
d0 in [0, 1805],
d1 in [0, 6],
d2 in [0, 7],
d3 in [0, 3],
s0 in [0, 1]
)"));
}
// Two stacked reduces inside a fusion compose into one map with three
// symbols (s0, s1, s2), one per reduced dimension across both reduces.
TEST_F(IndexingAnalysisTest, FusionOpWithReduceOfReduce) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[150, 20, 10, 50] parameter(0)
p0_init = f32[] parameter(1)
reduce_1 = f32[20, 10] reduce(p0, p0_init),
dimensions={0, 3}, to_apply=max
ROOT reduce_2 = f32[10] reduce(reduce_1, p0_init),
dimensions={0}, to_apply=max
}
ENTRY e {
p0 = f32[150, 20, 10, 50] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[10] fusion(p0, p0_init), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0, s1, s2] -> (s0, s2, d0, s1),
domain:
d0 in [0, 9],
s0 in [0, 149],
s1 in [0, 49],
s2 in [0, 19]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
// Reduce over broadcast: the broadcast dimensions that are reduced away
// disappear entirely, leaving a single symbol s0 for the one dimension of
// the parameter that is actually reduced.
TEST_F(IndexingAnalysisTest, FusionOpWithReduceOfBroadcast) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] parameter(1)
p0_bcast = f32[15, 32, 20, 64] broadcast(p0), dimensions={0, 2}
ROOT reduce_2 = f32[15, 64] reduce(p0_bcast, p0_init),
dimensions={1, 2}, to_apply=max
}
ENTRY e {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[15, 64] fusion(p0, p0_init), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0] -> (d0, s0),
domain:
d0 in [0, 14],
d1 in [0, 63],
s0 in [0, 19]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 14],
d1 in [0, 63]
)"));
}
// Two chains of transposes (with element-wise ops in between) that end at
// the same permutation: both paths fold to the single composed map
// (d0, d1, d2) -> (d2, d0, d1), so only one map is reported.
TEST_F(IndexingAnalysisTest, FusionOpWithTransposeOfTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[20, 10, 50] parameter(0)
lhs_transpose_1 = f32[10, 20, 50]
transpose(p0), dimensions={1, 0, 2}
lhs_e = f32[10, 20, 50] exponential(lhs_transpose_1)
lhs_transpose_2 = f32[10, 50, 20]
transpose(lhs_e), dimensions={0, 2, 1}
rhs_transpose_1 = f32[50, 10, 20]
transpose(p0), dimensions={2, 1, 0}
rhs_log = f32[50, 10, 20] exponential(rhs_transpose_1)
rhs_transpose_2 = f32[10, 50, 20]
transpose(rhs_log), dimensions={1, 0, 2}
ROOT add = f32[10, 50, 20] add(lhs_transpose_2, rhs_transpose_2)
}
ENTRY e {
p0 = f32[20, 10, 50] parameter(0)
ROOT fusion = f32[10, 50, 20] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d2, d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 49],
d2 in [0, 19]
)"));
}
// Reduce of a strided slice: the slice offsets/strides compose into the
// reduce's map (e.g. s0 + 5 for start 5 stride 1, d0 * 2 for stride 2,
// s1 * 3 + 50 for start 50 stride 3).
TEST_F(IndexingAnalysisTest, FusionOpWithReducedSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[150, 64, 1024] parameter(0)
p0_init = f32[] parameter(1)
p0_slice = f32[16, 32, 128] slice(f32[150, 64, 1024] p0),
slice={[5:21:1], [0:64:2], [50:434:3]}
ROOT reduce = f32[32] reduce(p0_slice, p0_init),
dimensions={0, 2}, to_apply=max
}
ENTRY e {
p0 = f32[150, 64, 1024] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[32] fusion(p0, p0_init), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0, s1] -> (s0 + 5, d0 * 2, s1 * 3 + 50),
domain:
d0 in [0, 31],
s0 in [0, 15],
s1 in [0, 127]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 31]
)"));
}
// reshape(reshape(x)) where expand then collapse restores the original
// shape: the composed indexing simplifies to the identity map.
TEST_F(IndexingAnalysisTest, FusionOpWithReshape_CollapseOfExpand) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[128] parameter(0)
expand = f32[8, 16] reshape(p0)
ROOT collapse = f32[128] reshape(expand)
}
ENTRY e {
p0 = f32[128] parameter(0)
ROOT fusion = f32[128] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0),
domain:
d0 in [0, 127]
)"));
}
// Inverse order of the previous test (collapse then expand back): the
// composition again simplifies to the identity map.
TEST_F(IndexingAnalysisTest, FusionOpWithReshape_ExpandOfCollapse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[8, 16] parameter(0)
collapse = f32[128] reshape(p0)
ROOT expand = f32[8, 16] reshape(collapse)
}
ENTRY e {
p0 = f32[8, 16] parameter(0)
ROOT fusion = f32[8, 16] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 7],
d1 in [0, 15]
)"));
}
// Two generic reshapes that round-trip through an intermediate shape:
// floordiv/mod terms from each reshape cancel, yielding the identity map.
TEST_F(IndexingAnalysisTest, FusionOpWithReshape_ChainedGenericReshapes) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[10, 10, 10] parameter(0)
reshape1 = f32[50, 20] reshape(p0)
ROOT reshape2 = f32[10, 10, 10] reshape(reshape1)
}
ENTRY e {
p0 = f32[10, 10, 10] parameter(0)
ROOT fusion = f32[10, 10, 10] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1, d2),
domain:
d0 in [0, 9],
d1 in [0, 9],
d2 in [0, 9]
)"));
}
// slice(slice(x)): strides multiply and offsets compose, e.g. for dim 0
// stride 1*2 = 2 with offset 5 + 3*1 = 8, giving d0 * 2 + 8.
TEST_F(IndexingAnalysisTest, FusionOpWithSliceOfSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[150, 64, 1024] parameter(0)
p0_slice_1 = f32[16, 32, 128] slice(f32[150, 64, 1024] p0),
slice={[5:21:1], [0:64:2], [50:434:3]}
ROOT p0_slice_2 = f32[7, 9, 24] slice(f32[16, 32, 128] p0_slice_1),
slice={[3:16:2], [4:30:3], [5:100:4]}
}
ENTRY e {
p0 = f32[150, 64, 1024] parameter(0)
ROOT fusion = f32[7, 9, 24] fusion(p0), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 2 + 8, d1 * 6 + 8, d2 * 12 + 65),
domain:
d0 in [0, 6],
d1 in [0, 8],
d2 in [0, 23]
)"));
}
// dynamic-slice of dynamic-slice: the two sets of runtime offsets add up
// (rt0 + rt2 and rt1 + rt3), each clamped to its valid range (operand
// extent minus slice size); the scalar offset operands map to ().
TEST_F(IndexingAnalysisTest, FusionOpWithDynSliceOfDynSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
%src = s32[150, 64] parameter(0)
%of11 = s32[] parameter(1)
%of12 = s32[] parameter(2)
%of21 = s32[] parameter(3)
%of22 = s32[] parameter(4)
%ds1 = s32[50, 32] dynamic-slice(s32[150, 64] %src,
s32[] %of11, s32[] %of12), dynamic_slice_sizes={50, 32}
ROOT %ds2 = s32[25, 16] dynamic-slice(s32[50, 32] %ds1,
s32[] %of21, s32[] %of22), dynamic_slice_sizes={25, 16}
}
ENTRY e {
%p0 = s32[150, 64] parameter(0)
%p1 = s32[] parameter(1)
%p2 = s32[] parameter(2)
%p3 = s32[] parameter(3)
%p4 = s32[] parameter(4)
ROOT fusion = s32[25, 16] fusion(p0, p1, p2, p3, p4),
kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[rt0, rt1, rt2, rt3] -> (d0 + rt0 + rt2, d1 + rt1 + rt3),
domain:
d0 in [0, 24],
d1 in [0, 15],
rt0 in [0, 100],
rt1 in [0, 32],
rt2 in [0, 25],
rt3 in [0, 16]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
operand id = 4
(d0, d1) -> (),
domain:
d0 in [0, 24],
d1 in [0, 15]
)"));
}
// slice of a concatenate that touches every input: each operand's map
// subtracts its offset within the concat dimension (0, 5, 16) and restricts
// d1 to the sub-range of output indices that land in that operand.
TEST_F(IndexingAnalysisTest, FusionOpSliceOfAllConcatenateOpInputs) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
concat = f32[2, 33, 7] concatenate(
f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
ROOT slice = f32[2, 11, 7] slice(f32[2, 33, 7] concat),
slice={[0:2:1], [0:33:3], [0:7:1]}
}
ENTRY e {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
ROOT fusion = f32[2, 11, 7] fusion(p0, p1, p2), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 * 3, d2),
domain:
d0 in [0, 1],
d1 in [0, 1],
d2 in [0, 6]
operand id = 1
(d0, d1, d2) -> (d0, d1 * 3 - 5, d2),
domain:
d0 in [0, 1],
d1 in [2, 5],
d2 in [0, 6]
operand id = 2
(d0, d1, d2) -> (d0, d1 * 3 - 16, d2),
domain:
d0 in [0, 1],
d1 in [6, 10],
d2 in [0, 6]
)"));
}
// slice of a concatenate that only reaches the first input: the other two
// operands get a provably-empty domain and are reported as KNOWN EMPTY.
TEST_F(IndexingAnalysisTest, FusionOpSliceOfOneOfConcatenateOpInputs) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
concat = f32[2, 33, 7] concatenate(
f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
ROOT slice = f32[2, 3, 7] slice(f32[2, 33, 7] concat),
slice={[0:2:1], [0:5:2], [0:7:1]}
}
ENTRY e {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
ROOT fusion = f32[2, 3, 7] fusion(p0, p1, p2), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, d1 * 2, d2),
domain:
d0 in [0, 1],
d1 in [0, 2],
d2 in [0, 6]
operand id = 1
KNOWN EMPTY
operand id = 2
KNOWN EMPTY
)"));
}
// reshape of a concatenate: the linearized index d0 * 8 + d1 is split
// between the operands by a constraint on its range ([0, 1] vs [2, 31])
// rather than by per-dimension bounds.
TEST_F(IndexingAnalysisTest, FusionOpReshapeOfConcat) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[2] parameter(0)
p1 = f32[30] parameter(1)
concat = f32[32] concatenate(f32[2] p0, f32[30] p1), dimensions={0}
ROOT reshape = f32[4, 8] reshape(concat)
}
ENTRY e {
p0 = f32[2] parameter(0)
p1 = f32[30] parameter(1)
ROOT fusion = f32[4, 8] fusion(p0, p1), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 * 8 + d1),
domain:
d0 in [0, 3],
d1 in [0, 7],
d0 * 8 + d1 in [0, 1]
operand id = 1
(d0, d1) -> (d0 * 8 + d1 - 2),
domain:
d0 in [0, 3],
d1 in [0, 7],
d0 * 8 + d1 in [2, 31]
)"));
}
// iota has no operands, so the output-to-input indexing is empty.
TEST_F(IndexingAnalysisTest, IotaOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
ROOT iota = s32[5,5,111,42] iota(), iota_dimension=0
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps, IsEmpty());
}
// Collapsing reshape [4,8] -> [32]: the flat index is delinearized into the
// operand dims via floordiv/mod by the minor extent.
TEST_F(IndexingAnalysisTest, ReshapeOpCollapseShape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4,8] parameter(0)
ROOT reshape = f32[32] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0 floordiv 8, d0 mod 8),
domain:
d0 in [0, 31]
)"));
}
// Expanding reshape [32] -> [4,8]: the output dims linearize back into the
// 1-D operand index d0 * 8 + d1.
TEST_F(IndexingAnalysisTest, ReshapeOpExpandShape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[32] parameter(0)
ROOT reshape = f32[4, 8] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 * 8 + d1),
domain:
d0 in [0, 3],
d1 in [0, 7]
)"));
}
// Reshape that collapses the leading dims and expands the trailing ones;
// checks both directions (output-to-input and input-to-output) are exact
// inverses of each other.
TEST_F(IndexingAnalysisTest, ReshapeOpExpandAndCollapseShape) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 8, 12] parameter(0)
ROOT reshape = f32[32, 3, 4] reshape(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 floordiv 8, d0 mod 8, d1 * 4 + d2),
domain:
d0 in [0, 31],
d1 in [0, 2],
d2 in [0, 3]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 8 + d1, d2 floordiv 4, d2 mod 4),
domain:
d0 in [0, 3],
d1 in [0, 7],
d2 in [0, 11]
)"));
}
// Reshape that expands only the leading dimension ([16,8] -> [4,4,8]):
// the untouched trailing dim maps through unchanged.
TEST_F(IndexingAnalysisTest, ReshapeOpExpandSubshapeOnly) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[16, 8] parameter(0)
ROOT reshape = f32[4, 4, 8] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 4 + d1, d2),
domain:
d0 in [0, 3],
d1 in [0, 3],
d2 in [0, 7]
)"));
}
// Generic (non-factorizable) 2D -> 3D reshape: dimensions are split across
// operand dims, producing mixed floordiv/mod/multiply terms.
TEST_F(IndexingAnalysisTest, ReshapeOpGenericReshape2DTo3D) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4,8] parameter(0)
ROOT reshape = f32[2, 4, 4] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 * 2 + d1 floordiv 2, (d1 mod 2) * 4 + d2),
domain:
d0 in [0, 1],
d1 in [0, 3],
d2 in [0, 3]
)"));
}
// Generic 3D -> 2D reshape, the inverse of the previous test's mapping.
TEST_F(IndexingAnalysisTest, ReshapeOpGenericReshape3DTo2D) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2, 4, 4] parameter(0)
ROOT reshape = f32[4, 8] reshape(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 floordiv 2,
(d0 mod 2) * 2 + d1 floordiv 4,
d1 mod 4),
domain:
d0 in [0, 3],
d1 in [0, 7]
)"));
}
// Pad with low/high/interior padding: interior padding introduces a
// floordiv plus a mod-constraint selecting non-interior positions; the
// low padding shifts the domain start; the pad value maps to ().
TEST_F(IndexingAnalysisTest, PadOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 4] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[12, 16] pad(p0, p1), padding=1_4_1x4_8_0
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> ((d0 - 1) floordiv 2, d1 - 4),
domain:
d0 in [1, 7],
d1 in [4, 7],
(d0 - 1) mod 2 in [0, 0]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 11],
d1 in [0, 15]
)"));
}
// Pad without interior padding: only a plain offset (d0 - 1) and a reduced
// domain; no floordiv/mod constraints appear.
TEST_F(IndexingAnalysisTest, PadOpNoInterior) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2,8] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[10,8] pad(p0, p1), padding=1_7x0_0
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 - 1, d1),
domain:
d0 in [1, 2],
d1 in [0, 7]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 9],
d1 in [0, 7]
)"));
}
// Pad with negative low/high padding plus interior padding: the negative
// low pad turns into a positive offset inside the floordiv.
TEST_F(IndexingAnalysisTest, PadOpNegativePadding) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[5] pad(p0, p1), padding=-3_-5_1
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> ((d0 + 3) floordiv 2),
domain:
d0 in [0, 4],
(d0 + 3) mod 2 in [0, 0]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 4]
)"));
}
// Reduce over dims {3, 1}: output-to-input introduces one symbol per
// reduced dim; input-to-output is checked for both the reduced operand
// (operand index 0) and the init value (operand index 1, pure symbols).
TEST_F(IndexingAnalysisTest, ReduceOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
p0 = f32[150, 20, 10, 50] parameter(0)
p0_init = f32[] constant(-inf)
ROOT reduce = f32[150, 10] reduce(p0, p0_init),
dimensions={3, 1}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (d0, s0, d1, s1),
domain:
d0 in [0, 149],
d1 in [0, 9],
s0 in [0, 19],
s1 in [0, 49]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 149],
d1 in [0, 9]
)"));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d2),
domain:
d0 in [0, 149],
d1 in [0, 19],
d2 in [0, 9],
d3 in [0, 49]
)"));
// The scalar init contributes to every output element, so its
// input-to-output map has no dims, only symbols over the output shape.
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
()[s0, s1] -> (s0, s1),
domain:
s0 in [0, 149],
s1 in [0, 9]
)"));
}
// Variadic (two-result) reduce: checks output-to-input indexing for both
// result indices, and input-to-output indexing for all four operands
// (two reduced arrays and two scalar init values).
TEST_F(IndexingAnalysisTest, VariadicReduceOp) {
HloInstruction* root = ParseAndGetRoot(R"(
HloModule m
min {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
cmp = pred[] compare(tmp_0, tmp_1), direction=GE
select1 = f32[] select(cmp, tmp_0, tmp_1)
select2 = s32[] select(cmp, tmp_2, tmp_3)
ROOT tmp_4 = (f32[], s32[]) tuple(select1, select2)
}
ENTRY e {
p0 = f32[256,10] parameter(0)
p0_init = f32[] constant(-inf)
p1 = s32[256,10] parameter(1)
p1_init = s32[] constant(0)
ROOT reduce = (f32[10], s32[10]) reduce(p0, p1, p0_init, p1_init),
dimensions={0}, to_apply=min
}
)");
// Both tuple results are produced by the same reduce, so the
// output-to-input indexing is identical for output ids 0 and 1.
auto output_indexing_0 = GetOutputToInputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 1
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 2
(d0) -> (),
domain:
d0 in [0, 9]
operand id = 3
(d0) -> (),
domain:
d0 in [0, 9]
)"));
auto output_indexing_1 = GetOutputToInputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 1
(d0)[s0] -> (s0, d0),
domain:
d0 in [0, 9],
s0 in [0, 255]
operand id = 2
(d0) -> (),
domain:
d0 in [0, 9]
operand id = 3
(d0) -> (),
domain:
d0 in [0, 9]
)"));
// Both reduced-array operands (0 and 1) map every row element of column
// d1 to both tuple results.
constexpr std::string_view kInputToOutputIndexing = R"(
(d0, d1) -> (d1),
domain:
d0 in [0, 255],
d1 in [0, 9]
)";
auto input_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(
input_indexing_0.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInputToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInputToOutputIndexing))));
auto input_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(
input_indexing_1.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInputToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInputToOutputIndexing))));
// The scalar inits (operands 2 and 3) contribute to every output element.
constexpr std::string_view kInitToOutputIndexing = R"(
()[s0] -> (s0),
domain:
s0 in [0, 9]
)";
auto input_indexing_2 = GetInputToOutputIndexing(root, 2);
EXPECT_THAT(
input_indexing_2.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInitToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInitToOutputIndexing))));
// Fixed: operand 3 (the s32 init) was previously queried with index 2,
// duplicating the check above and leaving operand 3 untested.
auto input_indexing_3 = GetInputToOutputIndexing(root, 3);
EXPECT_THAT(
input_indexing_3.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(kInitToOutputIndexing)),
ElementsAre(MatchIndexingMap(kInitToOutputIndexing))));
}
// reduce-window without padding: the window offset becomes a symbol s0
// added to the output index; the init scalar maps to ().
TEST_F(IndexingAnalysisTest, ReduceWindowOp_NoPadding) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[1024, 514]parameter(0)
ROOT reduce-window = f32[1024, 3] reduce-window(p0, c_inf),
window={size=1x512 pad=0_0x0_0}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0] -> (d0, d1 + s0),
domain:
d0 in [0, 1023],
d1 in [0, 2],
s0 in [0, 511]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 1023],
d1 in [0, 2]
)"));
}
// reduce-window with stride and padding: stride scales the dim (d0 * 2),
// padding shifts it (- 1), and extra constraints keep the accessed index
// inside the unpadded operand.
TEST_F(IndexingAnalysisTest, ReduceWindowOp_PaddingAndWindowStride) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[13, 17] parameter(0)
ROOT reduce-window = f32[7, 17] reduce-window(p0, c_inf),
window={size=3x2 stride=2x1 pad=1_1x0_1}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (d0 * 2 + s0 - 1, d1 + s1),
domain:
d0 in [0, 6],
d1 in [0, 16],
s0 in [0, 2],
s1 in [0, 1],
d0 * 2 + s0 in [1, 13],
d1 + s1 in [0, 16]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 6],
d1 in [0, 16]
)"));
}
// reduce-window with base (lhs) dilation: output indices map back through
// floordiv 2 with mod-constraints picking out the non-dilated positions.
TEST_F(IndexingAnalysisTest, ReduceWindowOp_BaseDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[2, 3] parameter(0)
ROOT reduce-window = f32[3, 5] reduce-window(p0, c_inf),
window={size=1x1 pad=0_0x0_0 lhs_dilate=2x2}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 floordiv 2, d1 floordiv 2),
domain:
d0 in [0, 2],
d1 in [0, 4],
d0 mod 2 in [0, 0],
d1 mod 2 in [0, 0]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 2],
d1 in [0, 4]
)"));
}
// reduce-window with window (rhs) dilation: the window symbol is scaled
// by the dilation factor (s0 * 3).
TEST_F(IndexingAnalysisTest, ReduceWindowOp_WindowDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
c_inf = f32[] constant(-inf)
p0 = f32[7, 3] parameter(0)
ROOT reduce-window = f32[4, 3] reduce-window(p0, c_inf),
window={size=2x1 pad=0_0x0_0 rhs_dilate=3x1}, to_apply=max
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0] -> (d0 + s0 * 3, d1),
domain:
d0 in [0, 3],
d1 in [0, 2],
s0 in [0, 1]
operand id = 1
(d0, d1) -> (),
domain:
d0 in [0, 3],
d1 in [0, 2]
)"));
}
// Variadic reduce-window producing a tuple: the indexing is identical for
// both output ids — window symbols for the two array operands, () for the
// two scalar inits.
TEST_F(IndexingAnalysisTest, ReduceWindowOp_Variadic) {
auto root = ParseAndGetRoot(R"(
HloModule m
combiner {
a0 = f32[] parameter(0)
a1 = s32[] parameter(1)
b0 = f32[] parameter(2)
b1 = s32[] parameter(3)
add0 = f32[] add(a0, b0)
add1 = s32[] add(a1, b1)
ROOT sum2 = (f32[], s32[]) tuple(add0, add1)
}
ENTRY e {
c_f32 = f32[] constant(-inf)
c_s32 = s32[] constant(10)
p0 = f32[2, 3] parameter(0)
p1 = s32[2, 3] parameter(1)
ROOT reduce-window = (f32[1, 2], s32[1, 2])
reduce-window(p0, p1, c_f32, c_s32),
window={size=2x2 pad=0_0x0_0}, to_apply=combiner
}
)");
auto input_indexing_0 = GetOutputToInputIndexing(root, 0);
EXPECT_THAT(input_indexing_0.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 1
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
)"));
auto input_indexing_1 = GetOutputToInputIndexing(root, 1);
EXPECT_THAT(input_indexing_1.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 1
(d0, d1)[s0, s1] -> (s0, d1 + s1),
domain:
d0 in [0, 0],
d1 in [0, 1],
s0 in [0, 1],
s1 in [0, 1]
operand id = 2
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
operand id = 3
(d0, d1) -> (),
domain:
d0 in [0, 0],
d1 in [0, 1]
)"));
}
// Plain convolution (no padding/stride/dilation): spatial window offsets
// s0/s1 add to the output position; s2 ranges over input features; the
// kernel operand is indexed purely by symbols plus the output feature d3.
TEST_F(IndexingAnalysisTest, ConvolutionOp_NoPadding) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,10,6,8] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 + s0, d2 + s1, s2),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
// Convolution with stride and padding: strides scale the output dims,
// padding subtracts, and constraints keep the access within the unpadded
// input extents.
TEST_F(IndexingAnalysisTest, ConvolutionOp_PaddingAndWindowStride) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,6,5,8] convolution(p0, p1),
window={size=3x5 stride=2x2 pad=1_1x2_2}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 * 2 + s0 - 1, d2 * 2 + s1 - 2, s2),
domain:
d0 in [0, 0],
d1 in [0, 5],
d2 in [0, 4],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3],
d1 * 2 + s0 in [1, 12],
d2 * 2 + s1 in [2, 11]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 5],
d2 in [0, 4],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
// Convolution with lhs (input) dilation: spatial accesses go through
// floordiv 2 with mod-constraints selecting the real (non-hole) samples.
TEST_F(IndexingAnalysisTest, ConvolutionOp_LhsDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,21,15,8] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0 lhs_dilate=2x2}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, (d1 + s0) floordiv 2, (d2 + s1) floordiv 2, s2),
domain:
d0 in [0, 0],
d1 in [0, 20],
d2 in [0, 14],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3],
(d1 + s0) mod 2 in [0, 0],
(d2 + s1) mod 2 in [0, 0]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 20],
d2 in [0, 14],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
// Convolution with rhs (kernel) dilation: the window symbols are scaled
// by the dilation factor (s0 * 2, s1 * 2) in the input access.
TEST_F(IndexingAnalysisTest, ConvolutionOp_RhsDilation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,4] parameter(0)
p1 = f32[4,3,5,8] parameter(1)
ROOT conv = f32[1,8,2,8] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0 rhs_dilate=2x2}, dim_labels=b01f_i01o->b01f
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 + s0 * 2, d2 + s1 * 2, s2),
domain:
d0 in [0, 0],
d1 in [0, 7],
d2 in [0, 1],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 7],
d2 in [0, 1],
d3 in [0, 7],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
// Grouped convolution (feature_group_count=6): the input-feature access
// selects the group via (d3 floordiv 8) * 4 + s2, where 8 is the output
// features per group and 4 the input features per group.
TEST_F(IndexingAnalysisTest, ConvolutionOp_FeatureGroups) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,12,10,24] parameter(0)
p1 = f32[4,3,5,48] parameter(1)
ROOT conv = f32[1,10,6,48] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f, feature_group_count=6
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2] -> (d0, d1 + s0, d2 + s1, (d3 floordiv 8) * 4 + s2),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 47],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 0],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 47],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
// Batch-grouped convolution (batch_group_count=7): an extra symbol s3
// ranges over the batch groups, contributing d0 + s3 * 2 to the input
// batch dimension; the kernel access is unaffected by grouping.
TEST_F(IndexingAnalysisTest, ConvolutionOp_BatchGroups) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[14,12,10,4] parameter(0)
p1 = f32[4,3,5,21] parameter(1)
ROOT conv = f32[2,10,6,21] convolution(p0, p1),
window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f, batch_group_count=7
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 + s3 * 2, d1 + s0, d2 + s1, s2),
domain:
d0 in [0, 1],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 20],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3],
s3 in [0, 6]
operand id = 1
(d0, d1, d2, d3)[s0, s1, s2] -> (s2, s0, s1, d3),
domain:
d0 in [0, 1],
d1 in [0, 9],
d2 in [0, 5],
d3 in [0, 20],
s0 in [0, 2],
s1 in [0, 4],
s2 in [0, 3]
)"));
}
// Reverse over dims {1, 2}: reversed dims become (-d + extent - 1); the
// map is an involution, so input-to-output equals output-to-input.
TEST_F(IndexingAnalysisTest, ReverseOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1, 17, 9, 9] parameter(0)
ROOT reverse = f32[1, 17, 9, 9] reverse(p0), dimensions={1, 2}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, -d1 + 16, -d2 + 8, d3),
domain:
d0 in [0, 0],
d1 in [0, 16],
d2 in [0, 8],
d3 in [0, 8]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, -d1 + 16, -d2 + 8, d3),
domain:
d0 in [0, 0],
d1 in [0, 16],
d2 in [0, 8],
d3 in [0, 8]
)"));
}
// reverse -> reshape -> reverse -> reshape: the two reversals cancel
// through the reshapes, composing down to the identity map.
TEST_F(IndexingAnalysisTest, ReverseReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
fused_computation {
p0 = f32[10, 11] parameter(0)
reverse.0 = f32[10, 11] reverse(p0), dimensions={0, 1}
reshape.0 = f32[110] reshape(reverse.0)
reverse.1 = f32[110] reverse(reshape.0), dimensions={0}
ROOT reshape.1 = f32[10, 11] reshape(reverse.1)
}
ENTRY e {
p0 = f32[10, 11] parameter(0)
ROOT fusion = f32[10, 11] fusion(p0), kind=kLoop,
calls=fused_computation
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 9],
d1 in [0, 10]
)"));
}
TEST_F(IndexingAnalysisTest, SliceOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 50] parameter(0)
ROOT slice = f32[5, 3, 25] slice(f32[10, 20, 50] p0),
slice={[5:10:1], [3:20:7], [0:50:2]}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0 + 5, d1 * 7 + 3, d2 * 2),
domain:
d0 in [0, 4],
d1 in [0, 2],
d2 in [0, 24]
)"));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (
d0 - 5,
(d1 - 3) floordiv 7,
d2 floordiv 2
),
domain:
d0 in [5, 9],
d1 in [3, 17],
d2 in [0, 48],
(d1 - 3) mod 7 in [0, 0],
d2 mod 2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, TransposeOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[3, 12288, 6, 128] parameter(0)
ROOT transpose = f32[3, 6, 128, 12288]
transpose(p0), dimensions={0, 2, 3, 1}
}
)");
EXPECT_THAT(GetOutputToInputIndexing(root).ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d3, d1, d2),
domain:
d0 in [0, 2],
d1 in [0, 5],
d2 in [0, 127],
d3 in [0, 12287]
)"));
EXPECT_THAT(GetInputToOutputIndexing(root).ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d2, d3, d1),
domain:
d0 in [0, 2],
d1 in [0, 12287],
d2 in [0, 5],
d3 in [0, 127]
)"));
}
TEST_F(IndexingAnalysisTest, TransposeOp4D) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[3, 12288, 6, 128] parameter(0)
ROOT bitcast = f32[3, 6, 128, 12288] {2, 1, 3, 0} bitcast(p0)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3) -> (d0, d3, d1, d2),
domain:
d0 in [0, 2],
d1 in [0, 5],
d2 in [0, 127],
d3 in [0, 12287]
)"));
}
TEST_F(IndexingAnalysisTest, DotOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 38, 17, 11, 18, 10] parameter(0)
p1 = f32[17, 10, 16, 18, 22, 38] parameter(1)
ROOT dot = f32[10, 38, 4, 11, 16, 22] dot(p0, p1),
lhs_batch_dims={5,1}, rhs_batch_dims={1,5},
lhs_contracting_dims={4,2}, rhs_contracting_dims={3,0}
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (d2, d1, s1, d3, s0, d0),
domain:
d0 in [0, 9],
d1 in [0, 37],
d2 in [0, 3],
d3 in [0, 10],
d4 in [0, 15],
d5 in [0, 21],
s0 in [0, 17],
s1 in [0, 16]
operand id = 1
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (s1, d0, d4, s0, d5, d1),
domain:
d0 in [0, 9],
d1 in [0, 37],
d2 in [0, 3],
d3 in [0, 10],
d4 in [0, 15],
d5 in [0, 21],
s0 in [0, 17],
s1 in [0, 16]
)"));
}
TEST_F(IndexingAnalysisTest, UnsupportedOps) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[20, 20] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[4,3] parameter(2)
ROOT out = f32[4,3] triangular-solve(f32[4,4] p1, f32[4,3] p2),
left_side=true,
lower=true,
transpose_a=NO_TRANSPOSE,
unit_diagonal=true
}
)");
EXPECT_THAT(GetOutputToInputIndexing(root).ToString(), MatchIndexingString(R"(
operand id = 0 unknown indexing
operand id = 1 unknown indexing
)"));
EXPECT_THAT(GetInputToOutputIndexing(root, 0).ToString(),
MatchIndexingString(R"(
operand id = 0 unknown indexing
)"));
EXPECT_THAT(GetInputToOutputIndexing(root, 1).ToString(),
MatchIndexingString(R"(
operand id = 0 unknown indexing
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithUnsupportedOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
fused_computation {
p0 = f32[20, 20] parameter(0)
p1 = f32[4,4] parameter(1)
p2 = f32[4,3] parameter(2)
lhs = f32[4,3] triangular-solve(f32[4,4] p1, f32[4,3] p2),
left_side=true,
lower=true,
transpose_a=NO_TRANSPOSE,
unit_diagonal=true
rhs = f32[4, 3] slice(f32[20, 20] p0),
slice={[0:20:6], [0:5:2]}
ROOT add = f32[4, 3] add(lhs, rhs)
}
ENTRY e {
p0 = f32[20, 20] parameter(0)
p1 = f32[4, 4] parameter(1)
p2 = f32[4, 3] parameter(2)
ROOT fusion = f32[4, 3] fusion(p0, p1, p2), kind=kLoop,
calls=fused_computation
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1) -> (d0 * 6, d1 * 2),
domain:
d0 in [0, 3],
d1 in [0, 2]
operand id = 1
unknown indexing
operand id = 2
unknown indexing
)"));
}
TEST_F(IndexingAnalysisTest, EpilogueIndexing) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
p0 = f32[1000, 1000] parameter(0)
t = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
a0 = f32[1000000] bitcast(t)
ROOT log = f32[1000000] log(a0)
}
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
ROOT fusion = f32[1000000] fusion(p0), kind=kLoop,
calls=fused_computation
}
)");
ASSERT_TRUE(module.ok());
auto* computation = (*module)->GetComputationWithName("fused_computation");
auto fusion = HloFusionAdaptor::ForComputation(computation);
HloInstructionAdaptor transpose(*computation->GetInstructionWithName("t"),
fusion.get());
HloInstructionAdaptor log(*computation->GetInstructionWithName("log"),
fusion.get());
EXPECT_THAT(ToString(ComputeEpilogueInputToOutputIndexing(transpose, log,
&mlir_context_)),
MatchIndexingString(R"(
(d0, d1) -> (d1 * 1000 + d0),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"));
}
TEST_F(IndexingAnalysisTest, EpilogueIndexing_NoEpilogue) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
p0 = f32[1000, 1000] parameter(0)
ROOT t = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
}
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
ROOT fusion = f32[1000, 1000] fusion(p0), kind=kLoop,
calls=fused_computation
}
)");
ASSERT_TRUE(module.ok());
auto* computation = (*module)->GetComputationWithName("fused_computation");
auto fusion = HloFusionAdaptor::ForComputation(computation);
HloInstructionAdaptor transpose(*computation->GetInstructionWithName("t"),
fusion.get());
EXPECT_THAT(ToString(ComputeEpilogueInputToOutputIndexing(
transpose, transpose, &mlir_context_)),
MatchIndexingString(R"(
(d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"));
}
TEST_F(IndexingAnalysisTest, BroadcastingElementwise) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = pred[] parameter(0)
p1 = f32[1000, 1000] parameter(1)
p2 = f32[1000, 1000] parameter(2)
ROOT select = f32[1000, 1000] select(p0, p1, p2)
}
)"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0 (d0, d1) -> (),
domain:
d0 in [0, 999],
d1 in [0, 999]
operand id = 1 (d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
operand id = 2 (d0, d1) -> (d0, d1),
domain:
d0 in [0, 999],
d1 in [0, 999]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_ScalarConstant) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
offset = s64[] constant(42)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, offset), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
ROOT fusion = s32[10] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0) -> (d0 + 42),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Iota) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
iota = s64[42,1] iota(), iota_dimension=0
ROOT gather = f32[42,1,1] gather(p0, iota),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (d0, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_IotaAsConstant) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
iota = s64[42,1] iota(), iota_dimension=1
ROOT gather = f32[42,1,1] gather(p0, iota),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (0, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Broadcast) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
c42 = s64[] constant(42)
bcast = s64[42, 1] broadcast(s64[] c42), dimensions={}
ROOT gather = f32[42,1,1] gather(p0, bcast),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (42, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Reverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = f32[33,76] parameter(0)
iota = s64[42,1] iota(), iota_dimension=0
reverse = s64[42,1] reverse(iota), dimensions={0}
ROOT gather = f32[42,1,1] gather(p0, reverse),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0},
index_vector_dim=1,
slice_sizes={1,1}
}
ENTRY main {
p0 = f32[33,76] parameter(0)
ROOT fusion = f32[42,1,1] fusion(p0), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1, d2) -> (-d0 + 41, 0),
domain:
d0 in [0, 41],
d1 in [0, 0],
d2 in [0, 0]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Add) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
c42 = s64[] constant(42)
add = s64[] add(c42, p1)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, add), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
ROOT fusion = s32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0 (d0)[rt0] -> (d0 + rt0 + 42),
domain:
d0 in [0, 9],
rt0 in [0, 4086]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_Multiply) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
c42 = s64[] constant(42)
add = s64[] multiply(c42, p1)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, add), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
ROOT fusion = s32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0 (d0)[rt0] -> (d0 + rt0 * 42),
domain:
d0 in [0, 9],
rt0 in [0, 4086]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionWithRTVarsSimplification_ChainedOps) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
c42 = s64[] constant(42)
c2 = s64[] constant(2)
add = s64[] add(c42, p1)
multiply = s64[] multiply(c2, add)
ROOT dynamic-slice = s32[10]
dynamic-slice(p0, multiply), dynamic_slice_sizes={10}
}
ENTRY main {
p0 = s32[4096] parameter(0)
p1 = s64[] parameter(1)
ROOT fusion = s32[10] fusion(p0, p1), kind=kInput, calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0)[rt0] -> (d0 + rt0 * 2 + 84),
domain: d0 in [0, 9],
rt0 in [0, 4086]
operand id = 1
(d0) -> (),
domain:
d0 in [0, 9]
)"));
}
TEST_F(IndexingAnalysisTest, FusionOpWithDUS) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"hlo(
HloModule m
fused_computation {
bitcast = s32[1,4096]{1,0} parameter(0)
constant = s32[] constant(0)
pad = s32[1,8192]{1,0} pad(bitcast, constant), padding=0_0x4096_0
slice = s32[1]{0} parameter(1)
bitcast.4 = s32[] bitcast(slice)
ROOT dynamic-slice = s32[1,4096]{1,0}
dynamic-slice(pad, constant, bitcast.4), dynamic_slice_sizes={1,4096}
}
ENTRY main {
param_0 = s32[1,4096]{1,0} parameter(0)
param_1 = s32[1]{0} parameter(1)
ROOT fusion = s32[1,4096]{1,0} fusion(param_0, param_1), kind=kInput,
calls=fused_computation
}
)hlo"));
EXPECT_THAT(input_indexing.ToString(), MatchIndexingString(R"(
operand id = 0
(d0, d1)[rt0] -> (0, d1 + rt0 - 4096),
domain:
d0 in [0, 0],
d1 in [0, 4095],
rt0 in [0, 4096],
d1 + rt0 in [4096, 8191]
operand id = 1
(d0, d1) -> (0),
domain:
d0 in [0, 0],
d1 in [0, 4095]
)"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/indexing_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71a2284b-c8c4-47ae-8f89-1b9d6cb5b1aa | cpp | google/cel-cpp | uint_type | common/types/uint_type.h | common/types/uint_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_UINT_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_UINT_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
// The CEL `uint` primitive type. UintType is stateless: every instance is
// interchangeable, all instances compare equal, and hashing adds no data.
class UintType final {
 public:
  static constexpr TypeKind kKind = TypeKind::kUint;
  static constexpr absl::string_view kName = "uint";

  UintType() = default;
  UintType(const UintType&) = default;
  UintType(UintType&&) = default;
  UintType& operator=(const UintType&) = default;
  UintType& operator=(UintType&&) = default;

  static TypeKind kind() { return kKind; }

  static absl::string_view name() { return kName; }

  // `uint` takes no type parameters.
  // NOTE(review): defined out of line; presumably returns an empty list.
  static TypeParameters GetParameters();

  static std::string DebugString() { return std::string(name()); }

  // No state to exchange; present for generic-code compatibility.
  constexpr void swap(UintType&) noexcept {}
};
// ADL-discoverable swap; a no-op because UintType carries no state.
inline constexpr void swap(UintType& lhs, UintType& rhs) noexcept {
  lhs.swap(rhs);
}
inline constexpr bool operator==(UintType, UintType) { return true; }
// Inequality is the negation of equality; since every UintType compares
// equal, this always yields false.
inline constexpr bool operator!=(UintType lhs, UintType rhs) {
  return !(lhs == rhs);
}
// Hash support: folds in no data, so all UintType values hash identically.
template <typename H>
H AbslHashValue(H state, UintType) {
  return std::move(state);
}
// Streams the debug representation ("uint").
inline std::ostream& operator<<(std::ostream& out, const UintType& type) {
  return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
// kind() must report TypeKind::kUint both directly and via the Type wrapper.
TEST(UintType, Kind) {
  EXPECT_EQ(UintType().kind(), UintType::kKind);
  EXPECT_EQ(Type(UintType()).kind(), UintType::kKind);
}
// name() must report "uint" both directly and via the Type wrapper.
TEST(UintType, Name) {
  EXPECT_EQ(UintType().name(), UintType::kName);
  EXPECT_EQ(Type(UintType()).name(), UintType::kName);
}
// operator<< must emit the type name both for UintType and for Type.
TEST(UintType, DebugString) {
  {
    std::ostringstream out;
    out << UintType();
    EXPECT_EQ(out.str(), UintType::kName);
  }
  {
    std::ostringstream out;
    out << Type(UintType());
    EXPECT_EQ(out.str(), UintType::kName);
  }
}
// All UintType instances hash identically (the type is stateless).
TEST(UintType, Hash) {
  EXPECT_EQ(absl::HashOf(UintType()), absl::HashOf(UintType()));
}
// Equality holds across every combination of UintType and Type wrapping.
TEST(UintType, Equal) {
  EXPECT_EQ(UintType(), UintType());
  EXPECT_EQ(Type(UintType()), UintType());
  EXPECT_EQ(UintType(), Type(UintType()));
  EXPECT_EQ(Type(UintType()), Type(UintType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/uint_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/uint_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3fab986a-3313-49b1-a984-9d57a4f12f38 | cpp | google/quiche | internet_checksum | quiche/quic/core/internet_checksum.cc | quiche/quic/core/internet_checksum_test.cc | #include "quiche/quic/core/internet_checksum.h"
#include <stdint.h>
#include <string.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
namespace quic {
// Folds `size` bytes starting at `data` into the running 32-bit accumulator.
// Bytes are consumed as native-endian 16-bit words; a trailing odd byte is
// added on its own, matching the RFC 1071 accumulation step.
void InternetChecksum::Update(const char* data, size_t size) {
  size_t offset = 0;
  // Consume all complete 16-bit words. memcpy avoids unaligned access.
  while (offset + 1 < size) {
    uint16_t word;
    memcpy(&word, data + offset, sizeof(word));
    accumulator_ += word;
    offset += 2;
  }
  // A leftover odd byte contributes its unsigned value directly.
  if (offset < size) {
    accumulator_ += static_cast<unsigned char>(data[offset]);
  }
}
// Convenience overload for unsigned byte buffers.
void InternetChecksum::Update(const uint8_t* data, size_t size) {
  Update(reinterpret_cast<const char*>(data), size);
}
// Convenience overload for string views.
void InternetChecksum::Update(absl::string_view data) {
  Update(data.data(), data.size());
}
// Convenience overload for byte spans.
void InternetChecksum::Update(absl::Span<const uint8_t> data) {
  Update(reinterpret_cast<const char*>(data.data()), data.size());
}
// Finalizes the checksum: folds the 32-bit accumulator into 16 bits by
// repeatedly adding the carry-out back in (end-around carry), then returns
// the one's complement, per RFC 1071.
uint16_t InternetChecksum::Value() const {
  uint32_t folded = accumulator_;
  while ((folded >> 16) != 0) {
    folded = (folded & 0xffffu) + (folded >> 16);
  }
  return static_cast<uint16_t>(~folded);
}
} | #include "quiche/quic/core/internet_checksum.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace {
// Worked example from RFC 1071; the result is checked byte-by-byte so the
// expectation is independent of host endianness.
TEST(InternetChecksumTest, MatchesRFC1071Example) {
  uint8_t data[] = {0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7};
  InternetChecksum checksum;
  checksum.Update(data, 8);
  uint16_t result = checksum.Value();
  auto* result_bytes = reinterpret_cast<uint8_t*>(&result);
  ASSERT_EQ(0x22, result_bytes[0]);
  ASSERT_EQ(0x0d, result_bytes[1]);
}
// Same data minus the final byte: exercises the odd-length input path where
// the last byte is folded in on its own.
TEST(InternetChecksumTest, MatchesRFC1071ExampleWithOddByteCount) {
  uint8_t data[] = {0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6};
  InternetChecksum checksum;
  checksum.Update(data, 7);
  uint16_t result = checksum.Value();
  auto* result_bytes = reinterpret_cast<uint8_t*>(&result);
  ASSERT_EQ(0x23, result_bytes[0]);
  ASSERT_EQ(0x04, result_bytes[1]);
}
// A second known-answer vector (the "Berkeley example", per the test name).
TEST(InternetChecksumTest, MatchesBerkleyExample) {
  uint8_t data[] = {0xe3, 0x4f, 0x23, 0x96, 0x44, 0x27, 0x99, 0xf3};
  InternetChecksum checksum;
  checksum.Update(data, 8);
  uint16_t result = checksum.Value();
  auto* result_bytes = reinterpret_cast<uint8_t*>(&result);
  ASSERT_EQ(0x1a, result_bytes[0]);
  ASSERT_EQ(0xff, result_bytes[1]);
}
// Input whose running sum overflows 16 bits more than once, forcing the
// end-around-carry fold in Value() to iterate.
TEST(InternetChecksumTest, ChecksumRequiringMultipleCarriesInLittleEndian) {
  uint8_t data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x02, 0x00};
  InternetChecksum checksum;
  checksum.Update(data, 8);
  uint16_t result = checksum.Value();
  auto* result_bytes = reinterpret_cast<uint8_t*>(&result);
  EXPECT_EQ(0xfd, result_bytes[0]);
  EXPECT_EQ(0xff, result_bytes[1]);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/internet_checksum.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/internet_checksum_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
2b37f4eb-1743-4a71-a5fa-9e6fbb0fbcc1 | cpp | google/quiche | quic_alarm | quiche/quic/core/quic_alarm.cc | quiche/quic/core/quic_alarm_test.cc | #include "quiche/quic/core/quic_alarm.h"
#include <atomic>
#include <cstdlib>
#include <utility>
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_stack_trace.h"
namespace quic {
// Takes ownership of `delegate`; a new alarm starts unset (zero deadline).
QuicAlarm::QuicAlarm(QuicArenaScopedPtr<Delegate> delegate)
    : delegate_(std::move(delegate)), deadline_(QuicTime::Zero()) {}
QuicAlarm::~QuicAlarm() {
  // Destroying a still-set alarm is legal but counted, since the
  // platform-specific CancelImpl never runs for it.
  if (IsSet()) {
    QUIC_CODE_COUNT(quic_alarm_not_cancelled_in_dtor);
  }
}
// Arms the alarm to fire at `new_deadline`. Preconditions: the alarm is not
// already set and the deadline is initialized. Calling Set on a permanently
// cancelled alarm is reported as a bug and ignored.
void QuicAlarm::Set(QuicTime new_deadline) {
  QUICHE_DCHECK(!IsSet());
  QUICHE_DCHECK(new_deadline.IsInitialized());
  if (IsPermanentlyCancelled()) {
    QUIC_BUG(quic_alarm_illegal_set)
        << "Set called after alarm is permanently cancelled. new_deadline:"
        << new_deadline;
    return;
  }
  deadline_ = new_deadline;
  SetImpl();
}
// Shared implementation behind Cancel() and PermanentCancel(). Clears the
// deadline and platform timer if the alarm is set; when `permanent`, also
// releases the delegate so the alarm can never be set again.
void QuicAlarm::CancelInternal(bool permanent) {
  if (IsSet()) {
    // Zero deadline_ before CancelImpl: implementations may assume the alarm
    // reads as unset while being cancelled.
    deadline_ = QuicTime::Zero();
    CancelImpl();
  }
  if (permanent) {
    delegate_.reset();
  }
}
bool QuicAlarm::IsPermanentlyCancelled() const { return delegate_ == nullptr; }
// Re-targets the alarm at `new_deadline`, arming it if currently unset. An
// uninitialized deadline cancels the alarm instead. Deadlines within
// `granularity` of the current one are ignored to avoid churning the
// underlying timer.
void QuicAlarm::Update(QuicTime new_deadline, QuicTime::Delta granularity) {
  if (IsPermanentlyCancelled()) {
    QUIC_BUG(quic_alarm_illegal_update)
        << "Update called after alarm is permanently cancelled. new_deadline:"
        << new_deadline << ", granularity:" << granularity;
    return;
  }
  if (!new_deadline.IsInitialized()) {
    Cancel();
    return;
  }
  if (std::abs((new_deadline - deadline_).ToMicroseconds()) <
      granularity.ToMicroseconds()) {
    return;
  }
  const bool was_set = IsSet();
  deadline_ = new_deadline;
  // UpdateImpl reschedules an already-armed alarm; SetImpl arms a fresh one.
  if (was_set) {
    UpdateImpl();
  } else {
    SetImpl();
  }
}
bool QuicAlarm::IsSet() const { return deadline_.IsInitialized(); }
// Called by the platform when the timer expires. Clears the deadline before
// running the delegate so OnAlarm() may re-Set() the alarm. No-op if the
// alarm was already cancelled (normally or permanently).
void QuicAlarm::Fire() {
  if (!IsSet()) {
    return;
  }
  deadline_ = QuicTime::Zero();
  if (!IsPermanentlyCancelled()) {
    // Run the delegate under its connection's context (e.g. for tracing).
    QuicConnectionContextSwitcher context_switcher(
        delegate_->GetConnectionContext());
    delegate_->OnAlarm();
  }
}
// Default rescheduling strategy: cancel, then re-arm. deadline_ is zeroed
// around CancelImpl because implementations may expect an unset alarm there.
void QuicAlarm::UpdateImpl() {
  const QuicTime new_deadline = deadline_;
  deadline_ = QuicTime::Zero();
  CancelImpl();
  deadline_ = new_deadline;
  SetImpl();
}
} | #include "quiche/quic/core/quic_alarm.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "quiche/quic/core/quic_connection_context.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_test.h"
using testing::ElementsAre;
using testing::Invoke;
using testing::Return;
namespace quic {
namespace test {
namespace {
// Test tracer that records every trace string so tests can assert on what
// was emitted while an alarm fired.
class TraceCollector : public QuicConnectionTracer {
 public:
  ~TraceCollector() override = default;
  void PrintLiteral(const char* literal) override { trace_.push_back(literal); }
  void PrintString(absl::string_view s) override {
    trace_.push_back(std::string(s));
  }
  // All strings collected so far, in emission order.
  const std::vector<std::string>& trace() const { return trace_; }

 private:
  std::vector<std::string> trace_;
};
// Mock alarm delegate; tests set expectations on OnAlarm() and control the
// connection context handed to the firing alarm.
class MockDelegate : public QuicAlarm::Delegate {
 public:
  MOCK_METHOD(QuicConnectionContext*, GetConnectionContext, (), (override));
  MOCK_METHOD(void, OnAlarm, (), (override));
};
// Delegate that deletes its own alarm from inside OnAlarm(), used to verify
// that Fire() tolerates the alarm being destroyed by its delegate.
class DestructiveDelegate : public QuicAlarm::DelegateWithoutContext {
 public:
  DestructiveDelegate() : alarm_(nullptr) {}
  void set_alarm(QuicAlarm* alarm) { alarm_ = alarm; }
  void OnAlarm() override {
    QUICHE_DCHECK(alarm_);
    delete alarm_;
  }

 private:
  QuicAlarm* alarm_;  // Heap alarm; deleted by OnAlarm().
};
// Concrete QuicAlarm whose platform hooks only record whether the alarm is
// currently scheduled, letting tests drive firing manually.
class TestAlarm : public QuicAlarm {
 public:
  explicit TestAlarm(QuicAlarm::Delegate* delegate)
      : QuicAlarm(QuicArenaScopedPtr<QuicAlarm::Delegate>(delegate)) {}

  // Whether SetImpl has armed the (simulated) platform timer.
  bool scheduled() const { return scheduled_; }

  // Simulates the platform timer expiring.
  void FireAlarm() {
    scheduled_ = false;
    Fire();
  }

 protected:
  void SetImpl() override {
    QUICHE_DCHECK(deadline().IsInitialized());
    scheduled_ = true;
  }
  void CancelImpl() override {
    QUICHE_DCHECK(!deadline().IsInitialized());
    scheduled_ = false;
  }

 private:
  // Fix: was left uninitialized, so reading scheduled() before the first
  // Set()/Cancel() was undefined behavior. A fresh alarm is not scheduled.
  bool scheduled_ = false;
};
// Alarm with no-op platform hooks, paired with DestructiveDelegate to
// exercise deletion of the alarm while it is firing.
class DestructiveAlarm : public QuicAlarm {
 public:
  explicit DestructiveAlarm(DestructiveDelegate* delegate)
      : QuicAlarm(QuicArenaScopedPtr<DestructiveDelegate>(delegate)) {}
  void FireAlarm() { Fire(); }

 protected:
  void SetImpl() override {}
  void CancelImpl() override {}
};
// Fixture owning a TestAlarm with a mock delegate plus fixed deadlines
// shared across the tests below.
class QuicAlarmTest : public QuicTest {
 public:
  QuicAlarmTest()
      : delegate_(new MockDelegate()),
        alarm_(delegate_),
        deadline_(QuicTime::Zero() + QuicTime::Delta::FromSeconds(7)),
        deadline2_(QuicTime::Zero() + QuicTime::Delta::FromSeconds(14)),
        new_deadline_(QuicTime::Zero()) {}
  // Re-arms the alarm at new_deadline_; used as an OnAlarm() action.
  void ResetAlarm() { alarm_.Set(new_deadline_); }

  MockDelegate* delegate_;  // Ownership transferred to alarm_.
  TestAlarm alarm_;
  QuicTime deadline_;
  QuicTime deadline2_;
  QuicTime new_deadline_;
};
TEST_F(QuicAlarmTest, IsSet) { EXPECT_FALSE(alarm_.IsSet()); }
TEST_F(QuicAlarmTest, Set) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
EXPECT_TRUE(alarm_.IsSet());
EXPECT_TRUE(alarm_.scheduled());
EXPECT_EQ(deadline, alarm_.deadline());
}
TEST_F(QuicAlarmTest, Cancel) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
alarm_.Cancel();
EXPECT_FALSE(alarm_.IsSet());
EXPECT_FALSE(alarm_.scheduled());
EXPECT_EQ(QuicTime::Zero(), alarm_.deadline());
}
TEST_F(QuicAlarmTest, PermanentCancel) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
alarm_.PermanentCancel();
EXPECT_FALSE(alarm_.IsSet());
EXPECT_FALSE(alarm_.scheduled());
EXPECT_EQ(QuicTime::Zero(), alarm_.deadline());
EXPECT_QUIC_BUG(alarm_.Set(deadline),
"Set called after alarm is permanently cancelled");
EXPECT_TRUE(alarm_.IsPermanentlyCancelled());
EXPECT_FALSE(alarm_.IsSet());
EXPECT_FALSE(alarm_.scheduled());
EXPECT_EQ(QuicTime::Zero(), alarm_.deadline());
EXPECT_QUIC_BUG(alarm_.Update(deadline, QuicTime::Delta::Zero()),
"Update called after alarm is permanently cancelled");
EXPECT_TRUE(alarm_.IsPermanentlyCancelled());
EXPECT_FALSE(alarm_.IsSet());
EXPECT_FALSE(alarm_.scheduled());
EXPECT_EQ(QuicTime::Zero(), alarm_.deadline());
}
TEST_F(QuicAlarmTest, Update) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
QuicTime new_deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(8);
alarm_.Update(new_deadline, QuicTime::Delta::Zero());
EXPECT_TRUE(alarm_.IsSet());
EXPECT_TRUE(alarm_.scheduled());
EXPECT_EQ(new_deadline, alarm_.deadline());
}
TEST_F(QuicAlarmTest, UpdateWithZero) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
alarm_.Update(QuicTime::Zero(), QuicTime::Delta::Zero());
EXPECT_FALSE(alarm_.IsSet());
EXPECT_FALSE(alarm_.scheduled());
EXPECT_EQ(QuicTime::Zero(), alarm_.deadline());
}
TEST_F(QuicAlarmTest, Fire) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
EXPECT_CALL(*delegate_, OnAlarm());
alarm_.FireAlarm();
EXPECT_FALSE(alarm_.IsSet());
EXPECT_FALSE(alarm_.scheduled());
EXPECT_EQ(QuicTime::Zero(), alarm_.deadline());
}
TEST_F(QuicAlarmTest, FireAndResetViaSet) {
alarm_.Set(deadline_);
new_deadline_ = deadline2_;
EXPECT_CALL(*delegate_, OnAlarm())
.WillOnce(Invoke(this, &QuicAlarmTest::ResetAlarm));
alarm_.FireAlarm();
EXPECT_TRUE(alarm_.IsSet());
EXPECT_TRUE(alarm_.scheduled());
EXPECT_EQ(deadline2_, alarm_.deadline());
}
TEST_F(QuicAlarmTest, FireDestroysAlarm) {
DestructiveDelegate* delegate(new DestructiveDelegate);
DestructiveAlarm* alarm = new DestructiveAlarm(delegate);
delegate->set_alarm(alarm);
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm->Set(deadline);
alarm->FireAlarm();
}
TEST_F(QuicAlarmTest, NullAlarmContext) {
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
EXPECT_CALL(*delegate_, GetConnectionContext()).WillOnce(Return(nullptr));
EXPECT_CALL(*delegate_, OnAlarm()).WillOnce(Invoke([] {
QUIC_TRACELITERAL("Alarm fired.");
}));
alarm_.FireAlarm();
}
TEST_F(QuicAlarmTest, AlarmContextWithNullTracer) {
QuicConnectionContext context;
ASSERT_EQ(context.tracer, nullptr);
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
EXPECT_CALL(*delegate_, GetConnectionContext()).WillOnce(Return(&context));
EXPECT_CALL(*delegate_, OnAlarm()).WillOnce(Invoke([] {
QUIC_TRACELITERAL("Alarm fired.");
}));
alarm_.FireAlarm();
}
TEST_F(QuicAlarmTest, AlarmContextWithTracer) {
QuicConnectionContext context;
std::unique_ptr<TraceCollector> tracer = std::make_unique<TraceCollector>();
const TraceCollector& tracer_ref = *tracer;
context.tracer = std::move(tracer);
QuicTime deadline = QuicTime::Zero() + QuicTime::Delta::FromSeconds(7);
alarm_.Set(deadline);
EXPECT_CALL(*delegate_, GetConnectionContext()).WillOnce(Return(&context));
EXPECT_CALL(*delegate_, OnAlarm()).WillOnce(Invoke([] {
QUIC_TRACELITERAL("Alarm fired.");
}));
QUIC_TRACELITERAL("Should not be collected before alarm.");
alarm_.FireAlarm();
QUIC_TRACELITERAL("Should not be collected after alarm.");
EXPECT_THAT(tracer_ref.trace(), ElementsAre("Alarm fired."));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_alarm.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_alarm_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b0f7e370-4018-4e23-a4da-4d004db961cd | cpp | tensorflow/tensorflow | mul | tensorflow/lite/delegates/gpu/gl/kernels/mul.cc | tensorflow/lite/delegates/xnnpack/mul_test.cc | #include "tensorflow/lite/delegates/gpu/gl/kernels/mul.h"
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/convert.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/common/util.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
// Chooses the GLSL read coordinate for dimension `dim` of the second input.
// Broadcast axes (second tensor has size 1 where the first does not) read
// index 0; matching axes use `default_coord`. Any other shape pair is an
// unsupported broadcast and yields an error.
absl::Status GetCoordinate(const NodeShader::GenerationContext& ctx, int dim,
                           const std::string& default_coord,
                           std::string* coord) {
  const auto first_dim = ctx.input_shapes[0][dim];
  const auto second_dim = ctx.input_shapes[1][dim];
  if (second_dim == 1 && first_dim != 1) {
    *coord = "0";
    return absl::OkStatus();
  }
  if (first_dim == second_dim) {
    *coord = default_coord;
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError(
      absl::StrCat("Second runtime tensor dimension ", dim,
                   " must either match "
                   "first tensor's dimensions or be 1."));
}
// Emits GLSL for elementwise multiplication of two runtime tensors. The
// second tensor may be broadcast along W/H/C (any dimension of size 1).
absl::Status GenerateMultiplyRuntimeTensorCode(
    const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
  std::string x_coord, y_coord, z_coord;
  // Per-axis read coordinate for the second input: "0" when that axis is
  // broadcast, otherwise the matching gid component.
  RETURN_IF_ERROR(
      GetCoordinate(ctx, 2, "gid.x", &x_coord));
  RETURN_IF_ERROR(
      GetCoordinate(ctx, 1, "gid.y", &y_coord));
  RETURN_IF_ERROR(
      GetCoordinate(ctx, 3, "gid.z", &z_coord));
  std::string source =
      absl::StrCat("vec4 input1_value = $input_data_1[", x_coord, ", ", y_coord,
                   ", ", z_coord, "]$;");
  // Channel broadcast: replicate the single channel across all vec4 lanes.
  if (ctx.input_shapes[1][3] == 1 && ctx.input_shapes[0][3] != 1) {
    absl::StrAppend(
        &source,
        "\ninput1_value = vec4(input1_value.x, input1_value.x, input1_value.x, "
        "input1_value.x);\n");
  }
  absl::StrAppend(
      &source, "value_0 = $input_data_0[gid.x, gid.y, gid.z]$ * input1_value;");
  *generated_code = {
      /*parameters=*/{},
      /*objects=*/{},
      /*shared_variables=*/{},
      /*workload=*/uint3(),
      /*workgroup=*/uint3(),
      /*source_code=*/std::move(source),
      /*input=*/IOStructure::ONLY_DEFINITIONS,
      /*output=*/IOStructure::AUTO,
  };
  return absl::OkStatus();
}
// Emits GLSL for multiplying the single runtime input by a constant
// attribute, which may be a scalar, a per-channel (Linear) tensor, or a
// full HWC tensor.
absl::Status GenerateMultiplyConstantTensorCode(
    const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) {
  const auto& attr = std::any_cast<const ElementwiseAttributes&>(ctx.op_attr);
  // Case 1: scalar constant, passed to the shader as a uniform parameter.
  if (std::holds_alternative<float>(attr.param)) {
    *generated_code = {
        /*parameters=*/{{"scalar", std::get<float>(attr.param)}},
        /*objects=*/{},
        /*shared_variables=*/{},
        /*workload=*/uint3(),
        /*workgroup=*/uint3(),
        /*source_code=*/"value_0 *= $scalar$;",
        /*input=*/IOStructure::AUTO,
        /*output=*/IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
  // Case 2: per-channel multiplier, uploaded as a read-only buffer indexed
  // by the channel slice (gid.z).
  if (std::holds_alternative<Tensor<Linear, DataType::FLOAT32>>(attr.param)) {
    *generated_code = {
        /*parameters=*/{},
        /*objects=*/
        {{"mul_buffer",
          MakeReadonlyObject(
              std::get<Tensor<Linear, DataType::FLOAT32>>(attr.param).data)}},
        /*shared_variables=*/{},
        // One invocation per (x, y, channel-slice-of-4).
        /*workload=*/
        uint3(static_cast<int>(ctx.input_shapes[0][2]),
              static_cast<int>(ctx.input_shapes[0][1]),
              DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
        /*workgroup=*/uint3(),
        /*source_code=*/"value_0 *= $mul_buffer[gid.z]$;",
        /*input=*/IOStructure::AUTO,
        /*output=*/IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
  // Case 3: full HWC constant tensor, converted to PHWC4 layout and stored
  // in a read-only object.
  if (std::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(attr.param)) {
    std::string source;
    // If the runtime input is a 1x1x1 scalar, splat its single value first.
    if (ctx.input_shapes[0][1] == 1 && ctx.input_shapes[0][2] == 1 &&
        ctx.input_shapes[0][3] == 1) {
      source = R"(
value_0 = $input_data_0[0, 0, 0]$;
value_0 = vec4(value_0.x, value_0.x, value_0.x, value_0.x);
)";
    }
    auto param_shape =
        std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param).shape;
    if (param_shape.c == 1) {
      // Single-channel constant: read it (possibly per pixel) and splat it
      // across the vec4 lanes so it multiplies every channel.
      if (param_shape.h == 1 && param_shape.w == 1) {
        absl::StrAppend(&source, "vec4 const_val = $hwc_buffer[0, 0, 0]$;");
      } else {
        absl::StrAppend(&source,
                        "vec4 const_val = $hwc_buffer[gid.x, gid.y, 0]$;");
      }
      absl::StrAppend(&source,
                      "const_val = vec4(const_val.x, const_val.x, const_val.x, "
                      "const_val.x);");
    } else {
      source += "vec4 const_val = $hwc_buffer[gid.x, gid.y, gid.z]$;";
    }
    absl::StrAppend(&source, "value_0 *= const_val;");
    *generated_code = {
        /*parameters=*/{},
        /*objects=*/
        {{"hwc_buffer",
          MakeReadonlyObject(
              uint3(param_shape.w, param_shape.h,
                    DivideRoundUp(param_shape.c, 4)),
              ConvertToPHWC4(
                  std::get<Tensor<HWC, DataType::FLOAT32>>(attr.param)))}},
        /*shared_variables=*/{},
        /*workload=*/
        uint3(static_cast<int>(ctx.input_shapes[0][2]),
              static_cast<int>(ctx.input_shapes[0][1]),
              DivideRoundUp(static_cast<int>(ctx.input_shapes[0][3]), 4)),
        /*workgroup=*/uint3(),
        /*source_code=*/std::move(source),
        /*input=*/IOStructure::AUTO,
        /*output=*/IOStructure::AUTO,
    };
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError("Unsupported Multiplication case.");
}
// Node shader for elementwise multiplication.  Dispatches between the two
// code generators depending on whether the second operand is a runtime
// tensor (two input shapes) or a constant carried in the node attributes.
class Multiply : public NodeShader {
 public:
  absl::Status GenerateCode(const GenerationContext& ctx,
                            GeneratedCode* generated_code) const final {
    const bool has_runtime_operand = ctx.input_shapes.size() == 2;
    return has_runtime_operand
               ? GenerateMultiplyRuntimeTensorCode(ctx, generated_code)
               : GenerateMultiplyConstantTensorCode(ctx, generated_code);
  }
};
}
// Factory: returns a freshly allocated elementwise-multiply node shader.
std::unique_ptr<NodeShader> NewMultiplyNodeShader() {
  auto shader = std::make_unique<Multiply>();
  return shader;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Mul, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, ReluActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluActivation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, Relu6Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Relu6Activation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, ReluMinus1To1Activation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.ReluMinus1To1Activation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, DISABLED_TanhActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.TanhActivation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, DISABLED_SignBitActivation) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.SignBitActivation()
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
TEST(Mul, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MUL, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/kernels/mul.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/mul_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa41cd86-bce6-4de3-bb3a-e3c3aaae27ec | cpp | google/quiche | tls_client_handshaker | quiche/quic/core/tls_client_handshaker.cc | quiche/quic/core/tls_client_handshaker_test.cc | #include "quiche/quic/core/tls_client_handshaker.h"
#include <algorithm>
#include <cstring>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/quic_crypto_client_config.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/crypto/transport_parameters.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_hostname_utils.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic {
// Constructs a client-side TLS handshaker bound to |stream|/|session|.
// Copies the verifier, session cache, PSK and user-agent settings out of
// |crypto_config|, then configures the underlying SSL object: optional
// signature-algorithm restriction, a client certificate (if the proof
// source has one for this host), preferred key-exchange groups, and the
// ALPS codepoint selection.
TlsClientHandshaker::TlsClientHandshaker(
    const QuicServerId& server_id, QuicCryptoStream* stream,
    QuicSession* session, std::unique_ptr<ProofVerifyContext> verify_context,
    QuicCryptoClientConfig* crypto_config,
    QuicCryptoClientStream::ProofHandler* proof_handler,
    bool has_application_state)
    : TlsHandshaker(stream, session),
      session_(session),
      server_id_(server_id),
      proof_verifier_(crypto_config->proof_verifier()),
      verify_context_(std::move(verify_context)),
      proof_handler_(proof_handler),
      session_cache_(crypto_config->session_cache()),
      user_agent_id_(crypto_config->user_agent_id()),
      pre_shared_key_(crypto_config->pre_shared_key()),
      crypto_negotiated_params_(new QuicCryptoNegotiatedParameters),
      has_application_state_(has_application_state),
      tls_connection_(crypto_config->ssl_ctx(), this, session->GetSSLConfig()) {
  if (crypto_config->tls_signature_algorithms().has_value()) {
    // Restrict the signature algorithms offered in the TLS handshake.
    SSL_set1_sigalgs_list(ssl(),
                          crypto_config->tls_signature_algorithms()->c_str());
  }
  if (crypto_config->proof_source() != nullptr) {
    // Install a client certificate and key when one is available for this
    // host, enabling client authentication.
    std::shared_ptr<const ClientProofSource::CertAndKey> cert_and_key =
        crypto_config->proof_source()->GetCertAndKey(server_id.host());
    if (cert_and_key != nullptr) {
      QUIC_DVLOG(1) << "Setting client cert and key for " << server_id.host();
      tls_connection_.SetCertChain(cert_and_key->chain->ToCryptoBuffers().value,
                                   cert_and_key->private_key.private_key());
    }
  }
#if BORINGSSL_API_VERSION >= 22
  // Newer BoringSSL only: constrain the key-exchange groups offered.
  if (!crypto_config->preferred_groups().empty()) {
    SSL_set1_group_ids(ssl(), crypto_config->preferred_groups().data(),
                       crypto_config->preferred_groups().size());
  }
#endif
#if BORINGSSL_API_VERSION >= 27
  // Choose which TLS extension codepoint ALPS uses.
  SSL_set_alps_use_new_codepoint(ssl(),
                                 crypto_config->alps_use_new_codepoint());
#endif
}
// Out-of-line destructor; no cleanup beyond the members' own destructors.
TlsClientHandshaker::~TlsClientHandshaker() = default;
// Kicks off the TLS handshake.  Configures SNI, ALPN, the client transport
// parameters, session resumption and ECH on the SSL object, then drives the
// first flight via AdvanceHandshake().  Returns false (closing the
// connection where appropriate) if any setup step fails; otherwise returns
// whether the connection is still alive after the first flight.
bool TlsClientHandshaker::CryptoConnect() {
  if (!pre_shared_key_.empty()) {
    // PSK is a QUIC-crypto feature; it is not supported with TLS, so fail
    // loudly rather than silently ignoring the configured key.
    std::string error_details =
        "QUIC client pre-shared keys not yet supported with TLS";
    QUIC_BUG(quic_bug_10576_1) << error_details;
    CloseConnection(QUIC_HANDSHAKE_FAILED, error_details);
    return false;
  }
  // Select the legacy vs. standard QUIC transport-parameters TLS extension
  // codepoint based on the negotiated version.
  int use_legacy_extension = 0;
  if (session()->version().UsesLegacyTlsExtension()) {
    use_legacy_extension = 1;
  }
  SSL_set_quic_use_legacy_codepoint(ssl(), use_legacy_extension);
#if BORINGSSL_API_VERSION >= 16
  // Randomize the order of TLS extensions in the ClientHello.
  SSL_set_permute_extensions(ssl(), true);
#endif
  SSL_set_connect_state(ssl());
  const bool allow_invalid_sni_for_test =
      GetQuicFlag(quic_client_allow_invalid_sni_for_test);
  if (QUIC_DLOG_INFO_IS_ON() &&
      !QuicHostnameUtils::IsValidSNI(server_id_.host())) {
    QUIC_DLOG(INFO) << "Client configured with invalid hostname \""
                    << server_id_.host() << "\", "
                    << (allow_invalid_sni_for_test
                            ? "sending it anyway for test."
                            : "not sending as SNI.");
  }
  // Only send SNI when the hostname is non-empty and valid (or a test flag
  // explicitly allows invalid names).
  if (!server_id_.host().empty() &&
      (QuicHostnameUtils::IsValidSNI(server_id_.host()) ||
       allow_invalid_sni_for_test) &&
      SSL_set_tlsext_host_name(ssl(), server_id_.host().c_str()) != 1) {
    return false;
  }
  if (!SetAlpn()) {
    CloseConnection(QUIC_HANDSHAKE_FAILED, "Client failed to set ALPN");
    return false;
  }
  if (!SetTransportParameters()) {
    CloseConnection(QUIC_HANDSHAKE_FAILED,
                    "Client failed to set Transport Parameters");
    return false;
  }
  // Attempt TLS session resumption; a cached entry may also carry a source
  // address token to send.
  if (session_cache_) {
    cached_state_ = session_cache_->Lookup(
        server_id_, session()->GetClock()->WallNow(), SSL_get_SSL_CTX(ssl()));
  }
  if (cached_state_) {
    SSL_set_session(ssl(), cached_state_->tls_session.get());
    if (!cached_state_->token.empty()) {
      session()->SetSourceAddressTokenToSend(cached_state_->token);
    }
  }
  // Encrypted ClientHello: GREASE and/or a real ECHConfigList, per config.
  SSL_set_enable_ech_grease(ssl(),
                            tls_connection_.ssl_config().ech_grease_enabled);
  if (!tls_connection_.ssl_config().ech_config_list.empty() &&
      !SSL_set1_ech_config_list(
          ssl(),
          reinterpret_cast<const uint8_t*>(
              tls_connection_.ssl_config().ech_config_list.data()),
          tls_connection_.ssl_config().ech_config_list.size())) {
    CloseConnection(QUIC_HANDSHAKE_FAILED,
                    "Client failed to set ECHConfigList");
    return false;
  }
  AdvanceHandshake();
  return session()->connection()->connected();
}
// Applies cached transport parameters and application state from
// |cached_state| so the session can send 0-RTT data before the server's
// real parameters arrive.  Closes the connection and returns false if the
// cached data is missing or cannot be applied.
bool TlsClientHandshaker::PrepareZeroRttConfig(
    QuicResumptionState* cached_state) {
  std::string error_details;
  // NOTE(review): the `true` second argument presumably flags these as
  // resumed parameters — confirm against the delegate's
  // ProcessTransportParameters signature.
  if (!cached_state->transport_params ||
      handshaker_delegate()->ProcessTransportParameters(
          *(cached_state->transport_params),
          true, &error_details) != QUIC_NO_ERROR) {
    QUIC_BUG(quic_bug_10576_2)
        << "Unable to parse cached transport parameters.";
    CloseConnection(QUIC_HANDSHAKE_FAILED,
                    "Client failed to parse cached Transport Parameters.");
    return false;
  }
  session()->connection()->OnTransportParametersResumed(
      *(cached_state->transport_params));
  session()->OnConfigNegotiated();
  if (has_application_state_) {
    // Application state (e.g. settings cached alongside the session) must
    // also be restored for 0-RTT to proceed.
    if (!cached_state->application_state ||
        !session()->ResumeApplicationState(
            cached_state->application_state.get())) {
      QUIC_BUG(quic_bug_10576_3) << "Unable to parse cached application state.";
      CloseConnection(QUIC_HANDSHAKE_FAILED,
                      "Client failed to parse cached application state.");
      return false;
    }
  }
  return true;
}
// Returns true iff |alpn_string| fits in the one-byte length prefix used by
// the ALPN wire encoding (i.e. its length is at most 255).
static bool IsValidAlpn(const std::string& alpn_string) {
  constexpr size_t kMaxAlpnLength = std::numeric_limits<uint8_t>::max();
  return alpn_string.size() <= kMaxAlpnLength;
}
// Builds the ALPN extension from the session's offered protocols and
// installs it on the SSL object, then enables ALPS for every offered ALPN
// that matches an HTTP/3 version.  Returns false on any failure (missing
// ALPN, over-long ALPN, serialization or BoringSSL error).
bool TlsClientHandshaker::SetAlpn() {
  std::vector<std::string> alpns = session()->GetAlpnsToOffer();
  if (alpns.empty()) {
    if (allow_empty_alpn_for_tests_) {
      return true;
    }
    QUIC_BUG(quic_bug_10576_4) << "ALPN missing";
    return false;
  }
  // Each ALPN must fit the one-byte length prefix of the wire format.
  if (!std::all_of(alpns.begin(), alpns.end(), IsValidAlpn)) {
    QUIC_BUG(quic_bug_10576_5) << "ALPN too long";
    return false;
  }
  // Serialize as a sequence of length-prefixed strings.
  uint8_t alpn[1024];
  QuicDataWriter alpn_writer(sizeof(alpn), reinterpret_cast<char*>(alpn));
  bool success = true;
  for (const std::string& alpn_string : alpns) {
    success = success && alpn_writer.WriteUInt8(alpn_string.size()) &&
              alpn_writer.WriteStringPiece(alpn_string);
  }
  // Note: SSL_set_alpn_protos returns 0 on success, unlike most SSL_* calls.
  success =
      success && (SSL_set_alpn_protos(ssl(), alpn, alpn_writer.length()) == 0);
  if (!success) {
    QUIC_BUG(quic_bug_10576_6)
        << "Failed to set ALPN: "
        << quiche::QuicheTextUtils::HexDump(
               absl::string_view(alpn_writer.data(), alpn_writer.length()));
    return false;
  }
  // Enable ALPS for offered ALPNs that correspond to an HTTP/3 version.
  for (const std::string& alpn_string : alpns) {
    for (const ParsedQuicVersion& version : session()->supported_versions()) {
      if (!version.UsesHttp3() || AlpnForVersion(version) != alpn_string) {
        continue;
      }
      if (SSL_add_application_settings(
              ssl(), reinterpret_cast<const uint8_t*>(alpn_string.data()),
              alpn_string.size(), nullptr, 0) != 1) {
        QUIC_BUG(quic_bug_10576_7) << "Failed to enable ALPS.";
        return false;
      }
      break;
    }
  }
  QUIC_DLOG(INFO) << "Client using ALPN: '" << alpns[0] << "'";
  return true;
}
// Fills in, serializes, and hands the client's QUIC transport parameters to
// BoringSSL for inclusion in the TLS handshake.  Returns false if the
// delegate cannot fill the parameters or serialization/installation fails.
bool TlsClientHandshaker::SetTransportParameters() {
  TransportParameters params;
  params.perspective = Perspective::IS_CLIENT;
  // Legacy version info advertises the first supported version.
  params.legacy_version_information =
      TransportParameters::LegacyVersionInformation();
  params.legacy_version_information->version =
      CreateQuicVersionLabel(session()->supported_versions().front());
  // Standard version-information carries the chosen version (also listed
  // among "other versions").
  params.version_information = TransportParameters::VersionInformation();
  const QuicVersionLabel version = CreateQuicVersionLabel(session()->version());
  params.version_information->chosen_version = version;
  params.version_information->other_versions.push_back(version);
  if (!handshaker_delegate()->FillTransportParameters(&params)) {
    return false;
  }
  // Inform the connection (e.g. for logging) before serializing.
  session()->connection()->OnTransportParametersSent(params);
  std::vector<uint8_t> param_bytes;
  return SerializeTransportParameters(params, &param_bytes) &&
         SSL_set_quic_transport_params(ssl(), param_bytes.data(),
                                       param_bytes.size()) == 1;
}
// Reads the server's transport parameters off the TLS connection, parses
// and validates them (including legacy and standard version-negotiation
// information), and applies them to the session.  On failure, fills
// |*error_details| and returns false.
bool TlsClientHandshaker::ProcessTransportParameters(
    std::string* error_details) {
  received_transport_params_ = std::make_unique<TransportParameters>();
  const uint8_t* param_bytes;
  size_t param_bytes_len;
  SSL_get_peer_quic_transport_params(ssl(), &param_bytes, &param_bytes_len);
  if (param_bytes_len == 0) {
    *error_details = "Server's transport parameters are missing";
    return false;
  }
  std::string parse_error_details;
  if (!ParseTransportParameters(
          session()->connection()->version(), Perspective::IS_SERVER,
          param_bytes, param_bytes_len, received_transport_params_.get(),
          &parse_error_details)) {
    QUICHE_DCHECK(!parse_error_details.empty());
    *error_details =
        "Unable to parse server's transport parameters: " + parse_error_details;
    return false;
  }
  session()->connection()->OnTransportParametersReceived(
      *received_transport_params_);
  if (received_transport_params_->legacy_version_information.has_value()) {
    // The echoed version must match the one this connection is using.
    if (received_transport_params_->legacy_version_information->version !=
        CreateQuicVersionLabel(session()->connection()->version())) {
      *error_details = "Version mismatch detected";
      return false;
    }
    if (CryptoUtils::ValidateServerHelloVersions(
            received_transport_params_->legacy_version_information
                ->supported_versions,
            session()->connection()->server_supported_versions(),
            error_details) != QUIC_NO_ERROR) {
      QUICHE_DCHECK(!error_details->empty());
      return false;
    }
  }
  if (received_transport_params_->version_information.has_value()) {
    if (!CryptoUtils::ValidateChosenVersion(
            received_transport_params_->version_information->chosen_version,
            session()->version(), error_details)) {
      QUICHE_DCHECK(!error_details->empty());
      return false;
    }
    // Fixed: was redundantly qualified as CryptoUtils::CryptoUtils::...,
    // which only compiled via the injected-class-name.
    if (!CryptoUtils::ValidateServerVersions(
            received_transport_params_->version_information->other_versions,
            session()->version(),
            session()->client_original_supported_versions(), error_details)) {
      QUICHE_DCHECK(!error_details->empty());
      return false;
    }
  }
  if (handshaker_delegate()->ProcessTransportParameters(
          *received_transport_params_, false,
          error_details) != QUIC_NO_ERROR) {
    QUICHE_DCHECK(!error_details->empty());
    return false;
  }
  session()->OnConfigNegotiated();
  // OnConfigNegotiated may itself close the connection; surface that as a
  // failure here.
  if (is_connection_closed()) {
    *error_details =
        "Session closed the connection when parsing negotiated config.";
    return false;
  }
  return true;
}
// Legacy QUIC-crypto counter; always 0 for the TLS handshaker.
int TlsClientHandshaker::num_sent_client_hellos() const { return 0; }
// True iff a cached session was found and offered for resumption.
bool TlsClientHandshaker::ResumptionAttempted() const {
  QUIC_BUG_IF(quic_tls_client_resumption_attempted, !encryption_established_);
  return cached_state_ != nullptr;
}
// True iff the server actually resumed the offered TLS session.
bool TlsClientHandshaker::IsResumption() const {
  QUIC_BUG_IF(quic_bug_12736_1, !one_rtt_keys_available());
  return SSL_session_reused(ssl()) == 1;
}
// True iff the server accepted the client's early (0-RTT) data.
bool TlsClientHandshaker::EarlyDataAccepted() const {
  QUIC_BUG_IF(quic_bug_12736_2, !one_rtt_keys_available());
  return SSL_early_data_accepted(ssl()) == 1;
}
ssl_early_data_reason_t TlsClientHandshaker::EarlyDataReason() const {
  return TlsHandshaker::EarlyDataReason();
}
// Always false under TLS (inchoate rejects are a QUIC-crypto concept).
bool TlsClientHandshaker::ReceivedInchoateReject() const {
  QUIC_BUG_IF(quic_bug_12736_3, !one_rtt_keys_available());
  return false;
}
// Always 0 under TLS.
int TlsClientHandshaker::num_scup_messages_received() const {
  return 0;
}
// No CHLO hash exists for TLS handshakes.
std::string TlsClientHandshaker::chlo_hash() const { return ""; }
bool TlsClientHandshaker::ExportKeyingMaterial(absl::string_view label,
                                               absl::string_view context,
                                               size_t result_len,
                                               std::string* result) {
  return ExportKeyingMaterialForLabel(label, context, result_len, result);
}
bool TlsClientHandshaker::encryption_established() const {
  return encryption_established_;
}
// The client never expects CRYPTO frames at the 0-RTT encryption level.
bool TlsClientHandshaker::IsCryptoFrameExpectedForEncryptionLevel(
    EncryptionLevel level) const {
  return level != ENCRYPTION_ZERO_RTT;
}
// Maps a packet-number space to the encryption level used for crypto data.
// The client never sends crypto data in the application space.
EncryptionLevel TlsClientHandshaker::GetEncryptionLevelToSendCryptoDataOfSpace(
    PacketNumberSpace space) const {
  switch (space) {
    case INITIAL_DATA:
      return ENCRYPTION_INITIAL;
    case HANDSHAKE_DATA:
      return ENCRYPTION_HANDSHAKE;
    default:
      QUICHE_DCHECK(false);
      return NUM_ENCRYPTION_LEVELS;
  }
}
// 1-RTT keys exist once the handshake is at least HANDSHAKE_COMPLETE.
bool TlsClientHandshaker::one_rtt_keys_available() const {
  return state_ >= HANDSHAKE_COMPLETE;
}
const QuicCryptoNegotiatedParameters&
TlsClientHandshaker::crypto_negotiated_params() const {
  return *crypto_negotiated_params_;
}
CryptoMessageParser* TlsClientHandshaker::crypto_message_parser() {
  return TlsHandshaker::crypto_message_parser();
}
HandshakeState TlsClientHandshaker::GetHandshakeState() const { return state_; }
size_t TlsClientHandshaker::BufferSizeLimitForLevel(
    EncryptionLevel level) const {
  return TlsHandshaker::BufferSizeLimitForLevel(level);
}
std::unique_ptr<QuicDecrypter>
TlsClientHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter() {
  return TlsHandshaker::AdvanceKeysAndCreateCurrentOneRttDecrypter();
}
std::unique_ptr<QuicEncrypter>
TlsClientHandshaker::CreateCurrentOneRttEncrypter() {
  return TlsHandshaker::CreateCurrentOneRttEncrypter();
}
// An acknowledged 1-RTT packet confirms the handshake.
void TlsClientHandshaker::OnOneRttPacketAcknowledged() {
  OnHandshakeConfirmed();
}
// Drops Initial keys the first time a handshake-level packet is sent;
// subsequent calls are no-ops.
void TlsClientHandshaker::OnHandshakePacketSent() {
  if (initial_keys_dropped_) {
    return;
  }
  initial_keys_dropped_ = true;
  handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_INITIAL);
  handshaker_delegate()->DiscardOldDecryptionKey(ENCRYPTION_INITIAL);
}
void TlsClientHandshaker::OnConnectionClosed(QuicErrorCode error,
                                             ConnectionCloseSource source) {
  TlsHandshaker::OnConnectionClosed(error, source);
}
// HANDSHAKE_DONE received before 1-RTT keys exist is a protocol violation;
// otherwise it confirms the handshake.
void TlsClientHandshaker::OnHandshakeDoneReceived() {
  if (!one_rtt_keys_available()) {
    CloseConnection(QUIC_HANDSHAKE_FAILED,
                    "Unexpected handshake done received");
    return;
  }
  OnHandshakeConfirmed();
}
// Forwards a non-empty address token to the session cache for use on
// future connections to this server.
void TlsClientHandshaker::OnNewTokenReceived(absl::string_view token) {
  if (token.empty()) {
    return;
  }
  if (session_cache_ != nullptr) {
    session_cache_->OnNewTokenReceived(server_id_, token);
  }
}
// Installs a new write key.  Encryption counts as "established" once a
// 0-RTT or 1-RTT (forward-secure) write key exists; 0-RTT keys are retired
// as soon as the 1-RTT write key is installed.
void TlsClientHandshaker::SetWriteSecret(
    EncryptionLevel level, const SSL_CIPHER* cipher,
    absl::Span<const uint8_t> write_secret) {
  if (is_connection_closed()) {
    return;
  }
  if (level == ENCRYPTION_FORWARD_SECURE || level == ENCRYPTION_ZERO_RTT) {
    encryption_established_ = true;
  }
  TlsHandshaker::SetWriteSecret(level, cipher, write_secret);
  if (level == ENCRYPTION_FORWARD_SECURE) {
    handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_ZERO_RTT);
  }
}
// Transitions to HANDSHAKE_CONFIRMED (idempotent), notifies the delegate,
// and retires handshake-level keys.
void TlsClientHandshaker::OnHandshakeConfirmed() {
  QUICHE_DCHECK(one_rtt_keys_available());
  if (state_ >= HANDSHAKE_CONFIRMED) {
    return;
  }
  state_ = HANDSHAKE_CONFIRMED;
  handshaker_delegate()->OnTlsHandshakeConfirmed();
  handshaker_delegate()->DiscardOldEncryptionKey(ENCRYPTION_HANDSHAKE);
  handshaker_delegate()->DiscardOldDecryptionKey(ENCRYPTION_HANDSHAKE);
}
// Verifies the server's certificate chain via the configured ProofVerifier,
// forwarding the OCSP staple and signed-certificate-timestamp list obtained
// from the TLS connection.  May complete asynchronously through |callback|.
QuicAsyncStatus TlsClientHandshaker::VerifyCertChain(
    const std::vector<std::string>& certs, std::string* error_details,
    std::unique_ptr<ProofVerifyDetails>* details, uint8_t* out_alert,
    std::unique_ptr<ProofVerifierCallback> callback) {
  const uint8_t* ocsp_response_raw;
  size_t ocsp_response_len;
  SSL_get0_ocsp_response(ssl(), &ocsp_response_raw, &ocsp_response_len);
  std::string ocsp_response(reinterpret_cast<const char*>(ocsp_response_raw),
                            ocsp_response_len);
  const uint8_t* sct_list_raw;
  size_t sct_list_len;
  SSL_get0_signed_cert_timestamp_list(ssl(), &sct_list_raw, &sct_list_len);
  std::string sct_list(reinterpret_cast<const char*>(sct_list_raw),
                       sct_list_len);
  return proof_verifier_->VerifyCertChain(
      server_id_.host(), server_id_.port(), certs, ocsp_response, sct_list,
      verify_context_.get(), error_details, details, out_alert,
      std::move(callback));
}
// Relays verification details to the owner's proof handler.
void TlsClientHandshaker::OnProofVerifyDetailsAvailable(
    const ProofVerifyDetails& verify_details) {
  proof_handler_->OnProofVerifyDetailsAvailable(verify_details);
}
// Runs once the TLS handshake completes: records negotiated parameters,
// validates the server's transport parameters and ALPN selection, forwards
// any ALPS data to the session, and transitions to HANDSHAKE_COMPLETE.
// Any failure closes the connection.
void TlsClientHandshaker::FinishHandshake() {
  FillNegotiatedParams();
  // This path must never be reached while still in early data.
  QUICHE_CHECK(!SSL_in_early_data(ssl()));
  QUIC_DLOG(INFO) << "Client: handshake finished";
  std::string error_details;
  if (!ProcessTransportParameters(&error_details)) {
    QUICHE_DCHECK(!error_details.empty());
    CloseConnection(QUIC_HANDSHAKE_FAILED, error_details);
    return;
  }
  const uint8_t* alpn_data = nullptr;
  unsigned alpn_length = 0;
  SSL_get0_alpn_selected(ssl(), &alpn_data, &alpn_length);
  if (alpn_length == 0) {
    QUIC_DLOG(ERROR) << "Client: server did not select ALPN";
    CloseConnection(QUIC_HANDSHAKE_FAILED, "Server did not select ALPN");
    return;
  }
  // The server's selection must be one of the ALPNs we actually offered.
  std::string received_alpn_string(reinterpret_cast<const char*>(alpn_data),
                                   alpn_length);
  std::vector<std::string> offered_alpns = session()->GetAlpnsToOffer();
  if (std::find(offered_alpns.begin(), offered_alpns.end(),
                received_alpn_string) == offered_alpns.end()) {
    QUIC_LOG(ERROR) << "Client: received mismatched ALPN '"
                    << received_alpn_string;
    CloseConnection(QUIC_HANDSHAKE_FAILED, "Client received mismatched ALPN");
    return;
  }
  session()->OnAlpnSelected(received_alpn_string);
  QUIC_DLOG(INFO) << "Client: server selected ALPN: '" << received_alpn_string
                  << "'";
  // Forward any peer ALPS settings to the session.
  const uint8_t* alps_data;
  size_t alps_length;
  SSL_get0_peer_application_settings(ssl(), &alps_data, &alps_length);
  if (alps_length > 0) {
    auto error = session()->OnAlpsData(alps_data, alps_length);
    if (error.has_value()) {
      CloseConnection(QUIC_HANDSHAKE_FAILED,
                      absl::StrCat("Error processing ALPS data: ", *error));
      return;
    }
  }
  state_ = HANDSHAKE_COMPLETE;
  handshaker_delegate()->OnTlsHandshakeComplete();
}
// Called when the client enters 0-RTT; applies the cached transport
// parameters and application state so early data can be sent.
void TlsClientHandshaker::OnEnterEarlyData() {
  QUICHE_DCHECK(SSL_in_early_data(ssl()));
  FillNegotiatedParams();
  // Return value ignored here; PrepareZeroRttConfig closes the connection
  // itself on failure.
  PrepareZeroRttConfig(cached_state_.get());
}
// Snapshots the negotiated cipher suite, key-exchange group, peer signature
// algorithm, and ECH acceptance into crypto_negotiated_params_.
void TlsClientHandshaker::FillNegotiatedParams() {
  const SSL_CIPHER* cipher = SSL_get_current_cipher(ssl());
  if (cipher) {
    crypto_negotiated_params_->cipher_suite =
        SSL_CIPHER_get_protocol_id(cipher);
  }
  crypto_negotiated_params_->key_exchange_group = SSL_get_curve_id(ssl());
  crypto_negotiated_params_->peer_signature_algorithm =
      SSL_get_peer_signature_algorithm(ssl());
  crypto_negotiated_params_->encrypted_client_hello = SSL_ech_accepted(ssl());
}
// Hands buffered post-handshake TLS data to BoringSSL; closes the
// connection if processing fails.
void TlsClientHandshaker::ProcessPostHandshakeMessage() {
  int rv = SSL_process_quic_post_handshake(ssl());
  if (rv != 1) {
    CloseConnection(QUIC_HANDSHAKE_FAILED, "Unexpected post-handshake data");
  }
}
// Early-data rejection is recoverable (handled below); any other TLS error
// should close the connection.
bool TlsClientHandshaker::ShouldCloseConnectionOnUnexpectedError(
    int ssl_error) {
  if (ssl_error != SSL_ERROR_EARLY_DATA_REJECTED) {
    return true;
  }
  HandleZeroRttReject();
  return false;
}
// Recovers from a 0-RTT reject: notifies the delegate, resets BoringSSL's
// early-data state, clears the cached early data, and resumes the
// handshake without 0-RTT.
void TlsClientHandshaker::HandleZeroRttReject() {
  QUIC_DLOG(INFO) << "0-RTT handshake attempted but was rejected by the server";
  QUICHE_DCHECK(session_cache_);
  encryption_established_ = false;
  handshaker_delegate()->OnZeroRttRejected(EarlyDataReason());
  SSL_reset_early_data_reject(ssl());
  session_cache_->ClearEarlyData(server_id_);
  AdvanceHandshake();
}
// Receives a new TLS session (resumption ticket).  When application state
// is expected but not yet available, up to two sessions are buffered
// (newest in slot 0) until SetServerApplicationStateForResumption() runs;
// otherwise the session is cached immediately together with the server's
// transport parameters.
void TlsClientHandshaker::InsertSession(bssl::UniquePtr<SSL_SESSION> session) {
  if (!received_transport_params_) {
    QUIC_BUG(quic_bug_10576_8) << "Transport parameters isn't received";
    return;
  }
  if (session_cache_ == nullptr) {
    QUIC_DVLOG(1) << "No session cache, not inserting a session";
    return;
  }
  if (has_application_state_ && !received_application_state_) {
    // Shift the previously buffered session to slot 1; the new one is
    // always kept in slot 0.
    if (cached_tls_sessions_[0] != nullptr) {
      cached_tls_sessions_[1] = std::move(cached_tls_sessions_[0]);
    }
    cached_tls_sessions_[0] = std::move(session);
    return;
  }
  session_cache_->Insert(server_id_, std::move(session),
                         *received_transport_params_,
                         received_application_state_.get());
}
// Writing any handshake-level data advances the state to at least
// HANDSHAKE_PROCESSED.
void TlsClientHandshaker::WriteMessage(EncryptionLevel level,
                                       absl::string_view data) {
  if (level == ENCRYPTION_HANDSHAKE && state_ < HANDSHAKE_PROCESSED) {
    state_ = HANDSHAKE_PROCESSED;
  }
  TlsHandshaker::WriteMessage(level, data);
}
// Records the application state needed for future resumptions and flushes
// any TLS sessions buffered by InsertSession() into the session cache.
void TlsClientHandshaker::SetServerApplicationStateForResumption(
    std::unique_ptr<ApplicationState> application_state) {
  QUICHE_DCHECK(one_rtt_keys_available());
  received_application_state_ = std::move(application_state);
  if (session_cache_ != nullptr && cached_tls_sessions_[0] != nullptr) {
    // Insert the older buffered session (slot 1) before the newer (slot 0).
    if (cached_tls_sessions_[1] != nullptr) {
      session_cache_->Insert(server_id_, std::move(cached_tls_sessions_[1]),
                             *received_transport_params_,
                             received_application_state_.get());
    }
    session_cache_->Insert(server_id_, std::move(cached_tls_sessions_[0]),
                           *received_transport_params_,
                           received_application_state_.get());
  }
}
} | #include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/macros.h"
#include "openssl/hpke.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/crypto/quic_decrypter.h"
#include "quiche/quic/core/crypto/quic_encrypter.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_server_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_framer_peer.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_session_cache.h"
#include "quiche/quic/tools/fake_proof_verifier.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
using testing::_;
using testing::HasSubstr;
namespace quic {
namespace test {
namespace {
constexpr char kServerHostname[] = "test.example.com";
constexpr uint16_t kServerPort = 443;
// ProofVerifier wrapper for tests.  By default it forwards every call to
// the real test verifier; after Activate(), each VerifyCertChain call is
// queued as a pending operation that the test completes explicitly via
// InvokePendingCallback(), simulating asynchronous certificate
// verification.
class TestProofVerifier : public ProofVerifier {
 public:
  TestProofVerifier()
      : verifier_(crypto_test_utils::ProofVerifierForTesting()) {}
  // Always forwards synchronously to the underlying verifier.
  QuicAsyncStatus VerifyProof(
      const std::string& hostname, const uint16_t port,
      const std::string& server_config, QuicTransportVersion quic_version,
      absl::string_view chlo_hash, const std::vector<std::string>& certs,
      const std::string& cert_sct, const std::string& signature,
      const ProofVerifyContext* context, std::string* error_details,
      std::unique_ptr<ProofVerifyDetails>* details,
      std::unique_ptr<ProofVerifierCallback> callback) override {
    return verifier_->VerifyProof(
        hostname, port, server_config, quic_version, chlo_hash, certs, cert_sct,
        signature, context, error_details, details, std::move(callback));
  }
  // Forwards synchronously unless Activate() was called, in which case the
  // call is queued and QUIC_PENDING is returned.
  QuicAsyncStatus VerifyCertChain(
      const std::string& hostname, const uint16_t port,
      const std::vector<std::string>& certs, const std::string& ocsp_response,
      const std::string& cert_sct, const ProofVerifyContext* context,
      std::string* error_details, std::unique_ptr<ProofVerifyDetails>* details,
      uint8_t* out_alert,
      std::unique_ptr<ProofVerifierCallback> callback) override {
    if (!active_) {
      return verifier_->VerifyCertChain(
          hostname, port, certs, ocsp_response, cert_sct, context,
          error_details, details, out_alert, std::move(callback));
    }
    pending_ops_.push_back(std::make_unique<VerifyChainPendingOp>(
        hostname, port, certs, ocsp_response, cert_sct, context, error_details,
        details, out_alert, std::move(callback), verifier_.get()));
    return QUIC_PENDING;
  }
  std::unique_ptr<ProofVerifyContext> CreateDefaultContext() override {
    return nullptr;
  }
  // Switches subsequent VerifyCertChain calls into async (pending) mode.
  void Activate() { active_ = true; }
  size_t NumPendingCallbacks() const { return pending_ops_.size(); }
  // Runs and removes the n-th queued verification.
  void InvokePendingCallback(size_t n) {
    ASSERT_GT(NumPendingCallbacks(), n);
    pending_ops_[n]->Run();
    auto it = pending_ops_.begin() + n;
    pending_ops_.erase(it);
  }
 private:
  // Used where the delegate must complete synchronously; fails the test if
  // it is ever actually invoked.
  class FailingProofVerifierCallback : public ProofVerifierCallback {
   public:
    void Run(bool , const std::string& ,
             std::unique_ptr<ProofVerifyDetails>* ) override {
      FAIL();
    }
  };
  // Captures the arguments of a deferred VerifyCertChain call so it can be
  // replayed against the real verifier later.
  class VerifyChainPendingOp {
   public:
    VerifyChainPendingOp(const std::string& hostname, const uint16_t port,
                         const std::vector<std::string>& certs,
                         const std::string& ocsp_response,
                         const std::string& cert_sct,
                         const ProofVerifyContext* context,
                         std::string* error_details,
                         std::unique_ptr<ProofVerifyDetails>* details,
                         uint8_t* out_alert,
                         std::unique_ptr<ProofVerifierCallback> callback,
                         ProofVerifier* delegate)
        : hostname_(hostname),
          port_(port),
          certs_(certs),
          ocsp_response_(ocsp_response),
          cert_sct_(cert_sct),
          context_(context),
          error_details_(error_details),
          details_(details),
          out_alert_(out_alert),
          callback_(std::move(callback)),
          delegate_(delegate) {}
    // Replays the verification synchronously and fires the original
    // caller's callback with the result.
    void Run() {
      QuicAsyncStatus status = delegate_->VerifyCertChain(
          hostname_, port_, certs_, ocsp_response_, cert_sct_, context_,
          error_details_, details_, out_alert_,
          std::make_unique<FailingProofVerifierCallback>());
      ASSERT_NE(status, QUIC_PENDING);
      callback_->Run(status == QUIC_SUCCESS, *error_details_, details_);
    }
   private:
    std::string hostname_;
    const uint16_t port_;
    std::vector<std::string> certs_;
    std::string ocsp_response_;
    std::string cert_sct_;
    const ProofVerifyContext* context_;
    std::string* error_details_;
    std::unique_ptr<ProofVerifyDetails>* details_;
    uint8_t* out_alert_;
    std::unique_ptr<ProofVerifierCallback> callback_;
    ProofVerifier* delegate_;
  };
  std::unique_ptr<ProofVerifier> verifier_;
  bool active_ = false;
  std::vector<std::unique_ptr<VerifyChainPendingOp>> pending_ops_;
};
// Test fixture, parameterized over QUIC version, that connects a client
// TlsClientHandshaker to an in-process fake server.
class TlsClientHandshakerTest : public QuicTestWithParam<ParsedQuicVersion> {
 public:
  TlsClientHandshakerTest()
      : supported_versions_({GetParam()}),
        server_id_(kServerHostname, kServerPort),
        server_compressed_certs_cache_(
            QuicCompressedCertsCache::kQuicCompressedCertsCacheSize) {
    crypto_config_ = std::make_unique<QuicCryptoClientConfig>(
        std::make_unique<TestProofVerifier>(),
        std::make_unique<test::SimpleSessionCache>());
    server_crypto_config_ = crypto_test_utils::CryptoServerConfigForTesting();
    CreateConnection();
  }
  // Builds a fresh client session on connection_ that offers the ALPN for
  // the version under test.
  void CreateSession() {
    session_ = std::make_unique<TestQuicSpdyClientSession>(
        connection_, DefaultQuicConfig(), supported_versions_, server_id_,
        crypto_config_.get(), ssl_config_);
    EXPECT_CALL(*session_, GetAlpnsToOffer())
        .WillRepeatedly(testing::Return(std::vector<std::string>(
            {AlpnForVersion(connection_->version())})));
  }
  // Replaces connection_ (owned by the session) and its session; used to
  // simulate a second connection, e.g. for resumption tests.
  void CreateConnection() {
    connection_ =
        new PacketSavingConnection(&client_helper_, &alarm_factory_,
                                   Perspective::IS_CLIENT, supported_versions_);
    // Advance the time so that a 0-RTT handshake is not performed at time 0.
    connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
    CreateSession();
  }
  void CompleteCryptoHandshake() {
    CompleteCryptoHandshakeWithServerALPN(
        AlpnForVersion(connection_->version()));
  }
  // Runs the full client<->fake-server handshake with the server selecting
  // |alpn|.
  void CompleteCryptoHandshakeWithServerALPN(const std::string& alpn) {
    EXPECT_CALL(*connection_, SendCryptoData(_, _, _))
        .Times(testing::AnyNumber());
    stream()->CryptoConnect();
    QuicConfig config;
    crypto_test_utils::HandshakeWithFakeServer(
        &config, server_crypto_config_.get(), &server_helper_, &alarm_factory_,
        connection_, stream(), alpn);
  }
  QuicCryptoClientStream* stream() {
    return session_->GetMutableCryptoStream();
  }
  QuicCryptoServerStreamBase* server_stream() {
    return server_session_->GetMutableCryptoStream();
  }
  // Creates a real server session/connection pair so the handshake can be
  // advanced message-by-message instead of via HandshakeWithFakeServer.
  void InitializeFakeServer() {
    TestQuicSpdyServerSession* server_session = nullptr;
    CreateServerSessionForTest(
        server_id_, QuicTime::Delta::FromSeconds(100000), supported_versions_,
        &server_helper_, &alarm_factory_, server_crypto_config_.get(),
        &server_compressed_certs_cache_, &server_connection_, &server_session);
    server_session_.reset(server_session);
    std::string alpn = AlpnForVersion(connection_->version());
    EXPECT_CALL(*server_session_, SelectAlpn(_))
        .WillRepeatedly([alpn](const std::vector<absl::string_view>& alpns) {
          return std::find(alpns.cbegin(), alpns.cend(), alpn);
        });
  }
  // Generates an ECH key pair and serializes the matching ECHConfigList
  // into |ech_config_list|; returns null on any BoringSSL failure.
  static bssl::UniquePtr<SSL_ECH_KEYS> MakeTestEchKeys(
      const char* public_name, size_t max_name_len,
      std::string* ech_config_list) {
    bssl::ScopedEVP_HPKE_KEY key;
    if (!EVP_HPKE_KEY_generate(key.get(), EVP_hpke_x25519_hkdf_sha256())) {
      return nullptr;
    }
    uint8_t* ech_config;
    size_t ech_config_len;
    if (!SSL_marshal_ech_config(&ech_config, &ech_config_len,
                                1, key.get(), public_name,
                                max_name_len)) {
      return nullptr;
    }
    bssl::UniquePtr<uint8_t> scoped_ech_config(ech_config);
    uint8_t* ech_config_list_raw;
    size_t ech_config_list_len;
    bssl::UniquePtr<SSL_ECH_KEYS> keys(SSL_ECH_KEYS_new());
    if (!keys ||
        !SSL_ECH_KEYS_add(keys.get(), 1, ech_config,
                          ech_config_len, key.get()) ||
        !SSL_ECH_KEYS_marshal_retry_configs(keys.get(), &ech_config_list_raw,
                                            &ech_config_list_len)) {
      return nullptr;
    }
    bssl::UniquePtr<uint8_t> scoped_ech_config_list(ech_config_list_raw);
    ech_config_list->assign(ech_config_list_raw,
                            ech_config_list_raw + ech_config_list_len);
    return keys;
  }
  MockQuicConnectionHelper server_helper_;
  MockQuicConnectionHelper client_helper_;
  MockAlarmFactory alarm_factory_;
  PacketSavingConnection* connection_;  // Owned by session_.
  ParsedQuicVersionVector supported_versions_;
  std::unique_ptr<TestQuicSpdyClientSession> session_;
  QuicServerId server_id_;
  CryptoHandshakeMessage message_;
  std::unique_ptr<QuicCryptoClientConfig> crypto_config_;
  std::optional<QuicSSLConfig> ssl_config_;
  // Server state; populated lazily by InitializeFakeServer().
  std::unique_ptr<QuicCryptoServerConfig> server_crypto_config_;
  PacketSavingConnection* server_connection_;
  std::unique_ptr<TestQuicSpdyServerSession> server_session_;
  QuicCompressedCertsCache server_compressed_certs_cache_;
};
// Run every test below once per supported TLS-capable QUIC version.
INSTANTIATE_TEST_SUITE_P(TlsHandshakerTests, TlsClientHandshakerTest,
                         ::testing::ValuesIn(AllSupportedVersionsWithTls()),
                         ::testing::PrintToStringParamName());
// A freshly constructed handshaker has no keys established.
TEST_P(TlsClientHandshakerTest, NotInitiallyConnected) {
  EXPECT_FALSE(stream()->encryption_established());
  EXPECT_FALSE(stream()->one_rtt_keys_available());
}
// A full handshake establishes 1-RTT keys and is not a resumption.
TEST_P(TlsClientHandshakerTest, ConnectedAfterHandshake) {
  CompleteCryptoHandshake();
  EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
  EXPECT_TRUE(stream()->encryption_established());
  EXPECT_TRUE(stream()->one_rtt_keys_available());
  EXPECT_FALSE(stream()->IsResumption());
}
// Feeding garbage handshake bytes must close the connection with
// QUIC_HANDSHAKE_FAILED.
TEST_P(TlsClientHandshakerTest, ConnectionClosedOnTlsError) {
  stream()->CryptoConnect();
  EXPECT_CALL(*connection_, CloseConnection(QUIC_HANDSHAKE_FAILED, _, _, _));
  // A bogus TLS record: valid-looking type byte followed by zeros.
  char bogus_handshake_message[] = {
      2,
      0, 0, 0,
  };
  stream()->crypto_message_parser()->ProcessInput(
      absl::string_view(bogus_handshake_message,
                        ABSL_ARRAYSIZE(bogus_handshake_message)),
      ENCRYPTION_INITIAL);
  EXPECT_FALSE(stream()->one_rtt_keys_available());
}
// The session must be notified of proof-verification details.
TEST_P(TlsClientHandshakerTest, ProofVerifyDetailsAvailableAfterHandshake) {
  EXPECT_CALL(*session_, OnProofVerifyDetailsAvailable(testing::_));
  stream()->CryptoConnect();
  QuicConfig config;
  crypto_test_utils::HandshakeWithFakeServer(
      &config, server_crypto_config_.get(), &server_helper_, &alarm_factory_,
      connection_, stream(), AlpnForVersion(connection_->version()));
  EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
  EXPECT_TRUE(stream()->encryption_established());
  EXPECT_TRUE(stream()->one_rtt_keys_available());
}
// The handshake completes even when certificate verification finishes
// asynchronously (pending callback invoked mid-handshake).
TEST_P(TlsClientHandshakerTest, HandshakeWithAsyncProofVerifier) {
  InitializeFakeServer();
  TestProofVerifier* proof_verifier =
      static_cast<TestProofVerifier*>(crypto_config_->proof_verifier());
  proof_verifier->Activate();
  stream()->CryptoConnect();
  // Exchange the first flights; verification is now pending.
  std::pair<size_t, size_t> moved_message_counts =
      crypto_test_utils::AdvanceHandshake(
          connection_, stream(), 0, server_connection_, server_stream(), 0);
  ASSERT_EQ(proof_verifier->NumPendingCallbacks(), 1u);
  proof_verifier->InvokePendingCallback(0);
  // Finish the remainder of the handshake.
  crypto_test_utils::AdvanceHandshake(
      connection_, stream(), moved_message_counts.first, server_connection_,
      server_stream(), moved_message_counts.second);
  EXPECT_TRUE(stream()->encryption_established());
  EXPECT_TRUE(stream()->one_rtt_keys_available());
}
TEST_P(TlsClientHandshakerTest, Resumption) {
SSL_CTX_set_early_data_enabled(server_crypto_config_->ssl_ctx(), false);
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->ResumptionAttempted());
EXPECT_FALSE(stream()->IsResumption());
CreateConnection();
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_TRUE(stream()->ResumptionAttempted());
EXPECT_TRUE(stream()->IsResumption());
}
TEST_P(TlsClientHandshakerTest, ResumptionRejection) {
SSL_CTX_set_early_data_enabled(server_crypto_config_->ssl_ctx(), false);
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->ResumptionAttempted());
EXPECT_FALSE(stream()->IsResumption());
SSL_CTX_set_options(server_crypto_config_->ssl_ctx(), SSL_OP_NO_TICKET);
CreateConnection();
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_TRUE(stream()->ResumptionAttempted());
EXPECT_FALSE(stream()->IsResumption());
EXPECT_FALSE(stream()->EarlyDataAccepted());
EXPECT_EQ(stream()->EarlyDataReason(),
ssl_early_data_unsupported_for_session);
}
TEST_P(TlsClientHandshakerTest, ZeroRttResumption) {
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
CreateConnection();
EXPECT_CALL(*session_, OnConfigNegotiated()).Times(2);
EXPECT_CALL(*connection_, SendCryptoData(_, _, _))
.Times(testing::AnyNumber());
stream()->CryptoConnect();
EXPECT_TRUE(stream()->encryption_established());
EXPECT_NE(stream()->crypto_negotiated_params().cipher_suite, 0);
EXPECT_NE(stream()->crypto_negotiated_params().key_exchange_group, 0);
EXPECT_NE(stream()->crypto_negotiated_params().peer_signature_algorithm, 0);
QuicConfig config;
crypto_test_utils::HandshakeWithFakeServer(
&config, server_crypto_config_.get(), &server_helper_, &alarm_factory_,
connection_, stream(), AlpnForVersion(connection_->version()));
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_TRUE(stream()->IsResumption());
EXPECT_TRUE(stream()->EarlyDataAccepted());
EXPECT_EQ(stream()->EarlyDataReason(), ssl_early_data_accepted);
}
TEST_P(TlsClientHandshakerTest, ZeroRttResumptionWithAyncProofVerifier) {
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
CreateConnection();
InitializeFakeServer();
EXPECT_CALL(*session_, OnConfigNegotiated());
EXPECT_CALL(*connection_, SendCryptoData(_, _, _))
.Times(testing::AnyNumber());
TestProofVerifier* proof_verifier =
static_cast<TestProofVerifier*>(crypto_config_->proof_verifier());
proof_verifier->Activate();
stream()->CryptoConnect();
ASSERT_EQ(proof_verifier->NumPendingCallbacks(), 1u);
crypto_test_utils::AdvanceHandshake(connection_, stream(), 0,
server_connection_, server_stream(), 0);
EXPECT_FALSE(stream()->one_rtt_keys_available());
EXPECT_FALSE(server_stream()->one_rtt_keys_available());
proof_verifier->InvokePendingCallback(0);
QuicFramer* framer = QuicConnectionPeer::GetFramer(connection_);
EXPECT_NE(nullptr,
QuicFramerPeer::GetEncrypter(framer, ENCRYPTION_HANDSHAKE));
}
TEST_P(TlsClientHandshakerTest, ZeroRttRejection) {
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
SSL_CTX_set_early_data_enabled(server_crypto_config_->ssl_ctx(), false);
CreateConnection();
EXPECT_CALL(*session_, OnConfigNegotiated()).Times(2);
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_INITIAL, NOT_RETRANSMISSION));
if (VersionUsesHttp3(session_->transport_version())) {
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_ZERO_RTT, NOT_RETRANSMISSION));
}
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_HANDSHAKE, NOT_RETRANSMISSION));
if (VersionUsesHttp3(session_->transport_version())) {
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_FORWARD_SECURE, LOSS_RETRANSMISSION));
}
CompleteCryptoHandshake();
QuicFramer* framer = QuicConnectionPeer::GetFramer(connection_);
EXPECT_EQ(nullptr, QuicFramerPeer::GetEncrypter(framer, ENCRYPTION_ZERO_RTT));
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_TRUE(stream()->IsResumption());
EXPECT_FALSE(stream()->EarlyDataAccepted());
EXPECT_EQ(stream()->EarlyDataReason(), ssl_early_data_peer_declined);
}
TEST_P(TlsClientHandshakerTest, ZeroRttAndResumptionRejection) {
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
SSL_CTX_set_options(server_crypto_config_->ssl_ctx(), SSL_OP_NO_TICKET);
CreateConnection();
EXPECT_CALL(*session_, OnConfigNegotiated()).Times(2);
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_INITIAL, NOT_RETRANSMISSION));
if (VersionUsesHttp3(session_->transport_version())) {
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_ZERO_RTT, NOT_RETRANSMISSION));
}
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_HANDSHAKE, NOT_RETRANSMISSION));
if (VersionUsesHttp3(session_->transport_version())) {
EXPECT_CALL(*connection_,
OnPacketSent(ENCRYPTION_FORWARD_SECURE, LOSS_RETRANSMISSION));
}
CompleteCryptoHandshake();
QuicFramer* framer = QuicConnectionPeer::GetFramer(connection_);
EXPECT_EQ(nullptr, QuicFramerPeer::GetEncrypter(framer, ENCRYPTION_ZERO_RTT));
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
EXPECT_FALSE(stream()->EarlyDataAccepted());
EXPECT_EQ(stream()->EarlyDataReason(), ssl_early_data_session_not_resumed);
}
TEST_P(TlsClientHandshakerTest, ClientSendsNoSNI) {
server_id_ = QuicServerId("", 443);
crypto_config_.reset(new QuicCryptoClientConfig(
std::make_unique<FakeProofVerifier>(), nullptr));
CreateConnection();
InitializeFakeServer();
stream()->CryptoConnect();
crypto_test_utils::CommunicateHandshakeMessages(
connection_, stream(), server_connection_, server_stream());
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_EQ(server_stream()->crypto_negotiated_params().sni, "");
}
TEST_P(TlsClientHandshakerTest, ClientSendingTooManyALPNs) {
std::string long_alpn(250, 'A');
EXPECT_QUIC_BUG(
{
EXPECT_CALL(*session_, GetAlpnsToOffer())
.WillOnce(testing::Return(std::vector<std::string>({
long_alpn + "1",
long_alpn + "2",
long_alpn + "3",
long_alpn + "4",
long_alpn + "5",
long_alpn + "6",
long_alpn + "7",
long_alpn + "8",
})));
stream()->CryptoConnect();
},
"Failed to set ALPN");
}
TEST_P(TlsClientHandshakerTest, ServerRequiresCustomALPN) {
InitializeFakeServer();
const std::string kTestAlpn = "An ALPN That Client Did Not Offer";
EXPECT_CALL(*server_session_, SelectAlpn(_))
.WillOnce([kTestAlpn](const std::vector<absl::string_view>& alpns) {
return std::find(alpns.cbegin(), alpns.cend(), kTestAlpn);
});
EXPECT_CALL(
*server_connection_,
CloseConnection(
QUIC_HANDSHAKE_FAILED,
static_cast<QuicIetfTransportErrorCodes>(CRYPTO_ERROR_FIRST + 120),
HasSubstr("TLS handshake failure (ENCRYPTION_INITIAL) 120: "
"no application protocol"),
_));
stream()->CryptoConnect();
crypto_test_utils::AdvanceHandshake(connection_, stream(), 0,
server_connection_, server_stream(), 0);
EXPECT_FALSE(stream()->one_rtt_keys_available());
EXPECT_FALSE(server_stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->encryption_established());
EXPECT_FALSE(server_stream()->encryption_established());
}
TEST_P(TlsClientHandshakerTest, ZeroRTTNotAttemptedOnALPNChange) {
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->IsResumption());
CreateConnection();
const std::string kTestAlpn = "Test ALPN";
EXPECT_CALL(*session_, GetAlpnsToOffer())
.WillRepeatedly(testing::Return(std::vector<std::string>({kTestAlpn})));
EXPECT_CALL(*session_, OnConfigNegotiated()).Times(1);
CompleteCryptoHandshakeWithServerALPN(kTestAlpn);
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->EarlyDataAccepted());
EXPECT_EQ(stream()->EarlyDataReason(), ssl_early_data_alpn_mismatch);
}
TEST_P(TlsClientHandshakerTest, InvalidSNI) {
server_id_ = QuicServerId("invalid!.example.com", 443);
crypto_config_.reset(new QuicCryptoClientConfig(
std::make_unique<FakeProofVerifier>(), nullptr));
CreateConnection();
InitializeFakeServer();
stream()->CryptoConnect();
crypto_test_utils::CommunicateHandshakeMessages(
connection_, stream(), server_connection_, server_stream());
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_EQ(server_stream()->crypto_negotiated_params().sni, "");
}
TEST_P(TlsClientHandshakerTest, BadTransportParams) {
if (!connection_->version().UsesHttp3()) {
return;
}
CompleteCryptoHandshake();
CreateConnection();
stream()->CryptoConnect();
auto* id_manager = QuicSessionPeer::ietf_streamid_manager(session_.get());
EXPECT_EQ(kDefaultMaxStreamsPerConnection,
id_manager->max_outgoing_bidirectional_streams());
QuicConfig config;
config.SetMaxBidirectionalStreamsToSend(
config.GetMaxBidirectionalStreamsToSend() - 1);
EXPECT_CALL(*connection_,
CloseConnection(QUIC_ZERO_RTT_REJECTION_LIMIT_REDUCED, _, _))
.WillOnce(testing::Invoke(connection_,
&MockQuicConnection::ReallyCloseConnection));
EXPECT_CALL(*connection_, CloseConnection(QUIC_HANDSHAKE_FAILED, _, _));
crypto_test_utils::HandshakeWithFakeServer(
&config, server_crypto_config_.get(), &server_helper_, &alarm_factory_,
connection_, stream(), AlpnForVersion(connection_->version()));
}
TEST_P(TlsClientHandshakerTest, ECH) {
ssl_config_.emplace();
bssl::UniquePtr<SSL_ECH_KEYS> ech_keys =
MakeTestEchKeys("public-name.example", 64,
&ssl_config_->ech_config_list);
ASSERT_TRUE(ech_keys);
ASSERT_TRUE(
SSL_CTX_set1_ech_keys(server_crypto_config_->ssl_ctx(), ech_keys.get()));
CreateConnection();
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_TRUE(stream()->crypto_negotiated_params().encrypted_client_hello);
}
TEST_P(TlsClientHandshakerTest, ECHWithConfigAndGREASE) {
ssl_config_.emplace();
bssl::UniquePtr<SSL_ECH_KEYS> ech_keys =
MakeTestEchKeys("public-name.example", 64,
&ssl_config_->ech_config_list);
ASSERT_TRUE(ech_keys);
ssl_config_->ech_grease_enabled = true;
ASSERT_TRUE(
SSL_CTX_set1_ech_keys(server_crypto_config_->ssl_ctx(), ech_keys.get()));
CreateConnection();
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_TRUE(stream()->crypto_negotiated_params().encrypted_client_hello);
}
TEST_P(TlsClientHandshakerTest, ECHInvalidConfig) {
ssl_config_.emplace();
ssl_config_->ech_config_list = "invalid config";
CreateConnection();
EXPECT_CALL(*connection_, CloseConnection(QUIC_HANDSHAKE_FAILED, _, _));
stream()->CryptoConnect();
}
TEST_P(TlsClientHandshakerTest, ECHWrongKeys) {
ssl_config_.emplace();
bssl::UniquePtr<SSL_ECH_KEYS> ech_keys1 =
MakeTestEchKeys("public-name.example", 64,
&ssl_config_->ech_config_list);
ASSERT_TRUE(ech_keys1);
std::string ech_config_list2;
bssl::UniquePtr<SSL_ECH_KEYS> ech_keys2 = MakeTestEchKeys(
"public-name.example", 64, &ech_config_list2);
ASSERT_TRUE(ech_keys2);
ASSERT_TRUE(
SSL_CTX_set1_ech_keys(server_crypto_config_->ssl_ctx(), ech_keys2.get()));
CreateConnection();
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HANDSHAKE_FAILED,
static_cast<QuicIetfTransportErrorCodes>(
CRYPTO_ERROR_FIRST + SSL_AD_ECH_REQUIRED),
_, _))
.WillOnce(testing::Invoke(connection_,
&MockQuicConnection::ReallyCloseConnection4));
CompleteCryptoHandshake();
}
TEST_P(TlsClientHandshakerTest, ECHGrease) {
ssl_config_.emplace();
ssl_config_->ech_grease_enabled = true;
CreateConnection();
static bool callback_ran;
callback_ran = false;
SSL_CTX_set_dos_protection_cb(
server_crypto_config_->ssl_ctx(),
[](const SSL_CLIENT_HELLO* client_hello) -> int {
const uint8_t* data;
size_t len;
EXPECT_TRUE(SSL_early_callback_ctx_extension_get(
client_hello, TLSEXT_TYPE_encrypted_client_hello, &data, &len));
callback_ran = true;
return 1;
});
CompleteCryptoHandshake();
EXPECT_TRUE(callback_ran);
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_FALSE(stream()->crypto_negotiated_params().encrypted_client_hello);
}
#if BORINGSSL_API_VERSION >= 22
TEST_P(TlsClientHandshakerTest, EnableKyber) {
crypto_config_->set_preferred_groups({SSL_GROUP_X25519_KYBER768_DRAFT00});
server_crypto_config_->set_preferred_groups(
{SSL_GROUP_X25519_KYBER768_DRAFT00, SSL_GROUP_X25519, SSL_GROUP_SECP256R1,
SSL_GROUP_SECP384R1});
CreateConnection();
CompleteCryptoHandshake();
EXPECT_TRUE(stream()->encryption_established());
EXPECT_TRUE(stream()->one_rtt_keys_available());
EXPECT_EQ(SSL_GROUP_X25519_KYBER768_DRAFT00,
SSL_get_group_id(stream()->GetSsl()));
}
#endif
#if BORINGSSL_API_VERSION >= 27
TEST_P(TlsClientHandshakerTest, EnableClientAlpsUseNewCodepoint) {
for (bool server_allow_alps_new_codepoint : {true, false}) {
SCOPED_TRACE(absl::StrCat("Test allows alps new codepoint:",
server_allow_alps_new_codepoint));
crypto_config_->set_alps_use_new_codepoint(true);
SetQuicReloadableFlag(quic_gfe_allow_alps_new_codepoint,
server_allow_alps_new_codepoint);
CreateConnection();
static bool callback_ran;
callback_ran = false;
SSL_CTX_set_dos_protection_cb(
server_crypto_config_->ssl_ctx(),
[](const SSL_CLIENT_HELLO* client_hello) -> int {
const uint8_t* data;
size_t len;
EXPECT_TRUE(SSL_early_callback_ctx_extension_get(
client_hello, TLSEXT_TYPE_application_settings, &data, &len));
callback_ran = true;
return 1;
});
CompleteCryptoHandshake();
EXPECT_EQ(PROTOCOL_TLS1_3, stream()->handshake_protocol());
EXPECT_TRUE(callback_ran);
}
}
#endif
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/tls_client_handshaker.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/tls_client_handshaker_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
5bd8cb22-40d3-4485-8d6d-5d6717fce3d5 | cpp | tensorflow/tensorflow | gpu_collective_performance_model | third_party/xla/xla/service/gpu/model/gpu_collective_performance_model.cc | third_party/xla/xla/service/gpu/model/gpu_collective_performance_model_test.cc | #include "xla/service/gpu/model/gpu_collective_performance_model.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/numbers.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/nvml/include/nvml.h"
#endif
namespace xla {
namespace gpu {
namespace {
int64_t GetNcclMaxNumChannels(
GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) {
int64_t max_nchannels = 0;
switch (algorithm) {
case GpuPerformanceWithCollectiveModel::RING:
case GpuPerformanceWithCollectiveModel::TREE:
max_nchannels = GpuPerformanceWithCollectiveModel::kMaxNumChannelsRing;
break;
}
const char* env = std::getenv("NCCL_MAX_NCHANNELS");
if (env != nullptr) {
int64_t max_nchannels_from_env;
if (absl::SimpleAtoi(env, &max_nchannels_from_env)) {
max_nchannels = std::min(max_nchannels_from_env, max_nchannels);
}
}
return max_nchannels;
}
int64_t GetMinNumberOfChannels(
GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) {
int64_t min_nchannels = 0;
switch (algorithm) {
case GpuPerformanceWithCollectiveModel::RING:
case GpuPerformanceWithCollectiveModel::TREE:
min_nchannels = 1;
break;
}
const char* env = std::getenv("NCCL_MIN_NCHANNELS");
if (env != nullptr) {
int64_t min_nchannels_from_env;
if (absl::SimpleAtoi(env, &min_nchannels_from_env)) {
min_nchannels = std::min(min_nchannels_from_env, min_nchannels);
}
}
return min_nchannels;
}
int GetNumThreads(int warp_size, int min_num_threads, int max_num_threads,
int default_num_threads) {
int threads_from_env = default_num_threads;
const char* env = std::getenv("NCCL_NTHREADS");
if (env != nullptr) {
CHECK(absl::SimpleAtoi(env, &threads_from_env));
}
int num_threads = threads_from_env;
if (num_threads > 0) {
if (num_threads % warp_size != 0) {
num_threads = max_num_threads;
} else if (num_threads > max_num_threads) {
num_threads = max_num_threads;
} else if (num_threads < min_num_threads) {
num_threads = min_num_threads;
}
} else {
num_threads = default_num_threads;
}
return num_threads;
}
float GetMaxSysBwFromGpu(const se::CudaComputeCapability cc,
const double* bandwidths_table) {
switch (cc.major) {
case se::CudaComputeCapability::VOLTA:
return bandwidths_table[0];
case se::CudaComputeCapability::AMPERE:
return bandwidths_table[1];
case se::CudaComputeCapability::HOPPER:
return bandwidths_table[2];
case se::CudaComputeCapability::BLACKWELL:
return bandwidths_table[3];
default:
return bandwidths_table[4];
}
}
}
float GpuPerformanceWithCollectiveModel::GetNvlinkBw(
se::CudaComputeCapability compute_capability) {
return compute_capability.IsAtLeast(se::CudaComputeCapability::HOPPER)
? kSm90NvlinkBandwidth
: compute_capability.IsAtLeast(se::CudaComputeCapability::AMPERE)
? kSm80NvlinkBandwidth
: compute_capability.IsAtLeast(se::CudaComputeCapability::VOLTA)
? kSm70NvlinkBandwidth
: compute_capability.IsAtLeast(se::CudaComputeCapability::PASCAL_)
? kSm60NvlinkBandwidth
: kSm80NvlinkBandwidth;
}
bool GpuPerformanceWithCollectiveModel::InitNvml() {
#if GOOGLE_CUDA && (defined(PLATFORM_POSIX) || defined(PLATFORM_GOOGLE))
void* libhandle = dlopen("libnvidia-ml.so.1", RTLD_NOW);
CHECK(libhandle != nullptr) << "Failed to open libnvidia-ml.so.1";
struct SymbolEntry {
void** functor;
char const* name;
};
std::vector<SymbolEntry> symbols = {
{(void**)&xla_nvmlInit, "nvmlInit_v2"},
{(void**)&xla_nvmlShutdown, "nvmlShutdown"},
{(void**)&xla_nvmlDeviceGetHandleByIndex, "nvmlDeviceGetHandleByIndex"},
{(void**)&xla_nvmlDeviceGetNvLinkCapability,
"nvmlDeviceGetNvLinkCapability"},
};
for (SymbolEntry se : symbols) {
*se.functor = dlsym(libhandle, se.name);
}
nvmlReturn_t init_result = xla_nvmlInit();
return init_result == NVML_SUCCESS;
#else
return false;
#endif
}
bool GpuPerformanceWithCollectiveModel::ShutdownNvml() {
#if GOOGLE_CUDA
nvmlReturn_t shutdown_result = xla_nvmlShutdown();
return shutdown_result == NVML_SUCCESS;
#else
return false;
#endif
}
uint32_t
GpuPerformanceWithCollectiveModel::CheckIfNvlinkSupportsP2P() {
#if GOOGLE_CUDA
CHECK(InitNvml()) << "NVML init failed.";
nvmlDevice_t nvml_device;
nvmlReturn_t get_device_result =
xla_nvmlDeviceGetHandleByIndex(0, &nvml_device);
CHECK(get_device_result == NVML_SUCCESS);
uint32_t supported_p2p = 0;
nvmlReturn_t nvlink_cap_result = xla_nvmlDeviceGetNvLinkCapability(
nvml_device, 0, NVML_NVLINK_CAP_P2P_SUPPORTED,
&supported_p2p);
CHECK(nvlink_cap_result == NVML_SUCCESS ||
nvlink_cap_result == NVML_ERROR_NOT_SUPPORTED);
CHECK(ShutdownNvml()) << "NVML shutdown failed.";
return supported_p2p;
#else
return 0;
#endif
}
absl::Duration
GpuPerformanceWithCollectiveModel::ComputeAllreduceTime(
const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info) {
absl::Duration total_time = kNcclKernelLaunchOverhead;
stream_executor::CudaComputeCapability compute_cap =
gpu_device_info.cuda_compute_capability();
int64_t size_of_speed_array = kIntraNodeSpeeds.size();
int64_t size_of_sm90_speed_array = kIntraNodeSpeedsSm90.size();
int num_speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER
? size_of_sm90_speed_array
: size_of_speed_array;
const double* speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER
? kIntraNodeSpeedsSm90.data()
: kIntraNodeSpeeds.data();
int speed_index = 0;
float max_sys_bw =
GetMaxSysBwFromGpu(compute_cap, kLowLatencyMaxBandwidths.data());
CHECK_GT(max_sys_bw, 0);
while ((speed_index < num_speeds - 1) && speeds[speed_index] > max_sys_bw) {
speed_index++;
}
float bw_intra_node = speeds[speed_index];
int64_t num_devices = cost_analysis->NumOfDevices(instr);
int64_t min_nchannels =
std::max(num_devices, GetMinNumberOfChannels(CollectiveAlgo::RING));
int64_t num_channels =
std::max(min_nchannels, GetNcclMaxNumChannels(CollectiveAlgo::RING));
int default_threads =
(bw_intra_node * num_channels <= kPciBandwidth) ? 256 : kLL128NumThreads;
int warp_size = gpu_device_info.threads_per_warp();
int num_threads = GetNumThreads(warp_size, kLL128NumThreads / 4,
kLL128NumThreads, default_threads);
absl::Duration compute_time_per_channel = ComputeTime(
gpu_device_info, cost_analysis->flop_count(instr) / num_channels,
num_channels, num_threads);
total_time += compute_time_per_channel;
uint32_t supported_p2p = CheckIfNvlinkSupportsP2P();
if (supported_p2p == 0) {
VLOG(8) << "Nvlink doesn't support p2p communication. Model will "
"continue using default system bandwidth.";
} else {
VLOG(8) << "Nvlink supports p2p communication, setting intra node "
"bandwidth to nvlink bw.";
bw_intra_node = GetNvlinkBw(compute_cap);
}
double bus_bandwidth = bw_intra_node * num_channels;
double per_channel_ring_ll128_Bw =
GetMaxSysBwFromGpu(compute_cap, kPerChannelMaxRingLL128Bandwidths.data());
bus_bandwidth = std::min(bus_bandwidth * kRingAlgorithmDiscountFactor,
num_channels * per_channel_ring_ll128_Bw);
double actual_bandwidth = bus_bandwidth * cost_analysis->ScalingRatio(instr);
absl::Duration communication_time = absl::Milliseconds(
cost_analysis->bytes_accessed(instr) / (1e6 * actual_bandwidth));
total_time += communication_time;
return total_time;
}
absl::Duration
GpuPerformanceWithCollectiveModel::ComputeCollectiveTime(
const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info) {
if (cost_analysis->NumOfDevices(instr) == 1) {
VLOG(8) << "Returning only kernel launch overhead for a single partition.";
return kNcclKernelLaunchOverhead;
}
if (HloDataflowAnalysis::IsAsynchronousOperationDone(instr.opcode())) {
VLOG(8) << "Returning 0 cost for async done op " << instr.name();
return absl::ZeroDuration();
}
switch (instr.opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
return ComputeAllreduceTime(instr, cost_analysis, gpu_device_info);
default: {
LOG(WARNING)
<< "Runtime estimate for " << instr.name()
<< " not implemented. Returning only the kernel launch time.";
return kNcclKernelLaunchOverhead;
}
}
}
}
} | #include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using GpuPerformanceWithCollectiveModelTest = HloTestBase;
TEST_F(GpuPerformanceWithCollectiveModelTest, TestNvmlLibraryLoading) {
#if GOOGLE_CUDA
EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml());
nvmlDevice_t nvml_device;
nvmlReturn_t get_device_result =
xla_nvmlDeviceGetHandleByIndex(0, &nvml_device);
EXPECT_TRUE(get_device_result == NVML_SUCCESS);
EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml());
#endif
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_collective_performance_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_collective_performance_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd9bc4b7-6f85-4f51-abe9-f426720e4b12 | cpp | abseil/abseil-cpp | seed_sequences | absl/random/seed_sequences.cc | absl/random/seed_sequences_test.cc | #include "absl/random/seed_sequences.h"
#include "absl/random/internal/pool_urbg.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
SeedSeq MakeSeedSeq() {
SeedSeq::result_type seed_material[8];
random_internal::RandenPool<uint32_t>::Fill(absl::MakeSpan(seed_material));
return SeedSeq(std::begin(seed_material), std::end(seed_material));
}
ABSL_NAMESPACE_END
} | #include "absl/random/seed_sequences.h"
#include <iterator>
#include <random>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/random/internal/nonsecure_base.h"
#include "absl/random/random.h"
namespace {
TEST(SeedSequences, Examples) {
{
absl::SeedSeq seed_seq({1, 2, 3});
absl::BitGen bitgen(seed_seq);
EXPECT_NE(0, bitgen());
}
{
absl::BitGen engine;
auto seed_seq = absl::CreateSeedSeqFrom(&engine);
absl::BitGen bitgen(seed_seq);
EXPECT_NE(engine(), bitgen());
}
{
auto seed_seq = absl::MakeSeedSeq();
std::mt19937 random(seed_seq);
EXPECT_NE(0, random());
}
}
TEST(CreateSeedSeqFrom, CompatibleWithStdTypes) {
using ExampleNonsecureURBG =
absl::random_internal::NonsecureURBGBase<std::minstd_rand0>;
ExampleNonsecureURBG rng;
auto seq_from_rng = absl::CreateSeedSeqFrom(&rng);
std::mt19937_64{seq_from_rng};
}
TEST(CreateSeedSeqFrom, CompatibleWithBitGenerator) {
absl::BitGen rng;
auto seq_from_rng = absl::CreateSeedSeqFrom(&rng);
std::mt19937_64{seq_from_rng};
}
TEST(CreateSeedSeqFrom, CompatibleWithInsecureBitGen) {
absl::InsecureBitGen rng;
auto seq_from_rng = absl::CreateSeedSeqFrom(&rng);
std::mt19937_64{seq_from_rng};
}
TEST(CreateSeedSeqFrom, CompatibleWithRawURBG) {
std::random_device urandom;
auto seq_from_rng = absl::CreateSeedSeqFrom(&urandom);
std::mt19937_64{seq_from_rng};
}
template <typename URBG>
void TestReproducibleVariateSequencesForNonsecureURBG() {
const size_t kNumVariates = 1000;
URBG rng;
auto reusable_seed = absl::CreateSeedSeqFrom(&rng);
typename URBG::result_type variates[kNumVariates];
{
URBG child(reusable_seed);
for (auto& variate : variates) {
variate = child();
}
}
{
URBG child(reusable_seed);
for (auto& variate : variates) {
ASSERT_EQ(variate, child());
}
}
}
TEST(CreateSeedSeqFrom, ReproducesVariateSequencesForInsecureBitGen) {
TestReproducibleVariateSequencesForNonsecureURBG<absl::InsecureBitGen>();
}
TEST(CreateSeedSeqFrom, ReproducesVariateSequencesForBitGenerator) {
TestReproducibleVariateSequencesForNonsecureURBG<absl::BitGen>();
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/seed_sequences.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/seed_sequences_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
721b2893-361f-43f7-b07b-7b0aee6e1d39 | cpp | tensorflow/tensorflow | rocm_rocdl_path | third_party/xla/third_party/tsl/tsl/platform/default/rocm_rocdl_path.cc | tensorflow/core/platform/rocm_rocdl_path_test.cc | #include "tsl/platform/rocm_rocdl_path.h"
#include <stdlib.h>
#include "tsl/platform/path.h"
#if !defined(PLATFORM_GOOGLE) && TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
#include "tsl/platform/logging.h"
namespace tsl {
std::string RocmRoot() {
#if TENSORFLOW_USE_ROCM
if (const char* rocm_path_env = std::getenv("ROCM_PATH")) {
VLOG(3) << "ROCM root = " << rocm_path_env;
return rocm_path_env;
} else {
VLOG(3) << "ROCM root = " << TF_ROCM_TOOLKIT_PATH;
return TF_ROCM_TOOLKIT_PATH;
}
#else
return "";
#endif
}
std::string RocdlRoot() {
if (const char* device_lib_path_env = std::getenv("HIP_DEVICE_LIB_PATH")) {
return device_lib_path_env;
} else {
return io::JoinPath(RocmRoot(), "amdgcn/bitcode");
}
}
} | #include "tensorflow/core/platform/rocm_rocdl_path.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#if !defined(PLATFORM_GOOGLE) && TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace tensorflow {
#if TENSORFLOW_USE_ROCM
TEST(RocmRocdlPathTest, ROCDLPath) {
VLOG(2) << "ROCm-Device-Libs root = " << RocdlRoot();
std::vector<string> rocdl_files;
TF_EXPECT_OK(Env::Default()->GetMatchingPaths(
io::JoinPath(RocdlRoot(), "*.bc"), &rocdl_files));
EXPECT_LT(0, rocdl_files.size());
}
#endif
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/default/rocm_rocdl_path.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/platform/rocm_rocdl_path_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b935a03c-90c5-43fc-8348-cf2a6b8dcea3 | cpp | tensorflow/tensorflow | generic_layout_optimizer | tensorflow/core/grappler/optimizers/generic_layout_optimizer.cc | tensorflow/core/grappler/optimizers/generic_layout_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include <utility>
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer.h"
#include "tensorflow/core/grappler/optimizers/generic_layout_optimizer_transposer_factory.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace grappler {
namespace {
// Canonical data-format strings used when choosing a conversion direction.
constexpr char kNHWC[] = "NHWC";
constexpr char kNCHW[] = "NCHW";
// Minimum fraction of GPUs that must have a given compute capability before
// that capability is considered available cluster-wide.
constexpr float kGPURatioThreshold = 0.5;
// Minimum fraction of GPU convolutions that must prefer the swapped layout
// before the NHWC->NCHW default is reversed.
constexpr float kConvGPUExpectedDtypeThreshold = 0.5;
struct MutableNodeViewFormatter {
void operator()(std::string* out, utils::MutableNodeView* node_view) const {
absl::StrAppend(out, node_view->node()->name());
}
};
// Per-cluster GPU census filled in by GetNumGPUs().
struct GpuStats {
  int num_gpus;     // all devices of type GPU
  int num_voltas;   // GPUs with compute capability >= 7.0
  int num_amperes;  // GPUs with compute capability >= 8.0
};
// Counts the GPUs visible in `cluster`, and how many of them report an
// "architecture" (compute capability) of at least Volta (7.0) and
// Ampere (8.0). Devices without a parseable architecture entry only count
// toward num_gpus.
inline GpuStats GetNumGPUs(const Cluster& cluster) {
  GpuStats stats{};
  for (const auto& device_entry : cluster.GetDevices()) {
    const auto& properties = device_entry.second;
    if (properties.type() != kGPU) continue;
    ++stats.num_gpus;
    const auto& environment = properties.environment();
    auto arch_it = environment.find("architecture");
    if (arch_it == environment.end()) continue;
    double compute_capability = 0.0;
    if (!absl::SimpleAtod(arch_it->second, &compute_capability)) continue;
    if (compute_capability >= 7.0) ++stats.num_voltas;
    if (compute_capability >= 8.0) ++stats.num_amperes;
  }
  return stats;
}
inline bool ConvBackpropExists(const TransposeContext& context,
absl::string_view device,
const DataType& data_type) {
for (const auto& node : context.graph_view->GetNodes()) {
const auto* node_def = node.node();
if (!IsConv2DBackpropFilter(*node_def) &&
!IsConv2DBackpropInput(*node_def) &&
!IsConv3DBackpropFilterV2(*node_def) &&
!IsConv3DBackpropInputV2(*node_def)) {
continue;
}
const string& device_name = GetDeviceName(*node_def);
string device_type;
string task;
if (!DeviceNameUtils::SplitDeviceName(device_name, &task, &device_type) ||
!absl::StrContains(absl::AsciiStrToLower(device_type),
absl::AsciiStrToLower(device))) {
continue;
}
const auto* t_attr = node.GetAttr("T");
if (t_attr == nullptr) {
continue;
}
if (t_attr->type() == data_type) {
return true;
}
}
return false;
}
// Decides the direction of the layout conversion and returns
// {src_format, dst_format}. The default is NHWC -> NCHW; the pair is swapped
// (NCHW -> NHWC) either when the user enforced "NHWC", or when no layout is
// enforced and enough of the GPU convolutions use a dtype expected to run
// faster in NHWC (fp16 on Volta+, bf16 on Ampere+, or TF32 fp32 on Ampere+
// when no fp32 conv backprop exists in the graph).
inline std::pair<string, string> GetSrcAndDstDataFormats(
    const TransposeContext& context, GpuStats gpu_stats) {
  string src_format = kNHWC;
  string dst_format = kNCHW;
  const bool is_NHWC_enforced =
      (!context.enforced_layout.empty() && context.enforced_layout == "NHWC");
  // A capability is "ready" when at least kGPURatioThreshold of the GPUs
  // have it.
  const bool volta_ready =
      (static_cast<float>(gpu_stats.num_voltas) /
       static_cast<float>(gpu_stats.num_gpus)) >= kGPURatioThreshold;
  const bool ampere_ready =
      (static_cast<float>(gpu_stats.num_amperes) /
       static_cast<float>(gpu_stats.num_gpus)) >= kGPURatioThreshold;
  int num_conv_gpu = 0;
  int num_conv_gpu_prefer_swap = 0;
  bool fp32_backprop = ConvBackpropExists(context, kGPU, DT_FLOAT);
  // Count GPU-placed Conv2D/Conv3D nodes, and how many of them use a dtype
  // for which swapping the default direction is preferred.
  for (const auto& node : context.graph_view->GetNodes()) {
    const auto* node_def = node.node();
    if (!IsConv2D(*node_def) && !IsConv3D(*node_def)) {
      continue;
    }
    const string& device_name = GetDeviceName(*node_def);
    string device_type;
    string task;
    if (!DeviceNameUtils::SplitDeviceName(device_name, &task, &device_type) ||
        !absl::StrContains(absl::AsciiStrToLower(device_type),
                           absl::AsciiStrToLower(kGPU))) {
      continue;
    }
    num_conv_gpu++;
    const auto* t_attr = node.GetAttr("T");
    if (t_attr == nullptr) {
      continue;
    }
    const DataType dtype = t_attr->type();
    if ((volta_ready && dtype == DT_HALF) ||
        (ampere_ready && dtype == DT_BFLOAT16) ||
        (ampere_ready && dtype == DT_FLOAT &&
         tsl::tensor_float_32_execution_enabled() && !fp32_backprop)) {
      num_conv_gpu_prefer_swap++;
    }
  }
  // Swap only when there is at least one GPU convolution and the fraction
  // preferring the swap clears the threshold.
  const bool should_swap =
      num_conv_gpu > 0 &&
      (static_cast<float>(num_conv_gpu_prefer_swap) /
       static_cast<float>(num_conv_gpu)) >= kConvGPUExpectedDtypeThreshold;
  if (is_NHWC_enforced || (context.enforced_layout.empty() && should_swap)) {
    std::swap(src_format, dst_format);
  }
  VLOG(2) << "Layout conversion of " << src_format << " to " << dst_format
          << " will take place.";
  return {src_format, dst_format};
}
// Runs the matching transposer over every layout-sensitive op among the
// original nodes. Fails with NotFound if a layout-sensitive op has no
// registered transposer.
Status ExpandLayoutSensitiveOp(TransposeContext* context,
                               TransposerFactory* transposer_factory) {
  const int num_nodes = context->num_nodes;
  for (int i = 0; i < num_nodes; ++i) {
    auto* node_view = context->graph_view->GetNode(i);
    auto* node_def = node_view->node();
    if (!IsLayoutSensitiveOp(*node_def)) continue;
    std::shared_ptr<Transposer> transposer =
        transposer_factory->GetTransposer(*node_def);
    if (transposer == nullptr) {
      return Status(
          absl::StatusCode::kNotFound,
          absl::StrCat(
              "Layout sensitive operation should have a transposer. Node: ",
              node_def->DebugString()));
    }
    TF_RETURN_IF_ERROR(transposer->TransposeNode(context, node_view));
  }
  return absl::OkStatus();
}
// Runs the matching transposer over every layout-agnostic op among the
// original nodes. Fails with NotFound if a layout-agnostic op has no
// registered transposer.
Status ExpandLayoutAgnosticOp(TransposeContext* context,
                              TransposerFactory* transposer_factory) {
  const int num_nodes = context->num_nodes;
  for (int i = 0; i < num_nodes; ++i) {
    auto* node_view = context->graph_view->GetNode(i);
    auto* node_def = node_view->node();
    if (!IsLayoutAgnosticOp(*node_def)) continue;
    std::shared_ptr<Transposer> transposer =
        transposer_factory->GetTransposer(*node_def);
    if (transposer == nullptr) {
      return Status(
          absl::StatusCode::kNotFound,
          absl::StrCat(
              "Layout agnostic operation should have a transposer. Node: ",
              node_def->DebugString()));
    }
    TF_RETURN_IF_ERROR(transposer->TransposeNode(context, node_view));
  }
  return absl::OkStatus();
}
// Returns true when the two Transpose nodes have constant permutation inputs
// that are inverses of each other (perm_out[perm_in[i]] == i for all i), so
// applying them back to back is a no-op and the pair can be erased.
inline bool IsCancellableConstPermTransposeNodePair(
    const utils::MutableNodeView& fanout_transpose,
    const utils::MutableNodeView& fanin_transpose) {
  Tensor fanout_tensor;
  if (!GetValueAttrFromConstInputNode(fanout_transpose, IsTranspose, 1,
                                      &fanout_tensor)) {
    return false;
  }
  Tensor fanin_tensor;
  if (!GetValueAttrFromConstInputNode(fanin_transpose, IsTranspose, 1,
                                      &fanin_tensor)) {
    return false;
  }
  if (fanout_tensor.NumElements() != fanin_tensor.NumElements()) {
    return false;
  }
  const auto& fanout_tensor_data = fanout_tensor.unaligned_flat<int32>();
  const auto& fanin_tensor_data = fanin_tensor.unaligned_flat<int32>();
  // Tensor::NumElements() returns int64_t; keep the loop index at the same
  // width instead of narrowing to int.
  const int64_t num_elements = fanout_tensor.NumElements();
  for (int64_t i = 0; i < num_elements; ++i) {
    // Composing the two permutations must yield the identity.
    if (fanout_tensor_data(fanin_tensor_data(i)) != i) {
      return false;
    }
  }
  return true;
}
// Returns true when the two data-format ops undo each other: each node's
// src_format equals the other's dst_format (checked in both directions).
inline bool IsCancellableDataFormatNodePair(
    const utils::MutableNodeView& fanout_transpose,
    const utils::MutableNodeView& fanin_transpose) {
  if (!IsDataFormatOp(fanout_transpose) || !IsDataFormatOp(fanin_transpose)) {
    return false;
  }
  auto src_dst_match = [](const utils::MutableNodeView& src,
                          const utils::MutableNodeView& dst) {
    const auto* src_format = src.GetAttr(kAttrSrcFormat);
    const auto* dst_format = dst.GetAttr(kAttrDstFormat);
    if (src_format == nullptr || dst_format == nullptr) {
      return false;
    }
    return src_format->s() == dst_format->s();
  };
  return src_dst_match(fanin_transpose, fanout_transpose) &&
         src_dst_match(fanout_transpose, fanin_transpose);
}
// A fanout/fanin pair can be erased when it is either a pair of mutually
// inverse const-perm Transposes or a pair of mutually inverse data-format
// ops.
inline bool IsCancellableNodePair(
    const utils::MutableNodeView& fanout_transpose,
    const utils::MutableNodeView& fanin_transpose) {
  if (IsCancellableConstPermTransposeNodePair(fanout_transpose,
                                              fanin_transpose)) {
    return true;
  }
  return IsCancellableDataFormatNodePair(fanout_transpose, fanin_transpose);
}
// Erases pairs of mutually cancelling nodes that this pass inserted (only
// nodes with index >= original_num_nodes are considered). When a new node's
// first fanin is also new and the two cancel (inverse const-perm Transposes
// or inverse data-format ops), consumers of the outer node are rewired to
// the inner node's input and both nodes — plus their const permutation
// inputs, if any — are removed.
Status EraseCancellableNodes(TransposeContext* context) {
  const int original_num_nodes = context->num_nodes;
  utils::MutableGraphView* graph_view = context->graph_view.get();
  utils::Mutation* mutation = graph_view->GetMutationBuilder();
  const int num_nodes = graph_view->NumNodes();
  // Skip all pre-existing nodes; only nodes added by this pass may be erased.
  for (int i = original_num_nodes; i < num_nodes; ++i) {
    auto* node = graph_view->GetNode(i);
    if (node->NumRegularFanins() < 1) {
      continue;
    }
    const auto& regular_fanin_0 = node->GetRegularFanin(0);
    auto* fanin_node = regular_fanin_0.node_view();
    // The fanin must also have been added by this pass.
    if (fanin_node->node_index() < original_num_nodes) {
      continue;
    }
    if (!IsCancellableNodePair(*node, *fanin_node)) {
      continue;
    }
    // Forward the inner node's input to every consumer of the outer node.
    const auto& fanin_to_forward = fanin_node->GetRegularFanin(0);
    TensorId fanin_id_to_forward(fanin_to_forward.node_view()->GetName(),
                                 fanin_to_forward.index());
    for (const auto& regular_fanout : node->GetRegularFanout(0)) {
      mutation->AddOrUpdateRegularFanin(regular_fanout.node_view(),
                                        regular_fanout.index(),
                                        fanin_id_to_forward);
    }
    mutation->RemoveNode(node);
    // A second fanin, when present, is the const permutation input; drop it
    // along with its Transpose.
    if (node->NumRegularFanins() > 1) {
      mutation->RemoveNode(node->GetRegularFanin(1).node_view());
    }
    mutation->RemoveNode(fanin_node);
    if (fanin_node->NumRegularFanins() > 1) {
      mutation->RemoveNode(fanin_node->GetRegularFanin(1).node_view());
    }
  }
  return mutation->Apply();
}
// Collapses Transpose -> Pad -> Transpose chains where the two Transposes
// are inverse const-perm pairs. The Pad's constant paddings are permuted in
// place (so Pad now operates in the pre-transpose layout) and every
// cancelled Transpose is rewritten into an Identity. All Transpose fanouts
// of the Pad must share the same permutation for the rewrite to apply.
Status EraseCancellableNodesAroundPad(TransposeContext* context) {
  utils::MutableGraphView* graph_view = context->graph_view.get();
  utils::Mutation* mutation = graph_view->GetMutationBuilder();
  // Transposes already rewritten in an earlier iteration; skip them.
  absl::flat_hash_set<utils::MutableNodeView*> cancelled_transposes;
  const int num_nodes = graph_view->NumNodes();
  for (int i = 0; i < num_nodes; ++i) {
    // Match the pattern from the downstream Transpose up:
    //   transpose_before -> pad -> transpose_after (+ sibling transposes).
    auto* transpose_after = graph_view->GetNode(i);
    if (!IsTranspose(*transpose_after->node())) continue;
    if (cancelled_transposes.contains(transpose_after)) continue;
    const auto& transpose_after_fanin = transpose_after->GetRegularFanin(0);
    auto* pad = transpose_after_fanin.node_view();
    if (!IsPad(*pad->node())) continue;
    const auto& pad_fanin_0 = pad->GetRegularFanin(0);
    auto* transpose_before = pad_fanin_0.node_view();
    if (!IsTranspose(*transpose_before->node())) continue;
    // The upstream Transpose must feed only the Pad, and the two Transposes
    // must be inverse permutations of each other.
    if (transpose_before->NumRegularFanouts() != 1) continue;
    if (!IsCancellableConstPermTransposeNodePair(*transpose_after,
                                                 *transpose_before))
      continue;
    // Both the paddings and the permutation must be constants we can read.
    Tensor paddings_t;
    if (!GetValueAttrFromConstInputNode(*pad, IsPad, 1, &paddings_t)) continue;
    const auto& pad_fanin_1 = pad->GetRegularFanin(1);
    auto* paddings = pad_fanin_1.node_view();
    if (paddings->NumRegularFanouts() != 1) continue;
    Tensor permute_t;
    if (!GetValueAttrFromConstInputNode(*transpose_after, IsTranspose, 1,
                                        &permute_t))
      continue;
    // Every other fanout of the Pad must be a Transpose with an identical
    // permutation; otherwise the rewrite would change those consumers.
    std::vector<utils::MutableNodeView*> pad_fanout_transposes;
    pad_fanout_transposes.emplace_back(transpose_after);
    bool pad_has_unsupported_fanout = false;
    for (auto& fanout : pad->GetRegularFanout(0)) {
      auto* extra_transpose = fanout.node_view();
      if (extra_transpose == transpose_after) continue;
      Tensor extra_permute_t;
      if (!GetValueAttrFromConstInputNode(*extra_transpose, IsTranspose, 1,
                                          &extra_permute_t) ||
          extra_permute_t.tensor_data() != permute_t.tensor_data()) {
        pad_has_unsupported_fanout = true;
        break;
      }
      pad_fanout_transposes.emplace_back(extra_transpose);
    }
    if (pad_has_unsupported_fanout) continue;
    VLOG(0) << "Cancel Transpose nodes around Pad:"
            << " transpose_before=" << transpose_before->node()->name()
            << " pad=" << pad->node()->name() << " transpose_after="
            << absl::StrJoin(pad_fanout_transposes, ",",
                             MutableNodeViewFormatter());
    // Permute the paddings rows in place to match the pre-transpose layout,
    // then write the permuted tensor back into the paddings Const.
    auto permutation_s = absl::Span<int32>(permute_t.flat<int32>().data(),
                                           permute_t.NumElements());
    auto paddings_s = absl::Span<int32>(paddings_t.flat<int32>().data(),
                                        paddings_t.NumElements());
    TF_RETURN_IF_ERROR(
        PermuteDouble(absl::StrCat("paddings in ", pad->GetName()),
                      permutation_s, &paddings_s));
    AttrValue permuted_paddings_tensor;
    paddings_t.AsProtoTensorContent(permuted_paddings_tensor.mutable_tensor());
    mutation->AddOrUpdateNodeAttr(paddings, "value", permuted_paddings_tensor);
    // Neutralize a cancelled Transpose: turn it into an Identity and drop
    // its permutation input and Tperm attribute.
    const auto transpose_to_identity =
        [&cancelled_transposes,
         &mutation](utils::MutableNodeView* transpose) -> void {
      mutation->UpdateNodeOp(transpose, "Identity");
      mutation->RemoveNodeAttr(transpose, "Tperm");
      mutation->RemoveRegularFanin(transpose, 1);
      cancelled_transposes.insert(transpose);
    };
    transpose_to_identity(transpose_before);
    absl::c_for_each(pad_fanout_transposes, transpose_to_identity);
  }
  return mutation->Apply();
}
// Strips the _output_shapes attribute from every node except _Arg nodes,
// whose shapes must be kept.
Status EraseOutputShapeAttrs(TransposeContext* context) {
  utils::MutableGraphView* graph_view = context->graph_view.get();
  utils::Mutation* mutation = graph_view->GetMutationBuilder();
  const int num_nodes = graph_view->NumNodes();
  for (int i = 0; i < num_nodes; ++i) {
    auto* node = graph_view->GetNode(i);
    if (IsArg(*node->node())) {
      continue;
    }
    mutation->RemoveNodeAttr(node, kAttrOutputShape);
    // NOTE(review): the mutation is applied once per node rather than once
    // after the loop — presumably so each removal is committed before the
    // next node is fetched; confirm before batching.
    TF_RETURN_IF_ERROR(mutation->Apply());
  }
  return absl::OkStatus();
}
}
// Entry point of the pass. Validates the enforced layout, picks the
// conversion direction (GPU: data-driven via GetSrcAndDstDataFormats; CPU:
// only NCHW->NHWC, and only when explicitly configured), expands
// layout-sensitive ops, and — when transposes were inserted or the rewriter
// is aggressive — expands layout-agnostic ops and erases cancelling
// transpose pairs before emitting the final graph.
Status GenericLayoutOptimizer::Optimize(Cluster* cluster,
                                        const GrapplerItem& item,
                                        GraphDef* output) {
  if (cluster == nullptr) {
    LOG(WARNING)
        << "generic layout optimizer was called with cluster == nullptr";
    return errors::Aborted("cluster == nullptr.");
  }
  if (!enforced_layout_.empty() && enforced_layout_ != "NHWC" &&
      enforced_layout_ != "NCHW") {
    return Status(
        absl::StatusCode::kInvalidArgument,
        absl::StrCat("Invalid value for enforced_layout: ", enforced_layout_,
                     ". Supported layouts: 'NHWC', 'NCHW'."));
  }
  const auto gpu_stats = GetNumGPUs(*cluster);
  const bool is_aggressive = opt_level_ == RewriterConfig::AGGRESSIVE;
  TransposeContext context;
  context.enforced_layout = enforced_layout_;
  if (gpu_stats.num_gpus > 0) {
    TF_RETURN_IF_ERROR(TransposeContext::InitializeTransposeContext(
        is_aggressive, item, cluster, &context));
    const auto src_dst_formats = GetSrcAndDstDataFormats(context, gpu_stats);
    context.AssignDeviceAndDataFormats(kGPU, src_dst_formats.first,
                                       src_dst_formats.second);
  } else {
    TF_RETURN_IF_ERROR(TransposeContext::InitializeTransposeContext(
        is_aggressive, item, cluster, &context));
    switch (cpu_layout_conversion_) {
      case RewriterConfig::NCHW_TO_NHWC:
        context.AssignDeviceAndDataFormats(kCPU, kNCHW, kNHWC);
        break;
      case RewriterConfig::NHWC_TO_NCHW:
        return errors::Aborted(
            "Conversion from NHWC to NCHW is currently not available for "
            "CPU.");
      default:
        // No CPU conversion configured: pass the graph through unchanged.
        *output = item.graph;
        VLOG(2) << "No layout conversion will take place for CPU.";
        return absl::OkStatus();
    }
  }
  TransposerFactory transposer_factory;
  TF_RETURN_IF_ERROR(ExpandLayoutSensitiveOp(&context, &transposer_factory));
  // Only run the cleanup phases when new nodes were actually added (or the
  // rewriter is aggressive); node_size() > num_nodes detects insertions.
  if (context.graph.node_size() > context.num_nodes || is_aggressive) {
    TF_RETURN_IF_ERROR(ExpandLayoutAgnosticOp(&context, &transposer_factory));
    TF_RETURN_IF_ERROR(EraseCancellableNodes(&context));
    TF_RETURN_IF_ERROR(EraseCancellableNodesAroundPad(&context));
    TF_RETURN_IF_ERROR(
        context.graph_view->SortTopologically(false, {}));
  }
  TF_RETURN_IF_ERROR(EraseOutputShapeAttrs(&context));
  *output = context.graph;
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/optimizers/generic_layout_optimizer.h"
#include "absl/memory/memory.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/clusters/single_machine.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/tensor_float_32_utils.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
using ::tensorflow::Scope;
using ::tensorflow::ops::Conv2D;
using ::tensorflow::ops::Conv3D;
using ::tensorflow::ops::Identity;
using ::tensorflow::ops::RandomUniform;
// Shared tensor dimensions used by the graph builders below.
constexpr int kBatchSize = 32;
constexpr int kWidth = 10;
constexpr int kHeight = 10;
constexpr int kDepthIn = 8;
constexpr int kKernel = 3;
constexpr int kDepthOut = 16;
// On GPU builds the optimizer converts NHWC -> NCHW; on CPU builds the test
// exercises the opposite direction. These macros let every test body be
// written once for both configurations.
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
#define DIMS(n, h, w, c) \
  { n, h, w, c }
#define SRC_DATA_FORMAT "NHWC"
#define DST_DATA_FORMAT "NCHW"
#define DEVICE "GPU"
#define REWRITER_CONFIG \
  RewriterConfig::DEFAULT, RewriterConfig::NO_CONVERSION_ON_CPU
#define PERMUTATION_SRC_TO_DST \
  { 0, 3, 1, 2 }
#define PERMUTATION_DST_TO_SRC \
  { 0, 2, 3, 1 }
#define DIMS_5D(n, d, h, w, c) \
  { n, d, h, w, c }
#define SRC_DATA_FORMAT_5D "NDHWC"
#define DST_DATA_FORMAT_5D "NCDHW"
#else
#define DIMS(n, h, w, c) \
  { n, c, h, w }
#define SRC_DATA_FORMAT "NCHW"
#define DST_DATA_FORMAT "NHWC"
#define DEVICE "CPU"
#define REWRITER_CONFIG RewriterConfig::DEFAULT, RewriterConfig::NCHW_TO_NHWC
#define PERMUTATION_SRC_TO_DST \
  { 0, 2, 3, 1 }
#define PERMUTATION_DST_TO_SRC \
  { 0, 3, 1, 2 }
#define DIMS_5D(n, d, h, w, c) \
  { n, c, d, h, w }
#define SRC_DATA_FORMAT_5D "NCDHW"
#define DST_DATA_FORMAT_5D "NDHWC"
#endif
// Builds Input -> Filter -> Conv2D (data format SRC_DATA_FORMAT) in `s`,
// placing the Conv2D on `device`, and returns the Conv2D output.
template <typename T = float>
Output SimpleConv2D(tensorflow::Scope* s, int input_size, int filter_size,
                    const string& padding, const string& device) {
  int batch_size = 8;
  int input_height = input_size;
  int input_width = input_size;
  int input_depth = 3;
  int filter_count = 2;
  int stride = 1;
  TensorShape input_shape(
      DIMS(batch_size, input_height, input_width, input_depth));
  Tensor input_data(DataTypeToEnum<T>::value, input_shape);
  test::FillIota<T>(&input_data, static_cast<T>(1));
  Output input =
      ops::Const(s->WithOpName("Input"), Input::Initializer(input_data));
  TensorShape filter_shape(
      {filter_size, filter_size, input_depth, filter_count});
  Tensor filter_data(DataTypeToEnum<T>::value, filter_shape);
  test::FillIota<T>(&filter_data, static_cast<T>(1));
  Output filter =
      ops::Const(s->WithOpName("Filter"), Input::Initializer(filter_data));
  Output conv = ops::Conv2D(s->WithOpName("Conv2D").WithDevice(device), input,
                            filter, DIMS(1, stride, stride, 1), padding,
                            ops::Conv2D::Attrs().DataFormat(SRC_DATA_FORMAT));
  return conv;
}
// Builds a Conv2DBackpropInput whose input_sizes input comes through an
// Identity (i.e. is not a direct Const). `input_sizes_length` selects the
// 4-element full-shape form or the 2-element spatial-only form.
Output SimpleConv2DBackpropInput(tensorflow::Scope* s, int input_size,
                                 int filter_size, const string& padding,
                                 bool dilated, const int input_sizes_length) {
  int batch_size = 128;
  int input_height = input_size;
  int input_width = input_size;
  int input_depth = 3;
  int filter_count = 2;
  int stride = 1;
  TensorShape input_sizes_shape({input_sizes_length});
  Tensor input_data(DT_INT32, input_sizes_shape);
  if (input_sizes_length == 4) {
    test::FillValues<int>(
        &input_data, DIMS(batch_size, input_height, input_width, input_depth));
  } else {
    test::FillValues<int>(&input_data, {input_height, input_width});
  }
  Output input_sizes =
      ops::Const(s->WithOpName("InputSizes"), Input::Initializer(input_data));
  TensorShape filter_shape(
      {filter_size, filter_size, input_depth, filter_count});
  Output filter =
      ops::Variable(s->WithOpName("Filter"), filter_shape, DT_FLOAT);
  int output_height = input_height;
  int output_width = input_width;
  TensorShape output_shape(
      DIMS(batch_size, output_height, output_width, filter_count));
  Tensor output_data(DT_FLOAT, output_shape);
  test::FillIota<float>(&output_data, 1.0f);
  Output output =
      ops::Const(s->WithOpName("Output"), Input::Initializer(output_data));
  Output conv_backprop_input;
  // Route input_sizes through an Identity so the op's first fanin is not a
  // Const node.
  Output input_sizes_i =
      ops::Identity(s->WithOpName("InputSizesIdentity"), input_sizes);
  ops::Conv2DBackpropInput::Attrs attrs;
  attrs = attrs.DataFormat(SRC_DATA_FORMAT);
  if (dilated) {
    attrs = attrs.Dilations(DIMS(1, 2, 2, 1));
  }
  conv_backprop_input = ops::Conv2DBackpropInput(
      s->WithOpName("Conv2DBackpropInput"), input_sizes_i, filter, output,
      DIMS(1, stride, stride, 1), padding, attrs);
  return conv_backprop_input;
}
// Builds Input -> Filter -> Conv3D (data format SRC_DATA_FORMAT_5D) in `s`,
// placing the Conv3D on `device`, and returns the Conv3D output.
template <typename T = float>
Output SimpleConv3D(tensorflow::Scope* s, int input_size, int filter_size,
                    const string& padding, const string& device) {
  int batch_size = 8;
  int input_height = input_size;
  int input_width = input_size;
  int input_depth = 4;
  int input_channel = 3;
  int filter_count = 6;
  int stride = 1;
  TensorShape input_shape(DIMS_5D(batch_size, input_depth, input_height,
                                  input_width, input_channel));
  Tensor input_data(DataTypeToEnum<T>::value, input_shape);
  test::FillIota<T>(&input_data, static_cast<T>(1));
  Output input =
      ops::Const(s->WithOpName("Input"), Input::Initializer(input_data));
  TensorShape filter_shape(
      {filter_size, filter_size, filter_size, input_channel, filter_count});
  Tensor filter_data(DataTypeToEnum<T>::value, filter_shape);
  test::FillIota<T>(&filter_data, static_cast<T>(1));
  Output filter =
      ops::Const(s->WithOpName("Filter"), Input::Initializer(filter_data));
  Output conv =
      ops::Conv3D(s->WithOpName("Conv3D").WithDevice(device), input, filter,
                  DIMS_5D(1, stride, stride, stride, 1), padding,
                  ops::Conv3D::Attrs().DataFormat(SRC_DATA_FORMAT_5D));
  return conv;
}
// Test fixture: provisions a SingleMachine cluster when a real GPU exists,
// otherwise a VirtualCluster with one CPU (plus a fake compute-capability-6
// GPU in CUDA/ROCm builds so GPU code paths are still exercised).
class GenericLayoutOptimizerTest : public GrapplerTest {
 protected:
  void SetUp() override {
    bool gpu_available = GetNumAvailableGPUs() > 0;
    if (gpu_available) {
      virtual_cluster_ =
          std::make_unique<SingleMachine>(10, 1, 1);
    } else {
      DeviceProperties cpu_device;
      cpu_device.set_type("CPU");
      cpu_device.set_frequency(1000);
      cpu_device.set_num_cores(4);
      cpu_device.set_bandwidth(32);
      cpu_device.set_l1_cache_size(32 * 1024);
      cpu_device.set_l2_cache_size(256 * 1024);
      cpu_device.set_l3_cache_size(4 * 1024 * 1024);
      cpu_device.set_memory_size(1024 * 1024);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
      DeviceProperties gpu_device;
      gpu_device.set_type("GPU");
      gpu_device.mutable_environment()->insert({"architecture", "6"});
      virtual_cluster_ =
          absl::WrapUnique(new VirtualCluster({{"/CPU:0", cpu_device},
                                               { "/GPU:1",
                                                 gpu_device }}));
#else
      virtual_cluster_ =
          absl::WrapUnique(new VirtualCluster({{"/CPU:0", cpu_device}}));
#endif
    }
    TF_ASSERT_OK(virtual_cluster_->Provision());
  }
  void TearDown() override {
    TF_ASSERT_OK(virtual_cluster_->Shutdown());
    // Restore the global TF32 setting that some tests disable.
    tsl::enable_tensor_float_32_execution(true);
  }
  std::unique_ptr<Cluster> virtual_cluster_;
};
// Asserts that `node`'s regular fanin at index `port` is the tensor
// (`fanin_name`, `fanin_port`).
void VerifyRegularFaninMatch(const utils::NodeView* node, int port,
                             absl::string_view fanin_name, int fanin_port) {
  // GetRegularFanin(port) is only valid for port < NumRegularFanins(), so
  // the count must be strictly greater than the index; ASSERT_GE would let
  // the equal case fall through to an out-of-range access.
  ASSERT_GT(node->NumRegularFanins(), port);
  const auto& fanin = node->GetRegularFanin(port);
  EXPECT_EQ(fanin.node_view()->GetName(), fanin_name);
  EXPECT_EQ(fanin.index(), fanin_port);
}
// Expects that some regular fanout of `node`'s output `port` is the input
// (`fanout_name`, `fanout_port`).
void VerifyRegularFanoutMatch(const utils::NodeView* node, int port,
                              absl::string_view fanout_name, int fanout_port) {
  bool found = false;
  for (const auto& regular_fanout : node->GetRegularFanout(port)) {
    if (regular_fanout.node_view()->GetName() == fanout_name &&
        regular_fanout.index() == fanout_port) {
      found = true;
      break;  // No need to scan remaining fanouts once a match is found.
    }
  }
  EXPECT_TRUE(found);
}
// Asserts that `node` carries a "data_format" attribute equal to
// `attr_value`.
void VerifyDataFormatAttributeMatch(const utils::NodeView* node,
                                    absl::string_view attr_value) {
  const auto* data_format_attr = node->GetAttr("data_format");
  ASSERT_NE(data_format_attr, nullptr);
  EXPECT_EQ(data_format_attr->s(), attr_value);
}
// A device-less Conv2D must keep its source data format (no conversion),
// and its graph connectivity must be preserved.
TEST_F(GenericLayoutOptimizerTest, OptimizeSimpleConv2DGraph) {
  Scope scope = Scope::NewRootScope();
  auto conv2d = SimpleConv2D(&scope, 4, 2, "VALID", "");
  auto identity = Identity(scope.WithOpName("Output"), conv2d);
  GrapplerItem item;
  TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv2d_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv2d_node, nullptr);
  ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(conv2d_node, 1, "Filter", 0);
  VerifyDataFormatAttributeMatch(conv2d_node, SRC_DATA_FORMAT);
  auto* output_node = graph_view.GetNode("Output");
  ASSERT_NE(output_node, nullptr);
  ASSERT_EQ(output_node->NumRegularFanins(), 1);
}
// A node listed as a fetch output must not have its data format rewritten.
TEST_F(GenericLayoutOptimizerTest, PreserveFetch) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto conv = SimpleConv2D(&s, 4, 2, "VALID", "");
  auto i = ops::Identity(s.WithOpName("i"), conv);
  GrapplerItem item;
  item.fetch.push_back("Conv2D");
  TF_ASSERT_OK(s.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv_node, nullptr);
  VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
// With no device assigned, the convolution keeps its source data format.
TEST_F(GenericLayoutOptimizerTest, EmptyDevice) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto conv = SimpleConv2D(&s, 4, 2, "VALID", "");
  Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
  GrapplerItem item;
  TF_ASSERT_OK(s.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv_node, nullptr);
  VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
// A fp32 Conv2D explicitly placed on GPU (with TF32 disabled, so the
// dtype-based swap heuristic does not trigger) is converted to NCHW.
TEST_F(GenericLayoutOptimizerTest, GPUDevice) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
  GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
  tsl::enable_tensor_float_32_execution(false);
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto conv =
      SimpleConv2D(&s, 4, 2, "VALID", "/job:w/replica:0/task:0/device:GPU:0");
  Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
  GrapplerItem item;
  TF_ASSERT_OK(s.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv_node, nullptr);
  VerifyDataFormatAttributeMatch(conv_node, "NCHW");
}
// A Conv2D placed on CPU ends up in NHWC: on GPU builds it is simply not
// converted (stays NHWC); on CPU builds the NCHW->NHWC conversion applies.
TEST_F(GenericLayoutOptimizerTest, CPUDevice) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto conv = SimpleConv2D(&s, 4, 2, "VALID", "/CPU:0");
  Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
  GrapplerItem item;
  TF_ASSERT_OK(s.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv_node, nullptr);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
  VerifyDataFormatAttributeMatch(conv_node, "NHWC");
#else
  VerifyDataFormatAttributeMatch(conv_node, DST_DATA_FORMAT);
#endif
}
// Integer (int32) convolutions must be left in their source data format.
TEST_F(GenericLayoutOptimizerTest, NoOptimizeIntegerConvolution) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto conv = SimpleConv2D<int32>(&s, 4, 2, "VALID", "");
  Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
  GrapplerItem item;
  TF_ASSERT_OK(s.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv_node, nullptr);
  VerifyDataFormatAttributeMatch(conv_node, SRC_DATA_FORMAT);
}
// The optimizer must preserve edges even when node order in the GraphDef is
// not topological: i1 and i2 are deliberately swapped before optimizing.
TEST_F(GenericLayoutOptimizerTest, Connectivity) {
  Scope scope = Scope::NewRootScope();
  auto conv = SimpleConv2D(&scope, 4, 2, "VALID",
                           absl::StrCat("/device:", DEVICE, ":0"));
  auto i1 = ops::Identity(scope.WithOpName("i1"), conv);
  auto i2 = ops::Identity(scope.WithOpName("i2"), i1);
  auto i3 = ops::Identity(scope.WithOpName("i3"), i2);
  GrapplerItem item;
  TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
  Status status;
  utils::GraphView graph_view_original(&item.graph, &status);
  const int i1_index = graph_view_original.GetNode("i1")->node_index();
  const int i2_index = graph_view_original.GetNode("i2")->node_index();
  // Scramble the node order so i2 appears before i1 in the GraphDef.
  item.graph.mutable_node()->SwapElements(i1_index, i2_index);
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* node_i2_output = graph_view.GetNode("i2");
  ASSERT_NE(node_i2_output, nullptr);
  ASSERT_EQ(node_i2_output->NumRegularFanins(), 1);
  VerifyRegularFaninMatch(node_i2_output, 0, "i1", 0);
}
// A Conv2DBackpropInput whose input_sizes arrives through an Identity (not a
// Const) keeps that fanin intact, for both the 2- and 4-element size forms.
TEST_F(GenericLayoutOptimizerTest, Conv2DBackpropInputNonConstInputSizes) {
  for (const int input_sizes_length : {2, 4}) {
    Scope s = Scope::NewRootScope();
    auto conv = SimpleConv2DBackpropInput(&s, 7, 2, "SAME", false,
                                          input_sizes_length);
    Output fetch = ops::Identity(s.WithOpName("Fetch"), {conv});
    GrapplerItem item;
    TF_ASSERT_OK(s.ToGraphDef(&item.graph));
    GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
    GraphDef output;
    TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
    Status status;
    utils::GraphView graph_view(&output, &status);
    TF_ASSERT_OK(status);
    auto* conv2d_backprop_node = graph_view.GetNode("Conv2DBackpropInput");
    ASSERT_NE(conv2d_backprop_node, nullptr);
    ASSERT_EQ(conv2d_backprop_node->NumRegularFanins(), 3);
    VerifyRegularFaninMatch(conv2d_backprop_node, 0, "InputSizesIdentity", 0);
  }
}
// Shape/Fill consumers of a converted Conv2D should be kept in the converted
// layout (the intermediate DataFormatVecPermute pair collapses), with a
// single layout-restoring Transpose inserted after Fill for the fetch.
TEST_F(GenericLayoutOptimizerTest, Conv2DDataFormatVecPermuteCollapse) {
  tsl::enable_tensor_float_32_execution(false);
  Scope scope =
      Scope::NewRootScope().WithDevice(absl::StrCat("/device:", DEVICE, ":0"));
  auto conv = SimpleConv2D(&scope, 4, 2, "VALID",
                           absl::StrCat("/device:", DEVICE, ":0"));
  auto shape = ops::Shape(scope.WithOpName("shape"), conv);
  auto value = ops::Const(scope.WithOpName("value"), 0, {});
  auto fill = ops::Fill(scope.WithOpName("fill"), shape, value);
  auto i = ops::Identity(scope.WithOpName("i"), fill);
  GrapplerItem item;
  TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  // Conv2D now reads through the inserted src->dst input Transpose.
  auto* conv2d_node = graph_view.GetNode("Conv2D");
  ASSERT_NE(conv2d_node, nullptr);
  ASSERT_EQ(conv2d_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(
      conv2d_node, 0,
      absl::StrCat("Conv2D-0-Transpose", SRC_DATA_FORMAT, "To", DST_DATA_FORMAT,
                   "-LayoutOptimizer"),
      0);
  // shape and fill consume Conv2D directly (permute nodes collapsed).
  auto* shape_node = graph_view.GetNode("shape");
  ASSERT_NE(shape_node, nullptr);
  ASSERT_EQ(shape_node->NumRegularFanins(), 1);
  VerifyRegularFaninMatch(shape_node, 0, conv2d_node->GetName(), 0);
  auto* fill_node = graph_view.GetNode("fill");
  ASSERT_NE(fill_node, nullptr);
  ASSERT_EQ(fill_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(fill_node, 0, shape_node->GetName(), 0);
  VerifyRegularFanoutMatch(
      fill_node, 0,
      absl::StrCat("fill-0-0-Transpose", DST_DATA_FORMAT, "To", SRC_DATA_FORMAT,
                   "-LayoutOptimizer"),
      0);
  // The fetch sees the dst->src output Transpose after fill.
  auto* graph_output = graph_view.GetNode("i");
  ASSERT_NE(graph_output, nullptr);
  ASSERT_EQ(graph_output->NumRegularFanins(), 1);
  VerifyRegularFaninMatch(
      graph_output, 0,
      absl::StrCat("fill-0-0-Transpose", DST_DATA_FORMAT, "To", SRC_DATA_FORMAT,
                   "-LayoutOptimizer"),
      0);
}
// Verifies that user-authored (i.e. not optimizer-inserted) pairs of mutually
// cancelling Transpose nodes are left in place, while the optimizer still
// inserts its own layout-conversion Transposes around the BiasAdd node.
TEST_F(GenericLayoutOptimizerTest, DoNotPruneNonAddedCancellableTransposes) {
  GrapplerItem item;
  {
    Scope scope = Scope::NewRootScope().WithDevice(
        absl::StrCat("/device:", DEVICE, ":0"));
    auto input = ops::RandomUniform(scope.WithOpName("input"),
                                    DIMS(kBatchSize, kHeight, kWidth, kDepthIn),
                                    DT_FLOAT);
    // User-authored round trip: SRC->DST permutation immediately undone by the
    // DST->SRC permutation. These must survive optimization untouched.
    auto input_in_transpose =
        ops::Transpose(scope.WithOpName("input_in_transpose"), input,
                       ops::Const(scope, PERMUTATION_SRC_TO_DST, {4}));
    auto input_out_transpose = ops::Transpose(
        scope.WithOpName("input_out_transpose"), input_in_transpose,
        ops::Const(scope, PERMUTATION_DST_TO_SRC, {4}));
    Tensor bias_data(DT_FLOAT, TensorShape({kDepthIn}));
    test::FillIota<float>(&bias_data, 1.0f);
    auto bias_add = ops::BiasAdd(
        scope.WithOpName("bias_add"), input_out_transpose, bias_data,
        ops::BiasAdd::Attrs().DataFormat(SRC_DATA_FORMAT));
    auto output_in_transpose =
        ops::Transpose(scope.WithOpName("output_in_transpose"), bias_add,
                       ops::Const(scope, PERMUTATION_SRC_TO_DST, {4}));
    auto output_out_transpose = ops::Transpose(
        scope.WithOpName("output_out_transpose"), output_in_transpose,
        ops::Const(scope, PERMUTATION_DST_TO_SRC, {4}));
    auto output =
        ops::Identity(scope.WithOpName("output"), output_out_transpose);
    TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
  }
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  // The user's four Transpose nodes must all still be present and wired in
  // their original order.
  auto* input_node = graph_view.GetNode("input");
  ASSERT_NE(input_node, nullptr);
  auto* input_in_transpose_node = graph_view.GetNode("input_in_transpose");
  ASSERT_NE(input_in_transpose_node, nullptr);
  ASSERT_EQ(input_in_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(input_in_transpose_node, 0, input_node->GetName(), 0);
  auto* input_out_transpose_node = graph_view.GetNode("input_out_transpose");
  ASSERT_NE(input_out_transpose_node, nullptr);
  ASSERT_EQ(input_out_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(input_out_transpose_node, 0,
                          input_in_transpose_node->GetName(), 0);
  // The optimizer still inserts its own conversion Transposes directly around
  // the BiasAdd node.
  auto* bias_add_in_transpose_node = graph_view.GetNode(
      absl::StrCat("bias_add-0-Transpose", SRC_DATA_FORMAT, "To",
                   DST_DATA_FORMAT, "-LayoutOptimizer"));
  ASSERT_NE(bias_add_in_transpose_node, nullptr);
  ASSERT_EQ(bias_add_in_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(bias_add_in_transpose_node, 0,
                          input_out_transpose_node->GetName(), 0);
  auto* bias_add_node = graph_view.GetNode("bias_add");
  ASSERT_NE(bias_add_node, nullptr);
  ASSERT_EQ(bias_add_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(bias_add_node, 0,
                          bias_add_in_transpose_node->GetName(), 0);
  auto* bias_add_out_transpose_node = graph_view.GetNode(
      absl::StrCat("bias_add-0-0-Transpose", DST_DATA_FORMAT, "To",
                   SRC_DATA_FORMAT, "-LayoutOptimizer"));
  ASSERT_NE(bias_add_out_transpose_node, nullptr);
  ASSERT_EQ(bias_add_out_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(bias_add_out_transpose_node, 0,
                          bias_add_node->GetName(), 0);
  auto* output_in_transpose_node = graph_view.GetNode("output_in_transpose");
  ASSERT_NE(output_in_transpose_node, nullptr);
  ASSERT_EQ(output_in_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(output_in_transpose_node, 0,
                          bias_add_out_transpose_node->GetName(), 0);
  auto* output_out_transpose_node = graph_view.GetNode("output_out_transpose");
  ASSERT_NE(output_out_transpose_node, nullptr);
  ASSERT_EQ(output_out_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(output_out_transpose_node, 0,
                          output_in_transpose_node->GetName(), 0);
  auto* output_node = graph_view.GetNode("output");
  ASSERT_NE(output_node, nullptr);
  ASSERT_EQ(output_node->NumRegularFanins(), 1);
  VerifyRegularFaninMatch(output_node, 0, output_out_transpose_node->GetName(),
                          0);
}
// Verifies that Transpose nodes surrounding a Pad can be cancelled: the
// optimizer rewrites both Transposes into Identity nodes and permutes the Pad's
// `paddings` constant instead. Also checks numerical equivalence by evaluating
// both the original and the optimized graphs.
TEST_F(GenericLayoutOptimizerTest, CancelTransposeAroundPad) {
  using test::function::NDef;
  GenericLayoutOptimizer optimizer(
      RewriterConfig::AGGRESSIVE,
      RewriterConfig::NCHW_TO_NHWC );
  const Tensor kPermuteNhwcToNchw = test::AsTensor<int32>({0, 3, 1, 2});
  const Tensor kPermuteNchwToNhwc = test::AsTensor<int32>({0, 2, 3, 1});
  const Tensor kPad = test::AsTensor<int32>({1, 2, 3, 4, 5, 6, 7, 8}, {4, 2});
  GrapplerItem item;
  item.graph = test::function::GDef({
      NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
      NDef("paddings", "Const", {}, {{"dtype", DT_INT32}, {"value", kPad}}),
      NDef("perm_nhwc_to_nchw", "Const", {},
           {{"dtype", DT_INT32}, {"value", kPermuteNhwcToNchw}}),
      NDef("perm_nchw_to_nhwc", "Const", {},
           {{"dtype", DT_INT32}, {"value", kPermuteNchwToNhwc}}),
      NDef("transpose_0", "Transpose", {"x", "perm_nhwc_to_nchw"},
           {{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
      NDef("pad", "Pad", {"transpose_0", "paddings"},
           {{"T", DT_FLOAT}, {"Tpaddings", DT_INT32}}),
      NDef("transpose_1", "Transpose", {"pad", "perm_nchw_to_nhwc"},
           {{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
      NDef("transpose_2", "Transpose", {"pad", "perm_nchw_to_nhwc"},
           {{"T", DT_FLOAT}, {"Tperm", DT_INT32}}),
  });
  GraphDef output;
  TF_EXPECT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  // The NHWC->NCHW permutation is folded directly into the paddings constant.
  const Tensor kPermutedPaddings =
      test::AsTensor<int32>({1, 2, 5, 6, 7, 8, 3, 4}, {4, 2});
  GraphDef expected = test::function::GDef({
      NDef("x", "Placeholder", {}, {{"dtype", DT_FLOAT}}),
      NDef("paddings", "Const", {},
           {{"dtype", DT_INT32}, {"value", kPermutedPaddings}}),
      NDef("perm_nhwc_to_nchw", "Const", {},
           {{"dtype", DT_INT32}, {"value", kPermuteNhwcToNchw}}),
      NDef("perm_nchw_to_nhwc", "Const", {},
           {{"dtype", DT_INT32}, {"value", kPermuteNchwToNhwc}}),
      NDef("transpose_0", "Identity", {"x"}, {{"T", DT_FLOAT}}),
      NDef("pad", "Pad", {"transpose_0", "paddings"},
           {{"T", DT_FLOAT}, {"Tpaddings", DT_INT32}}),
      NDef("transpose_1", "Identity", {"pad"}, {{"T", DT_FLOAT}}),
      NDef("transpose_2", "Identity", {"pad"}, {{"T", DT_FLOAT}}),
  });
  CompareGraphs(expected, output);
  // Evaluate both graphs on random input and require identical outputs.
  Tensor x = GenerateRandomTensor<DT_FLOAT>({2, 6, 6, 8});
  item.fetch = {"transpose_1", "transpose_2"};
  item.feed.emplace_back("x", x);
  auto tensors_expected = EvaluateFetchNodes(item);
  GrapplerItem optimized = item.WithGraph(std::move(output));
  auto tensors = EvaluateFetchNodes(optimized);
  ASSERT_EQ(tensors.size(), 2);
  ASSERT_EQ(tensors_expected.size(), 2);
  test::ExpectTensorEqual<float>(tensors_expected[0], tensors[0]);
  test::ExpectTensorEqual<float>(tensors_expected[1], tensors[1]);
}
// Verifies that the `_output_shapes` attribute of a feed (_Arg) node is
// preserved verbatim through optimization.
TEST_F(GenericLayoutOptimizerTest, PreserveInputShapes) {
  using test::function::NDef;
  GenericLayoutOptimizer optimizer(RewriterConfig::AGGRESSIVE);
  // Build an _output_shapes attr holding a single unknown-rank-1 shape (-1).
  AttrValue output_shapes;
  auto* shape = output_shapes.mutable_list()->add_shape();
  shape->add_dim()->set_size(-1);
  GrapplerItem item;
  item.graph = test::function::GDef({NDef(
      "x", "_Arg", {},
      {{"T", DT_FLOAT}, {"index", 0}, {"_output_shapes", output_shapes}})});
  item.feed.emplace_back("x", Tensor(DT_FLOAT));
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* arg = graph_view.GetNode("x");
  ASSERT_NE(arg, nullptr);
  EXPECT_TRUE(arg->HasAttr("_output_shapes"));
  EXPECT_EQ(arg->GetAttr("_output_shapes")->DebugString(),
            output_shapes.DebugString());
}
// Verifies Conv3D layout handling on CPU: on GPU builds the data format is
// left at the 5-D source format, while on CPU builds the optimizer inserts
// 5-D conversion Transposes around the Conv3D.
TEST_F(GenericLayoutOptimizerTest, OptimizeSimpleConv3DGraph_CPU) {
  Scope scope = Scope::NewRootScope();
  auto conv3d = SimpleConv3D(&scope, 32, 1, "VALID", "/CPU:0");
  auto identity = Identity(scope.WithOpName("Output"), conv3d);
  GrapplerItem item;
  TF_ASSERT_OK(scope.ToGraphDef(&item.graph));
  GenericLayoutOptimizer optimizer(REWRITER_CONFIG);
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(virtual_cluster_.get(), item, &output));
  Status status;
  utils::GraphView graph_view(&output, &status);
  TF_ASSERT_OK(status);
  auto* conv3d_node = graph_view.GetNode("Conv3D");
  ASSERT_NE(conv3d_node, nullptr);
  ASSERT_EQ(conv3d_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(conv3d_node, 1, "Filter", 0);
  auto* output_node = graph_view.GetNode("Output");
  ASSERT_NE(output_node, nullptr);
  ASSERT_EQ(output_node->NumRegularFanins(), 1);
#if (GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
  // GPU builds: no CPU-side layout rewrite expected.
  VerifyDataFormatAttributeMatch(conv3d_node, SRC_DATA_FORMAT_5D)
#else
  // CPU builds: a 5-D src->dst Transpose feeds the Conv3D and a dst->src
  // Transpose restores the original layout for the graph output.
  auto* input_transpose_node = graph_view.GetNode(
      absl::StrCat("Conv3D-0-Transpose", SRC_DATA_FORMAT_5D, "To",
                   DST_DATA_FORMAT_5D, "-LayoutOptimizer"));
  ASSERT_NE(input_transpose_node, nullptr);
  ASSERT_EQ(input_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(input_transpose_node, 0, "Input", 0);
  VerifyRegularFaninMatch(conv3d_node, 0, input_transpose_node->GetName(), 0);
  VerifyDataFormatAttributeMatch(conv3d_node, DST_DATA_FORMAT_5D);
  auto* output_transpose_node = graph_view.GetNode(
      absl::StrCat("Conv3D-0-0-Transpose", DST_DATA_FORMAT_5D, "To",
                   SRC_DATA_FORMAT_5D, "-LayoutOptimizer"));
  ASSERT_NE(output_transpose_node, nullptr);
  ASSERT_EQ(output_transpose_node->NumRegularFanins(), 2);
  VerifyRegularFaninMatch(output_transpose_node, 0, conv3d_node->GetName(), 0);
  VerifyRegularFaninMatch(output_node, 0, output_transpose_node->GetName(), 0);
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/generic_layout_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3f6d82ef-f7d8-4ac5-a96a-0301c89d3a59 | cpp | google/cel-cpp | math_ext | extensions/math_ext.cc | extensions/math_ext_test.cc | #include "extensions/math_ext.h"
#include <cmath>
#include <cstdint>
#include <limits>
#include "absl/base/casts.h"
#include "absl/base/optimization.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/casting.h"
#include "common/value.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_number.h"
#include "eval/public/cel_options.h"
#include "internal/status_macros.h"
#include "runtime/function_adapter.h"
#include "runtime/function_registry.h"
#include "runtime/runtime_options.h"
namespace cel::extensions {
namespace {
using ::google::api::expr::runtime::CelFunctionRegistry;
using ::google::api::expr::runtime::CelNumber;
using ::google::api::expr::runtime::InterpreterOptions;
static constexpr char kMathMin[] = "math.@min";
static constexpr char kMathMax[] = "math.@max";
// Visitor that boxes each CelNumber alternative into the matching cel::Value
// subtype (uint -> UintValue, int -> IntValue, double -> DoubleValue).
struct ToValueVisitor {
  Value operator()(uint64_t value) const { return UintValue{value}; }
  Value operator()(int64_t value) const { return IntValue{value}; }
  Value operator()(double value) const { return DoubleValue{value}; }
};
// Converts a CelNumber into the corresponding cel::Value representation.
Value NumberToValue(CelNumber number) {
  ToValueVisitor visitor;
  return number.visit<Value>(visitor);
}
// Extracts a CelNumber from `value` if it holds an int, uint, or double.
// `function` is only used to build the error message when the value is not
// numeric.
absl::StatusOr<CelNumber> ValueToNumber(const Value& value,
                                        absl::string_view function) {
  if (auto as_int = As<IntValue>(value); as_int) {
    return CelNumber::FromInt64(as_int->NativeValue());
  }
  if (auto as_uint = As<UintValue>(value); as_uint) {
    return CelNumber::FromUint64(as_uint->NativeValue());
  }
  if (auto as_double = As<DoubleValue>(value); as_double) {
    return CelNumber::FromDouble(as_double->NativeValue());
  }
  return absl::InvalidArgumentError(
      absl::StrCat(function, " arguments must be numeric"));
}
// Returns the smaller of the two numbers; ties resolve to the first argument.
CelNumber MinNumber(CelNumber v1, CelNumber v2) {
  return v2 < v1 ? v2 : v1;
}
// Boxes the minimum of the two numbers into a cel::Value.
Value MinValue(CelNumber v1, CelNumber v2) {
  const CelNumber smaller = MinNumber(v1, v2);
  return NumberToValue(smaller);
}
// Single-argument math.@min / math.@max: the result is the argument itself,
// normalized through CelNumber into a cel::Value.
template <typename T>
Value Identity(ValueManager&, T v1) {
  CelNumber number(v1);
  return NumberToValue(number);
}
// Two-argument math.@min over any pair of numeric native types.
template <typename T, typename U>
Value Min(ValueManager&, T v1, U v2) {
  return NumberToValue(MinNumber(CelNumber(v1), CelNumber(v2)));
}
// Returns the smallest element of `values` as a Value. Yields an ErrorValue
// (not a non-OK Status) when the list is empty or contains a non-numeric
// element, so the error propagates as a CEL runtime error.
absl::StatusOr<Value> MinList(ValueManager& value_manager,
                              const ListValue& values) {
  CEL_ASSIGN_OR_RETURN(auto iterator, values.NewIterator(value_manager));
  if (!iterator->HasNext()) {
    return ErrorValue(
        absl::InvalidArgumentError("math.@min argument must not be empty"));
  }
  // Seed the running minimum with the first element.
  Value value;
  CEL_RETURN_IF_ERROR(iterator->Next(value_manager, value));
  absl::StatusOr<CelNumber> current = ValueToNumber(value, kMathMin);
  if (!current.ok()) {
    return ErrorValue{current.status()};
  }
  CelNumber min = *current;
  while (iterator->HasNext()) {
    CEL_RETURN_IF_ERROR(iterator->Next(value_manager, value));
    absl::StatusOr<CelNumber> other = ValueToNumber(value, kMathMin);
    if (!other.ok()) {
      return ErrorValue{other.status()};
    }
    min = MinNumber(min, *other);
  }
  return NumberToValue(min);
}
// Returns the larger of the two numbers; ties resolve to the first argument.
CelNumber MaxNumber(CelNumber v1, CelNumber v2) {
  return v2 > v1 ? v2 : v1;
}
// Boxes the maximum of the two numbers into a cel::Value.
Value MaxValue(CelNumber v1, CelNumber v2) {
  const CelNumber larger = MaxNumber(v1, v2);
  return NumberToValue(larger);
}
// Two-argument math.@max over any pair of numeric native types.
template <typename T, typename U>
Value Max(ValueManager&, T v1, U v2) {
  return NumberToValue(MaxNumber(CelNumber(v1), CelNumber(v2)));
}
// Returns the greatest element of `values` as a Value. Yields an ErrorValue
// (not a non-OK Status) when the list is empty or contains a non-numeric
// element, so the error propagates as a CEL runtime error.
//
// Fix: the running accumulator was previously named `min` even though it
// tracks the maximum; renamed to `max` for clarity and consistency with
// MinList.
absl::StatusOr<Value> MaxList(ValueManager& value_manager,
                              const ListValue& values) {
  CEL_ASSIGN_OR_RETURN(auto iterator, values.NewIterator(value_manager));
  if (!iterator->HasNext()) {
    return ErrorValue(
        absl::InvalidArgumentError("math.@max argument must not be empty"));
  }
  // Seed the running maximum with the first element.
  Value value;
  CEL_RETURN_IF_ERROR(iterator->Next(value_manager, value));
  absl::StatusOr<CelNumber> current = ValueToNumber(value, kMathMax);
  if (!current.ok()) {
    return ErrorValue{current.status()};
  }
  CelNumber max = *current;
  while (iterator->HasNext()) {
    CEL_RETURN_IF_ERROR(iterator->Next(value_manager, value));
    absl::StatusOr<CelNumber> other = ValueToNumber(value, kMathMax);
    if (!other.ok()) {
      return ErrorValue{other.status()};
    }
    max = MaxNumber(max, *other);
  }
  return NumberToValue(max);
}
// Registers both argument orders (T, U) and (U, T) of the two-argument
// math.@min overload for a pair of distinct numeric native types.
template <typename T, typename U>
absl::Status RegisterCrossNumericMin(FunctionRegistry& registry) {
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, T, U>::CreateDescriptor(
          kMathMin, false),
      BinaryFunctionAdapter<Value, T, U>::WrapFunction(Min<T, U>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, U, T>::CreateDescriptor(
          kMathMin, false),
      BinaryFunctionAdapter<Value, U, T>::WrapFunction(Min<U, T>)));
  return absl::OkStatus();
}
// Registers both argument orders (T, U) and (U, T) of the two-argument
// math.@max overload for a pair of distinct numeric native types.
template <typename T, typename U>
absl::Status RegisterCrossNumericMax(FunctionRegistry& registry) {
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, T, U>::CreateDescriptor(
          kMathMax, false),
      BinaryFunctionAdapter<Value, T, U>::WrapFunction(Max<T, U>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, U, T>::CreateDescriptor(
          kMathMax, false),
      BinaryFunctionAdapter<Value, U, T>::WrapFunction(Max<U, T>)));
  return absl::OkStatus();
}
// Thin wrappers over <cmath> for the double-typed math extension functions.
// The unused ValueManager& parameter is required by the function adapters.
double CeilDouble(ValueManager&, double value) { return std::ceil(value); }
double FloorDouble(ValueManager&, double value) { return std::floor(value); }
double RoundDouble(ValueManager&, double value) { return std::round(value); }
double TruncDouble(ValueManager&, double value) { return std::trunc(value); }
bool IsInfDouble(ValueManager&, double value) { return std::isinf(value); }
bool IsNaNDouble(ValueManager&, double value) { return std::isnan(value); }
bool IsFiniteDouble(ValueManager&, double value) {
  return std::isfinite(value);
}
double AbsDouble(ValueManager&, double value) { return std::fabs(value); }
// Absolute value for int64. Negating INT64_MIN is not representable, so that
// single input produces an ErrorValue instead of overflowing (signed overflow
// would be undefined behavior).
Value AbsInt(ValueManager& value_manager, int64_t value) {
  constexpr int64_t kMinInt64 = std::numeric_limits<int64_t>::min();
  if (ABSL_PREDICT_FALSE(value == kMinInt64)) {
    return ErrorValue(absl::InvalidArgumentError("integer overflow"));
  }
  if (value < 0) {
    return IntValue(-value);
  }
  return IntValue(value);
}
// abs(uint) is the identity: unsigned values are already non-negative.
uint64_t AbsUint(ValueManager&, uint64_t value) { return value; }
// sign(double): NaN propagates, zero maps to 0.0, otherwise +/-1.0.
// signbit distinguishes -x from +x including signed zeros and infinities.
double SignDouble(ValueManager&, double value) {
  if (std::isnan(value)) {
    return value;
  }
  if (value == 0.0) {
    return 0.0;
  }
  return std::signbit(value) ? -1.0 : 1.0;
}
// sign(int): -1, 0, or 1.
int64_t SignInt(ValueManager&, int64_t value) {
  return value < 0 ? -1 : value > 0 ? 1 : 0;
}
// sign(uint): 0 or 1 (never negative).
uint64_t SignUint(ValueManager&, uint64_t value) { return value == 0 ? 0 : 1; }
// Bitwise AND / OR / XOR / NOT, one overload per signedness. The unused
// ValueManager& parameter is required by the function adapters.
int64_t BitAndInt(ValueManager&, int64_t lhs, int64_t rhs) { return lhs & rhs; }
uint64_t BitAndUint(ValueManager&, uint64_t lhs, uint64_t rhs) {
  return lhs & rhs;
}
int64_t BitOrInt(ValueManager&, int64_t lhs, int64_t rhs) { return lhs | rhs; }
uint64_t BitOrUint(ValueManager&, uint64_t lhs, uint64_t rhs) {
  return lhs | rhs;
}
int64_t BitXorInt(ValueManager&, int64_t lhs, int64_t rhs) { return lhs ^ rhs; }
uint64_t BitXorUint(ValueManager&, uint64_t lhs, uint64_t rhs) {
  return lhs ^ rhs;
}
int64_t BitNotInt(ValueManager&, int64_t value) { return ~value; }
uint64_t BitNotUint(ValueManager&, uint64_t value) { return ~value; }
// math.bitShiftLeft for int64: negative shifts are an error; shifts of 64 or
// more bits yield 0 (rather than relying on hardware shift-wrap behavior).
// NOTE(review): left-shifting a negative lhs is UB before C++20 — presumably
// the build mode makes this well-defined; confirm toolchain settings.
Value BitShiftLeftInt(ValueManager&, int64_t lhs, int64_t rhs) {
  if (ABSL_PREDICT_FALSE(rhs < 0)) {
    return ErrorValue(absl::InvalidArgumentError(
        absl::StrCat("math.bitShiftLeft() invalid negative shift: ", rhs)));
  }
  if (rhs > 63) {
    return IntValue(0);
  }
  return IntValue(lhs << static_cast<int>(rhs));
}
// math.bitShiftLeft for uint64: negative shifts are an error; shifts of 64 or
// more bits yield 0u (rather than relying on hardware shift-wrap behavior).
Value BitShiftLeftUint(ValueManager&, uint64_t lhs, int64_t rhs) {
  if (ABSL_PREDICT_FALSE(rhs < 0)) {
    return ErrorValue(absl::InvalidArgumentError(
        absl::StrCat("math.bitShiftLeft() invalid negative shift: ", rhs)));
  }
  if (rhs <= 63) {
    return UintValue(lhs << static_cast<int>(rhs));
  }
  return UintValue(0);
}
// math.bitShiftRight for int64: negative shifts are an error; shifts of 64 or
// more bits yield 0. The round-trip through uint64 makes this a *logical*
// shift (zero-fill) rather than an implementation-defined arithmetic shift.
Value BitShiftRightInt(ValueManager&, int64_t lhs, int64_t rhs) {
  if (ABSL_PREDICT_FALSE(rhs < 0)) {
    return ErrorValue(absl::InvalidArgumentError(
        absl::StrCat("math.bitShiftRight() invalid negative shift: ", rhs)));
  }
  if (rhs > 63) {
    return IntValue(0);
  }
  return IntValue(absl::bit_cast<int64_t>(absl::bit_cast<uint64_t>(lhs) >>
                                          static_cast<int>(rhs)));
}
// math.bitShiftRight for uint64: negative shifts are an error; shifts of 64 or
// more bits yield 0u (rather than relying on hardware shift-wrap behavior).
Value BitShiftRightUint(ValueManager&, uint64_t lhs, int64_t rhs) {
  if (ABSL_PREDICT_FALSE(rhs < 0)) {
    return ErrorValue(absl::InvalidArgumentError(
        absl::StrCat("math.bitShiftRight() invalid negative shift: ", rhs)));
  }
  if (rhs <= 63) {
    return UintValue(lhs >> static_cast<int>(rhs));
  }
  return UintValue(0);
}
}
// Registers every math extension function on `registry`: the math.@min /
// math.@max aggregates (unary, binary same-type, binary cross-type, and list
// forms), the floating-point helpers (ceil/floor/round/trunc/isInf/isNaN/
// isFinite), abs/sign for all numeric types, and the bitwise family
// (bitAnd/bitOr/bitXor/bitNot/bitShiftLeft/bitShiftRight).
// `options` is currently unused; it is kept for signature parity with other
// extension registrars.
absl::Status RegisterMathExtensionFunctions(FunctionRegistry& registry,
                                            const RuntimeOptions& options) {
  // --- math.@min: unary identity overloads ---
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, int64_t>::CreateDescriptor(
          kMathMin, false),
      UnaryFunctionAdapter<Value, int64_t>::WrapFunction(Identity<int64_t>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, double>::CreateDescriptor(
          kMathMin, false),
      UnaryFunctionAdapter<Value, double>::WrapFunction(Identity<double>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, uint64_t>::CreateDescriptor(
          kMathMin, false),
      UnaryFunctionAdapter<Value, uint64_t>::WrapFunction(Identity<uint64_t>)));
  // --- math.@min: binary same-type overloads ---
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, int64_t, int64_t>::CreateDescriptor(
          kMathMin, false),
      BinaryFunctionAdapter<Value, int64_t, int64_t>::WrapFunction(
          Min<int64_t, int64_t>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, double, double>::CreateDescriptor(
          kMathMin, false),
      BinaryFunctionAdapter<Value, double, double>::WrapFunction(
          Min<double, double>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, uint64_t, uint64_t>::CreateDescriptor(
          kMathMin, false),
      BinaryFunctionAdapter<Value, uint64_t, uint64_t>::WrapFunction(
          Min<uint64_t, uint64_t>)));
  // --- math.@min: binary cross-type overloads (both argument orders) ---
  CEL_RETURN_IF_ERROR((RegisterCrossNumericMin<int64_t, uint64_t>(registry)));
  CEL_RETURN_IF_ERROR((RegisterCrossNumericMin<int64_t, double>(registry)));
  CEL_RETURN_IF_ERROR((RegisterCrossNumericMin<double, uint64_t>(registry)));
  // --- math.@min: list overload ---
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<absl::StatusOr<Value>, ListValue>::CreateDescriptor(
          kMathMin, false),
      UnaryFunctionAdapter<absl::StatusOr<Value>, ListValue>::WrapFunction(
          MinList)));
  // --- math.@max: unary identity overloads ---
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, int64_t>::CreateDescriptor(
          kMathMax, false),
      UnaryFunctionAdapter<Value, int64_t>::WrapFunction(Identity<int64_t>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, double>::CreateDescriptor(
          kMathMax, false),
      UnaryFunctionAdapter<Value, double>::WrapFunction(Identity<double>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, uint64_t>::CreateDescriptor(
          kMathMax, false),
      UnaryFunctionAdapter<Value, uint64_t>::WrapFunction(Identity<uint64_t>)));
  // --- math.@max: binary same-type overloads ---
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, int64_t, int64_t>::CreateDescriptor(
          kMathMax, false),
      BinaryFunctionAdapter<Value, int64_t, int64_t>::WrapFunction(
          Max<int64_t, int64_t>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, double, double>::CreateDescriptor(
          kMathMax, false),
      BinaryFunctionAdapter<Value, double, double>::WrapFunction(
          Max<double, double>)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, uint64_t, uint64_t>::CreateDescriptor(
          kMathMax, false),
      BinaryFunctionAdapter<Value, uint64_t, uint64_t>::WrapFunction(
          Max<uint64_t, uint64_t>)));
  // --- math.@max: binary cross-type overloads (both argument orders) ---
  CEL_RETURN_IF_ERROR((RegisterCrossNumericMax<int64_t, uint64_t>(registry)));
  CEL_RETURN_IF_ERROR((RegisterCrossNumericMax<int64_t, double>(registry)));
  CEL_RETURN_IF_ERROR((RegisterCrossNumericMax<double, uint64_t>(registry)));
  // --- math.@max: list overload ---
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<absl::StatusOr<Value>, ListValue>::CreateDescriptor(
          kMathMax, false),
      UnaryFunctionAdapter<absl::StatusOr<Value>, ListValue>::WrapFunction(
          MaxList)));
  // --- floating point helpers ---
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<double, double>::CreateDescriptor(
          "math.ceil", false),
      UnaryFunctionAdapter<double, double>::WrapFunction(CeilDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<double, double>::CreateDescriptor(
          "math.floor", false),
      UnaryFunctionAdapter<double, double>::WrapFunction(FloorDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<double, double>::CreateDescriptor(
          "math.round", false),
      UnaryFunctionAdapter<double, double>::WrapFunction(RoundDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<double, double>::CreateDescriptor(
          "math.trunc", false),
      UnaryFunctionAdapter<double, double>::WrapFunction(TruncDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<bool, double>::CreateDescriptor(
          "math.isInf", false),
      UnaryFunctionAdapter<bool, double>::WrapFunction(IsInfDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<bool, double>::CreateDescriptor(
          "math.isNaN", false),
      UnaryFunctionAdapter<bool, double>::WrapFunction(IsNaNDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<bool, double>::CreateDescriptor(
          "math.isFinite", false),
      UnaryFunctionAdapter<bool, double>::WrapFunction(IsFiniteDouble)));
  // --- abs and sign for all numeric types ---
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<double, double>::CreateDescriptor(
          "math.abs", false),
      UnaryFunctionAdapter<double, double>::WrapFunction(AbsDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<Value, int64_t>::CreateDescriptor(
          "math.abs", false),
      UnaryFunctionAdapter<Value, int64_t>::WrapFunction(AbsInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<uint64_t, uint64_t>::CreateDescriptor(
          "math.abs", false),
      UnaryFunctionAdapter<uint64_t, uint64_t>::WrapFunction(AbsUint)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<double, double>::CreateDescriptor(
          "math.sign", false),
      UnaryFunctionAdapter<double, double>::WrapFunction(SignDouble)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<int64_t, int64_t>::CreateDescriptor(
          "math.sign", false),
      UnaryFunctionAdapter<int64_t, int64_t>::WrapFunction(SignInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<uint64_t, uint64_t>::CreateDescriptor(
          "math.sign", false),
      UnaryFunctionAdapter<uint64_t, uint64_t>::WrapFunction(SignUint)));
  // --- bitwise operations, signed and unsigned overloads ---
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<int64_t, int64_t, int64_t>::CreateDescriptor(
          "math.bitAnd", false),
      BinaryFunctionAdapter<int64_t, int64_t, int64_t>::WrapFunction(
          BitAndInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<uint64_t, uint64_t, uint64_t>::CreateDescriptor(
          "math.bitAnd", false),
      BinaryFunctionAdapter<uint64_t, uint64_t, uint64_t>::WrapFunction(
          BitAndUint)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<int64_t, int64_t, int64_t>::CreateDescriptor(
          "math.bitOr", false),
      BinaryFunctionAdapter<int64_t, int64_t, int64_t>::WrapFunction(
          BitOrInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<uint64_t, uint64_t, uint64_t>::CreateDescriptor(
          "math.bitOr", false),
      BinaryFunctionAdapter<uint64_t, uint64_t, uint64_t>::WrapFunction(
          BitOrUint)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<int64_t, int64_t, int64_t>::CreateDescriptor(
          "math.bitXor", false),
      BinaryFunctionAdapter<int64_t, int64_t, int64_t>::WrapFunction(
          BitXorInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<uint64_t, uint64_t, uint64_t>::CreateDescriptor(
          "math.bitXor", false),
      BinaryFunctionAdapter<uint64_t, uint64_t, uint64_t>::WrapFunction(
          BitXorUint)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<int64_t, int64_t>::CreateDescriptor(
          "math.bitNot", false),
      UnaryFunctionAdapter<int64_t, int64_t>::WrapFunction(BitNotInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      UnaryFunctionAdapter<uint64_t, uint64_t>::CreateDescriptor(
          "math.bitNot", false),
      UnaryFunctionAdapter<uint64_t, uint64_t>::WrapFunction(BitNotUint)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, int64_t, int64_t>::CreateDescriptor(
          "math.bitShiftLeft", false),
      BinaryFunctionAdapter<Value, int64_t, int64_t>::WrapFunction(
          BitShiftLeftInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, uint64_t, int64_t>::CreateDescriptor(
          "math.bitShiftLeft", false),
      BinaryFunctionAdapter<Value, uint64_t, int64_t>::WrapFunction(
          BitShiftLeftUint)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, int64_t, int64_t>::CreateDescriptor(
          "math.bitShiftRight", false),
      BinaryFunctionAdapter<Value, int64_t, int64_t>::WrapFunction(
          BitShiftRightInt)));
  CEL_RETURN_IF_ERROR(registry.Register(
      BinaryFunctionAdapter<Value, uint64_t, int64_t>::CreateDescriptor(
          "math.bitShiftRight", false),
      BinaryFunctionAdapter<Value, uint64_t, int64_t>::WrapFunction(
          BitShiftRightUint)));
  return absl::OkStatus();
}
// Legacy registration entry point: adapts the CelFunctionRegistry /
// InterpreterOptions API onto the modern FunctionRegistry overload above.
absl::Status RegisterMathExtensionFunctions(CelFunctionRegistry* registry,
                                            const InterpreterOptions& options) {
  return RegisterMathExtensionFunctions(
      registry->InternalGetRegistry(),
      google::api::expr::runtime::ConvertToRuntimeOptions(options));
}
} | #include "extensions/math_ext.h"
#include <memory>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_function.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/testing/matchers.h"
#include "extensions/math_ext_macros.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "google/protobuf/arena.h"
namespace cel::extensions {
namespace {
using ::absl_testing::StatusIs;
using ::google::api::expr::v1alpha1::Expr;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::v1alpha1::SourceInfo;
using ::google::api::expr::parser::ParseWithMacros;
using ::google::api::expr::runtime::Activation;
using ::google::api::expr::runtime::CelExpressionBuilder;
using ::google::api::expr::runtime::CelFunction;
using ::google::api::expr::runtime::CelFunctionDescriptor;
using ::google::api::expr::runtime::CelValue;
using ::google::api::expr::runtime::ContainerBackedListImpl;
using ::google::api::expr::runtime::CreateCelExpressionBuilder;
using ::google::api::expr::runtime::InterpreterOptions;
using ::google::api::expr::runtime::RegisterBuiltinFunctions;
using ::google::api::expr::runtime::test::EqualsCelValue;
using ::google::protobuf::Arena;
using ::testing::HasSubstr;
constexpr absl::string_view kMathMin = "math.@min";
constexpr absl::string_view kMathMax = "math.@max";
// One min/max evaluation case: applying `operation` to arg1 (and arg2 when
// present) is expected to produce `result`.
struct TestCase {
  absl::string_view operation;
  CelValue arg1;
  // nullopt selects the single-argument (identity or list) overload.
  absl::optional<CelValue> arg2;
  CelValue result;
};
// Convenience constructors for binary and list/unary min/max test cases.
TestCase MinCase(CelValue v1, CelValue v2, CelValue result) {
  return TestCase{kMathMin, v1, v2, result};
}
TestCase MinCase(CelValue list, CelValue result) {
  return TestCase{kMathMin, list, absl::nullopt, result};
}
TestCase MaxCase(CelValue v1, CelValue v2, CelValue result) {
  return TestCase{kMathMax, v1, v2, result};
}
TestCase MaxCase(CelValue list, CelValue result) {
  return TestCase{kMathMax, list, absl::nullopt, result};
}
// One macro-expansion case: `expr` is parsed with the math macros enabled; a
// non-empty `err` means parsing is expected to fail with that substring.
struct MacroTestCase {
  absl::string_view expr;
  absl::string_view err = "";
};
// Stub receiver-style function (bool, int, int) that always evaluates to true.
// Used to confirm the math macros do not hijack same-named member calls.
class TestFunction : public CelFunction {
 public:
  explicit TestFunction(absl::string_view name)
      : CelFunction(CelFunctionDescriptor(
            name, true,
            {CelValue::Type::kBool, CelValue::Type::kInt64,
             CelValue::Type::kInt64})) {}
  absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* result,
                        Arena* arena) const override {
    *result = CelValue::CreateBool(true);
    return absl::OkStatus();
  }
};
// Factories for stub "greatest" / "least" receiver functions used by the
// macro tests.
constexpr absl::string_view kGreatest = "greatest";
std::unique_ptr<CelFunction> CreateGreatestFunction() {
  return std::make_unique<TestFunction>(kGreatest);
}
constexpr absl::string_view kLeast = "least";
std::unique_ptr<CelFunction> CreateLeastFunction() {
  return std::make_unique<TestFunction>(kLeast);
}
// Builds the AST for `operation(a)`, where `a` is an identifier reference.
Expr CallExprOneArg(absl::string_view operation) {
  Expr expr;
  auto* call_expr = expr.mutable_call_expr();
  call_expr->set_function(operation);
  call_expr->add_args()->mutable_ident_expr()->set_name("a");
  return expr;
}
// Builds the AST for `operation(a, b)`, with identifier-reference arguments.
Expr CallExprTwoArgs(absl::string_view operation) {
  Expr expr;
  auto* call_expr = expr.mutable_call_expr();
  call_expr->set_function(operation);
  for (const char* arg_name : {"a", "b"}) {
    call_expr->add_args()->mutable_ident_expr()->set_name(arg_name);
  }
  return expr;
}
// Evaluates the test case's operation against a fresh expression builder with
// the math extensions registered, then checks the produced value (or, for
// error cases, the error code and message substring).
void ExpectResult(const TestCase& test_case) {
  Expr expr;
  Activation activation;
  activation.InsertValue("a", test_case.arg1);
  if (test_case.arg2.has_value()) {
    activation.InsertValue("b", *test_case.arg2);
    expr = CallExprTwoArgs(test_case.operation);
  } else {
    // Single-argument form: covers both identity and list overloads.
    expr = CallExprOneArg(test_case.operation);
  }
  SourceInfo source_info;
  InterpreterOptions options;
  std::unique_ptr<CelExpressionBuilder> builder =
      CreateCelExpressionBuilder(options);
  ASSERT_OK(RegisterMathExtensionFunctions(builder->GetRegistry(), options));
  ASSERT_OK_AND_ASSIGN(auto cel_expression,
                       builder->CreateExpression(&expr, &source_info));
  google::protobuf::Arena arena;
  ASSERT_OK_AND_ASSIGN(auto value,
                       cel_expression->Evaluate(activation, &arena));
  if (!test_case.result.IsError()) {
    EXPECT_THAT(value, EqualsCelValue(test_case.result));
  } else {
    auto expected = test_case.result.ErrorOrDie();
    EXPECT_THAT(*value.ErrorOrDie(),
                StatusIs(expected->code(), HasSubstr(expected->message())));
  }
}
// Parameterized coverage of the binary math.@min / math.@max overloads,
// including cross-type (int/uint/double) comparisons and tie-breaking.
using MathExtParamsTest = testing::TestWithParam<TestCase>;
TEST_P(MathExtParamsTest, MinMaxTests) { ExpectResult(GetParam()); }
INSTANTIATE_TEST_SUITE_P(
    MathExtParamsTest, MathExtParamsTest,
    testing::ValuesIn<TestCase>({
        MinCase(CelValue::CreateInt64(3L), CelValue::CreateInt64(2L),
                CelValue::CreateInt64(2L)),
        MinCase(CelValue::CreateInt64(-1L), CelValue::CreateUint64(2u),
                CelValue::CreateInt64(-1L)),
        MinCase(CelValue::CreateInt64(-1L), CelValue::CreateDouble(-1.1),
                CelValue::CreateDouble(-1.1)),
        MinCase(CelValue::CreateDouble(-2.0), CelValue::CreateDouble(-1.1),
                CelValue::CreateDouble(-2.0)),
        MinCase(CelValue::CreateDouble(3.1), CelValue::CreateInt64(2),
                CelValue::CreateInt64(2)),
        MinCase(CelValue::CreateDouble(2.5), CelValue::CreateUint64(2u),
                CelValue::CreateUint64(2u)),
        MinCase(CelValue::CreateUint64(2u), CelValue::CreateDouble(-1.1),
                CelValue::CreateDouble(-1.1)),
        MinCase(CelValue::CreateUint64(3u), CelValue::CreateInt64(20),
                CelValue::CreateUint64(3u)),
        MinCase(CelValue::CreateUint64(4u), CelValue::CreateUint64(2u),
                CelValue::CreateUint64(2u)),
        // Ties across types must preserve the first argument's type.
        MinCase(CelValue::CreateInt64(2L), CelValue::CreateUint64(2u),
                CelValue::CreateInt64(2L)),
        MinCase(CelValue::CreateInt64(-1L), CelValue::CreateDouble(-1.0),
                CelValue::CreateInt64(-1L)),
        MinCase(CelValue::CreateDouble(2.0), CelValue::CreateInt64(2),
                CelValue::CreateDouble(2.0)),
        MinCase(CelValue::CreateDouble(2.0), CelValue::CreateUint64(2u),
                CelValue::CreateDouble(2.0)),
        MinCase(CelValue::CreateUint64(2u), CelValue::CreateDouble(2.0),
                CelValue::CreateUint64(2u)),
        MinCase(CelValue::CreateUint64(3u), CelValue::CreateInt64(3),
                CelValue::CreateUint64(3u)),
        MaxCase(CelValue::CreateInt64(3L), CelValue::CreateInt64(2L),
                CelValue::CreateInt64(3L)),
        MaxCase(CelValue::CreateInt64(-1L), CelValue::CreateUint64(2u),
                CelValue::CreateUint64(2u)),
        MaxCase(CelValue::CreateInt64(-1L), CelValue::CreateDouble(-1.1),
                CelValue::CreateInt64(-1L)),
        MaxCase(CelValue::CreateDouble(-2.0), CelValue::CreateDouble(-1.1),
                CelValue::CreateDouble(-1.1)),
        MaxCase(CelValue::CreateDouble(3.1), CelValue::CreateInt64(2),
                CelValue::CreateDouble(3.1)),
        MaxCase(CelValue::CreateDouble(2.5), CelValue::CreateUint64(2u),
                CelValue::CreateDouble(2.5)),
        MaxCase(CelValue::CreateUint64(2u), CelValue::CreateDouble(-1.1),
                CelValue::CreateUint64(2u)),
        MaxCase(CelValue::CreateUint64(3u), CelValue::CreateInt64(20),
                CelValue::CreateInt64(20)),
        MaxCase(CelValue::CreateUint64(4u), CelValue::CreateUint64(2u),
                CelValue::CreateUint64(4u)),
        // Ties across types must preserve the first argument's type.
        MaxCase(CelValue::CreateInt64(2L), CelValue::CreateUint64(2u),
                CelValue::CreateInt64(2L)),
        MaxCase(CelValue::CreateInt64(-1L), CelValue::CreateDouble(-1.0),
                CelValue::CreateInt64(-1L)),
        MaxCase(CelValue::CreateDouble(2.0), CelValue::CreateInt64(2),
                CelValue::CreateDouble(2.0)),
        MaxCase(CelValue::CreateDouble(2.0), CelValue::CreateUint64(2u),
                CelValue::CreateDouble(2.0)),
        MaxCase(CelValue::CreateUint64(2u), CelValue::CreateDouble(2.0),
                CelValue::CreateUint64(2u)),
        MaxCase(CelValue::CreateUint64(3u), CelValue::CreateInt64(3),
                CelValue::CreateUint64(3u)),
    }));
// Covers the list overloads of math.@min / math.@max: single-element lists,
// mixed-type lists, and the error cases (empty list, non-numeric elements at
// the head and in the middle).
TEST(MathExtTest, MinMaxList) {
  ContainerBackedListImpl single_item_list({CelValue::CreateInt64(1)});
  ExpectResult(MinCase(CelValue::CreateList(&single_item_list),
                       CelValue::CreateInt64(1)));
  ExpectResult(MaxCase(CelValue::CreateList(&single_item_list),
                       CelValue::CreateInt64(1)));
  ContainerBackedListImpl list({CelValue::CreateInt64(1),
                                CelValue::CreateUint64(2u),
                                CelValue::CreateDouble(-1.1)});
  ExpectResult(
      MinCase(CelValue::CreateList(&list), CelValue::CreateDouble(-1.1)));
  ExpectResult(
      MaxCase(CelValue::CreateList(&list), CelValue::CreateUint64(2u)));
  // Empty lists produce an invalid-argument error value.
  absl::Status empty_list_err =
      absl::InvalidArgumentError("argument must not be empty");
  CelValue err_value = CelValue::CreateError(&empty_list_err);
  ContainerBackedListImpl empty_list({});
  ExpectResult(MinCase(CelValue::CreateList(&empty_list), err_value));
  ExpectResult(MaxCase(CelValue::CreateList(&empty_list), err_value));
  // Non-numeric elements produce an invalid-argument error value regardless
  // of position in the list.
  absl::Status bad_arg_err =
      absl::InvalidArgumentError("arguments must be numeric");
  err_value = CelValue::CreateError(&bad_arg_err);
  ContainerBackedListImpl bad_single_item({CelValue::CreateBool(true)});
  ExpectResult(MinCase(CelValue::CreateList(&bad_single_item), err_value));
  ExpectResult(MaxCase(CelValue::CreateList(&bad_single_item), err_value));
  ContainerBackedListImpl bad_middle_item({CelValue::CreateInt64(1),
                                           CelValue::CreateBool(false),
                                           CelValue::CreateDouble(-1.1)});
  ExpectResult(MinCase(CelValue::CreateList(&bad_middle_item), err_value));
  ExpectResult(MaxCase(CelValue::CreateList(&bad_middle_item), err_value));
}
using MathExtMacroParamsTest = testing::TestWithParam<MacroTestCase>;
TEST_P(MathExtMacroParamsTest, MacroTests) {
const MacroTestCase& test_case = GetParam();
auto result = ParseWithMacros(test_case.expr, cel::extensions::math_macros(),
"<input>");
if (!test_case.err.empty()) {
EXPECT_THAT(result.status(), StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(test_case.err)));
return;
}
ASSERT_OK(result);
ParsedExpr parsed_expr = *result;
Expr expr = parsed_expr.expr();
SourceInfo source_info = parsed_expr.source_info();
InterpreterOptions options;
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
ASSERT_OK(builder->GetRegistry()->Register(CreateGreatestFunction()));
ASSERT_OK(builder->GetRegistry()->Register(CreateLeastFunction()));
ASSERT_OK(RegisterBuiltinFunctions(builder->GetRegistry(), options));
ASSERT_OK(RegisterMathExtensionFunctions(builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto cel_expression,
builder->CreateExpression(&expr, &source_info));
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(auto value,
cel_expression->Evaluate(activation, &arena));
ASSERT_TRUE(value.IsBool());
EXPECT_EQ(value.BoolOrDie(), true);
}
INSTANTIATE_TEST_SUITE_P(
MathExtMacrosParamsTest, MathExtMacroParamsTest,
testing::ValuesIn<MacroTestCase>({
{"math.least(-0.5) == -0.5"},
{"math.least(-1) == -1"},
{"math.least(1u) == 1u"},
{"math.least(42.0, -0.5) == -0.5"},
{"math.least(-1, 0) == -1"},
{"math.least(-1, -1) == -1"},
{"math.least(1u, 42u) == 1u"},
{"math.least(42.0, -0.5, -0.25) == -0.5"},
{"math.least(-1, 0, 1) == -1"},
{"math.least(-1, -1, -1) == -1"},
{"math.least(1u, 42u, 0u) == 0u"},
{"math.least(1, 1.0) == 1"},
{"math.least(1, -2.0) == -2.0"},
{"math.least(2, 1u) == 1u"},
{"math.least(1.5, 2) == 1.5"},
{"math.least(1.5, -2) == -2"},
{"math.least(2.5, 1u) == 1u"},
{"math.least(1u, 2) == 1u"},
{"math.least(1u, -2) == -2"},
{"math.least(2u, 2.5) == 2u"},
{"math.least(1u, dyn(42)) == 1"},
{"math.least(1u, dyn(42), dyn(0.0)) == 0u"},
{"math.least([1u, 42u, 0u]) == 0u"},
{
"math.least()",
"math.least() requires at least one argument.",
},
{
"math.least('hello')",
"math.least() invalid single argument value.",
},
{
"math.least({})",
"math.least() invalid single argument value",
},
{
"math.least([])",
"math.least() invalid single argument value",
},
{
"math.least([1, true])",
"math.least() invalid single argument value",
},
{
"math.least(1, true)",
"math.least() simple literal arguments must be numeric",
},
{
"math.least(1, 2, true)",
"math.least() simple literal arguments must be numeric",
},
{"math.greatest(-0.5) == -0.5"},
{"math.greatest(-1) == -1"},
{"math.greatest(1u) == 1u"},
{"math.greatest(42.0, -0.5) == 42.0"},
{"math.greatest(-1, 0) == 0"},
{"math.greatest(-1, -1) == -1"},
{"math.greatest(1u, 42u) == 42u"},
{"math.greatest(42.0, -0.5, -0.25) == 42.0"},
{"math.greatest(-1, 0, 1) == 1"},
{"math.greatest(-1, -1, -1) == -1"},
{"math.greatest(1u, 42u, 0u) == 42u"},
{"math.greatest(1, 1.0) == 1"},
{"math.greatest(1, -2.0) == 1"},
{"math.greatest(2, 1u) == 2"},
{"math.greatest(1.5, 2) == 2"},
{"math.greatest(1.5, -2) == 1.5"},
{"math.greatest(2.5, 1u) == 2.5"},
{"math.greatest(1u, 2) == 2"},
{"math.greatest(1u, -2) == 1u"},
{"math.greatest(2u, 2.5) == 2.5"},
{"math.greatest(1u, dyn(42)) == 42.0"},
{"math.greatest(1u, dyn(0.0), 0u) == 1"},
{"math.greatest([1u, dyn(0.0), 0u]) == 1"},
{
"math.greatest()",
"math.greatest() requires at least one argument.",
},
{
"math.greatest('hello')",
"math.greatest() invalid single argument value.",
},
{
"math.greatest({})",
"math.greatest() invalid single argument value",
},
{
"math.greatest([])",
"math.greatest() invalid single argument value",
},
{
"math.greatest([1, true])",
"math.greatest() invalid single argument value",
},
{
"math.greatest(1, true)",
"math.greatest() simple literal arguments must be numeric",
},
{
"math.greatest(1, 2, true)",
"math.greatest() simple literal arguments must be numeric",
},
{
"false.greatest(1,2)",
},
{
"true.least(1,2)",
},
}));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/math_ext.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/math_ext_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
bea336ee-0035-41f2-b969-693116bc4a27 | cpp | tensorflow/tensorflow | trt_lru_cache | tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.cc | tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache_test.cc | #include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include <sstream>
#include "tensorflow/compiler/tf2tensorrt/utils/trt_allocator.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/mutex.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "third_party/tensorrt/NvInfer.h"
namespace tensorflow {
namespace tensorrt {
string CalibrationContext::TerminateCalibration() {
mutex_lock l(mu_);
if (terminated_) return calibration_table_;
TRTInt8Calibrator* raw_calibrator = calibrator_.get();
raw_calibrator->waitAndSetDone();
terminated_ = true;
thr_->join();
calibration_table_ = raw_calibrator->getCalibrationTableAsString();
return calibration_table_;
}
const absl::string_view kTfTrtContainerName = "TF-TRT";
Logger& TRTEngineCacheResource::GetLogger() {
static Logger* logger = new Logger();
return *logger;
}
TRTEngineCacheResource::TRTEngineCacheResource(OpKernelContext* ctx,
size_t capacity)
: cache_(capacity) {
auto device = ctx->device();
auto alloc = device->GetAllocator(AllocatorAttributes());
if (!alloc) {
LOG(ERROR) << "Can't find device allocator for gpu device "
<< device->name();
allocator_ = nullptr;
} else {
allocator_.reset(new TRTDeviceAllocator(alloc));
}
}
TRTEngineCacheResource::~TRTEngineCacheResource() {
VLOG(1) << "Destroying TRTEngineCacheResource...";
}
string TRTEngineCacheResource::DebugString() const {
std::stringstream oss;
using std::dec;
using std::endl;
using std::hex;
oss << "TRTEngineCacheResource: ";
oss << "TRTBaseAllocator = " << hex << allocator_.get() << dec << ", ";
oss << "LRUCache = " << hex << &cache_ << dec << endl;
oss << "Containing " << cache_.size() << " entries: " << endl;
for (const auto& item : cache_) {
mutex_lock lock(item.second->mu);
oss << TensorShapeUtils::ShapeListString(item.first) << ": " << hex
<< "ICudaEngine: " << item.second->GetCudaEngine() << ", "
<< "IExecutionContext: ";
absl::c_for_each(
item.second->execution_contexts,
[&](const ExecutionContext& ctx) { oss << ctx.get() << ","; });
oss << dec << endl;
}
return oss.str();
}
EngineContext* TRTEngineCacheResource::GetEngineContext(
const std::vector<TensorShape>& input_shapes) {
EngineContext* engine_context = nullptr;
int64 min_matched_batch_size = kint64max;
for (const auto& pair : cache_) {
const std::vector<TensorShape>& cached_input_shapes = pair.first;
if (input_shapes.size() != cached_input_shapes.size()) {
LOG(ERROR) << "Input shape list size mismatch"
<< ", cached size: " << cached_input_shapes.size()
<< " vs. input size: " << input_shapes.size();
}
if (AreShapesCompatible(input_shapes, cached_input_shapes)) {
const int cached_batch_size = cached_input_shapes[0].dim_size(0);
if (min_matched_batch_size > cached_batch_size) {
min_matched_batch_size = cached_batch_size;
engine_context = pair.second.get();
}
}
}
return engine_context;
}
EngineContext* TRTEngineCacheResource::GetEngineContext(const int profile_id) {
if (profiles_.NeedProfiles() && profile_id >= profiles_.GetNumProfiles()) {
LOG(ERROR) << "Out of range: profile_id " << profile_id
<< " is larger than number of profiles "
<< profiles_.GetNumProfiles();
return nullptr;
}
if (cache_.size() > 1) {
LOG(ERROR) << "Cache is expected to have at most "
<< "1 engine in explicit batch mode where profiles are used.";
return nullptr;
}
if (cache_.size() == 0) {
return nullptr;
}
return cache_.begin()->second.get();
}
}
}
#endif | #include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace tensorrt {
TEST(LRUCacheTest, Basic) {
LRUCache<int, int, std::hash<int>> cache;
cache.reserve(2);
cache.emplace(10, 100);
EXPECT_EQ(cache.size(), 1);
EXPECT_EQ(cache.count(10), 1);
EXPECT_EQ(cache.at(10), 100);
EXPECT_EQ(cache.count(100), 0);
cache.emplace(20, 200);
EXPECT_EQ(cache.size(), 2);
EXPECT_EQ(cache.count(10), 1);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.at(10), 100);
EXPECT_EQ(cache.at(20), 200);
EXPECT_EQ(cache.count(100), 0);
EXPECT_EQ(cache.count(200), 0);
cache.emplace(30, 300);
EXPECT_EQ(cache.count(10), 0);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.count(30), 1);
cache.at(20);
cache.emplace(40, 400);
EXPECT_EQ(cache.count(10), 0);
EXPECT_EQ(cache.count(20), 1);
EXPECT_EQ(cache.count(30), 0);
EXPECT_EQ(cache.count(40), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
22370324-516e-4f49-8766-2c999df7dcb1 | cpp | google/tensorstore | string_like | tensorstore/internal/string_like.h | tensorstore/internal/string_like_test.cc | #ifndef TENSORSTORE_INTERNAL_STRING_LIKE_H_
#define TENSORSTORE_INTERNAL_STRING_LIKE_H_
#include <cassert>
#include <cstddef>
#include <string>
#include <string_view>
#include "absl/base/optimization.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
template <typename T>
constexpr inline bool IsStringLike = false;
template <>
constexpr inline bool IsStringLike<std::string_view> = true;
template <>
constexpr inline bool IsStringLike<std::string> = true;
template <>
constexpr inline bool IsStringLike<const char*> = true;
class StringLikeSpan {
public:
StringLikeSpan() = default;
StringLikeSpan(tensorstore::span<const char* const> c_strings)
: c_strings_(c_strings.data()), size_and_tag_(c_strings.size() << 2) {}
StringLikeSpan(tensorstore::span<const std::string> strings)
: strings_(strings.data()), size_and_tag_((strings.size() << 2) | 1) {}
StringLikeSpan(tensorstore::span<const std::string_view> string_views)
: string_views_(string_views.data()),
size_and_tag_((string_views.size() << 2) | 2) {}
std::string_view operator[](ptrdiff_t i) const {
assert(i >= 0 && i < size());
switch (size_and_tag_ & 3) {
case 0:
return c_strings_[i];
case 1:
return strings_[i];
case 2:
return string_views_[i];
default:
ABSL_UNREACHABLE();
}
}
ptrdiff_t size() const { return size_and_tag_ >> 2; }
private:
union {
const char* const* c_strings_;
const std::string* strings_;
const std::string_view* string_views_;
};
ptrdiff_t size_and_tag_ = 0;
};
}
}
#endif | #include "tensorstore/internal/string_like.h"
#include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::StringLikeSpan;
TEST(StringLikeSpan, Default) {
StringLikeSpan x;
EXPECT_EQ(0, x.size());
}
TEST(StringLikeSpan, CStrings) {
std::vector<const char*> c_strings{"a", "b", "c"};
StringLikeSpan x(c_strings);
EXPECT_EQ(3, x.size());
EXPECT_EQ("a", x[0]);
EXPECT_EQ("b", x[1]);
EXPECT_EQ("c", x[2]);
}
TEST(StringLikeSpan, StdStrings) {
std::vector<std::string> std_strings{"a", "b", "c"};
StringLikeSpan x(std_strings);
EXPECT_EQ(3, x.size());
EXPECT_EQ("a", x[0]);
EXPECT_EQ("b", x[1]);
EXPECT_EQ("c", x[2]);
}
TEST(StringLikeSpan, StringViews) {
std::vector<std::string_view> string_views{"a", "b", "c"};
StringLikeSpan x(string_views);
EXPECT_EQ(3, x.size());
EXPECT_EQ("a", x[0]);
EXPECT_EQ("b", x[1]);
EXPECT_EQ("c", x[2]);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/string_like.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/string_like_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
1ce4f09f-ee50-4e20-8d02-024686a491c7 | cpp | google/tensorstore | schema | tensorstore/proto/schema.cc | tensorstore/proto/schema_test.cc | #include "tensorstore/proto/schema.h"
#include <stddef.h>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "tensorstore/array.h"
#include "tensorstore/chunk_layout.h"
#include "tensorstore/codec_spec.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dimension_units.h"
#include "tensorstore/index_space/index_domain.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/proto/array.h"
#include "tensorstore/proto/index_transform.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/rank.h"
#include "tensorstore/schema.h"
#include "tensorstore/serialization/batch.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/strided_layout.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/unit.h"
namespace tensorstore {
namespace {
void EncodeToProto(::tensorstore::proto::OptionalUnit& proto,
const std::optional<Unit>& unit) {
if (unit.has_value()) {
proto.set_base_unit(unit->base_unit);
proto.set_multiplier(unit->multiplier);
}
}
bool IsValidGridView(ChunkLayout::GridView view) {
return (view.aspect_ratio().valid() || view.elements().valid() ||
view.shape().valid());
}
void EncodeToProto(::tensorstore::proto::ChunkLayout& proto,
const ChunkLayout& chunk_layout) {
auto encode_grid =
[](::tensorstore::proto::ChunkLayout::Grid& proto,
ChunkLayout::GridView grid_view) {
{
DimensionSet soft_constraints(false);
auto shape = grid_view.shape();
for (size_t i = 0; i < shape.size(); i++) {
proto.add_shape(shape[i]);
soft_constraints[i] = !shape.hard_constraint[i];
}
if (soft_constraints) {
proto.set_shape_soft_constraint_bitset(soft_constraints.to_uint());
}
}
{
DimensionSet soft_constraints(false);
auto aspect_ratio = grid_view.aspect_ratio();
for (size_t i = 0; i < aspect_ratio.size(); i++) {
proto.add_aspect_ratio(aspect_ratio[i]);
soft_constraints[i] = !aspect_ratio.hard_constraint[i];
}
if (soft_constraints) {
proto.set_aspect_ratio_soft_constraint_bitset(
soft_constraints.to_uint());
}
}
if (grid_view.elements().valid()) {
proto.set_elements(grid_view.elements().value);
if (!grid_view.elements().hard_constraint) {
proto.set_elements_soft_constraint(true);
}
}
};
{
DimensionSet grid_origin_soft_constraint_bitset(false);
auto grid_origin = chunk_layout.grid_origin();
for (size_t i = 0; i < grid_origin.size(); i++) {
proto.add_grid_origin(grid_origin[i]);
grid_origin_soft_constraint_bitset[i] = !grid_origin.hard_constraint[i];
}
if (grid_origin_soft_constraint_bitset) {
proto.set_grid_origin_soft_constraint_bitset(
grid_origin_soft_constraint_bitset.to_uint());
}
}
{
auto inner_order = chunk_layout.inner_order();
if (!inner_order.hard_constraint) {
proto.set_inner_order_soft_constraint(true);
}
for (size_t i = 0; i < inner_order.size(); i++) {
proto.add_inner_order(inner_order[i]);
}
}
if (IsValidGridView(chunk_layout.read_chunk())) {
encode_grid(*proto.mutable_read_chunk(), chunk_layout.read_chunk());
}
if (IsValidGridView(chunk_layout.write_chunk())) {
encode_grid(*proto.mutable_write_chunk(), chunk_layout.write_chunk());
}
if (IsValidGridView(chunk_layout.codec_chunk())) {
encode_grid(*proto.mutable_codec_chunk(), chunk_layout.codec_chunk());
}
}
Result<ChunkLayout> ParseChunkLayoutFromProto(
const ::tensorstore::proto::ChunkLayout& proto) {
auto parse_grid = [](const ::tensorstore::proto::ChunkLayout::Grid& proto)
-> Result<ChunkLayout::Grid> {
ChunkLayout::Grid grid;
if (proto.shape_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.shape_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::Shape(
tensorstore::span(proto.shape()), ~soft_constraints)));
}
if (proto.aspect_ratio_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.aspect_ratio_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::AspectRatio(
tensorstore::span(proto.aspect_ratio()), ~soft_constraints)));
}
if (proto.has_elements()) {
TENSORSTORE_RETURN_IF_ERROR(grid.Set(ChunkLayout::Grid::Elements(
proto.elements(), !proto.elements_soft_constraint())));
}
return grid;
};
ChunkLayout chunk_layout;
if (proto.grid_origin_size() > 0) {
DimensionSet soft_constraints =
DimensionSet::FromUint(proto.grid_origin_soft_constraint_bitset());
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(ChunkLayout::GridOrigin(
tensorstore::span(proto.grid_origin()), ~soft_constraints)));
}
if (proto.inner_order_size() > 0) {
std::vector<DimensionIndex> inner_order(proto.inner_order().begin(),
proto.inner_order().end());
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(ChunkLayout::InnerOrder(
inner_order, !proto.inner_order_soft_constraint())));
}
if (proto.has_read_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.read_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kRead>(grid)));
}
if (proto.has_write_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.write_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kWrite>(grid)));
}
if (proto.has_codec_chunk()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto grid, parse_grid(proto.codec_chunk()));
TENSORSTORE_RETURN_IF_ERROR(chunk_layout.Set(
ChunkLayout::GridViewFor<ChunkLayout::Usage::kCodec>(grid)));
}
return chunk_layout;
}
}
void EncodeToProto(::tensorstore::proto::Schema& proto,
const Schema& schema) {
if (DimensionIndex rank = schema.rank(); rank != dynamic_rank) {
proto.set_rank(rank);
}
if (DataType dtype = schema.dtype(); dtype.valid()) {
proto.set_dtype(std::string(dtype.name()));
}
if (IndexDomain<> domain = schema.domain(); domain.valid()) {
EncodeToProto(*proto.mutable_domain(), domain);
}
EncodeToProto(*proto.mutable_chunk_layout(), schema.chunk_layout());
if (Schema::FillValue fill_value = schema.fill_value(); fill_value.valid()) {
EncodeToProto(*proto.mutable_fill_value(), fill_value);
}
if (CodecSpec codec = schema.codec(); codec.valid()) {
auto serialized = tensorstore::serialization::EncodeBatch(schema.codec());
proto.set_codec(serialized.value());
}
if (Schema::DimensionUnits dimension_units = schema.dimension_units();
dimension_units.valid()) {
for (const auto& unit : dimension_units) {
EncodeToProto(*proto.add_dimension_unit(), unit);
}
}
}
Result<Schema> ParseSchemaFromProto(const ::tensorstore::proto::Schema& proto) {
Schema schema;
if (proto.has_rank()) {
TENSORSTORE_RETURN_IF_ERROR(schema.Set(RankConstraint(proto.rank())));
}
if (proto.has_dtype() && !proto.dtype().empty()) {
auto dtype = GetDataType(proto.dtype());
if (!dtype.valid()) {
return absl::InvalidArgumentError("dtype is not valid");
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(dtype));
}
if (proto.has_domain()) {
TENSORSTORE_ASSIGN_OR_RETURN(auto domain,
ParseIndexDomainFromProto(proto.domain()))
TENSORSTORE_RETURN_IF_ERROR(schema.Set(domain));
}
if (proto.has_chunk_layout()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto chunk_layout, ParseChunkLayoutFromProto(proto.chunk_layout()))
TENSORSTORE_RETURN_IF_ERROR(schema.Set(chunk_layout));
}
if (proto.has_codec()) {
CodecSpec codec;
TENSORSTORE_RETURN_IF_ERROR(
tensorstore::serialization::DecodeBatch(proto.codec(), codec));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(codec));
}
if (proto.has_fill_value()) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto array, ParseArrayFromProto(proto.fill_value(), zero_origin));
TENSORSTORE_ASSIGN_OR_RETURN(auto fill_value,
ArrayOriginCast<zero_origin>(array));
TENSORSTORE_RETURN_IF_ERROR(schema.Set(Schema::FillValue(fill_value)));
}
if (!proto.dimension_unit().empty()) {
DimensionUnitsVector dimensions;
for (size_t i = 0; i < proto.dimension_unit_size(); i++) {
auto& unit = proto.dimension_unit(i);
if (unit.has_multiplier() || !unit.base_unit().empty()) {
dimensions.emplace_back(std::in_place, unit.multiplier(),
unit.base_unit());
} else {
dimensions.emplace_back(std::nullopt);
}
}
TENSORSTORE_RETURN_IF_ERROR(schema.Set(Schema::DimensionUnits(dimensions)));
}
return schema;
}
} | #include "tensorstore/proto/schema.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/proto/protobuf_matchers.h"
#include "tensorstore/proto/schema.pb.h"
#include "tensorstore/schema.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::protobuf_matchers::EqualsProto;
using ::tensorstore::MatchesStatus;
using ::tensorstore::ParseSchemaFromProto;
using ::tensorstore::Schema;
template <typename Proto>
Proto ParseProtoOrDie(const std::string& asciipb) {
return protobuf_matchers::internal::MakePartialProtoFromAscii<Proto>(asciipb);
}
auto DoEncode(const Schema& schema) {
::tensorstore::proto::Schema proto;
::tensorstore::EncodeToProto(proto, schema);
return proto;
}
TEST(SchemaProtoTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto schema,
Schema::FromJson(
{
{"rank", 3},
{"dtype", "uint8"},
{"domain",
{{"labels", {"x", "y", "z"}},
{"inclusive_min", {1, 2, 3}},
{"exclusive_max", {5, 6, 7}}}},
{"chunk_layout",
{
{"codec_chunk",
{
{"elements_soft_constraint", 20},
{"aspect_ratio", {1, 2, 3}},
{"shape", {nullptr, 4, 5}},
}},
{"read_chunk",
{
{"elements", 30},
{"aspect_ratio", {4, 5, 6}},
{"shape_soft_constraint", {6, nullptr, 7}},
}},
{"write_chunk",
{
{"elements", 40},
{"aspect_ratio_soft_constraint", {7, 8, 9}},
{"shape", {8, 9, nullptr}},
}},
{"grid_origin", {nullptr, nullptr, 11}},
{"inner_order_soft_constraint", {2, 0, 1}},
}},
{"fill_value", 5},
{"dimension_units", {{4, "nm"}, nullptr, {30, "nm"}}},
}));
auto proto = ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: 3
dtype: "uint8"
domain {
origin: [ 1, 2, 3 ]
shape: [ 4, 4, 4 ]
labels: [ "x", "y", "z" ]
}
chunk_layout {
grid_origin: [ -9223372036854775808, -9223372036854775808, 11 ]
grid_origin_soft_constraint_bitset: 3
inner_order: [ 2, 0, 1 ]
inner_order_soft_constraint: true
write_chunk {
aspect_ratio: [ 7, 8, 9 ]
shape: [ 8, 9, 0 ]
elements: 40
aspect_ratio_soft_constraint_bitset: 7
shape_soft_constraint_bitset: 4
}
read_chunk {
shape: [ 6, 0, 7 ]
elements: 30
aspect_ratio: [ 4, 5, 6 ]
shape_soft_constraint_bitset: 7
}
codec_chunk {
elements: 20
shape: [ 0, 4, 5 ]
aspect_ratio: [ 1, 2, 3 ]
elements_soft_constraint: true
shape_soft_constraint_bitset: 1
}
}
fill_value { dtype: "uint8" void_data: "\x05" }
dimension_unit { multiplier: 4 base_unit: "nm" }
dimension_unit {}
dimension_unit { multiplier: 30 base_unit: "nm" }
)pb");
EXPECT_THAT(DoEncode(schema), EqualsProto(proto));
EXPECT_THAT(ParseSchemaFromProto(proto), testing::Eq(schema));
}
TEST(SchemaProtoTest, Empty) {
tensorstore::Schema schema;
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
)pb")),
testing::Eq(schema));
}
TEST(SchemaProtoTest, RankFromDimensionUnit) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto schema,
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: 1
dimension_unit {}
)pb")));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
dimension_unit {}
)pb")),
testing::Eq(schema));
}
TEST(SchemaProtoTest, Errors) {
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
rank: -2
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
dtype: "foo"
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(
ParseSchemaFromProto(ParseProtoOrDie<::tensorstore::proto::Schema>(R"pb(
codec: "12345"
)pb")),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/schema.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/proto/schema_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
bc5f6db5-7e35-4909-8459-f3482866bc8a | cpp | tensorflow/tensorflow | or | tensorflow/lite/experimental/shlo/ops/or.cc | tensorflow/lite/experimental/shlo/ops/or_test.cc | #include "tensorflow/lite/experimental/shlo/ops/or.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType>
struct Or : std::bit_or<void> {};
template <>
struct Or<DataType::kI1> : std::logical_or<void> {};
OrOp Create(OrOp::Attributes) { return {}; }
absl::Status Prepare(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("or"), lhs, IsBoolTensor, IsIntTensor));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("or"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("or"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(OrOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
if (IsIntTensor(lhs)) {
Or<DataType::kSI32> or_func;
DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
or_func, lhs, rhs, output);
} else if (IsBoolTensor(lhs)) {
Or<DataType::kI1> or_func;
detail::EvaluateNoQuantization<DataType::kI1>(or_func, lhs, rhs, output);
return absl::OkStatus();
}
return absl::FailedPreconditionError(
"stablehlo.or: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/or.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<OrOp> {
static std::string Get() { return "Or"; }
};
template <DataType>
struct Or : std::bit_or<void> {};
template <>
struct Or<DataType::kI1> : std::logical_or<void> {};
template <>
struct SupportedOpDataType<OrOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(Or, BinaryElementwiseOpShapePropagationTest,
OrOp, TestParamNames);
using MultipyBaselineContraintTypes = BinaryElementwiseBaselineConstraintTypes<
OrOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
Or, BinaryElementwiseSameBaselineElementTypeConstraintTest,
MultipyBaselineContraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<OrOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(Or, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct OrTest : ::testing::Test {};
TYPED_TEST_SUITE(OrTest, SupportedTypes, TestParamNames);
TYPED_TEST(OrTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
Or<TypeParam::kStorage>());
auto op = Create(OrOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/or.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/or_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
382f3e30-b23c-401e-9945-f8020eb0786a | cpp | tensorflow/tensorflow | hlo_computation | third_party/xla/xla/hlo/ir/hlo_computation.cc | third_party/xla/xla/service/hlo_computation_test.cc | #include "xla/hlo/ir/hlo_computation.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <ostream>
#include <queue>
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/map_util.h"
#include "xla/printer.h"
#include "xla/service/mapped_ptr_container_sorter.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/gtl/iterator_range.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace xla {
using absl::StrCat;
enum class VisitState { kNew = 0, kVisiting = 1, kVisited = 2 };
static std::ostream& operator<<(std::ostream& os, const VisitState& state) {
switch (state) {
case VisitState::kNew:
os << "new";
break;
case VisitState::kVisiting:
os << "visiting";
break;
case VisitState::kVisited:
os << "visited";
break;
}
return os;
}
class HloComputation::VisitMap {
public:
VisitMap() = default;
explicit VisitMap(int capacity) : size_(capacity) {
int num_words = (capacity + 31) / 32;
bits_.resize(num_words);
bit_ptr_ = bits_.empty() ? nullptr : bits_.data();
}
using Handle = uint32_t;
VisitState GetState(Handle h) const {
DCHECK_LT(h, size_);
uint32_t word = (h / 32);
uint32_t shift = (h % 32) << 1;
return static_cast<VisitState>((bit_ptr_[word] >> shift) & 0x3);
}
void SetState(Handle h, VisitState new_state) {
DCHECK_LT(h, size_);
uint32_t word = (h / 32);
uint32_t shift = (h % 32) << 1;
uint64_t mask = ~(3ull << shift);
uint64_t val = static_cast<uint64_t>(new_state);
bit_ptr_[word] = (bit_ptr_[word] & mask) | (val << shift);
}
private:
absl::InlinedVector<uint64_t, 1> bits_;
uint64_t* bit_ptr_ = nullptr;
int size_ = 0;
};
std::unique_ptr<HloComputation> HloComputation::Builder::Build(
HloInstruction* root_instruction) {
int parameter_count = 0;
for (auto& instruction : instructions_) {
if (instruction->opcode() == HloOpcode::kParameter) {
parameter_count++;
}
}
HloInstruction* root =
root_instruction ? root_instruction : last_added_instruction();
CHECK_NE(nullptr, root);
return absl::WrapUnique(
new HloComputation(name_, parameter_count, &instructions_, root));
}
// Constructs a computation that takes ownership of `instructions`.
// `parameter_count` must equal the number of kParameter instructions, whose
// parameter_number()s must densely cover [0, parameter_count).
// `root_instruction` must be one of `instructions`.
HloComputation::HloComputation(
    const std::string& name, int parameter_count,
    std::vector<std::unique_ptr<HloInstruction>>* instructions,
    HloInstruction* root_instruction)
    : unique_id_(-1),  // not yet assigned; set when added to a module
      root_instruction_(root_instruction),
      instruction_count_(0),
      name_(NameUniquer::GetSanitizedName(name)) {
  // Pre-size the parameter table; each slot must be filled exactly once.
  param_instructions_.resize(parameter_count, nullptr);
  bool root_found = false;
  for (auto& instruction : *instructions) {
    if (instruction->opcode() == HloOpcode::kParameter) {
      int64_t param_no = instruction->parameter_number();
      CHECK(param_no >= 0 && param_no < parameter_count)
          << "\nERROR: invalid parameter number. Expected [0, "
          << parameter_count << "), got " << param_no;
      CHECK(param_instructions_[param_no] == nullptr)
          << "\nERROR: parameter number " << param_no
          << " already allocated in this computation";
      param_instructions_[param_no] = instruction.get();
    }
    root_found |= instruction.get() == root_instruction_;
    AddInstructionInternal(std::move(instruction));
  }
  CHECK(root_found)
      << "\nERROR: root instruction is not present in computation.";
  root_instruction_->MarkAsRoot();
}
// Destructor: detaches from a fusion/async caller (if any), flushes pending
// removals, then deletes every remaining instruction.
HloComputation::~HloComputation() {
  if (FusionInstruction() != nullptr) {
    CHECK(FusionInstruction()->fused_instructions_computation() == this);
    FusionInstruction()->ClearCalledComputations();
  }
  if (IsAsyncComputation()) {
    CHECK(async_start_->async_wrapped_computation() == this);
    async_start_->ClearCalledComputations();
  }
  // Cleanup() deletes already-removed instructions and compacts
  // instructions_, so the loop below only sees live entries.
  Cleanup();
  for (const auto& i : instructions_) {
    delete i.inst();
  }
}
// Records the caller instruction (fusion/custom-call/etc.) together with its
// type, packed into a single word: the low bits of the (suitably aligned)
// pointer hold the InstructionType tag.
void HloComputation::SetInstruction(HloInstruction* instruction,
                                    InstructionType type) {
  // The tag bits are only free if HloInstruction's alignment covers them.
  static_assert(alignof(HloInstruction) == kInstructionTypeMask + 1,
                "HloInstruction should be aligned as a QWORD");
  DCHECK(type != InstructionType::kUnset)
      << "Set instruction must be called with a valid type, not kUnset.";
  DCHECK(instruction_type() == InstructionType::kUnset ||
         instruction_type() == type)
      << "Unexpected instruction type. Current type is "
      << static_cast<int>(instruction_type()) << " and it cannot be reset to "
      << static_cast<int>(type);
  // A null instruction cannot overwrite the existing type tag.
  if (instruction == nullptr) {
    type = instruction_type();
  }
  instruction_and_type_ =
      reinterpret_cast<uintptr_t>(instruction) | static_cast<uintptr_t>(type);
}
// Appends `instruction` to this computation, optionally renaming it first.
// Parameters must be added through AddParameter, not here.
HloInstruction* HloComputation::AddInstruction(
    std::unique_ptr<HloInstruction> instruction, absl::string_view new_name) {
  CHECK(instruction->opcode() != HloOpcode::kParameter)
      << "Parameter instructions cannot be added to a computation after "
      << "it has been built";
  const bool has_new_name = !new_name.empty();
  if (has_new_name) {
    instruction->SetAndSanitizeName(new_name);
  }
  return AddInstructionInternal(std::move(instruction));
}
// Appends `instruction`, stamping it with `metadata` when one is provided.
HloInstruction* HloComputation::AddInstruction(
    std::unique_ptr<HloInstruction> instruction, const OpMetadata* metadata) {
  if (metadata == nullptr) {
    return AddInstruction(std::move(instruction));
  }
  instruction->set_metadata(*metadata);
  return AddInstruction(std::move(instruction));
}
// Appends `instruction`, optionally stamping metadata and frontend
// attributes (either pointer may be null, meaning "leave unchanged").
HloInstruction* HloComputation::AddInstruction(
    std::unique_ptr<HloInstruction> instruction, const OpMetadata* metadata,
    const FrontendAttributes* frontend_attributes) {
  HloInstruction* raw = instruction.get();
  if (metadata != nullptr) {
    raw->set_metadata(*metadata);
  }
  if (frontend_attributes != nullptr) {
    raw->set_frontend_attributes(*frontend_attributes);
  }
  return AddInstruction(std::move(instruction));
}
// Takes ownership of `instruction`, registers it with this computation, and
// returns the raw pointer. When a parent module exists, the instruction also
// receives a uniquified name and a module-unique id.
HloInstruction* HloComputation::AddInstructionInternal(
    std::unique_ptr<HloInstruction> instruction) {
  if (parent() != nullptr) {
    instruction->UniquifyName(&parent()->instruction_name_uniquer());
    instruction->SetUniqueId(parent()->NewUniqueInstructionId());
  }
  instruction->set_parent(this);
  HloInstruction* pinst = instruction.release();
  HloInstructionInfo info;
  info.opcode_ = pinst->opcode();
  info.inst_ = pinst;
  VLOG(2) << "Adding instruction " << pinst << " " << pinst->name()
          << " from computation " << name() << " opcode " << info.opcode();
  // index_in_parent_ is the instruction's slot in instructions_; VisitMap
  // and removal both rely on it staying in sync.
  uint32_t index = instructions_.size();
  instruction_count_++;
  pinst->index_in_parent_ = index;
  instructions_.push_back(info);
  return pinst;
}
// Appends a kParameter instruction to this computation and registers it in
// the parameter table. For fusion computations the fusion caller's operand
// count must already match the parameter count.
HloInstruction* HloComputation::AddParameter(
    std::unique_ptr<HloInstruction> instruction) {
  CHECK(instruction->opcode() == HloOpcode::kParameter);
  CHECK(!IsFusionComputation() ||
        FusionInstruction()->operand_count() == param_instructions_.size());
  instruction->set_parent(this);
  param_instructions_.push_back(instruction.get());
  // AddInstructionInternal returns the same raw pointer it just stored.
  return AddInstructionInternal(std::move(instruction));
}
// Appends a new parameter to the module's entry computation (which must be
// this computation) and extends the entry computation layout accordingly.
// The new parameter's number must equal the current parameter count.
HloInstruction* HloComputation::AddEntryComputationParameter(
    std::unique_ptr<HloInstruction> instruction) {
  CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
  CHECK_EQ(instruction->parameter_number(), num_parameters());
  CHECK(parent()->entry_computation() == this);

  // Record the new parameter's layout in the module config.
  HloModuleConfig config = parent()->config();
  config.mutable_entry_computation_layout()->add_parameter_layout(
      ShapeLayout(instruction->shape()));
  parent()->set_config(config);

  instruction->set_parent(this);
  param_instructions_.push_back(instruction.get());
  return AddInstructionInternal(std::move(instruction));
}
// Replaces entry-computation parameter `param_no` with `instruction`,
// updating the module's entry layout, then force-removes `old_instruction`.
// `old_instruction` must already be dead (no users) for the removal to pass.
absl::Status HloComputation::ReplaceEntryComputationParameter(
    int64_t param_no, HloInstruction* old_instruction,
    std::unique_ptr<HloInstruction> instruction) {
  CHECK_GE(param_no, 0);
  CHECK_LT(param_no, param_instructions_.size());
  CHECK_EQ(instruction->opcode(), HloOpcode::kParameter);
  CHECK(parent()->entry_computation() == this);
  // Keep the entry layout in sync with the new parameter's shape.
  HloModuleConfig config = parent()->config();
  *config.mutable_entry_computation_layout()->mutable_parameter_layout(
      param_no) = ShapeLayout(instruction->shape());
  parent()->set_config(config);
  instruction->set_parent(this);
  param_instructions_[param_no] = instruction.get();
  AddInstructionInternal(std::move(instruction));
  return ForceRemoveInstruction(old_instruction);
}
// Removes parameter `param_no` (which must be dead) and renumbers all later
// parameters down by one so parameter numbers stay dense.
absl::Status HloComputation::RemoveParameter(int64_t param_no) {
  CHECK_GE(param_no, 0);
  CHECK_LT(param_no, param_instructions_.size());
  HloInstruction* param_instruction = param_instructions_[param_no];
  auto param_instruction_iterator = param_instructions_.begin() + param_no;
  param_instructions_.erase(param_instruction_iterator);
  TF_RETURN_IF_ERROR(ForceRemoveInstruction(param_instruction));
  // Each subsequent parameter is replaced by a freshly created parameter
  // carrying the decremented number; users are redirected to the new one.
  while (param_no < param_instructions_.size()) {
    param_instruction = param_instructions_[param_no];
    HloInstruction* new_instr =
        AddInstructionInternal(HloInstruction::CreateParameter(
            param_no, param_instruction->shape(), StrCat("param_", param_no)));
    TF_RETURN_IF_ERROR(param_instruction->ReplaceAllUsesWith(new_instr));
    param_instructions_[param_no] = new_instr;
    TF_RETURN_IF_ERROR(ForceRemoveInstruction(param_instruction));
    param_no++;
  }
  return absl::OkStatus();
}
// Replaces parameter `param_no` with `instruction` (also a kParameter),
// rerouting all users — shape differences are allowed — and force-removes
// the old parameter. Returns the new parameter instruction.
HloInstruction* HloComputation::ReplaceParameter(
    int64_t param_no, std::unique_ptr<HloInstruction> instruction) {
  CHECK_GE(param_no, 0);
  CHECK_LT(param_no, param_instructions_.size());
  CHECK(instruction->opcode() == HloOpcode::kParameter);
  CHECK(!IsFusionComputation() ||
        FusionInstruction()->operand_count() == param_instructions_.size());

  instruction->set_parent(this);
  HloInstruction* new_instruction =
      AddInstructionInternal(std::move(instruction));
  HloInstruction* old_instruction = param_instructions_[param_no];
  // Users are redirected before the table entry is swapped and the old
  // parameter removed.
  TF_CHECK_OK(
      old_instruction->ReplaceAllUsesWithDifferentShape(new_instruction));
  param_instructions_[param_no] = new_instruction;
  TF_CHECK_OK(ForceRemoveInstruction(old_instruction));
  return new_instruction;
}
// Removes dead parameters; restricted to fusion computations.
absl::Status HloComputation::RemoveUnusedParametersFromFusedComputation() {
  return RemoveUnusedParametersImpl(false);
}
// Removes dead parameters from any computation, fusion or not.
absl::Status HloComputation::RemoveUnusedParametersFromAnyComputation() {
  return RemoveUnusedParametersImpl(true);
}
// Removes every dead parameter and compacts the remaining parameter numbers
// to stay dense. `allow_non_fusion` relaxes the fusion-only restriction.
absl::Status HloComputation::RemoveUnusedParametersImpl(bool allow_non_fusion) {
  CHECK(allow_non_fusion || IsFusionComputation());
  int64_t removed = 0;
  for (int64_t i = 0; i < param_instructions_.size(); ++i) {
    HloInstruction* param_instruction = param_instructions_[i];
    if (param_instruction->IsDead()) {
      TF_RETURN_IF_ERROR(
          RemoveInstructionImpl(param_instruction, allow_non_fusion));
      ++removed;
      continue;
    }
    // A live parameter following at least one removal must shift down by
    // `removed` slots: create a renumbered replacement and reroute users.
    if (removed > 0) {
      const int64_t param_no = i - removed;
      HloInstruction* new_instr = AddInstructionInternal(
          HloInstruction::CreateParameter(param_no, param_instruction->shape(),
                                          StrCat("param_", param_no)));
      TF_RETURN_IF_ERROR(param_instruction->ReplaceAllUsesWith(new_instr));
      param_instructions_[param_no] = new_instr;
      TF_RETURN_IF_ERROR(
          RemoveInstructionImpl(param_instruction, allow_non_fusion));
    }
  }
  // Drop the now-unused tail slots.
  param_instructions_.resize(param_instructions_.size() - removed);
  return absl::OkStatus();
}
// Returns true if `instruction` may be removed from this computation:
// control edges pin it in place (unless explicitly ignored), and parameters
// may only be removed from fusion computations.
bool HloComputation::IsSafelyRemovable(const HloInstruction* instruction,
                                       bool ignore_control_dependency) {
  if (instruction->HasControlDependencies() && !ignore_control_dependency) {
    return false;
  }
  const bool is_parameter = instruction->opcode() == HloOpcode::kParameter;
  return !(is_parameter && !IsFusionComputation());
}
bool HloComputation::HasSideEffect() const {
for (auto* instruction : instructions()) {
if (instruction->HasSideEffect()) {
return true;
}
}
return false;
}
// Returns true if `inst` has been removed and is awaiting deletion.
bool HloComputation::IsMarkedAsDead(const HloInstruction* inst) {
  return inst->IsMarkedAsDead();
}
// Removes `instruction` (which must be dead and not the root) together with
// any operands that become dead as a result, transitively. `cleanup` is
// invoked on each instruction just before it is removed. Parameters are
// collected first and removed last, highest parameter number first, so that
// RemoveParameter's renumbering stays consistent.
absl::Status HloComputation::RemoveInstructionAndUnusedOperands(
    HloInstruction* instruction,
    std::optional<absl::FunctionRef<void(HloInstruction*)>> cleanup,
    bool ignore_control_dependencies) {
  TF_RET_CHECK(root_instruction() != instruction);
  TF_RET_CHECK(instruction->IsDead());
  TF_RET_CHECK(IsSafelyRemovable(instruction, ignore_control_dependencies))
      << "Cannot remove instruction: " << instruction->ToString();
  absl::flat_hash_set<HloInstruction*> removed;
  std::queue<HloInstruction*> worklist;
  worklist.push(instruction);
  std::vector<HloInstruction*> parameters_to_be_removed;
  while (!worklist.empty()) {
    HloInstruction* item = worklist.front();
    worklist.pop();
    // Skip anything already handled, still alive, pinned in place, or
    // side-effecting (side effects are only overridden for the initial
    // instruction the caller explicitly asked to remove).
    if (removed.contains(item) || !item->IsDead() ||
        !IsSafelyRemovable(item, ignore_control_dependencies) ||
        (item->HasSideEffect() && item != instruction)) {
      continue;
    }
    if (ignore_control_dependencies) {
      TF_RETURN_IF_ERROR(item->SafelyDropAllControlDependencies());
    } else if (item->HasControlDependencies()) {
      continue;
    }
    // Operands may become dead once `item` is gone; revisit them.
    for (int i = 0; i < item->operand_count(); ++i) {
      worklist.push(item->mutable_operand(i));
    }
    if (cleanup != std::nullopt) {
      (*cleanup)(item);
    }
    if (item->opcode() == HloOpcode::kParameter) {
      parameters_to_be_removed.push_back(item);
    } else {
      TF_RETURN_IF_ERROR(RemoveInstruction(item));
    }
    removed.insert(item);
  }
  // Remove parameters in descending number order so earlier removals do not
  // shift the numbers of parameters still pending removal.
  std::sort(parameters_to_be_removed.begin(), parameters_to_be_removed.end(),
            [](HloInstruction* a, HloInstruction* b) {
              return a->parameter_number() > b->parameter_number();
            });
  for (HloInstruction* param : parameters_to_be_removed) {
    int64_t parameter_number = param->parameter_number();
    TF_RETURN_IF_ERROR(RemoveParameter(parameter_number));
    // For a fusion computation, also drop the caller's matching operand and
    // recursively clean it up in the caller's computation if it died.
    if (FusionInstruction() != nullptr) {
      auto operand = FusionInstruction()->mutable_operand(parameter_number);
      FusionInstruction()->RemoveOperandAt(parameter_number);
      FusionInstruction()->DetachFrom(operand);
      if (operand->IsDead() && operand->parent()->IsSafelyRemovable(
                                   operand, ignore_control_dependencies)) {
        TF_RETURN_IF_ERROR(
            operand->parent()->RemoveInstructionAndUnusedOperands(
                operand, cleanup, ignore_control_dependencies));
      }
    }
  }
  return absl::OkStatus();
}
// Removes `instruction` with the standard safety checks enforced.
absl::Status HloComputation::RemoveInstruction(HloInstruction* instruction) {
  return RemoveInstructionImpl(instruction, false);
}
// Removes `instruction`, bypassing the IsSafelyRemovable check (the
// instruction must still be dead and free of control edges).
absl::Status HloComputation::ForceRemoveInstruction(
    HloInstruction* instruction) {
  return RemoveInstructionImpl(instruction, true);
}
// Detaches `instruction` from this computation and queues it for deletion.
// The instruction must be dead (no users) and have no control edges; actual
// memory reclamation and compaction of instructions_ happen in Cleanup().
absl::Status HloComputation::RemoveInstructionImpl(HloInstruction* instruction,
                                                   bool ignore_safety_check) {
  VLOG(2) << "Removing instruction " << instruction << " "
          << instruction->name() << " from computation " << name();
  TF_RET_CHECK(ignore_safety_check || IsSafelyRemovable(instruction))
      << "cannot remove instruction: " << instruction->ToString();
  TF_RET_CHECK(instruction->IsDead()) << "instruction " << instruction->name()
                                      << " is live and cannot be removed";
  TF_RET_CHECK(instruction->control_predecessors().empty())
      << "instruction " << instruction->name()
      << " has control predecessors and cannot be removed";
  TF_RET_CHECK(instruction->control_successors().empty())
      << "instruction " << instruction->name()
      << " has control successors and cannot be removed";
  HloInstructionInfo* info = &instructions_[instruction->index_in_parent_];
  DCHECK_EQ(info->inst(), instruction);
  info->inst()->set_parent(nullptr);
  to_be_deleted_.push_back(info->inst());
  to_be_deleted_.back()->DetachFromOperandsAndUsers();
  // Clear all operands to avoid Null operands.
  to_be_deleted_.back()->RemoveAllOperands();
  to_be_deleted_.back()->ClearCalledComputations();
  to_be_deleted_.back()->MarkAsDead();
  // A null slot marks the entry for removal; Cleanup() compacts it away.
  info->inst_ =
      nullptr;
  instruction->index_in_parent_ = ~0u;
  instruction_count_--;
  // instructions_ still holds the null slots until Cleanup() runs.
  DCHECK_EQ(instructions_.size() - to_be_deleted_.size(), instruction_count())
      << "instructions_.size(): " << instructions_.size()
      << ", to_be_deleted_.size(): " << to_be_deleted_.size();
  return absl::OkStatus();
}
// Deletes instructions queued by RemoveInstructionImpl and compacts
// instructions_ so the live entries are contiguous, fixing up each moved
// instruction's index_in_parent_.
void HloComputation::Cleanup() {
  if (to_be_deleted_.empty()) return;
  // Given that there are instructions to be deleted, there must be at least
  // one instruction not marked for deletion; otherwise the computation would
  // have no live entries left.
  DCHECK_GT(instruction_count(), 0);
  auto is_marked_for_removal = [](const HloInstructionInfo& info) {
    return info.inst() == nullptr;
  };
  // Find the first hole, then slide every later live entry down into it
  // (a stable partition performed in place).
  auto marked_it = absl::c_find_if(instructions_, is_marked_for_removal);
  DCHECK(marked_it < instructions_.end());
  for (auto it = marked_it + 1; it < instructions_.end(); ++it) {
    if (is_marked_for_removal(*it)) continue;
    HloInstruction* unmarked_instruction = it->inst();
    unmarked_instruction->index_in_parent_ =
        std::distance(instructions_.begin(), marked_it);
    *marked_it++ = std::move(*it);
  }
  DCHECK(marked_it < instructions_.end());
  DCHECK_EQ(std::distance(marked_it, instructions_.end()),
            to_be_deleted_.size());
  DCHECK_EQ(instructions_.size() - to_be_deleted_.size(), instruction_count())
      << "instructions_.size(): " << instructions_.size()
      << ", to_be_deleted_.size(): " << to_be_deleted_.size();
  // Finally reclaim the removed instructions and trim the tail holes.
  for (HloInstruction* marked_instruction : to_be_deleted_) {
    delete marked_instruction;
  }
  to_be_deleted_.clear();
  instructions_.resize(instruction_count());
}
// Makes `new_root_instruction` (which must already belong to this
// computation) the root. Unless `accept_different_shape` is set, non-fusion
// computations require the new root's shape to be compatible with the old
// root's shape.
void HloComputation::set_root_instruction(HloInstruction* new_root_instruction,
                                          bool accept_different_shape) {
  if (!IsFusionComputation() && !accept_different_shape) {
    CHECK(ShapeUtil::Compatible(new_root_instruction->shape(),
                                root_instruction_->shape()))
        << new_root_instruction->shape() << " is incompatible with "
        << root_instruction_->shape();
  }
  bool root_found = false;
  for (auto& instruction : instructions_) {
    if (new_root_instruction == instruction.get()) {
      root_found = true;
      break;
    }
  }
  DCHECK(root_found);
  // Changing the entry root's shape invalidates the module's input/output
  // aliasing, so it is reset to a fresh config for the new shape.
  if (parent() && parent()->has_entry_computation() &&
      parent()->entry_computation() == this) {
    if (!Shape::Equal().IgnoreLayout()(new_root_instruction->shape(),
                                       root_instruction_->shape())) {
      parent()->input_output_alias_config() =
          HloInputOutputAliasConfig(new_root_instruction->shape());
    }
  }
  root_instruction_->MarkAsNonRoot();
  new_root_instruction->MarkAsRoot();
  root_instruction_ = new_root_instruction;
}
// Appends the post order rooted at `root` to `post_order`, sharing the DFS
// state in `visited` / `dfs_stack_scratch` across calls.
void HloComputation::ComputeInstructionPostOrder(
    HloInstruction* root, const ChannelDependencies& channel_dependencies,
    VisitMap& visited, std::vector<HloInstruction*>& post_order,
    std::vector<HloInstruction*>* dfs_stack_scratch) const {
  auto append = [&post_order](HloInstruction* instruction) {
    post_order.push_back(instruction);
  };
  ForEachInstructionPostOrderImpl(append, root, channel_dependencies, visited,
                                  dfs_stack_scratch);
}
// Iterative DFS that invokes `func` on each instruction reachable from
// `root` in post order (operands and control predecessors before users).
// Each instruction is pushed twice conceptually: first marked kVisiting with
// its dependencies pushed, then emitted and marked kVisited when popped
// again. `channel_dependencies` injects extra edges between instructions
// sharing a channel id.
void HloComputation::ForEachInstructionPostOrderImpl(
    absl::FunctionRef<void(HloInstruction*)> func, HloInstruction* root,
    const ChannelDependencies& channel_dependencies, VisitMap& visited,
    std::vector<HloInstruction*>* dfs_stack_scratch) const {
  bool has_channel_dependencies = !channel_dependencies.empty();
  auto* dfs_stack = dfs_stack_scratch;
  dfs_stack->clear();
  // Pushing already-visited nodes is skipped up front to keep the stack
  // small.
  auto dfs_stack_push = [&](HloInstruction* instr) {
    VisitState state = visited.GetState(instr->index_in_parent_);
    if (state != VisitState::kVisited) dfs_stack->push_back(instr);
  };
  dfs_stack_push(root);
  while (!dfs_stack->empty()) {
    HloInstruction* current = dfs_stack->back();
    DCHECK_EQ(current->parent(), this)
        << "Instruction " << current->name()
        << " is not in the current computation (" << name() << ").";
    VisitMap::Handle h = current->index_in_parent_;
    VisitState state = visited.GetState(h);
    if (state == VisitState::kNew) {
      // First encounter: keep it on the stack and descend into deps below.
      visited.SetState(h, VisitState::kVisiting);
    } else {
      // Second encounter (or a duplicate stack entry): emit once.
      dfs_stack->pop_back();
      if (state != VisitState::kVisited) {
        visited.SetState(h, VisitState::kVisited);
        func(current);
      }
      continue;
    }
    if (has_channel_dependencies && current != root) {
      auto it = channel_dependencies.find(current);
      if (it != channel_dependencies.end()) {
        absl::c_for_each(it->second, dfs_stack_push);
      }
    }
    // Push operands in reverse so the first operand is processed first.
    const HloInstruction::InstructionVector& operands = current->operands();
    absl::c_for_each(tsl::gtl::make_range(operands.rbegin(), operands.rend()),
                     dfs_stack_push);
    absl::c_for_each(current->control_predecessors(), dfs_stack_push);
  }
}
// Builds extra ordering edges between collective instructions that share a
// channel id: each member of a channel group depends on every member that
// appeared before it. Returns an empty map when the module configuration
// makes channel ordering unnecessary (single computation or SPMD).
HloComputation::ChannelDependencies HloComputation::ComputeChannelDependencies()
    const {
  if (parent() && parent()->config().has_static_device_assignment() &&
      (parent()->config().static_device_assignment().computation_count() == 1 ||
       parent()->config().use_spmd_partitioning())) {
    return {};
  }
  using Instructions = absl::InlinedVector<HloInstruction*, 1>;
  absl::flat_hash_map<int64_t, Instructions> channel_groups;
  ChannelDependencies dependencies;
  for (const auto& inst : instructions_with_info()) {
    switch (inst.opcode()) {
      case HloOpcode::kAllReduce:
      case HloOpcode::kAllGather:
      case HloOpcode::kAllToAll:
      case HloOpcode::kCollectiveBroadcast:
      case HloOpcode::kCollectivePermute:
      case HloOpcode::kReduceScatter: {
        HloInstruction* instruction = inst.inst();
        std::optional<int64_t> channel_id = instruction->channel_id();
        if (channel_id) {
          Instructions& group = channel_groups[*channel_id];
          // Symmetric bookkeeping: earlier members gain this instruction as
          // a dependent, and this instruction depends on all earlier ones.
          for (const HloInstruction* group_inst : group) {
            dependencies[group_inst].push_back(instruction);
          }
          dependencies[instruction] = group;
          group.push_back(instruction);
        }
        break;
      }
      default:
        break;
    }
  }
  return dependencies;
}
// Returns the post order of the instructions reachable from
// `postorder_root` only (not necessarily the whole computation).
std::vector<HloInstruction*> HloComputation::MakeInstructionPostOrderFrom(
    HloInstruction& postorder_root) const {
  std::vector<HloInstruction*> result;
  VisitMap visit_state(instructions_.size());
  std::vector<HloInstruction*> stack_scratch;
  ComputeInstructionPostOrder(&postorder_root, ComputeChannelDependencies(),
                              visit_state, result, &stack_scratch);
  return result;
}
// Returns all instructions in post order (operands before users),
// respecting channel dependencies.
std::vector<HloInstruction*> HloComputation::MakeInstructionPostOrder() const {
  return MakeInstructionPostOrder(ComputeChannelDependencies());
}
// Returns all instructions in post order using the supplied channel
// dependency edges. The DFS is seeded from every user-less instruction so
// the traversal covers the whole computation, not just the root's cone.
std::vector<HloInstruction*> HloComputation::MakeInstructionPostOrder(
    const ChannelDependencies& channel_dependencies) const {
  std::vector<HloInstruction*> post_order;
  post_order.reserve(instruction_count());
  VisitMap visited(instructions_.size());
  std::vector<HloInstruction*> dfs_stack_scratch;
  dfs_stack_scratch.reserve(instruction_count());
  for (const auto& instruction : instructions()) {
    if (instruction->users().empty()) {
      ComputeInstructionPostOrder(instruction, channel_dependencies, visited,
                                  post_order, &dfs_stack_scratch);
    }
  }
  CHECK_EQ(instruction_count(), post_order.size())
      << "number of instructions does not match post order size";
  return post_order;
}
// Returns a post order in which, at each step of the (reverse) topological
// sort, non-reshape instructions are preferred over reshapes — i.e. reshapes
// are scheduled as early as possible in the final order. Implemented as a
// Kahn-style reverse traversal from user-less instructions, counting visits
// per operand.
std::vector<HloInstruction*>
HloComputation::MakeInstructionPostOrderWithReshapeFirst() const {
  std::vector<HloInstruction*> frontier_std;
  std::vector<HloInstruction*> frontier_reshapes;
  std::vector<HloInstruction*> sorted;
  // Maps unique_id -> number of users already emitted.
  absl::flat_hash_map<int, uint32_t> visitations;
  sorted.reserve(instruction_count());
  visitations.reserve(instruction_count());
  // Pops from the non-reshape frontier first; reshapes are deferred so they
  // end up earlier after the final reversal.
  auto pop_frontier_element = [&frontier_std, &frontier_reshapes]() mutable {
    if (!frontier_std.empty()) {
      HloInstruction* const to_return = frontier_std.back();
      frontier_std.pop_back();
      return to_return;
    }
    if (!frontier_reshapes.empty()) {
      HloInstruction* const to_return = frontier_reshapes.back();
      frontier_reshapes.pop_back();
      return to_return;
    }
    return static_cast<HloInstruction*>(nullptr);
  };
  auto add_to_frontier = [&frontier_std, &frontier_reshapes](
                             HloInstruction* const instruction_to_add) mutable {
    if (instruction_to_add->opcode() == HloOpcode::kReshape) {
      frontier_reshapes.push_back(instruction_to_add);
    } else {
      frontier_std.push_back(instruction_to_add);
    }
  };
  // Seed with every instruction that has no users; the root must be among
  // them.
  bool found_root_instruction = false;
  for (HloInstruction* const inst : instructions()) {
    if (inst->user_count() == 0) {
      if (inst == root_instruction()) {
        found_root_instruction = true;
      }
      add_to_frontier(inst);
    }
  }
  CHECK(found_root_instruction);
  while (HloInstruction* const inst = pop_frontier_element()) {
    sorted.push_back(inst);
    for (HloInstruction* const child : inst->operands()) {
      // A child becomes ready once all of its users have been emitted.
      visitations[child->unique_id()]++;
      if (child->user_count() == visitations[child->unique_id()]) {
        add_to_frontier(child);
      }
    }
  }
  // The traversal emitted users before operands; reverse into post order.
  std::reverse(sorted.begin(), sorted.end());
  CHECK_EQ(sorted.size(), instruction_count());
  return sorted;
}
// Invokes `func` on every instruction in post order without materializing
// the order. DFS state is shared across roots so each instruction is
// emitted exactly once.
void HloComputation::ForEachInstructionPostOrder(
    absl::FunctionRef<void(HloInstruction*)> func) const {
  VisitMap visited(instructions_.size());
  std::vector<HloInstruction*> stack_scratch;
  stack_scratch.reserve(instruction_count());
  const auto channel_dependencies = ComputeChannelDependencies();
  for (auto* instruction : instructions()) {
    if (!instruction->users().empty()) continue;  // only seed from sinks
    ForEachInstructionPostOrderImpl(func, instruction, channel_dependencies,
                                    visited, &stack_scratch);
  }
}
// Returns every computation transitively called from this one, in post
// order (callees before callers). Uses an explicit stack of
// (computation, next-instruction iterator) pairs instead of recursion.
std::vector<HloComputation*> HloComputation::MakeEmbeddedComputationsList()
    const {
  absl::flat_hash_set<HloComputation*> visited;
  std::vector<HloComputation*> post_order;
  // The second element is the iterator to the next instruction of the
  // computation still to be scanned for called computations.
  using ComputationIter =
      std::pair<HloComputation*, InstructionList::const_iterator>;
  std::stack<ComputationIter, absl::InlinedVector<ComputationIter, 8>> st;
  for (const HloInstructionInfo& instruction : instructions_with_info()) {
    using PtrVec = PtrVec<HloComputation*>;
    // Pushes not-yet-visited callees onto the stack, in reverse so the
    // first callee is processed first.
    auto process_called_computations = [&](const PtrVec& called_computations) {
      if (called_computations.empty()) return;
      std::reverse_iterator<PtrVec::const_iterator> i(
          called_computations.end());
      std::reverse_iterator<PtrVec::const_iterator> rend(
          called_computations.begin());
      for (; i != rend; ++i) {
        HloComputation* called_computation = *i;
        if (visited.insert(called_computation).second) {
          st.emplace(called_computation,
                     called_computation->instructions_.cbegin());
        }
      }
    };
    process_called_computations(instruction->called_computations());
    while (!st.empty()) {
      auto& cur = st.top();
      HloComputation* computation = cur.first;
      if (cur.second == computation->instructions_.cend()) {
        // All instructions scanned: all callees are emitted, emit this one.
        st.pop();
        post_order.push_back(computation);
      } else {
        if (cur.second->inst() == nullptr) {
          // Null slots are instructions pending Cleanup(); skip them.
          ++cur.second;
        } else {
          HloOpcode opcode = cur.second->opcode();
          HloInstruction* next_instruction = cur.second->get();
          ++cur.second;
          if (HloInstruction::MightHaveCalledComputations(opcode)) {
            process_called_computations(
                next_instruction->called_computations());
          } else {
            DCHECK(next_instruction->called_computations().empty());
          }
        }
      }
    }
  }
  return post_order;
}
// Prints this computation with the default (post-order) instruction order.
void HloComputation::Print(Printer* printer,
                           const HloPrintOptions& options) const {
  Print(printer, options, {});
}
// Prints this computation to `printer`. When `instruction_order` is
// non-empty it must contain exactly this computation's instructions and
// dictates the printing order; otherwise a post order is used.
void HloComputation::Print(
    Printer* printer, const HloPrintOptions& options,
    absl::Span<const HloInstruction* const> instruction_order) const {
  if (!instruction_order.empty()) {
    CHECK_EQ(instruction_order.size(), instruction_count());
  }
  const std::string tab(2 * options.indent_amount(), ' ');
  printer->Append(tab);
  if (!options.is_in_nested_computation()) {
    if (options.print_percent()) {
      printer->Append("%");
    }
    if (options.print_ids()) {
      printer->Append(name());
      printer->Append(" ");
    }
  }
  if (options.print_program_shape()) {
    ShapeUtil::PrintHumanString(printer,
                                ComputeProgramShape(options.print_ids()));
    printer->Append(" ");
  }
  printer->Append("{\n");
  {
    // Instructions are printed one indent level deeper, against a shared
    // canonical-name map.
    HloPrintOptions new_options =
        HloPrintOptions(options)
            .set_indent_amount(options.indent_amount() + 1)
            .set_is_in_nested_computation(true);
    CanonicalNameMap name_map;
    name_map.Reserve(instruction_count());
    auto print_one = [&](const HloInstruction* instruction) {
      DCHECK_EQ(this, instruction->parent());
      printer->Append(tab);
      printer->Append(" ");
      if (instruction == root_instruction_) {
        printer->Append("ROOT ");
      }
      instruction->PrintWithCanonicalNameMap(printer, new_options, &name_map);
      printer->Append("\n");
    };
    if (instruction_order.empty()) {
      ForEachInstructionPostOrder(print_one);
    } else {
      for (const HloInstruction* const instruction : instruction_order) {
        print_one(instruction);
      }
    }
  }
  printer->Append(tab);
  printer->Append("}");
  if (options.print_ids() && !IsMainThread()) {
    // Only non-main execution threads are annotated explicitly.
    printer->Append(", execution_thread=\"");
    printer->Append(execution_thread());
    printer->Append("\"");
  }
  if (options.print_name_after_closing_brace() && instruction_count() > 5) {
    // Fix: the original line held an unterminated string literal
    // (`printer->Append("`) which cannot compile; restore the " // <name>"
    // trailer appended after the closing brace.
    printer->Append(" // ");
    printer->Append(name());
  }
}
// Renders this computation using the default print options.
std::string HloComputation::ToString() const {
  return ToString(HloPrintOptions::Default());
}
// Renders this computation with `options`, in instruction post order.
std::string HloComputation::ToString(const HloPrintOptions& options) const {
  return ToString(options, MakeInstructionPostOrder());
}
std::string HloComputation::ToString(
const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const {
StringPrinter printer;
Print(&printer, options, instruction_order);
return std::move(printer).ToString();
}
// Same rendering as ToString, but accumulated into an absl::Cord.
absl::Cord HloComputation::ToCord(
    const HloPrintOptions& options,
    absl::Span<const HloInstruction* const> instruction_order) const {
  CordPrinter cord_printer;
  Print(&cord_printer, options, instruction_order);
  return std::move(cord_printer).ToCord();
}
// Serializes this computation to an HloComputationProto. The computation
// must already belong to a module (unique_id_ assigned); instructions are
// emitted in post order.
HloComputationProto HloComputation::ToProto() const {
  HloComputationProto proto;
  CHECK(unique_id_ != -1)
      << "This computation does not have a valid id. Please make sure the "
         "computation is inside a module before dumping it.";
  proto.set_id(unique_id_);
  proto.set_name(name_);
  for (const HloInstruction* instruction : MakeInstructionPostOrder()) {
    HloInstructionProto instruction_proto = instruction->ToProto();
    // Swap avoids copying the (potentially large) instruction proto.
    proto.add_instructions()->Swap(&instruction_proto);
  }
  proto.set_root_id(root_instruction()->unique_id());
  *proto.mutable_program_shape() = ComputeProgramShape().ToProto();
  proto.set_is_fusion_computation(IsFusionComputation());
  // The main thread is encoded as the empty string.
  proto.set_execution_thread(IsMainThread() ? ""
                                            : std::string(execution_thread()));
  return proto;
}
// Deserializes a computation from `proto`. `computation_map` resolves ids
// of computations referenced by the instructions; validation rejects
// duplicate instruction ids, a missing/invalid root id, and parameter
// numbers that do not densely cover [0, parameter_count).
absl::StatusOr<std::unique_ptr<HloComputation>>
HloComputation::CreateFromProto(
    const HloComputationProto& proto,
    const absl::flat_hash_map<int64_t, HloComputation*>& computation_map,
    bool prohibit_empty_literal) {
  absl::flat_hash_map<int64_t, HloInstruction*> instruction_map;
  absl::flat_hash_map<HloInstruction*, int64_t> to_proto_id;
  std::vector<std::unique_ptr<HloInstruction>> instructions;
  int64_t parameter_count = 0;
  for (const HloInstructionProto& instruction_proto : proto.instructions()) {
    TF_ASSIGN_OR_RETURN(std::unique_ptr<HloInstruction> instruction,
                        HloInstruction::CreateFromProto(
                            instruction_proto, instruction_map, computation_map,
                            prohibit_empty_literal));
    if (instruction->opcode() == HloOpcode::kParameter) {
      parameter_count++;
    }
    TF_RET_CHECK(!ContainsKey(instruction_map, instruction_proto.id()));
    instruction_map[instruction_proto.id()] = instruction.get();
    to_proto_id[instruction.get()] = instruction_proto.id();
    instructions.push_back(std::move(instruction));
  }
  TF_RET_CHECK(proto.root_id() != -1);
  TF_RET_CHECK(ContainsKey(instruction_map, proto.root_id()));
  HloInstruction* root = instruction_map.at(proto.root_id());
  // Restore the original proto ordering, which the map-based construction
  // above does not preserve.
  absl::c_sort(instructions, [&](const std::unique_ptr<HloInstruction>& a,
                                 const std::unique_ptr<HloInstruction>& b) {
    return to_proto_id[a.get()] < to_proto_id[b.get()];
  });
  // Verify parameter numbers form a dense, duplicate-free range.
  TF_RETURN_IF_ERROR([&]() -> absl::Status {
    std::vector<bool> parameters_seen(parameter_count);
    int parameters_seen_count = 0;
    for (auto& instruction : instructions) {
      if (instruction->opcode() == HloOpcode::kParameter) {
        int64_t param_no = instruction->parameter_number();
        TF_RET_CHECK(param_no >= 0 && param_no < parameter_count)
            << "Invalid parameter number. Expected [0, " << parameter_count
            << "), got " << param_no;
        TF_RET_CHECK(!parameters_seen[param_no])
            << "Parameter number " << param_no
            << " already allocated in this computation";
        parameters_seen[param_no] = true;
        parameters_seen_count++;
      }
    }
    TF_RET_CHECK(parameters_seen_count == parameter_count)
        << "Not all parameters in range [0, " << parameter_count
        << ") were referenced";
    return absl::OkStatus();
  }());
  auto computation = absl::WrapUnique(
      new HloComputation(proto.name(), parameter_count, &instructions, root));
  computation->unique_id_ = proto.id();
  if (proto.is_fusion_computation()) {
    // The caller pointer is filled in later; only the type tag is known now.
    computation->instruction_and_type_ =
        static_cast<uintptr_t>(InstructionType::kFusion);
  }
  if (!proto.execution_thread().empty()) {
    computation->SetExecutionThread(proto.execution_thread());
  }
  return std::move(computation);
}
// Moves `instructions_to_append` out of this computation and into the
// computation called by `caller` (a fusion/call instruction). The first
// element is the root of the extracted set: its users (and root status, and
// control edges) transfer to `caller`; the remaining elements are appended
// individually and removed here once dead.
void HloComputation::AppendInstructionsIntoCalledComputation(
    absl::Span<HloInstruction* const> instructions_to_append,
    HloInstruction* caller) {
  HloInstruction* root = instructions_to_append.front();
  TF_CHECK_OK(caller->CopyAllControlDepsFrom(root));
  TF_CHECK_OK(root->DropAllControlDeps());
  TF_CHECK_OK(root->ReplaceAllUsesWith(caller));
  if (root == root_instruction()) {
    set_root_instruction(caller);
  }
  TF_CHECK_OK(RemoveInstruction(root));
  for (size_t i = 1; i < instructions_to_append.size(); ++i) {
    HloInstruction* instruction = instructions_to_append[i];
    caller->AppendInstructionIntoCalledComputation(instruction);
    if (instruction->IsDead()) {
      TF_CHECK_OK(RemoveInstruction(instruction));
    }
  }
}
// Replaces `instructions_to_fuse` (first element = fused root) with a
// single fusion instruction of kind `fusion_kind`, returning it.
HloInstruction* HloComputation::CreateFusionInstruction(
    absl::Span<HloInstruction* const> instructions_to_fuse,
    HloInstruction::FusionKind fusion_kind) {
  HloInstruction* fused_root = instructions_to_fuse.front();
  HloInstruction* fusion = AddInstruction(HloInstruction::CreateFusion(
      fused_root->shape(), fusion_kind, fused_root));
  AppendInstructionsIntoCalledComputation(instructions_to_fuse, fusion);
  return fusion;
}
// Replaces `instructions_to_call` (first element = called root) with a
// single call instruction carrying the root's name, returning it.
HloInstruction* HloComputation::CreateCallInstruction(
    absl::Span<HloInstruction* const> instructions_to_call) {
  HloInstruction* called_root = instructions_to_call.front();
  HloInstruction* call = AddInstruction(
      HloInstruction::CreateCall(called_root->shape(), called_root),
      called_root->name());
  AppendInstructionsIntoCalledComputation(instructions_to_call, call);
  return call;
}
// Replaces `instructions_to_call` (first element = called root) with a
// composite-call instruction carrying the given composite name, attributes,
// and version, returning it.
HloInstruction* HloComputation::CreateCompositeCallInstruction(
    absl::Span<HloInstruction* const> instructions_to_call,
    const std::string& name, const std::string& attributes, int64_t version) {
  HloInstruction* called_root = instructions_to_call.front();
  HloInstruction* composite_call =
      AddInstruction(HloInstruction::CreateCompositeCall(
                         called_root->shape(), called_root, name, attributes,
                         version),
                     called_root->name());
  AppendInstructionsIntoCalledComputation(instructions_to_call,
                                          composite_call);
  return composite_call;
}
// Wraps `instruction` in an async-start/async-done pair. Copies get the
// dedicated copy-start/copy-done form; everything else is cloned into a new
// "async_computation" wrapped by async-start/async-done. `context_shapes`
// are appended to the start instruction's shape. Control edges move to the
// pair; when `replace` is set, `instruction` is replaced by async-done.
// Returns the async-done instruction.
absl::StatusOr<HloInstruction*> HloComputation::CreateAsyncInstructions(
    HloInstruction* instruction, absl::Span<const Shape> context_shapes,
    absl::string_view async_execution_thread, bool replace,
    bool override_names) {
  HloInstruction* async_start;
  HloInstruction* async_done;
  if (instruction->opcode() == HloOpcode::kCopy) {
    // Copy uses copy-start/copy-done; its tuple shape is
    // (destination, source, context...).
    std::vector<Shape> context_shapes_tuple;
    context_shapes_tuple.reserve(context_shapes.size() + 2);
    Shape instruction_shape_destination = instruction->shape();
    context_shapes_tuple.push_back(instruction_shape_destination);
    Shape instruction_shape_source = instruction->operand(0)->shape();
    context_shapes_tuple.push_back(instruction_shape_source);
    context_shapes_tuple.insert(context_shapes_tuple.end(),
                                context_shapes.begin(), context_shapes.end());
    async_start = AddInstruction(HloInstruction::CreateCopyStart(
        ShapeUtil::MakeTupleShape(context_shapes_tuple),
        instruction->mutable_operand(0)));
    async_done = AddInstruction(HloInstruction::CreateUnary(
        instruction_shape_destination, HloOpcode::kCopyDone, async_start));
  } else {
    // General case: clone `instruction` into a fresh computation whose
    // parameters mirror the operands.
    Builder builder("async_computation");
    std::vector<HloInstruction*> parameters(instruction->operand_count());
    std::vector<Shape> parameter_shapes(instruction->operand_count());
    for (int i = 0; i < instruction->operand_count(); ++i) {
      const Shape& parameter_shape = instruction->operand(i)->shape();
      parameters[i] = builder.AddInstruction(HloInstruction::CreateParameter(
          i, parameter_shape, absl::StrCat("param_", i)));
      parameter_shapes[i] = parameter_shape;
    }
    HloInstruction* root = builder.AddInstruction(
        instruction->CloneWithNewOperands(instruction->shape(), parameters));
    if (override_names) {
      parent()->SetAndUniquifyInstrName(
          root, absl::StrCat(instruction->name(), ".cloned"));
    }
    HloComputation* async_computation =
        parent_->AddEmbeddedComputation(builder.Build(root));
    // async-start's shape is ((operands...), result, context...).
    std::vector<Shape> start_shapes = {
        ShapeUtil::MakeTupleShape(parameter_shapes), root->shape()};
    for (const Shape& context_shape : context_shapes) {
      start_shapes.push_back(context_shape);
    }
    async_start = AddInstruction(HloInstruction::CreateAsyncStart(
        ShapeUtil::MakeTupleShape(start_shapes), instruction->operands(),
        async_computation, async_execution_thread));
    async_done = AddInstruction(
        HloInstruction::CreateAsyncDone(root->shape(), async_start));
    if (override_names) {
      parent()->SetAndUniquifyInstrName(
          async_start, absl::StrCat(root->name(), ".call-start"));
      parent()->SetAndUniquifyInstrName(
          async_done, absl::StrCat(root->name(), ".call-done"));
    }
  }
  // Both halves inherit the original metadata and backend config.
  async_start->set_metadata(instruction->metadata());
  async_start->CopyBackendConfigFrom(instruction);
  async_done->set_metadata(instruction->metadata());
  async_done->CopyBackendConfigFrom(instruction);
  // Incoming control edges attach to the start, outgoing ones to the done.
  for (HloInstruction* control_pred : instruction->control_predecessors()) {
    TF_RETURN_IF_ERROR(control_pred->AddControlDependencyTo(async_start));
  }
  for (HloInstruction* control_successor : instruction->control_successors()) {
    TF_RETURN_IF_ERROR(async_done->AddControlDependencyTo(control_successor));
  }
  if (replace) {
    TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
    TF_RETURN_IF_ERROR(ReplaceInstruction(instruction, async_done));
  }
  return async_done;
}
absl::StatusOr<HloInstruction*> HloComputation::DeepCopyHelper(
    HloInstruction* instruction, ShapeIndex* index,
    absl::FunctionRef<HloInstruction*(HloInstruction* leaf,
                                      const ShapeIndex& leaf_index,
                                      HloComputation* computation)>
        copy_leaf) {
  const Shape& shape = instruction->shape();
  // Tuples are copied recursively: emit a get-tuple-element per element,
  // deep-copy each element, and reassemble the copies into a fresh tuple.
  if (shape.IsTuple()) {
    std::vector<HloInstruction*> copied_elements;
    const int64_t element_count = ShapeUtil::TupleElementCount(shape);
    for (int64_t i = 0; i < element_count; i++) {
      HloInstruction* element_gte =
          AddInstruction(HloInstruction::CreateGetTupleElement(
              ShapeUtil::GetTupleElementShape(shape, i), instruction, i));
      index->push_back(i);
      TF_ASSIGN_OR_RETURN(HloInstruction * copied_element,
                          DeepCopyHelper(element_gte, index, copy_leaf));
      copied_elements.push_back(copied_element);
      index->pop_back();
    }
    return AddInstruction(HloInstruction::CreateTuple(copied_elements));
  }
  // Tokens are not copied; the original instruction is reused as-is.
  if (shape.IsToken()) {
    return instruction;
  }
  // Arrays are the only remaining case; the caller-provided leaf copier
  // decides how each array leaf is copied.
  TF_RET_CHECK(shape.IsArray());
  return copy_leaf(instruction, *index, this);
}
absl::StatusOr<HloInstruction*> HloComputation::DeepCopyInstruction(
    HloInstruction* instruction, const ShapeTree<bool>* indices_to_copy,
    ShapeTree<HloInstruction*>* copies_added) {
  // The instruction must belong to this computation.
  if (instruction->parent() != this) {
    return FailedPrecondition(
        "Can't deep copy instruction %s: instruction is not in computation %s",
        instruction->name(), name());
  }
  // When a copy mask is supplied, its shape must match the instruction's.
  if (indices_to_copy != nullptr &&
      !ShapeUtil::Compatible(instruction->shape(), indices_to_copy->shape())) {
    return FailedPrecondition(
        "Can't deep copy instruction %s: given shape tree of indices to copy "
        "has incompatible shapes: %s vs. %s",
        instruction->name(), ShapeUtil::HumanString(instruction->shape()),
        ShapeUtil::HumanString(indices_to_copy->shape()));
  }
  // Leaf copier: emit a kCopy for every leaf selected by the mask (or for
  // every leaf when no mask was given), recording the copies if requested.
  auto copy_leaf = [indices_to_copy, copies_added](
                       HloInstruction* leaf, const ShapeIndex& leaf_index,
                       HloComputation* computation) {
    const bool should_copy =
        indices_to_copy == nullptr || indices_to_copy->element(leaf_index);
    if (!should_copy) {
      // Not selected for copying: keep the existing leaf.
      return leaf;
    }
    HloInstruction* leaf_copy = computation->AddInstruction(
        HloInstruction::CreateUnary(leaf->shape(), HloOpcode::kCopy, leaf));
    if (copies_added != nullptr) {
      *copies_added->mutable_element(leaf_index) = leaf_copy;
    }
    return leaf_copy;
  };
  ShapeIndex index;
  return DeepCopyHelper(instruction, &index, copy_leaf);
}
absl::StatusOr<HloInstruction*>
HloComputation::DeepCopyInstructionWithCustomCopier(
    HloInstruction* instruction,
    absl::FunctionRef<HloInstruction*(HloInstruction* leaf,
                                      const ShapeIndex& leaf_index,
                                      HloComputation* computation)>
        copy_leaf) {
  // Only instructions owned by this computation may be deep-copied here; the
  // caller-provided `copy_leaf` decides how each array leaf is duplicated.
  if (instruction->parent() != this) {
    return FailedPrecondition(
        "Can't deep copy instruction %s: instruction is not in computation %s",
        instruction->name(), name());
  }
  ShapeIndex root_index;
  return DeepCopyHelper(instruction, &root_index, copy_leaf);
}
ProgramShape HloComputation::ComputeProgramShape(bool include_ids) const {
  // Assemble the program shape from the parameter shapes/names plus the root
  // instruction's shape as the result.
  ProgramShape program_shape;
  for (auto* param : param_instructions_) {
    *program_shape.add_parameters() = param->shape();
    *program_shape.add_parameter_names() =
        std::string(PrintName(param->name(), include_ids));
  }
  *program_shape.mutable_result() = root_instruction_->shape();
  return program_shape;
}
// Iterative structural-equality check between this computation and `other`.
// Walks paired instructions from the two roots downward, comparing each pair
// and recursing into called computations (via `computations_comparator` when
// supplied, otherwise a recursive EqualInternal with the same flags).
bool HloComputation::EqualInternal(
    const HloComputation& other, bool is_layout_sensitive,
    std::optional<
        absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>>
        computations_comparator,
    bool ignore_channel_id_values, bool ignore_execution_thread) const {
  if (this == &other) {
    return true;
  }
  absl::flat_hash_set<std::pair<const HloInstruction*, const HloInstruction*>>
      visited;
  std::vector<std::pair<const HloInstruction*, const HloInstruction*>> worklist;
  worklist.push_back({root_instruction(), other.root_instruction()});
  while (!worklist.empty()) {
    auto pair = worklist.back();
    worklist.pop_back();
    if (visited.contains(pair)) {
      continue;
    }
    visited.emplace(pair);
    // Operands are compared via the worklist below, so the per-pair identity
    // check treats them as always-equal here.
    auto operands_eq = [](const HloInstruction*, const HloInstruction*) {
      return true;
    };
    // Default sub-computation comparison: recurse with the same flags.
    auto comp_eq = [&](const HloComputation* a, const HloComputation* b) {
      return a->EqualInternal(*b, is_layout_sensitive, computations_comparator,
                              ignore_channel_id_values,
                              ignore_execution_thread);
    };
    bool identical_ignoring_operands =
        ignore_channel_id_values
            ? pair.first->IdenticalIgnoringChannelIdValues(
                  *pair.second, operands_eq,
                  (computations_comparator ? *computations_comparator
                                           : comp_eq),
                  is_layout_sensitive)
            : pair.first->Identical(
                  *pair.second, operands_eq,
                  (computations_comparator ? *computations_comparator
                                           : comp_eq),
                  is_layout_sensitive);
    if (!identical_ignoring_operands) {
      return false;
    }
    // Enqueue operand pairs positionally for comparison.
    for (size_t i = 0; i < pair.first->operands().size(); ++i) {
      worklist.push_back({pair.first->operand(i), pair.second->operand(i)});
    }
  }
  // All instruction pairs matched; optionally also require matching execution
  // threads.
  if (!ignore_execution_thread) {
    return execution_thread() == other.execution_thread();
  }
  return true;
}
absl::Status HloComputation::ReplaceWithNewInstruction(
    HloInstruction* old_instruction,
    std::unique_ptr<HloInstruction> new_instruction) {
  // Add the new instruction to this computation, then splice it in place of
  // the old one.
  HloInstruction* added = AddInstruction(std::move(new_instruction));
  return ReplaceInstruction(old_instruction, added);
}
absl::Status HloComputation::ReplaceWithNewEntryComputationParameter(
    HloInstruction* old_instruction,
    std::unique_ptr<HloInstruction> new_instruction) {
  // Register the new instruction as an entry-computation parameter, then
  // splice it in place of the old instruction.
  HloInstruction* new_param =
      AddEntryComputationParameter(std::move(new_instruction));
  return ReplaceInstruction(old_instruction, new_param);
}
// Shape-checked replacement: verifies that the two instructions have
// compatible shapes (CHECK-failing with both shapes printed otherwise), then
// delegates to ReplaceInstructionWithDifferentShape. Returns true iff the
// replacement actually happened.
absl::StatusOr<bool> HloComputation::ReplaceInstruction(
    HloInstruction* old_instruction, HloInstruction* new_instruction,
    bool preserve_sharding, bool relay_control_dependency,
    bool remove_unused_operands) {
  TF_RET_CHECK(
      ShapeUtil::Compatible(old_instruction->shape(), new_instruction->shape()))
      << absl::StreamFormat(
             "\"%s\" (%s) vs \"%s\" (%s)", old_instruction->name(),
             old_instruction->shape().ToString(true),
             new_instruction->name(),
             new_instruction->shape().ToString(true));
  return ReplaceInstructionWithDifferentShape(
      old_instruction, new_instruction, preserve_sharding,
      relay_control_dependency, remove_unused_operands);
}
absl::Status HloComputation::ReplaceInstruction(
    HloInstruction* old_instruction, HloInstruction* new_instruction) {
  // Non-optional variant: the replacement is expected to succeed.
  TF_ASSIGN_OR_RETURN(
      bool replaced,
      ReplaceInstruction(old_instruction, new_instruction, false));
  DCHECK(replaced);
  return absl::OkStatus();
}
// Replaces `old_instruction` with `new_instruction` without requiring
// compatible shapes. Returns false (leaving the graph untouched) when
// sharding preservation or un-relayed control dependencies prevent the
// replacement; returns true once the old instruction has been removed.
absl::StatusOr<bool> HloComputation::ReplaceInstructionWithDifferentShape(
    HloInstruction* old_instruction, HloInstruction* new_instruction,
    bool preserve_sharding, bool relay_control_dependency,
    bool remove_unused_operands) {
  if (preserve_sharding && new_instruction->has_sharding() &&
      old_instruction->has_sharding() &&
      !new_instruction->has_compatible_sharding(old_instruction)) {
    VLOG(10) << "Skipping replacement due to incompatible sharding";
    return false;
  }
  if (relay_control_dependency) {
    // Transfer the old instruction's control edges onto the replacement.
    TF_RETURN_IF_ERROR(
        new_instruction->CopyAllControlDepsFrom(old_instruction));
    TF_RETURN_IF_ERROR(old_instruction->DropAllControlDeps());
  } else if (old_instruction->HasControlDependencies()) {
    // Without relaying, control edges would be silently lost; refuse instead.
    VLOG(10) << "Skipping replacement because old instruction has "
                "control dependencies";
    return false;
  }
  VLOG(10) << "transformed " << old_instruction->ToString() << " to "
           << new_instruction->ToString();
  // Carry over op_name metadata when the replacement has none of its own.
  bool overwrite_op_name = new_instruction->metadata().op_name().empty() &&
                           !old_instruction->metadata().op_name().empty();
  if (overwrite_op_name) {
    new_instruction->set_metadata(old_instruction->metadata());
  }
  // Likewise carry over frontend attributes when the replacement has none.
  if (new_instruction->frontend_attributes().map().empty()) {
    new_instruction->set_frontend_attributes(
        old_instruction->frontend_attributes());
  }
  // Propagate original_value for non-fusion replacements whose shapes match.
  if (auto old_original_value = old_instruction->original_value()) {
    if (new_instruction->opcode() != HloOpcode::kFusion) {
      if (ShapeUtil::Compatible(old_instruction->shape(),
                                new_instruction->shape())) {
        new_instruction->set_original_value(old_original_value);
      } else {
        LOG(WARNING)
            << "Expect the new instruction to have the same shape with the old "
               "instruction when copying over original_value\n";
      }
    }
  }
  if (!new_instruction->has_sharding()) {
    new_instruction->copy_sharding(old_instruction);
  }
  TF_RETURN_IF_ERROR(
      old_instruction->ReplaceAllUsesWithDifferentShape(new_instruction));
  // Keep the old name when the replacement is the "same kind" of op: same
  // opcode, and for custom calls also the same call target.
  if (old_instruction->opcode() == new_instruction->opcode() &&
      (old_instruction->opcode() != HloOpcode::kCustomCall ||
       old_instruction->custom_call_target() ==
           new_instruction->custom_call_target())) {
    new_instruction->SetAndSanitizeName(old_instruction->name());
  }
  if (remove_unused_operands) {
    TF_RETURN_IF_ERROR(RemoveInstructionAndUnusedOperands(
        old_instruction, std::nullopt,
        relay_control_dependency));
  } else {
    TF_RETURN_IF_ERROR(RemoveInstruction(old_instruction));
  }
  return true;
}
absl::Status HloComputation::ReplaceInstructionWithDifferentShape(
    HloInstruction* old_instruction, HloInstruction* new_instruction) {
  // Non-optional variant: the replacement is expected to succeed.
  TF_ASSIGN_OR_RETURN(bool replaced,
                      ReplaceInstructionWithDifferentShape(
                          old_instruction, new_instruction, false));
  DCHECK(replaced);
  return absl::OkStatus();
}
std::vector<HloInstruction*> HloComputation::CollectUnreachableRoots() const {
std::vector<HloInstruction*> unreachable_roots;
for (auto* instruction : instructions()) {
if (instruction->IsDead() && instruction->control_successors().empty()) {
unreachable_roots.push_back(instruction);
}
}
VLOG(3) << "Unreachable roots:"
<< absl::StrJoin(unreachable_roots, "\n\t",
[](std::string* out, const HloInstruction* hlo) {
absl::StrAppend(out, hlo->ToString());
});
return unreachable_roots;
}
absl::Status HloComputation::AcceptWithOperandOrder(
    DfsHloVisitor* visitor,
    const HloInstruction::CompareFunction& operand_order) const {
  // Visit unreachable roots first (final-visit flag false), then the real
  // root last (flag true), so every instruction in the computation is seen.
  for (HloInstruction* unreachable_root : CollectUnreachableRoots()) {
    TF_RETURN_IF_ERROR(
        unreachable_root->AcceptWithOperandOrder(visitor, operand_order,
                                                 false));
  }
  return root_instruction()->AcceptWithOperandOrder(visitor, operand_order,
                                                    true);
}
std::unique_ptr<HloComputation> HloComputation::Clone(
    const std::string& suffix, HloCloneContext* context) {
  // A plain clone is a clone with no replacements and no extra parameters.
  return CloneWithReplacements(/*replacements=*/nullptr,
                               /*extra_parameters=*/{}, context, suffix);
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacementPairs(
    std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
    HloCloneContext* context, const std::string& suffix) {
  // Convenience wrapper: build a one-entry replacement map and delegate.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  replacements.insert(std::move(r1));
  return CloneWithReplacements(&replacements, /*extra_parameters=*/{}, context,
                               suffix);
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacementPairs(
    std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
    std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r2,
    HloCloneContext* context, const std::string& suffix) {
  // Convenience wrapper: build a two-entry replacement map and delegate.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  replacements.insert(std::move(r1));
  replacements.insert(std::move(r2));
  return CloneWithReplacements(&replacements, /*extra_parameters=*/{}, context,
                               suffix);
}
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacementPairs(
    std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
    std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r2,
    std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r3,
    HloCloneContext* context, const std::string& suffix) {
  // Convenience wrapper: build a three-entry replacement map and delegate.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  replacements.insert(std::move(r1));
  replacements.insert(std::move(r2));
  replacements.insert(std::move(r3));
  return CloneWithReplacements(&replacements, /*extra_parameters=*/{}, context,
                               suffix);
}
namespace {
// Reorders `unordered_instructions` (the freshly cloned instructions) to
// follow the relative order of their originals in `ordered_instructions`.
// Unmapped cloned parameters are placed adjacent to the last mapped parameter
// (or before all mapped elements when there is none); all other unmapped
// instructions go after the mapped elements. Sort failure is logged but not
// fatal.
void SortClonedInstructions(
    const HloCloneContext& context,
    absl::FunctionRef<const HloInstruction*(const HloInstruction*)> replace,
    const HloComputation& computation,
    const HloComputation::InstructionList& ordered_instructions,
    std::vector<std::unique_ptr<HloInstruction>>& unordered_instructions) {
  using InstructionSorter = MappedPtrContainerSorter<HloInstruction>;
  // Maps an original instruction to its clone in `context` (null when the
  // instruction was not cloned).
  auto instruction_mapper = [&context, replace](const HloInstruction* i) {
    return context.FindInstruction(replace(i));
  };
  // Count mapped instructions and remember the mapped position just after the
  // last mapped parameter, so unmapped parameters can be slotted next to it.
  size_t num_mapped_instructions = 0;
  size_t mapped_index_of_last_parameter_plus_one = 0;
  for (const auto& instruction : ordered_instructions) {
    if (!instruction_mapper(instruction.get())) {
      continue;
    }
    ++num_mapped_instructions;
    if (!dynamic_cast<const HloParameterInstruction*>(instruction.get())) {
      continue;
    }
    mapped_index_of_last_parameter_plus_one = num_mapped_instructions;
  }
  // Picks an ordering index for clones whose original has no mapping.
  auto unmapped_ptr_index =
      [num_mapped_instructions,
       mapped_index_of_last_parameter_plus_one](const HloInstruction* i) {
        if (dynamic_cast<const HloParameterInstruction*>(i)) {
          if (num_mapped_instructions > 0 &&
              mapped_index_of_last_parameter_plus_one > 0) {
            return mapped_index_of_last_parameter_plus_one - 1;
          }
          return InstructionSorter::IndexBeforeMappedElementsFn()(i);
        }
        return InstructionSorter::IndexAfterMappedElementsFn()(i);
      };
  auto status =
      InstructionSorter::Sort(instruction_mapper, unmapped_ptr_index,
                              ordered_instructions, unordered_instructions);
  if (!status.ok()) {
    // Non-fatal: the clone is still usable, just not in the original order.
    LOG(ERROR) << "Failed to reorder instructions while cloning computation: "
               << computation.name() << "; " << status;
  }
}
// For every cloned instruction, sorts its user list and control-dependency
// lists to mirror the corresponding lists on the original instruction.
void SortClonedInstructionUsersAndControlLists(
    const HloCloneContext& context,
    absl::FunctionRef<const HloInstruction*(const HloInstruction*)> replace,
    const HloComputation::InstructionList& sorted_instructions) {
  auto instruction_mapper = [&context, replace](const HloInstruction* i) {
    return context.FindInstruction(replace(i));
  };
  for (const HloInstructionInfo& instruction : sorted_instructions) {
    HloInstruction* cloned_instruction =
        context.FindInstruction(replace(instruction.get()));
    if (!cloned_instruction) {
      continue;  // Instruction was dropped/not cloned.
    }
    cloned_instruction->SortInstructionUsersAndControlLists(instruction_mapper,
                                                            *instruction);
  }
}
}  // namespace
std::unique_ptr<HloComputation> HloComputation::CloneWithReplacements(
    const absl::flat_hash_map<const HloInstruction*,
                              std::unique_ptr<HloInstruction>>* replacements,
    absl::Span<const HloInstruction* const> extra_parameters,
    HloCloneContext* context, const std::string& suffix,
    const HloInstruction* new_root) {
  // Create a local clone context when the caller did not supply one; it only
  // needs to outlive the CloneInContext call below.
  std::unique_ptr<HloCloneContext> local_context;
  if (context == nullptr) {
    local_context = std::make_unique<HloCloneContext>(parent(), suffix);
    context = local_context.get();
  }
  return CloneInContext(*context, replacements, extra_parameters, suffix,
                        new_root);
}
// Clones this computation into the module tracked by `context`.
// `replacements` maps original instructions to substitutes (a null mapped
// value drops the instruction entirely); `extra_parameters` are cloned and
// prepended (kParameter only); `new_root` (default: the current root) becomes
// the clone's root.
std::unique_ptr<HloComputation> HloComputation::CloneInContext(
    HloCloneContext& context,
    const absl::flat_hash_map<const HloInstruction*,
                              std::unique_ptr<HloInstruction>>* replacements,
    absl::Span<const HloInstruction* const> extra_parameters,
    const std::string& suffix, const HloInstruction* new_root) const {
  if (new_root == nullptr) {
    new_root = root_instruction();
  }
  // Resolves an instruction through the replacement map (identity when no map
  // was given or there is no entry for the instruction).
  auto replace = [&](const HloInstruction* instr) {
    if (!replacements) return instr;
    auto it = replacements->find(instr);
    return it != replacements->end() ? it->second.get() : instr;
  };
  VLOG(1) << "Cloning " << name() << " --> " << suffix << "\n";
  // Iterative DFS computing a post-order over the (replaced) instruction
  // graph so that operands are cloned before their users.
  std::vector<const HloInstruction*> postorder;
  absl::flat_hash_map<const HloInstruction*, VisitState> visited;
  std::vector<const HloInstruction*> dfs_stack;
  for (const auto& instr : instructions()) {
    const HloInstruction* new_instr = replace(instr);
    if (!new_instr) {
      continue;  // Dropped by the replacement map.
    }
    dfs_stack.clear();
    dfs_stack.push_back(new_instr);
    while (!dfs_stack.empty()) {
      auto* cur = dfs_stack.back();
      auto it = visited.find(cur);
      if (it != visited.end()) {
        dfs_stack.pop_back();
        if (it->second == VisitState::kVisited) {
          continue;
        }
        // Second encounter while still kVisiting: the operands are done, so
        // the node itself is now finished.
        CHECK_EQ(it->second, VisitState::kVisiting);
        postorder.push_back(cur);
        it->second = VisitState::kVisited;
        continue;
      }
      visited.insert({cur, VisitState::kVisiting});
      for (HloInstruction* operand : cur->operands()) {
        const HloInstruction* new_operand = replace(operand);
        if (new_operand) {
          dfs_stack.emplace_back(new_operand);
        }
      }
    }
  }
  std::vector<std::unique_ptr<HloInstruction>> instructions;
  // Extra parameters are cloned first; only kParameter is permitted.
  for (const auto& instr : extra_parameters) {
    CHECK_EQ(instr->opcode(), HloOpcode::kParameter)
        << "Only parameter instructions are allowed in 'extra_parameters'";
    instructions.emplace_back(instr->Clone());
  }
  // Clone in post-order so every operand's clone already exists in `context`.
  for (auto instr : postorder) {
    std::vector<HloInstruction*> new_operands;
    for (auto operand : instr->operands()) {
      auto replaced_operand = replace(operand);
      CHECK_NE(replaced_operand, nullptr)
          << "replacements map tried to eliminate a used instruction "
          << operand->ToString() << ", used by " << instr->ToString();
      new_operands.push_back(context.GetInstruction(replaced_operand));
    }
    std::unique_ptr<HloInstruction> new_instr =
        instr->CloneWithNewOperands(instr->shape(), new_operands, &context);
    // Preserve per-leaf-buffer replication info on parameters.
    if (instr->opcode() == HloOpcode::kParameter &&
        instr->parameter_replicated_at_leaf_buffers().has_value()) {
      new_instr->set_parameter_replicated_at_leaf_buffers(
          instr->parameter_replicated_at_leaf_buffers().value());
    }
    instructions.push_back(std::move(new_instr));
  }
  // Restore the original instruction ordering among the clones.
  SortClonedInstructions(context, replace, *this, instructions_, instructions);
  Builder builder(suffix.empty() ? std::string(name())
                                 : absl::StrCat(name(), ".", suffix));
  for (auto& instr : instructions) {
    builder.AddInstruction(std::move(instr));
  }
  auto result = builder.Build(
      context.GetInstruction(replace(new_root)));
  // Re-create control dependencies between the cloned instructions.
  for (auto instr : postorder) {
    HloInstruction* new_instr = context.GetInstruction(instr);
    for (auto successor : instr->control_successors()) {
      auto replaced_successor = replace(successor);
      if (replaced_successor != nullptr) {
        TF_CHECK_OK(new_instr->AddControlDependencyTo(
            context.GetInstruction(replaced_successor)));
      }
    }
  }
  SortClonedInstructionUsersAndControlLists(context, replace, instructions_);
  context.MapComputation(this, result.get());
  result->SetExecutionThread(execution_thread());
  return result;
}
// Rewrites this computation's name with a uniquified version obtained from
// the given name uniquer.
void HloComputation::UniquifyName(NameUniquer* name_uniquer) {
  name_ = name_uniquer->GetUniqueName(name_);
}
// Convenience overload: uniquify against the module's computation-name
// uniquer.
void HloComputation::UniquifyName(HloModule* module) {
  UniquifyName(&module->computation_name_uniquer());
}
HloInstruction* HloComputation::GetInstructionWithName(absl::string_view name) {
  // Linear scan over the computation's instructions; returns nullptr when no
  // instruction carries the requested name.
  for (HloInstruction* instruction : instructions()) {
    if (instruction->name() == name) {
      return instruction;
    }
  }
  return nullptr;
}
// True iff this computation is its module's entry computation.
bool HloComputation::IsEntryComputation() const {
  return parent()->entry_computation() == this;
}
bool HloComputation::CanExpandIntoSingleInstruction() const {
return absl::c_all_of(
instructions(), [root = root_instruction()](const HloInstruction* instr) {
return root == instr || instr->opcode() == HloOpcode::kParameter;
});
}
} | #include "xla/hlo/ir/hlo_computation.h"
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
namespace op = xla::testing::opcode_matchers;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
// Test fixture providing small scalar-f32 helper computations shared by the
// HloComputation tests below.
class HloComputationTest : public HloTestBase {
 protected:
  HloComputationTest() {}
  // Builds a computation that negates its single f32[] parameter.
  std::unique_ptr<HloComputation> CreateNegateComputation() {
    auto builder = HloComputation::Builder("Negate");
    auto param = builder.AddInstruction(
        HloInstruction::CreateParameter(0, r0f32_, "param0"));
    builder.AddInstruction(
        HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, param));
    return builder.Build();
  }
  // Builds a computation that maps `map_computation` over its single f32[]
  // parameter.
  std::unique_ptr<HloComputation> CreateMapComputation(
      HloComputation* map_computation) {
    auto builder = HloComputation::Builder("Map");
    auto param = builder.AddInstruction(
        HloInstruction::CreateParameter(0, r0f32_, "param0"));
    builder.AddInstruction(
        HloInstruction::CreateMap(r0f32_, {param}, map_computation));
    return builder.Build();
  }
  // Scalar f32 shape reused by all helpers and tests.
  Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
// A computation that calls no other computation reports an empty
// embedded-computations list.
TEST_F(HloComputationTest, GetEmbeddedComputationsEmpty) {
  auto module = CreateNewVerifiedModule();
  auto negate_computation =
      module->AddEntryComputation(CreateNegateComputation());
  EXPECT_TRUE(negate_computation->MakeEmbeddedComputationsList().empty());
}
// A map over a negate computation lists exactly that computation as embedded;
// the leaf computation itself embeds nothing.
TEST_F(HloComputationTest, GetEmbeddedComputationsOneComputation) {
  auto module = CreateNewVerifiedModule();
  auto negate_computation =
      module->AddEmbeddedComputation(CreateNegateComputation());
  auto map_computation =
      module->AddEntryComputation(CreateMapComputation(negate_computation));
  EXPECT_TRUE(negate_computation->MakeEmbeddedComputationsList().empty());
  EXPECT_THAT(map_computation->MakeEmbeddedComputationsList(),
              ElementsAre(negate_computation));
}
// Diamond-shaped call graph: two map computations share the same negate
// computation. The shared computation must appear only once, and first.
TEST_F(HloComputationTest, GetEmbeddedComputationsDiamond) {
  auto module = CreateNewVerifiedModule();
  auto negate_computation =
      module->AddEmbeddedComputation(CreateNegateComputation());
  auto map1_computation =
      module->AddEmbeddedComputation(CreateMapComputation(negate_computation));
  auto map2_computation =
      module->AddEmbeddedComputation(CreateMapComputation(negate_computation));
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32_, "param0"));
  auto map1 = builder.AddInstruction(
      HloInstruction::CreateMap(r0f32_, {param}, map1_computation));
  auto map2 = builder.AddInstruction(
      HloInstruction::CreateMap(r0f32_, {param}, map2_computation));
  builder.AddInstruction(
      HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, map1, map2));
  auto computation = module->AddEntryComputation(builder.Build());
  auto embedded_computations = computation->MakeEmbeddedComputationsList();
  EXPECT_EQ(3, embedded_computations.size());
  // The shared leaf computation comes before its callers.
  EXPECT_EQ(negate_computation, *embedded_computations.begin());
  EXPECT_THAT(embedded_computations,
              UnorderedElementsAre(negate_computation, map1_computation,
                                   map2_computation));
}
// Post-order of a single-instruction computation is just that instruction.
TEST_F(HloComputationTest, PostOrderSingleton) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_THAT(computation->MakeInstructionPostOrder(), ElementsAre(constant));
}
// Post-order of a straight-line chain follows dataflow order: operands
// before users.
TEST_F(HloComputationTest, PostOrderSimple) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto negate1 = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
  auto negate2 = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, negate1));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_THAT(computation->MakeInstructionPostOrder(),
              ElementsAre(constant, negate1, negate2));
}
// Post-order covers all instructions even when they are mutually
// disconnected; relative order among them is unspecified.
TEST_F(HloComputationTest, PostOrderDisconnectedInstructions) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant4 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_THAT(computation->MakeInstructionPostOrder(),
              UnorderedElementsAre(constant1, constant2, constant3, constant4));
}
// MakeInstructionPostOrderWithReshapeFirst visits the reshape before the
// sibling broadcast, whereas the plain post-order keeps insertion order.
TEST_F(HloComputationTest, PostOrderWithReshapeFirst) {
  const std::string& hlo_string = R"(
  HloModule test
  ENTRY %entry {
    parameter.0 = f32[3] parameter(0)
    broadcast.0 = f32[1, 3] broadcast(f32[3] parameter.0), dimensions={1}
    reshape.0 = f32[3, 1] reshape(f32[3] parameter.0)
    ROOT tuple.0 = (f32[1, 3], f32[3, 1]) tuple(f32[1, 3] broadcast.0, f32[3, 1] reshape.0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloComputation* entry_computation =
      FindComputation(hlo_module.get(), "entry");
  HloInstruction* parameter_0 =
      FindInstruction(hlo_module.get(), "parameter.0");
  HloInstruction* broadcast_0 =
      FindInstruction(hlo_module.get(), "broadcast.0");
  HloInstruction* reshape_0 = FindInstruction(hlo_module.get(), "reshape.0");
  HloInstruction* tuple_0 = FindInstruction(hlo_module.get(), "tuple.0");
  EXPECT_THAT(entry_computation->MakeInstructionPostOrder(),
              ElementsAre(parameter_0, broadcast_0, reshape_0, tuple_0));
  EXPECT_THAT(entry_computation->MakeInstructionPostOrderWithReshapeFirst(),
              ElementsAre(parameter_0, reshape_0, broadcast_0, tuple_0));
}
// Post-order covers every instruction when the computation has several
// independent "root" adds sharing operands.
TEST_F(HloComputationTest, PostOrderWithMultipleRoots) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
      r0f32_, HloOpcode::kAdd, constant1, constant2));
  auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
      r0f32_, HloOpcode::kAdd, constant2, constant3));
  auto add3 = builder.AddInstruction(HloInstruction::CreateBinary(
      r0f32_, HloOpcode::kAdd, constant1, constant3));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  auto post_order = computation->MakeInstructionPostOrder();
  EXPECT_EQ(6, post_order.size());
  EXPECT_THAT(post_order, UnorderedElementsAre(constant1, constant2, constant3,
                                               add1, add2, add3));
}
// Accept() visits every instruction exactly once even with multiple root-like
// adds, and calls FinishVisit exactly once, last, with the computation root.
TEST_F(HloComputationTest, VisitWithMultipleRoots) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
                                                      constant1, constant2));
  builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
                                                      constant2, constant3));
  builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
                                                      constant1, constant3));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  // Visitor that records each visited instruction and checks no instruction
  // is visited twice.
  class TestVisitor : public DfsHloVisitorWithDefault {
   public:
    explicit TestVisitor(HloComputation* computation)
        : computation_(computation) {}
    absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
      EXPECT_FALSE(visited_set_.contains(hlo_instruction));
      visited_set_.insert(hlo_instruction);
      last_visited_ = hlo_instruction;
      return absl::OkStatus();
    }
    absl::Status FinishVisit(HloInstruction* root) override {
      EXPECT_EQ(computation_->root_instruction(), root);
      ++finish_visit_calls_;
      return absl::OkStatus();
    }
    HloComputation* computation_;
    absl::flat_hash_set<HloInstruction*> visited_set_;
    int64_t finish_visit_calls_ = 0;
    HloInstruction* last_visited_ = nullptr;
  };
  TestVisitor visitor(computation);
  EXPECT_IS_OK(computation->Accept(&visitor));
  EXPECT_EQ(6, visitor.visited_set_.size());
  EXPECT_EQ(1, visitor.finish_visit_calls_);
  EXPECT_EQ(computation->root_instruction(), visitor.last_visited_);
}
// Deep-copying an array-shaped instruction yields a single kCopy of it.
TEST_F(HloComputationTest, DeepCopyArray) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  auto copy = computation->DeepCopyInstruction(constant).value();
  EXPECT_THAT(copy, GmockMatch(m::Copy(m::Op().Is(constant))));
}
// Deep-copying a tuple copies each element through a get-tuple-element and
// rebuilds the tuple, preserving element indices.
TEST_F(HloComputationTest, DeepCopyTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  auto tuple_copy = computation->DeepCopyInstruction(tuple).value();
  EXPECT_THAT(tuple_copy, GmockMatch(m::Tuple(
                              m::Copy(m::GetTupleElement(m::Op().Is(tuple))),
                              m::Copy(m::GetTupleElement(m::Op().Is(tuple))))));
  EXPECT_EQ(0, tuple_copy->operand(0)->operand(0)->tuple_index());
  EXPECT_EQ(1, tuple_copy->operand(1)->operand(0)->tuple_index());
}
// With an explicit index mask, an array is copied when its (only) leaf is
// selected and returned unchanged when it is not.
TEST_F(HloComputationTest, DeepCopyArrayAtIndices) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
  auto computation = builder.Build();
  {
    // Leaf selected: expect a copy.
    ShapeTree<bool> indices_to_copy(constant->shape(), true);
    EXPECT_THAT(
        computation->DeepCopyInstruction(constant, &indices_to_copy).value(),
        GmockMatch(m::Copy(m::Op().Is(constant))));
  }
  {
    // Leaf not selected: the original instruction is returned.
    ShapeTree<bool> indices_to_copy(constant->shape(), false);
    EXPECT_EQ(
        computation->DeepCopyInstruction(constant, &indices_to_copy).value(),
        constant);
  }
}
// Per-leaf masking of a tuple deep copy: all leaves, no leaves, and a single
// leaf; `copies_added` records exactly the copies that were emitted.
TEST_F(HloComputationTest, DeepCopyTupleAtIndices) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto computation = builder.Build();
  {
    // All leaves selected: both elements are copied and recorded.
    ShapeTree<bool> indices_to_copy(tuple->shape(), true);
    ShapeTree<HloInstruction*> copies_added(tuple->shape(),
                                            nullptr);
    HloInstruction* deep_copy =
        computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
            .value();
    EXPECT_THAT(deep_copy, GmockMatch(m::Tuple(
                               m::Copy(m::GetTupleElement(m::Op().Is(tuple)))
                                   .Is(copies_added.element({0})),
                               m::Copy(m::GetTupleElement(m::Op().Is(tuple)))
                                   .Is(copies_added.element({1})))));
  }
  {
    // No leaves selected: GTEs are emitted but nothing is copied.
    ShapeTree<bool> indices_to_copy(tuple->shape(), false);
    ShapeTree<HloInstruction*> copies_added(tuple->shape(),
                                            nullptr);
    HloInstruction* deep_copy =
        computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
            .value();
    EXPECT_THAT(deep_copy,
                GmockMatch(m::Tuple(m::GetTupleElement(m::Op().Is(tuple)),
                                    m::GetTupleElement(m::Op().Is(tuple)))));
    EXPECT_TRUE(copies_added.element({}) == nullptr);
    EXPECT_TRUE(copies_added.element({0}) == nullptr);
    EXPECT_TRUE(copies_added.element({1}) == nullptr);
  }
  {
    // Only leaf {0} selected: exactly one copy, recorded at index {0}.
    ShapeTree<bool> indices_to_copy(tuple->shape(), false);
    *indices_to_copy.mutable_element({0}) = true;
    ShapeTree<HloInstruction*> copies_added(tuple->shape(),
                                            nullptr);
    HloInstruction* deep_copy =
        computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
            .value();
    EXPECT_THAT(deep_copy, GmockMatch(m::Tuple(
                               m::Copy(m::GetTupleElement(m::Op().Is(tuple))),
                               m::GetTupleElement(m::Op().Is(tuple)))));
    EXPECT_TRUE(copies_added.element({}) == nullptr);
    EXPECT_TRUE(copies_added.element({0}) != nullptr);
    EXPECT_TRUE(copies_added.element({1}) == nullptr);
  }
}
// Tokens are not copyable values: deep-copying a token yields an after-all
// (the token-producing op) rather than a kCopy.
TEST_F(HloComputationTest, DeepCopyToken) {
  auto builder = HloComputation::Builder(TestName());
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  auto copy = computation->DeepCopyInstruction(token).value();
  EXPECT_THAT(copy, GmockMatch(m::AfterAll()));
}
// In a mixed tuple, only the non-token leaf is copied: element 0 (the token)
// passes through a bare GTE while element 1 (the constant) gets a kCopy.
TEST_F(HloComputationTest, DeepCopyTokenTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({token, constant}));
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  auto copy = computation->DeepCopyInstruction(tuple).value();
  EXPECT_THAT(copy, GmockMatch(m::Tuple(
                        m::GetTupleElement(m::Op().Is(tuple)),
                        m::Copy(m::GetTupleElement(m::Op().Is(tuple))))));
}
// Adding a control edge add->negate while add already data-depends on negate
// creates a cycle; Accept() must fail with a cycle-detection error.
TEST_F(HloComputationTest, CycleDetection) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
  auto add = builder.AddInstruction(
      HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, negate, negate));
  // Unverified module: the verifier would reject the cyclic graph up front.
  auto module = CreateNewUnverifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  // Control edge in the opposite direction of the data edge => cycle.
  ASSERT_IS_OK(add->AddControlDependencyTo(negate));
  auto instructions = computation->MakeInstructionPostOrder();
  EXPECT_EQ(3, instructions.size());
  FunctionVisitor visitor(
      [](HloInstruction* instruction) { return absl::OkStatus(); });
  auto visit_status = computation->Accept(&visitor);
  ASSERT_FALSE(visit_status.ok());
  // Regex matches a prefix of the message ("...cycle is detected...").
  ASSERT_THAT(visit_status.message(),
              ::testing::ContainsRegex("cycle is detecte"));
}
// RemoveInstructionAndUnusedOperands must handle an instruction that uses the
// same dead operand twice (dead_add uses dead_negate for both inputs) and
// remove the whole dead subgraph without touching the live root.
TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
  auto dead_negate = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
  auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary(
      r0f32_, HloOpcode::kAdd, dead_negate, dead_negate));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant))
;
  auto module = CreateNewVerifiedModule();
  auto computation = module->AddEntryComputation(builder.Build());
  EXPECT_EQ(4, computation->instruction_count());
  EXPECT_THAT(computation->root_instruction(),
              GmockMatch(m::Negate(m::Op().Is(constant))));
  EXPECT_EQ(negate, computation->root_instruction());
  ASSERT_IS_OK(computation->RemoveInstructionAndUnusedOperands(dead_add));
  // Only the live negate and the shared constant remain.
  EXPECT_EQ(2, computation->instruction_count());
  EXPECT_THAT(computation->root_instruction(),
              GmockMatch(m::Negate(m::Op().Is(constant))));
  EXPECT_EQ(negate, computation->root_instruction());
}
// Removing a dead instruction inside a fusion must also prune the fusion
// parameters (p0, p2) that only it used, shrinking the fusion's operand list
// and renumbering the surviving parameter (p1 becomes parameter 0).
TEST_F(HloComputationTest, RemoveSeveralUnusedFusionParameters) {
  const char* const kHloModule = R"(
  HloModule test
  f {
    p0 = f32[] parameter(0)
    p1 = f32[] parameter(1)
    p2 = f32[] parameter(2)
    add = f32[] add(p0, p2)
    ROOT neg = f32[] negate(p1)
  }
  ENTRY main {
    param0 = f32[] parameter(0)
    param1 = f32[] parameter(1)
    param2 = f32[] parameter(2)
    ROOT res = f32[] fusion(param0, param1, param2), kind=kLoop, calls=f
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kHloModule));
  auto root = module->entry_computation()->root_instruction();
  auto dead_add = FindInstruction(module.get(), "add");
  ASSERT_IS_OK(root->fused_instructions_computation()
                   ->RemoveInstructionAndUnusedOperands(dead_add));
  root = module->entry_computation()->root_instruction();
  // The fusion now takes only the entry's param1...
  EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(1))));
  // ...and inside the fusion that value is parameter 0.
  EXPECT_THAT(root->fused_expression_root(),
              GmockMatch(m::Negate(m::Parameter(0))));
}
// ReplaceParameter swaps in a new parameter instruction (here with a
// different element type in the tuple) and the computation reflects the new
// shape afterwards.
TEST_F(HloComputationTest, ReplaceParameter) {
  const char* const kHloModule = R"(
  HloModule ModuleWithWhile
  body {
    p_body = (f32[2], s32[]) parameter(0)
    val = f32[2] get-tuple-element(p_body), index=0
    const = s32[] constant(-1)
    ROOT root = (f32[2], s32[]) tuple(val, const)
  }
  condition {
    p_cond = (f32[2], s32[]) parameter(0)
    gte = s32[] get-tuple-element(p_cond), index=1
    const = s32[] constant(42)
    ROOT result = pred[] compare(gte, const), direction=EQ
  }
  ENTRY entry {
    param.1 = s32[] parameter(0)
    const = f32[2] constant({0,1})
    while_init = (f32[2], s32[]) tuple(const, param.1)
    while = (f32[2], s32[]) while(while_init), condition=condition, body=body
    ROOT out = s32[] get-tuple-element(while), index=1
  })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnUnverifiedModule(kHloModule));
  HloComputation* body = module->GetComputationWithName("body");
  // New parameter shape: element 0 changed from f32[2] to s32[2].
  Shape new_param_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(S32, {2}), ShapeUtil::MakeShape(S32, {})});
  body->ReplaceParameter(
      0, HloInstruction::CreateParameter(0, new_param_shape, "new_p_body"));
  EXPECT_TRUE(ShapeUtil::Equal(body->parameter_instruction(0)->shape(),
                               new_param_shape));
}
// Cloning a computation must preserve control dependencies, including on
// instructions (param/negate) that are not reachable from the root through
// data edges.
TEST_F(HloComputationTest, CloneWithControlDependency) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      r0f32_, HloOpcode::kAdd, constant1, constant2));
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32_, "param0"));
  auto negate = builder.AddInstruction(
      HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, param));
  auto module = CreateNewVerifiedModule();
  // Root is `add`; `negate` hangs off to the side, tied in only by control.
  auto computation =
      module->AddEntryComputation(builder.Build(add));
  TF_CHECK_OK(negate->AddControlDependencyTo(add));
  auto clone = computation->Clone();
  auto cloned_add = clone->root_instruction();
  EXPECT_EQ(cloned_add->opcode(), HloOpcode::kAdd);
  // The control edge negate->add survives the clone in both directions.
  auto predecessors = cloned_add->control_predecessors();
  EXPECT_EQ(1, predecessors.size());
  EXPECT_EQ(HloOpcode::kNegate, predecessors[0]->opcode());
  auto successors = predecessors[0]->control_successors();
  EXPECT_THAT(successors, ::testing::ElementsAre(cloned_add));
}
// CloneWithReplacements can substitute an instruction (param2 -> s32 param)
// and append extra parameters during the clone.
TEST_F(HloComputationTest, CloneWithReplacements) {
  auto builder = HloComputation::Builder(TestName());
  Shape r0s64 = ShapeUtil::MakeShape(S64, {});
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  Shape r0u32 = ShapeUtil::MakeShape(U32, {});
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32_, "p.0.lhs"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32_, "p.0.rhs"));
  auto param2 =
      builder.AddInstruction(HloInstruction::CreateParameter(2, r0s64, "p.1"));
  auto lt = builder.AddInstruction(
      HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
                                    param1, ComparisonDirection::kLt));
  auto module = CreateNewVerifiedModule();
  auto computation =
      module->AddEntryComputation(builder.Build(lt));
  // Replace the s64 parameter 2 with an s32 one.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  replacements.emplace(param2,
                       HloInstruction::CreateParameter(2, r0s32, "p.1"));
  // And add a brand-new u32 parameter 3.
  auto param3 = HloInstruction::CreateParameter(3, r0u32, "p.2");
  std::vector<const HloInstruction*> extra_parameters{param3.get()};
  auto clone =
      computation->CloneWithReplacements(&replacements, extra_parameters);
  ASSERT_EQ(clone->num_parameters(), 4);
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(0)->shape(), r0f32_));
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(1)->shape(), r0f32_));
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(2)->shape(), r0s32));
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(3)->shape(), r0u32));
}
// Same scenario as CloneWithReplacements, but driven through CloneInContext
// with an explicitly supplied HloCloneContext.
TEST_F(HloComputationTest, CloneInContext) {
  HloComputation::Builder builder(TestName());
  Shape r0s64 = ShapeUtil::MakeShape(S64, {});
  Shape r0s32 = ShapeUtil::MakeShape(S32, {});
  Shape r0u32 = ShapeUtil::MakeShape(U32, {});
  HloInstruction* param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, r0f32_, "p.0.lhs"));
  HloInstruction* param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, r0f32_, "p.0.rhs"));
  HloInstruction* param2 =
      builder.AddInstruction(HloInstruction::CreateParameter(2, r0s64, "p.1"));
  HloInstruction* lt = builder.AddInstruction(
      HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
                                    param1, ComparisonDirection::kLt));
  std::unique_ptr<VerifiedHloModule> module = CreateNewVerifiedModule();
  const HloComputation& computation =
      *module->AddEntryComputation(builder.Build(lt));
  // Replace the s64 parameter 2 with an s32 one and add a u32 parameter 3.
  absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
      replacements;
  replacements.emplace(param2,
                       HloInstruction::CreateParameter(2, r0s32, "p.1"));
  std::unique_ptr<HloInstruction> param3 =
      HloInstruction::CreateParameter(3, r0u32, "p.2");
  std::vector<const HloInstruction*> extra_parameters = {param3.get()};
  HloCloneContext clone_context(module.get());
  std::unique_ptr<HloComputation> clone = computation.CloneInContext(
      clone_context, &replacements, extra_parameters);
  ASSERT_EQ(clone->num_parameters(), 4);
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(0)->shape(), r0f32_));
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(1)->shape(), r0f32_));
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(2)->shape(), r0s32));
  EXPECT_TRUE(
      ShapeUtil::Equal(clone->parameter_instruction(3)->shape(), r0u32));
}
// Pins the exact ToString() output of a transpose+dot computation, including
// the trailing execution_thread annotation.
TEST_F(HloComputationTest, Stringification) {
  const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
  const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
  const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
  const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
  HloComputation::Builder builder("TransposeDot");
  HloInstruction* x =
      builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
  HloInstruction* y =
      builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
  HloInstruction* reshape =
      builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
  DotDimensionNumbers dot_dnums;
  dot_dnums.add_lhs_contracting_dimensions(1);
  dot_dnums.add_rhs_contracting_dimensions(0);
  PrecisionConfig precision_config;
  precision_config.mutable_operand_precision()->Resize(
      2, PrecisionConfig::DEFAULT);
  builder.AddInstruction(
      HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
  auto module = CreateNewVerifiedModule();
  auto* computation = module->AddEntryComputation(builder.Build());
  computation->SetExecutionThread("MainThread");
  auto options = HloPrintOptions().set_print_metadata(false);
  // Expected text must match byte-for-byte, including instruction names.
  const std::string expected_computation =
      R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
  %x = f32[5,10]{1,0} parameter(0)
  %y = f32[20,10]{1,0} parameter(1)
  %transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
  ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
  EXPECT_EQ(computation->ToString(options), expected_computation);
}
// Same computation as Stringification, but with indent_amount(2): every
// printed line gains four leading spaces.
TEST_F(HloComputationTest, StringificationIndent) {
  const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
  const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
  const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
  const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
  HloComputation::Builder builder("TransposeDot");
  HloInstruction* x =
      builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
  HloInstruction* y =
      builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
  HloInstruction* reshape =
      builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
  DotDimensionNumbers dot_dnums;
  dot_dnums.add_lhs_contracting_dimensions(1);
  dot_dnums.add_rhs_contracting_dimensions(0);
  PrecisionConfig precision_config;
  precision_config.mutable_operand_precision()->Resize(
      2, PrecisionConfig::DEFAULT);
  builder.AddInstruction(
      HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
  auto module = CreateNewVerifiedModule();
  auto* computation = module->AddEntryComputation(builder.Build());
  computation->SetExecutionThread("MainThread");
  auto options =
      HloPrintOptions().set_print_metadata(false).set_indent_amount(2);
  const std::string expected_computation =
      R"(    %TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
      %x = f32[5,10]{1,0} parameter(0)
      %y = f32[20,10]{1,0} parameter(1)
      %transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
      ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    }, execution_thread="MainThread")";
  EXPECT_EQ(computation->ToString(options), expected_computation);
}
// Canonical print options replace instruction names with positional tmp_N
// names; this test pins both the default and the canonical output.
TEST_F(HloComputationTest, StringificationCanonical) {
  const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
  const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
  const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
  const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
  HloComputation::Builder builder("TransposeDot");
  HloInstruction* x =
      builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
  HloInstruction* y =
      builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
  HloInstruction* reshape =
      builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
  DotDimensionNumbers dot_dnums;
  dot_dnums.add_lhs_contracting_dimensions(1);
  dot_dnums.add_rhs_contracting_dimensions(0);
  PrecisionConfig precision_config;
  precision_config.mutable_operand_precision()->Resize(
      2, PrecisionConfig::DEFAULT);
  builder.AddInstruction(
      HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
  auto module = CreateNewVerifiedModule();
  auto* computation = module->AddEntryComputation(builder.Build());
  computation->SetExecutionThread("MainThread");
  auto options = HloPrintOptions().set_print_metadata(false);
  const std::string expected_computation1 =
      R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
  %x = f32[5,10]{1,0} parameter(0)
  %y = f32[20,10]{1,0} parameter(1)
  %transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
  ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
  EXPECT_EQ(computation->ToString(options), expected_computation1);
  // Canonical form: names are erased in favor of tmp_0..tmp_3.
  options = HloPrintOptions().Canonical();
  const std::string expected_computation2 = R"(TransposeDot {
  tmp_0 = f32[5,10]{1,0} parameter(0)
  tmp_1 = f32[20,10]{1,0} parameter(1)
  tmp_2 = f32[10,20]{1,0} transpose(f32[20,10]{1,0} tmp_1), dimensions={1,0}
  ROOT tmp_3 = f32[5,20]{1,0} dot(f32[5,10]{1,0} tmp_0, f32[10,20]{1,0} tmp_2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
  EXPECT_EQ(computation->ToString(options), expected_computation2);
}
// Builds a computation that adds the constant 1.0 to a scalar parameter `n`
// times (a chain of n kAdd instructions). Used to stress deep equality.
std::unique_ptr<HloComputation> MakeAddNComputation(
    int n, std::string name = "add_n") {
  auto builder = HloComputation::Builder(name);
  auto result = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(F32, {}), "x_value"));
  auto one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  for (int i = 0; i < n; ++i) {
    result = builder.AddInstruction(HloInstruction::CreateBinary(
        one->shape(), HloOpcode::kAdd, result, one));
  }
  return builder.Build();
}
// operator== on very deep (200k-instruction) computations.
// NOTE(review): the large chain presumably guards against stack overflow in a
// recursive comparison — confirm against HloComputation::Equal's impl.
TEST_F(HloComputationTest, DeepEquality) {
  auto computation_a = MakeAddNComputation(200000);
  auto computation_b = MakeAddNComputation(200000);
  EXPECT_TRUE(*computation_a == *computation_b);
  // A chain one element shorter must compare unequal in both directions.
  auto computation_c = MakeAddNComputation(199999);
  EXPECT_FALSE(*computation_a == *computation_c);
  EXPECT_FALSE(*computation_c == *computation_b);
}
// Two all-reduces share channel_id=1; post-order must keep both adjacent
// (parameter, all-reduce, all-reduce, add, tuple).
TEST_F(HloComputationTest, InstructionPostOrderWithAllReduce) {
  const char* const hlo_string = R"(
HloModule Module
add {
  lhs = f32[] parameter(0)
  rhs = f32[] parameter(1)
  ROOT add = f32[] add(lhs, rhs)
}
ENTRY entry {
  param = f32[128] parameter(0), sharding={maximal device=0}
  crs0 = f32[128] all-reduce(param),
    replica_groups={{0}}, channel_id=1, to_apply=add,
    sharding={maximal device=0}
  crs1 = f32[128] all-reduce(param),
    replica_groups={{0}}, channel_id=1, to_apply=add,
    sharding={maximal device=1}
  add = f32[128] add(crs0, crs0), sharding={maximal device=0}
  ROOT t = (f32[128], f32[128]) tuple(add, crs1)
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  EXPECT_THAT(module->entry_computation()->MakeInstructionPostOrder(),
              ElementsAre(op::Parameter(), op::AllReduce(), op::AllReduce(),
                          op::Add(), op::Tuple()));
}
// Equal() with a caller-supplied computation comparator: main.15 and main.16
// differ only in the reduction subcomputations they call (region_X/Y vs
// region_A/B). Default comparison rejects them (region_X multiplies where
// region_A adds... actually bodies differ); a comparator that maps X->A and
// Y->B accepts them.
TEST_F(HloComputationTest, ComparisonWithCustomComparator) {
  std::string_view mod_txt = R"(
  HloModule Module
  region_X {
    Arg_0.5 = s32[] parameter(0)
    Arg_1.6 = s32[] parameter(1)
    ROOT add.7 = s32[] add(Arg_0.5, Arg_1.6)
  }
  region_Y {
    Arg_0.5 = s32[] parameter(0)
    Ar_1.6 = s32[] parameter(1)
    ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
  }
  region_A {
    Arg_0.5 = s32[] parameter(0)
    Arg_1.6 = s32[] parameter(1)
    ROOT add.7 = s32[] multiply(Arg_0.5, Arg_1.6)
  }
  region_B {
    Arg_0.5 = s32[] parameter(0)
    Ar_1.6 = s32[] parameter(1)
    ROOT add.7 = s32[] add(Arg_0.5, Ar_1.6)
  }
  main.15 {
    Arg_0.1 = s32[10]{0} parameter(0)
    constant.3 = s32[] constant(0)
    rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_X
    Arg_1.2 = s32[15]{0} parameter(1)
    rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_Y
    ROOT multiply.14 = s32[] multiply(rd1, rd2)
  }
  ENTRY main.16 {
    Arg_0.1 = s32[10]{0} parameter(0)
    constant.3 = s32[] constant(0)
    rd1 = s32[] reduce(Arg_0.1, constant.3), dimensions={0}, to_apply=region_A
    Arg_1.2 = s32[15]{0} parameter(1)
    rd2 = s32[] reduce(Arg_1.2, constant.3), dimensions={0}, to_apply=region_B
    ROOT multiply.14 = s32[] multiply(rd1, rd2)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(mod_txt));
  // Comparator treats region_X as equivalent to region_A and region_Y to
  // region_B, regardless of their bodies.
  absl::flat_hash_map<std::string_view, std::string_view> replace_map;
  replace_map["region_X"] = "region_A";
  replace_map["region_Y"] = "region_B";
  auto compare_func = [&replace_map](const HloComputation* a,
                                     const HloComputation* b) {
    return (a->name() == b->name() || replace_map[a->name()] == b->name());
  };
  HloComputation *comp_a = nullptr, *comp_b = nullptr;
  for (auto comp : module->computations()) {
    if (comp->name() == "main.15") {
      comp_a = comp;
    }
    if (comp->name() == "main.16") {
      comp_b = comp;
    }
  }
  EXPECT_FALSE(comp_a->Equal(*comp_b, false));
  EXPECT_TRUE(comp_a->Equal(*comp_b, false, compare_func));
}
// Cloning an async-start/async-done pair must keep both clones pointing at
// the SAME cloned wrapped computation, mirroring the original pair.
TEST_F(HloComputationTest, CloneWrappedAsyncInstructionSameWrappedFunc) {
  const char* const hlo_string = R"(
  HloModule Module
  add (lhs: u32[], rhs: u32[]) -> u32[] {
    lhs = u32[] parameter(0)
    rhs = u32[] parameter(1)
    ROOT add = u32[] add(u32[] lhs, u32[] rhs)
  }
  async_wrapped (async_param.1: u32[8]) -> u32[4] {
    async_param.1 = u32[8]{0} parameter(0)
    ROOT reduce-scatter.1 = u32[4]{0} reduce-scatter(u32[8]{0} async_param.1),
      replica_groups={}, dimensions={0}, to_apply=add
  }
  ENTRY main (data: u32[8]) -> u32[4] {
    data = u32[8]{0} parameter(0)
    reduce-scatter-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} data),
      calls=async_wrapped, backend_config={"is_sync":false}
    ROOT reduce-scatter-done = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) reduce-scatter-start),
      calls=async_wrapped
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* start = FindInstruction(module.get(), "reduce-scatter-start");
  HloInstruction* done = FindInstruction(module.get(), "reduce-scatter-done");
  // Precondition: the original pair shares one wrapped computation.
  EXPECT_EQ(start->async_wrapped_computation(),
            done->async_wrapped_computation());
  std::unique_ptr<HloInstruction> cloned_start = start->Clone();
  std::unique_ptr<HloInstruction> cloned_done =
      done->CloneWithNewOperands(done->shape(), {cloned_start.get()});
  EXPECT_EQ(cloned_start.get()->async_wrapped_computation(),
            cloned_done.get()->async_wrapped_computation());
}
// A call with is_composite=true round-trips through the parser with its
// composite.* frontend attributes intact.
TEST_F(HloComputationTest, CompositeCall) {
  const char* const hlo_string = R"(
  HloModule Module
  add (x: f32[]) -> f32[] {
    %x = f32[] parameter(0)
    %constant = f32[] constant(2)
    ROOT %z = f32[] add(f32[] %x, f32[] %constant)
  }
  ENTRY %CallR0F32AddScalar.v2 () -> f32[] {
    %constant.1 = f32[] constant(42)
    ROOT %call = f32[] call(f32[] %constant.1), to_apply=add, is_composite=true,
      frontend_attributes={
        composite.attributes={n = 1 : i32, tensor = dense<1> : tensor<i32>},
        composite.name="foo.bar",
        composite.version="1"
      }
  })";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloInstruction* composite_call = FindInstruction(module.get(), "call");
  EXPECT_EQ(composite_call->opcode(), HloOpcode::kCall);
  EXPECT_TRUE(composite_call->is_composite());
  // Three attributes: composite.attributes, composite.name, composite.version.
  EXPECT_EQ(composite_call->frontend_attributes().map().size(), 3);
}
// After CreateAsyncInstructions, repeated Clone() calls must give every clone
// uniquely named async instructions (root and its async-start operand).
TEST_F(HloComputationTest, CloneComputationWithAsyncInstructions) {
  constexpr std::string_view hlo = R"(
HloModule main
comp.0 {
  ROOT custom-call.0 = () custom-call(), custom_call_target="foo"
}
ENTRY main {
  in.0 = () parameter(0)
  call.0 = () call(), to_apply=comp.0
  ROOT out.0 = () tuple()
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
  HloComputation* comp0 = FindComputation(module.get(), "comp.0");
  HloInstruction* custom_call = FindInstruction(module.get(), "custom-call.0");
  // Wrap the custom call in async-start/async-done inside comp.0.
  TF_ASSERT_OK(comp0->CreateAsyncInstructions(
      custom_call, {ShapeUtil::MakeScalarShape(U32)},
      HloInstruction::kMainExecutionThread,
      true,
      true));
  HloComputation* comp1 = module->AddEmbeddedComputation(comp0->Clone());
  HloComputation* comp2 = module->AddEmbeddedComputation(comp0->Clone());
  // Names must differ pairwise between original and both clones.
  EXPECT_NE(comp0->root_instruction()->name(),
            comp1->root_instruction()->name());
  EXPECT_NE(comp0->root_instruction()->operand(0)->name(),
            comp1->root_instruction()->operand(0)->name());
  EXPECT_NE(comp1->root_instruction()->name(),
            comp2->root_instruction()->name());
  EXPECT_NE(comp1->root_instruction()->operand(0)->name(),
            comp2->root_instruction()->operand(0)->name());
}
}
}
#include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/core/graph/tensor_id.h"
namespace tensorflow {
namespace grappler {
// Splits a full node name into its enclosing scope and bare node name at the
// last '/', e.g. "a/b/c/Add" -> {scope="a/b/c", name="Add"}. A name with no
// '/' has an empty scope and is returned unchanged as the name.
const NodeScopeAndName ParseNodeScopeAndName(const string& node_name) {
  auto pos = node_name.find_last_of('/');
  if (pos == string::npos) {
    // No scope separator: the whole string is the node name.
    return {"", node_name};
  } else {
    return {node_name.substr(0, pos), node_name.substr(pos + 1)};
  }
}
// Resolves an instruction input string (possibly "node:port" or "^node") to
// the producing NodeDef via the context's node map.
// Returns FailedPrecondition if the node is not in the map; on success,
// *node points at the NodeDef owned by the graph (no ownership transfer).
Status GetInputNode(const GraphOptimizerContext& ctx, const string& input,
                    NodeDef** node) {
  // NodeName() strips the port/control-dependency decoration from `input`.
  string node_name = NodeName(input);
  NodeDef* node_by_name = ctx.node_map->GetNode(node_name);
  if (node_by_name == nullptr) {
    // NOTE(review): "doesn't exists" is ungrammatical, but the exact text may
    // be matched by callers or tests — confirm before rewording.
    return errors::FailedPrecondition("Node ", node_name,
                                      " doesn't exists in a node map");
  }
  *node = node_by_name;
  return absl::OkStatus();
}
// Looks up the statically-inferred properties (dtype/shape) of `tensor`
// ("node" or "node:port") from the context's GraphProperties.
// Fails if properties were never computed, if `tensor` names a control
// dependency (negative port), or if the port index is out of range.
// On success, *properties points into data owned by ctx.graph_properties.
Status GetTensorProperties(const GraphOptimizerContext& ctx,
                           const string& tensor,
                           const OpInfo::TensorProperties** properties) {
  if (ctx.graph_properties == nullptr) {
    return errors::InvalidArgument("Graph properties are unknown.");
  }
  // SafeTensorId gives a negative index for control inputs ("^node"),
  // which carry no tensor value and hence no properties.
  SafeTensorId tensor_id = ParseTensorName(tensor);
  if (tensor_id.index() < 0) {
    return errors::InvalidArgument(
        "Can't get tensor properties of control dependency ", tensor);
  }
  const auto& output_properties =
      ctx.graph_properties->GetOutputProperties(tensor_id.node());
  int num_outputs = output_properties.size();
  // Reject both an unknown node (zero recorded outputs) and an out-of-range
  // output port.
  if (num_outputs == 0 || tensor_id.index() > num_outputs - 1) {
    return errors::InvalidArgument(
        "Node ", tensor_id.node(),
        " is missing output properties at position :", tensor_id.index(),
        " (num_outputs=", num_outputs, ")");
  }
  *properties = &output_properties[tensor_id.index()];
  return absl::OkStatus();
}
// Appends to the optimized graph a copy of `node_to_copy` renamed to `name`,
// registers it in the node map, and returns the new node (owned by the
// graph). CHECK-fails if `name` is already taken or `node_to_copy` is null —
// callers must pick a fresh name (see MakeOptimizedNodeName).
NodeDef* AddCopyNode(const GraphOptimizerContext& ctx, const string& name,
                     const NodeDef* node_to_copy) {
  CHECK(node_to_copy != nullptr);
  CHECK(!ctx.node_map->NodeExists(name))
      << "Node " << name << " already exists in a graph";
  NodeDef* new_node = ctx.optimized_graph->add_node();
  *new_node = *node_to_copy;
  new_node->set_name(name);
  ctx.node_map->AddNode(name, new_node);
  return new_node;
}
// Appends a fresh, empty NodeDef named `name` to the optimized graph and the
// node map. Unlike AddCopyNode, a name collision is not fatal: the name is
// suffixed with "_0", "_1", ... until it is unique (with a warning per try).
NodeDef* AddEmptyNode(const GraphOptimizerContext& ctx, const string& name) {
  std::string new_name = name;
  for (int count = 0; ctx.node_map->NodeExists(new_name); ++count) {
    LOG(WARNING) << name << " already exists in the graph.";
    // Suffixes are always derived from the original `name`, not the previous
    // candidate, so attempts go name_0, name_1, ...
    new_name = absl::StrCat(name, "_", count);
  }
  NodeDef* new_node = ctx.optimized_graph->add_node();
  new_node->set_name(new_name);
  ctx.node_map->AddNode(new_name, new_node);
  return new_node;
}
// Builds the conventional name for a node produced by an optimizer stage:
//   <node.scope>/<sub_scope>/<prefix>_<node.name>
// Scope and sub-scope segments are omitted when empty; at least one of
// sub_scope/prefix must be non-empty (CHECK-enforced) so optimized nodes are
// always distinguishable from the originals.
const string MakeOptimizedNodeName(const NodeScopeAndName& node,
                                   const string& sub_scope,
                                   const string& prefix) {
  CHECK(!sub_scope.empty() || !prefix.empty())
      << "Either optimized node name prefix or sub-scope must be non-empty";
  string optimized_node_name;
  if (!node.scope.empty()) {
    strings::StrAppend(&optimized_node_name, node.scope, "/");
  }
  if (!sub_scope.empty()) {
    strings::StrAppend(&optimized_node_name, sub_scope, "/");
  }
  if (!prefix.empty()) {
    strings::StrAppend(&optimized_node_name, prefix, "_");
  }
  strings::StrAppend(&optimized_node_name, node.name);
  return optimized_node_name;
}
// Overload for stages that fuse several nodes: takes the name built for
// `root` (see the overload above) and appends "_<bare name>" for each fused
// node, scopes stripped.
// NOTE(review): `node_names` is passed by (const) value; a const reference
// would avoid a vector copy, but the definition must keep matching the
// header declaration — change both together if at all.
const string MakeOptimizedNodeName(const NodeScopeAndName& root,
                                   const std::vector<string> node_names,
                                   const string& sub_scope,
                                   const string& prefix) {
  string optimized_node_name = MakeOptimizedNodeName(root, sub_scope, prefix);
  for (const string& node_name : node_names) {
    auto name_and_scope = ParseNodeScopeAndName(node_name);
    strings::StrAppend(&optimized_node_name, "_", name_and_scope.name);
  }
  return optimized_node_name;
}
}
}
#include "tensorflow/core/grappler/optimizers/graph_optimizer_stage.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
using ::tensorflow::test::function::GDef;
using ::tensorflow::test::function::NDef;
// Empty fixture: the tests below need no shared setup/teardown.
class GraphOptimizerStageTest : public ::testing::Test {};
// Placeholder stage-result type; the fake stage produces no data.
struct FakeResult {};
// Minimal concrete GraphOptimizerStage used only to exercise the base
// class's name-generation and graph-lookup helpers: it accepts every node
// and its TrySimplify is a no-op.
class FakeOptimizerStage : public GraphOptimizerStage<FakeResult> {
 public:
  explicit FakeOptimizerStage(const string& optimizer_name,
                              const string& stage_name,
                              const GraphOptimizerContext& ctx)
      : GraphOptimizerStage(optimizer_name, stage_name, ctx) {}
  ~FakeOptimizerStage() override = default;
  bool IsSupported(const NodeDef* node) const override { return true; }
  Status TrySimplify(NodeDef* node, FakeResult* result) override {
    return absl::OkStatus();
  }
};
// A name with no '/' lives in the root scope: scope is empty.
TEST_F(GraphOptimizerStageTest, ParseNodeNameAndScopeInRoot) {
  const auto scope_and_name = ParseNodeScopeAndName("Add");
  EXPECT_EQ(scope_and_name.scope, "");
  EXPECT_EQ(scope_and_name.name, "Add");
}
// The split happens at the LAST '/': everything before it is the scope.
TEST_F(GraphOptimizerStageTest, ParseNodeNameAndScopeInScope) {
  const auto scope_and_name = ParseNodeScopeAndName("a/b/c/Add");
  EXPECT_EQ(scope_and_name.scope, "a/b/c");
  EXPECT_EQ(scope_and_name.name, "Add");
}
// OptimizedNodeName composes scope/optimizer/stage plus optional fused-node
// names or a rewrite label into the generated name.
TEST_F(GraphOptimizerStageTest, OptimizedNodeName) {
  GraphOptimizerContext ctx( nullptr,
                             nullptr,
                             nullptr,
                             nullptr,
                             nullptr,
                            RewriterConfig::ON);
  FakeOptimizerStage stage("my_opt", "my_stg", ctx);
  const auto node = ParseNodeScopeAndName("a/b/c/Add");
  // Base form: <scope>/<optimizer>/<stage>_<name>.
  EXPECT_EQ(stage.OptimizedNodeName(node), "a/b/c/my_opt/my_stg_Add");
  // Fused form appends each extra node's bare name.
  EXPECT_EQ(stage.OptimizedNodeName(node, std::vector<string>({"Mul", "Sqrt"})),
            "a/b/c/my_opt/my_stg_Add_Mul_Sqrt");
  // Rewrite form inserts the rewrite label before the node name.
  const string rewrite = "my_rewrite";
  EXPECT_EQ(stage.OptimizedNodeName(node, rewrite),
            "a/b/c/my_opt/my_stg_my_rewrite_Add");
}
// When the generated name already exists in the graph, UniqueOptimizedNodeName
// appends "_uniqueN", with N counting up across calls on the same stage.
TEST_F(GraphOptimizerStageTest, UniqueOptimizedNodeName) {
  // Pre-populate the graph with the names the stage would otherwise produce.
  GraphDef graph =
      GDef({NDef("a/b/c/A", "NotImportant", {}),
            NDef("a/b/c/my_opt/my_stg_A", "NotImportant", {}),
            NDef("a/b/c/my_opt/my_stg_my_rewrite_A", "NotImportant", {})},
           {});
  NodeMap node_map(&graph);
  GraphOptimizerContext ctx( nullptr,
                             nullptr,
                             nullptr,
                            &node_map,
                             nullptr,
                            RewriterConfig::ON);
  FakeOptimizerStage stage("my_opt", "my_stg", ctx);
  const auto node = ParseNodeScopeAndName("a/b/c/A");
  EXPECT_EQ(stage.UniqueOptimizedNodeName(node),
            "a/b/c/my_opt/my_stg_A_unique0");
  // The counter is shared: the next collision gets suffix 1.
  const string rewrite = "my_rewrite";
  EXPECT_EQ(stage.UniqueOptimizedNodeName(node, rewrite),
            "a/b/c/my_opt/my_stg_my_rewrite_A_unique1");
}
// Like UniqueOptimizedNodeName, but some "_uniqueN" names are already taken:
// the counter keeps advancing past used suffixes.
TEST_F(GraphOptimizerStageTest, UniqueOptimizedNodeNameWithUsedNodeNames) {
  GraphDef graph = GDef(
      {NDef("a/b/c/A", "NotImportant", {}),
       NDef("a/b/c/my_opt/my_stg_A", "NotImportant", {}),
       NDef("a/b/c/my_opt/my_stg_A_unique0", "NotImportant", {}),
       NDef("a/b/c/my_opt/my_stg_my_rewrite_A", "NotImportant", {}),
       NDef("a/b/c/my_opt/my_stg_my_rewrite_A_unique1", "NotImportant", {})},
      {});
  NodeMap node_map(&graph);
  GraphOptimizerContext ctx( nullptr,
                             nullptr,
                             nullptr,
                            &node_map,
                             nullptr,
                            RewriterConfig::ON);
  FakeOptimizerStage stage("my_opt", "my_stg", ctx);
  const auto node = ParseNodeScopeAndName("a/b/c/A");
  // _unique0 is taken, so the first fresh suffix is _unique1.
  EXPECT_EQ(stage.UniqueOptimizedNodeName(node),
            "a/b/c/my_opt/my_stg_A_unique1");
  // _unique1 is taken for the rewrite name, so it lands on _unique2.
  const string rewrite = "my_rewrite";
  EXPECT_EQ(stage.UniqueOptimizedNodeName(node, rewrite),
            "a/b/c/my_opt/my_stg_my_rewrite_A_unique2");
}
// GetInputNode resolves a name to its NodeDef; GetTensorProperties returns
// the statically-inferred dtype for each output (variables yield ref types).
TEST_F(GraphOptimizerStageTest, GetInputNodeAndProperties) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto add = ops::Add(s.WithOpName("Add"), a, b);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphProperties properties(item);
  TF_CHECK_OK(properties.InferStatically( false));
  NodeMap node_map(&item.graph);
  GraphOptimizerContext ctx( nullptr,
                            &item.graph,
                            &properties,
                            &node_map,
                             nullptr,
                            RewriterConfig::ON);
  FakeOptimizerStage stage("my_opt", "my_stg", ctx);
  NodeDef* add_node;
  TF_CHECK_OK(stage.GetInputNode("Add", &add_node));
  ASSERT_EQ(add_node->input_size(), 2);
  EXPECT_EQ(add_node->input(0), "a");
  EXPECT_EQ(add_node->input(1), "b");
  const OpInfo::TensorProperties* add_properties;
  TF_CHECK_OK(stage.GetTensorProperties("Add", &add_properties));
  EXPECT_EQ(add_properties->dtype(), DT_FLOAT);
  // Variable outputs are reference-typed.
  const OpInfo::TensorProperties* a_properties;
  TF_CHECK_OK(stage.GetTensorProperties("a:0", &a_properties));
  EXPECT_EQ(a_properties->dtype(), DT_FLOAT_REF);
  const OpInfo::TensorProperties* b_properties;
  TF_CHECK_OK(stage.GetTensorProperties("b:0", &b_properties));
  EXPECT_EQ(b_properties->dtype(), DT_FLOAT_REF);
}
// Verifies AddCopyNode / AddEmptyNode: copies preserve op and inputs, added
// nodes are registered in the node map, and a name collision on AddEmptyNode
// yields a uniquified name.
TEST_F(GraphOptimizerStageTest, AddNodes) {
  tensorflow::Scope s = tensorflow::Scope::NewRootScope();
  auto a = ops::Variable(s.WithOpName("a"), {2, 2}, DT_FLOAT);
  auto b = ops::Variable(s.WithOpName("b"), {2, 2}, DT_FLOAT);
  auto add = ops::Add(s.WithOpName("Add"), a, b);
  GrapplerItem item;
  TF_CHECK_OK(s.ToGraphDef(&item.graph));
  GraphProperties properties(item);
  TF_CHECK_OK(properties.InferStatically( false));
  NodeMap node_map(&item.graph);
  GraphOptimizerContext ctx( nullptr,
                            &item.graph,
                            &properties,
                            &node_map,
                             nullptr,
                            RewriterConfig::ON);
  FakeOptimizerStage stage("my_opt", "my_stg", ctx);
  NodeDef* add_node;
  TF_CHECK_OK(stage.GetInputNode("Add", &add_node));
  NodeDef* add_node_copy = stage.AddCopyNode("Add_1", add_node);
  EXPECT_EQ(add_node_copy->name(), "Add_1");
  EXPECT_EQ(add_node_copy->op(), "Add");
  // Guard the object we index below: the *copy* must have both inputs.
  // (Previously this asserted on `add_node`, which would not catch a copy
  // that dropped inputs before the EXPECTs dereferenced them.)
  ASSERT_EQ(add_node_copy->input_size(), 2);
  EXPECT_EQ(add_node_copy->input(0), "a");
  EXPECT_EQ(add_node_copy->input(1), "b");
  // The copy must be reachable through the node map by its new name.
  NodeDef* add_node_copy_by_name;
  TF_CHECK_OK(stage.GetInputNode("Add_1", &add_node_copy_by_name));
  EXPECT_EQ(add_node_copy, add_node_copy_by_name);
  NodeDef* empty_node = stage.AddEmptyNode("Add_2");
  EXPECT_EQ(empty_node->name(), "Add_2");
  EXPECT_EQ(empty_node->input_size(), 0);
  NodeDef* empty_node_by_name;
  TF_CHECK_OK(stage.GetInputNode("Add_2", &empty_node_by_name));
  EXPECT_EQ(empty_node, empty_node_by_name);
  // A second node requesting the same name gets a uniquified one.
  NodeDef* unique_empty_node = stage.AddEmptyNode("Add_2");
  EXPECT_EQ(unique_empty_node->name(), "Add_2_0");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/graph_optimizer_stage.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/graph_optimizer_stage_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b94443b4-6fb3-4c90-8ccc-d0cf952d9da3 | cpp | tensorflow/tensorflow | hlo_alias_analysis | third_party/xla/xla/service/hlo_alias_analysis.cc | third_party/xla/xla/service/hlo_alias_analysis_test.cc | #include "xla/service/hlo_alias_analysis.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/map_util.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
using absl::StrAppend;
namespace {
using FlatValueSet = absl::flat_hash_set<const HloValue*>;
void ComputeInputOutputAliasedValues(const HloValue& value,
const HloDataflowAnalysis& dataflow,
FlatValueSet& aliased_values) {
const HloModule& module = dataflow.module();
const HloComputation& entry_computation = *module.entry_computation();
const HloInputOutputAliasConfig& io_alias_config =
module.input_output_alias_config();
for (const HloPosition& pos : value.positions()) {
if (pos.instruction == entry_computation.root_instruction()) {
std::optional<HloInputOutputAliasConfig::Alias> aliased_input =
io_alias_config.GetAliasedParameter(pos.index);
if (aliased_input) {
aliased_values.insert(
&dataflow.GetUniqueValueAt(entry_computation.parameter_instruction(
aliased_input->parameter_number),
aliased_input->parameter_index));
}
}
}
}
// Collects into `aliased_values` every value that must share a buffer with
// `value` because of kWhile buffer-sharing rules: a while's init operand, the
// body/condition parameter, and the body root all alias the while result at
// the corresponding shape index.
void ComputeWhileAliasedValues(const HloValue& value,
                               const HloDataflowAnalysis& dataflow,
                               FlatValueSet& aliased_values) {
  VLOG(3) << "Compute kWhile aliases";
  // Case 1: `value` is used as the init operand of a while.
  for (const HloUse& use : value.GetUses()) {
    if (use.instruction->opcode() == HloOpcode::kWhile) {
      const HloValue& while_value =
          dataflow.GetUniqueValueAt(use.instruction, use.operand_index);
      aliased_values.insert(&while_value);
      VLOG(3) << " value is init value to a while; must share buffer with "
                 "while value "
              << while_value;
    }
  }
  // Case 2: `value` is defined by the parameter of a while body or condition.
  if (value.defining_instruction()->opcode() == HloOpcode::kParameter) {
    const HloComputation* computation = value.defining_instruction()->parent();
    const CallGraphNode& call_graph_node =
        dataflow.call_graph().GetNode(computation);
    for (const CallSite& callsite : call_graph_node.caller_callsites()) {
      if (callsite.instruction()->opcode() == HloOpcode::kWhile) {
        // A flattened call graph guarantees a unique caller.
        CHECK_EQ(call_graph_node.caller_callsites().size(), 1);
        const HloValue& while_value = dataflow.GetUniqueValueAt(
            callsite.instruction(), value.defining_index());
        VLOG(3) << " value is parameter value of the body or condition of a "
                   "while; must share buffer with while value "
                << while_value;
        aliased_values.insert(&while_value);
      }
    }
  }
  // Case 3: `value` appears at the root of a while body.
  for (const HloPosition& position : value.positions()) {
    if (!position.instruction->IsRoot()) continue;
    const HloComputation* computation = position.instruction->parent();
    const CallGraphNode& call_graph_node =
        dataflow.call_graph().GetNode(computation);
    for (const CallSite& callsite : call_graph_node.caller_callsites()) {
      if (callsite.instruction()->opcode() == HloOpcode::kWhile &&
          callsite.instruction()->while_body() == computation) {
        CHECK_EQ(call_graph_node.caller_callsites().size(), 1)
            << "Call graph must have been flattened.";
        const HloValue& while_value =
            dataflow.GetUniqueValueAt(callsite.instruction(), position.index);
        VLOG(3) << " value @ " << position << " is root of "
                << callsite.instruction()->name()
                << "; body root and while value root must share buffer "
                   "among them: "
                << while_value;
        aliased_values.insert(&while_value);
      }
    }
  }
}
// Collects into `aliased_values` the values that must share a buffer with
// `value` because conditional branch-computation roots alias the
// conditional's result at the same shape index.
void ComputeConditionalAliasedValues(const HloValue& value,
                                     const HloDataflowAnalysis& dataflow,
                                     FlatValueSet& aliased_values) {
  VLOG(3) << "Compute kConditional aliases";
  for (const HloPosition& position : value.positions()) {
    // Only branch-computation roots participate.
    if (!position.instruction->IsRoot()) {
      continue;
    }
    const CallGraphNode& node =
        dataflow.call_graph().GetNode(position.instruction->parent());
    for (const CallSite& callsite : node.caller_callsites()) {
      HloInstruction* caller = callsite.instruction();
      if (caller->opcode() != HloOpcode::kConditional) {
        continue;
      }
      // A flattened call graph guarantees a unique caller.
      CHECK_EQ(node.caller_callsites().size(), 1);
      const HloValue& cond_value =
          dataflow.GetUniqueValueAt(caller, position.index);
      VLOG(3) << " value @ " << position << " is root of " << caller->name()
              << "; branch computation roots must share buffer among them : "
              << cond_value;
      aliased_values.insert(&cond_value);
    }
  }
}
// Collects into `aliased_values` the values tied to `value` through in-place
// operations (e.g. kDynamicUpdateSlice, kScatter), where a declared
// operand/output pair must share a buffer.
void ComputeInPlaceOperationAliasedValues(const HloValue& value,
                                          const HloDataflowAnalysis& dataflow,
                                          FlatValueSet& aliased_values) {
  VLOG(3) << "Compute aliases for in-place operations (e.g. "
             "kDynamicUpdateSlice and kScatter)";
  // Direction 1: `value` sits at the output side of an in-place pair; alias
  // the matching operand value.
  for (const HloPosition& position : value.positions()) {
    HloInstruction* instruction = position.instruction;
    for (const auto& operand_and_output_index :
         HloDataflowAnalysis::GetInPlaceInputOutputPairs(instruction)) {
      if (position.index == operand_and_output_index.second) {
        const HloOperandIndex& operand_index = operand_and_output_index.first;
        const HloValue& operand_value = dataflow.GetUniqueValueAt(
            instruction->operand(operand_index.operand_number),
            operand_index.operand_index);
        VLOG(3) << " operand value " << operand_value << " aliases.";
        aliased_values.insert(&operand_value);
      }
    }
  }
  // Direction 2: `value` is used as the operand side of an in-place pair;
  // alias the matching output value.
  for (const HloUse& use : value.GetUses()) {
    for (const auto& operand_and_output_index :
         HloDataflowAnalysis::GetInPlaceInputOutputPairs(use.instruction)) {
      const HloOperandIndex& operand_index = operand_and_output_index.first;
      if (use.operand_number == operand_index.operand_number &&
          use.operand_index == operand_index.operand_index) {
        const HloValue& use_value = dataflow.GetUniqueValueAt(
            use.instruction, operand_and_output_index.second);
        VLOG(3) << " use value " << use_value << " aliases.";
        aliased_values.insert(&use_value);
      }
    }
  }
}
// Returns the complete set of values that must live in the same buffer as
// `value`; the result always contains `value` itself.
FlatValueSet ComputeAliasedValues(const HloValue& value,
                                  const HloDataflowAnalysis& dataflow) {
  if (VLOG_IS_ON(2)) {
    for (const HloUse& use : value.GetUses()) {
      VLOG(2) << "Use of value " << value << ": " << use;
    }
  }
  FlatValueSet result;
  result.insert(&value);
  // Each helper contributes values aliased through a different mechanism.
  ComputeInputOutputAliasedValues(value, dataflow, result);
  ComputeWhileAliasedValues(value, dataflow, result);
  ComputeConditionalAliasedValues(value, dataflow, result);
  ComputeInPlaceOperationAliasedValues(value, dataflow, result);
  return result;
}
// Partitions every HloValue of `dataflow` into HloBuffers by repeatedly
// merging the alias sets of each value. Merging always folds smaller sets
// into the largest one (ties broken toward the smallest value id) to keep
// the total merge cost low. Buffer ids are assigned densely in the order the
// surviving sets appear.
std::vector<HloBuffer> CreateBuffers(const HloDataflowAnalysis& dataflow) {
  const std::vector<HloValue*>& values = dataflow.values();
  size_t num_buffers = values.size();
  // Start with one singleton set per value; `value_to_set` tracks which set
  // currently owns each value.
  std::vector<FlatValueSet> buffer_values(values.size());
  absl::flat_hash_map<const HloValue*, FlatValueSet*> value_to_set;
  value_to_set.reserve(values.size());
  for (size_t i = 0; i < values.size(); ++i) {
    buffer_values[i].insert(values[i]);
    value_to_set[values[i]] = &buffer_values[i];
  }
  for (const HloValue* value : values) {
    VLOG(3) << "Merging colocated values, value: " << *value;
    FlatValueSet aliased_values = ComputeAliasedValues(*value, dataflow);
    if (aliased_values.size() < 2) continue;
    std::vector<std::pair<FlatValueSet*, HloValue::Id>> aliased_sets;
    aliased_sets.reserve(aliased_values.size());
    for (const HloValue* aliased : aliased_values) {
      aliased_sets.push_back({value_to_set[aliased], aliased->id()});
    }
    // Pick the largest set (smallest id wins ties) as the merge target.
    auto key = [](const auto& set_and_id) {
      return std::make_pair(set_and_id.first->size(), -set_and_id.second);
    };
    FlatValueSet* union_set =
        absl::c_max_element(aliased_sets, LessThanByKey(key))->first;
    for (auto& aliased_set_and_id : aliased_sets) {
      FlatValueSet* aliased_set = aliased_set_and_id.first;
      // Skip the target itself and sets already drained by an earlier merge.
      if ((aliased_set != union_set) && !aliased_set->empty()) {
        for (const HloValue* aliased_value : *aliased_set) {
          CHECK(union_set->insert(aliased_value).second);
          value_to_set[aliased_value] = union_set;
        }
        aliased_set->clear();
        --num_buffers;
      }
    }
  }
  // Materialize the surviving (non-empty) sets as buffers with dense ids.
  std::vector<HloBuffer> buffers;
  buffers.reserve(num_buffers);
  for (const FlatValueSet& value_set : buffer_values) {
    if (!value_set.empty()) {
      HloBuffer::Id id = buffers.size();
      buffers.push_back({id, HloValueSet(value_set).TakeValues()});
    }
  }
  CHECK_EQ(buffers.size(), num_buffers);
  return buffers;
}
}
// Constructor only records the module; use HloAliasAnalysis::Run to obtain a
// fully populated analysis.
HloAliasAnalysis::HloAliasAnalysis(const HloModule* module) : module_(module) {}
// Returns the single buffer at (instruction, index); CHECK-fails if the
// position is ambiguous (covered by more than one buffer).
const HloBuffer& HloAliasAnalysis::GetUniqueBufferAt(
    const HloInstruction* instruction, const ShapeIndex& index) const {
  std::vector<const HloBuffer*> candidates =
      ComputeBuffersAt(instruction, index);
  CHECK_EQ(candidates.size(), 1);
  return *candidates.front();
}
// Mutable overload: delegates to the const overload, then resolves the
// result back to a mutable buffer through its id.
HloBuffer& HloAliasAnalysis::GetUniqueBufferAt(
    const HloInstruction* instruction, const ShapeIndex& index) {
  const HloAliasAnalysis* const_this = this;
  const HloBuffer& buffer = const_this->GetUniqueBufferAt(instruction, index);
  return GetBuffer(buffer.id());
}
std::vector<const HloBuffer*> HloAliasAnalysis::ComputeBuffersAt(
const HloInstruction* instruction, const ShapeIndex& index) const {
const HloValueSet& value_set =
dataflow_analysis_->GetValueSet(instruction, index);
std::vector<const HloBuffer*> buffers;
buffers.reserve(value_set.values().size());
for (const HloValue* value : value_set.values()) {
buffers.push_back(&GetBufferContainingValue(*value));
}
absl::c_sort(buffers, HloBuffer::IdLessThan);
buffers.erase(std::unique(buffers.begin(), buffers.end()), buffers.end());
return buffers;
}
// Internal consistency check: every value maps into a buffer that lists it,
// buffer ids are dense and match positions, each buffer's values point back
// to it, and each buffer's value list is strictly id-sorted.
absl::Status HloAliasAnalysis::Verify() const {
  for (const auto& pair : value_to_buffer_) {
    const HloValue* value = pair.first;
    const HloBuffer& buffer = *pair.second;
    TF_RET_CHECK(absl::c_linear_search(buffer.values(), value));
  }
  for (HloBuffer::Id id = 0; id < buffers_.size(); ++id) {
    const HloBuffer& buffer = buffers_[id];
    TF_RET_CHECK(buffer.id() == id);
    // Values within a buffer must be strictly increasing by id.
    HloValue::Id last_value_id = -1;
    for (const HloValue* value : buffer.values()) {
      TF_RET_CHECK(GetBufferContainingValue(*value) == buffer);
      TF_RET_CHECK(value->id() > last_value_id);
      last_value_id = value->id();
    }
  }
  return absl::OkStatus();
}
// Renders the analysis for debugging: buffers at every position of every
// instruction, followed by each buffer with its positions.
std::string HloAliasAnalysis::ToString() const {
  std::string out =
      absl::StrCat("HloAliasAnalysis, module ", module_->name(), "\n");
  StrAppend(&out, "  Buffers at each position:\n");
  for (const HloComputation* computation : module_->computations()) {
    for (const HloInstruction* instruction : computation->instructions()) {
      StrAppend(&out, "    ", instruction->name(), ":\n");
      if (instruction->shape().IsTuple()) {
        // Tuple-shaped: report buffers per shape index.
        ShapeUtil::ForEachSubshape(
            instruction->shape(),
            [&out, &instruction, this](const Shape&, const ShapeIndex& index) {
              StrAppend(&out, "      tuple index ", index.ToString(), ":\n");
              for (const HloBuffer* buffer :
                   ComputeBuffersAt(instruction, index)) {
                StrAppend(&out, "        ", buffer->ToString(), "\n");
              }
            });
      } else {
        // Array-shaped: single (empty) shape index.
        for (const HloBuffer* buffer :
             ComputeBuffersAt(instruction, {})) {
          StrAppend(&out, "      ", buffer->ToString(), "\n");
        }
      }
    }
  }
  StrAppend(&out, "  Buffers:\n");
  for (const HloBuffer& buffer : buffers()) {
    StrAppend(&out, "    ", buffer.ToString(), "\n");
    StrAppend(&out, "      positions:\n");
    for (const HloPosition& position : buffer.ComputePositions()) {
      StrAppend(&out, "        ", position.ToString(), "\n");
    }
  }
  return out;
}
// Runs dataflow analysis on `module`, groups the resulting values into
// buffers, indexes values back to their buffers, and records which buffers
// are live out of the entry computation's root.
absl::StatusOr<std::unique_ptr<HloAliasAnalysis>> HloAliasAnalysis::Run(
    const HloModule* module,
    const HloDataflowAnalysis::CanShareBuffer& can_share_buffer) {
  VLOG(2) << "HloAliasAnalysis::Run on module " << module->name();
  XLA_VLOG_LINES(2, module->ToString());
  auto alias_analysis = absl::WrapUnique(new HloAliasAnalysis(module));
  // NOTE(review): the two bool arguments appear to be ssa_form=true and
  // bitcast_defines_value=false — confirm against HloDataflowAnalysis::Run.
  TF_ASSIGN_OR_RETURN(alias_analysis->dataflow_analysis_,
                      HloDataflowAnalysis::Run(*module, true,
                                               false,
                                               can_share_buffer));
  size_t num_values = alias_analysis->dataflow_analysis_->values().size();
  alias_analysis->buffers_ = CreateBuffers(alias_analysis->dataflow_analysis());
  // Build the reverse map value -> owning buffer.
  alias_analysis->value_to_buffer_.reserve(num_values);
  for (HloBuffer& buffer : alias_analysis->buffers_) {
    for (const HloValue* value : buffer.values()) {
      alias_analysis->value_to_buffer_[value] = &buffer;
    }
  }
  CHECK_EQ(alias_analysis->value_to_buffer_.size(), num_values);
  TF_DCHECK_OK(alias_analysis->Verify());
  // Record buffers reachable from the entry root as live-out.
  HloInstruction* root = module->entry_computation()->root_instruction();
  ShapeUtil::ForEachSubshape(root->shape(), [&](const Shape& ,
                                                const ShapeIndex& index) {
    std::vector<const HloBuffer*> buffers =
        alias_analysis->ComputeBuffersAt(root, index);
    alias_analysis->live_out_buffers_.insert(buffers.begin(), buffers.end());
  });
  XLA_VLOG_LINES(2, alias_analysis->ToString());
  return std::move(alias_analysis);
}
} | #include "xla/service/hlo_alias_analysis.h"
#include <memory>
#include <set>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/flatten_call_graph.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
// Test fixture: owns a fresh verified module and runs HloAliasAnalysis over
// it, with helpers for inspecting buffers, interference, ambiguity, and
// distinctness.
class HloAliasAnalysisTest : public HloTestBase {
 protected:
  HloAliasAnalysisTest() : HloTestBase() {
    module_ = CreateNewVerifiedModule();
  }
  // Runs the analysis on module_ and caches the result in analysis_.
  HloAliasAnalysis& RunAnalysis() {
    analysis_ = HloAliasAnalysis::Run(module_.get(),
                                      nullptr)
                    .value();
    return *analysis_;
  }
  // Returns the buffers (sorted by id) covering (instruction, index).
  std::vector<HloBuffer> GetBuffersAt(const HloInstruction* instruction,
                                      const ShapeIndex& index = {}) const {
    std::set<HloBuffer::Id> buffer_ids;
    for (const HloValue* value : analysis_->dataflow_analysis()
                                     .GetValueSet(instruction, index)
                                     .values()) {
      buffer_ids.insert(analysis_->GetBufferContainingValue(*value).id());
    }
    std::vector<HloBuffer> buffers;
    buffers.reserve(buffer_ids.size());
    for (HloBuffer::Id id : buffer_ids) {
      buffers.push_back(analysis_->GetBuffer(id));
    }
    return buffers;
  }
  // Shorthand for the unique value defined at (instruction, index).
  const HloValue& GetValueDefinedAt(const HloInstruction* instruction,
                                    const ShapeIndex& index = {}) const {
    return analysis_->dataflow_analysis().GetValueDefinedAt(instruction, index);
  }
  // True if any two distinct values sharing a buffer may interfere under a
  // dependency-based ordering.
  bool AnyValuesInSameBufferInterfere() {
    DependencyHloOrdering ordering(module_.get());
    for (const HloBuffer& buffer : analysis_->buffers()) {
      for (const HloValue* value_a : buffer.values()) {
        for (const HloValue* value_b : buffer.values()) {
          if (*value_a != *value_b &&
              ordering.MayInterfere(*value_a, *value_b,
                                    analysis_->dataflow_analysis())) {
            VLOG(1) << *value_a << " interferes with " << *value_b
                    << " in buffer: " << buffer;
            return true;
          }
        }
      }
    }
    return false;
  }
  // True if some position of `instruction` is covered by more than one
  // buffer (i.e. the buffer at that position is ambiguous).
  bool InstructionBuffersAreAmbiguous(const HloInstruction* instruction) const {
    for (const auto& pair :
         analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
      const HloValueSet& value_set = pair.second;
      const HloBuffer* buffer = nullptr;
      for (const HloValue* value : value_set.values()) {
        if (buffer == nullptr) {
          buffer = &analysis_->GetBufferContainingValue(*value);
        } else if (buffer != &analysis_->GetBufferContainingValue(*value)) {
          return true;
        }
      }
    }
    return false;
  }
  // True if no buffer appears at more than one position of `instruction`.
  // Relies on set::merge semantics: elements already present in
  // `buffers_seen` are left behind in `buffers_at_this_index`, so a
  // non-empty remainder means a repeated buffer.
  bool InstructionBuffersAreDistinct(const HloInstruction* instruction) const {
    absl::flat_hash_set<const HloBuffer*> buffers_seen;
    for (const auto& pair :
         analysis_->dataflow_analysis().GetInstructionValueSet(instruction)) {
      const HloValueSet& value_set = pair.second;
      absl::flat_hash_set<const HloBuffer*> buffers_at_this_index;
      for (const HloValue* value : value_set.values()) {
        buffers_at_this_index.insert(
            &analysis_->GetBufferContainingValue(*value));
      }
      buffers_seen.merge(buffers_at_this_index);
      if (!buffers_at_this_index.empty()) return false;
    }
    return true;
  }
  std::unique_ptr<HloModule> module_;
  std::unique_ptr<HloAliasAnalysis> analysis_;
  const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};
// An elementwise add of two constants: every instruction gets its own
// buffer, with no ambiguity, sharing, or interference.
TEST_F(HloAliasAnalysisTest, BinaryOperation) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, constant1, constant2));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_EQ(analysis.buffers().size(), 3);
  for (const HloInstruction* instruction : {constant1, constant2, add}) {
    EXPECT_EQ(analysis.GetUniqueBufferAt(instruction).GetUniqueValue(),
              GetValueDefinedAt(instruction));
  }
  EXPECT_FALSE(InstructionBuffersAreAmbiguous(add));
  EXPECT_TRUE(InstructionBuffersAreDistinct(add));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// Tuple + get-tuple-element: the GTEs alias the parameters through the tuple
// elements, so each parameter shares a buffer with its GTE.
TEST_F(HloAliasAnalysisTest, TupleAndGtes) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({param0, param1}));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple, 1));
  builder.AddInstruction(
      HloInstruction::CreateBinary(scalar_shape_, HloOpcode::kAdd, gte0, gte1));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  // Buffers: tuple shell, param0, param1, add result.
  EXPECT_EQ(analysis.buffers().size(), 4);
  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}).GetUniqueValue(),
            GetValueDefinedAt(tuple, {}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {0}).GetUniqueValue(),
            GetValueDefinedAt(param0));
  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {1}).GetUniqueValue(),
            GetValueDefinedAt(param1));
  EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(param0),
            analysis.GetUniqueBufferAt(gte0));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(param0).ComputePositions(),
      UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
                           HloPosition{gte0, {}}));
  EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
  EXPECT_TRUE(InstructionBuffersAreDistinct(tuple));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// A tuple referencing the same parameter twice: the parameter's buffer
// appears at two tuple indices, so the tuple's buffers are not distinct.
TEST_F(HloAliasAnalysisTest, NondistinctTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({param0, param1, param0}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(param0).ComputePositions(),
      UnorderedElementsAre(HloPosition{param0, {}}, HloPosition{tuple, {0}},
                           HloPosition{tuple, {2}}));
  EXPECT_FALSE(InstructionBuffersAreAmbiguous(tuple));
  EXPECT_FALSE(InstructionBuffersAreDistinct(tuple));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// Input/output aliasing {0}->{0}, {1}->{1}: each parameter element shares a
// buffer with the corresponding root tuple element. Re-aliasing an already
// aliased output index must fail.
TEST_F(HloAliasAnalysisTest, ParametersWithAliasing) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  auto negate0 = builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte0));
  auto negate1 = builder.AddInstruction(
      HloInstruction::CreateUnary(scalar_shape_, HloOpcode::kNegate, gte1));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({negate0, negate1}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {0}, 0, {0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {1}));
  // Output index {1} is already aliased; a second alias must be rejected.
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {0}));
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
            analysis.GetUniqueBufferAt(tuple, {1}));
}
// Cross aliasing {0}->{1} and {1}->{0} with the root being the parameter's
// own elements: both elements collapse into shared buffers across all four
// (gte, tuple-index) combinations.
TEST_F(HloAliasAnalysisTest, ParametersWithCrossAliasing) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {0}, 0, {1}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {0}));
  // Output index {1} is already aliased; a second alias must be rejected.
  ASSERT_IS_NOT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {1}));
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte0),
            analysis.GetUniqueBufferAt(tuple, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
            analysis.GetUniqueBufferAt(tuple, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(gte1),
            analysis.GetUniqueBufferAt(tuple, {1}));
}
// Combines input/output aliasing with a while loop: the aliased output
// element's buffer must span the entry parameter, the while (init, body,
// condition, result), and the post-while negate feeding the root.
TEST_F(HloAliasAnalysisTest, InputOutputAliasingWithWhile) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Body: (e0, e1) -> (e0, e0 + e1).
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
  auto body_tuple = body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_0, add}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
  // Condition: always false.
  auto cond_builder = HloComputation::Builder("condition");
  auto cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, param));
  auto while_element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 0));
  auto while_element_2 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, xla_while, 1));
  auto negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, while_element_1));
  auto negate_2 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, while_element_2));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({negate_1, negate_2}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {0}, 0, {0}));
  TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias(
      {1}, 0, {1}));
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
              UnorderedElementsAre(&GetValueDefinedAt(param, {1}),
                                   &GetValueDefinedAt(xla_while, {1}),
                                   &GetValueDefinedAt(body_param, {1}),
                                   &GetValueDefinedAt(cond_param, {1}),
                                   &GetValueDefinedAt(add),
                                   &GetValueDefinedAt(negate_2)));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
      UnorderedElementsAre(
          HloPosition{param, {1}}, HloPosition{xla_while, {1}},
          HloPosition{while_element_2, {}}, HloPosition{body_param, {1}},
          HloPosition{body_element_1, {}}, HloPosition{add, {}},
          HloPosition{body_tuple, {1}}, HloPosition{tuple, {1}},
          HloPosition{cond_param, {1}}, HloPosition{negate_2, {}}));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// A single kCall: call operands alias the callee parameters, and the callee
// root aliases the call result.
TEST_F(HloAliasAnalysisTest, SingleCall) {
  auto subbuilder = HloComputation::Builder("Subcomputation");
  auto subparam0 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto subparam1 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
  HloComputation* called_computation =
      module_->AddEmbeddedComputation(subbuilder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto call = builder.AddInstruction(HloInstruction::CreateCall(
      scalar_shape_, {constant1, constant2}, called_computation));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant1, {}},
                                   HloPosition{subparam0, {}}));
  EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant2, {}},
                                   HloPosition{subparam1, {}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(add).ComputePositions(),
      UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call, {}}));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// The same computation called twice (second call consumes the first's
// result): subparam0 can hold either constant1's or add's buffer, so its
// buffers are ambiguous but still distinct per position.
TEST_F(HloAliasAnalysisTest, ComputationCalledTwice) {
  auto subbuilder = HloComputation::Builder("Subcomputation");
  auto subparam0 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param0"));
  auto subparam1 = subbuilder.AddInstruction(
      HloInstruction::CreateParameter(1, scalar_shape_, "param1"));
  auto add = subbuilder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, subparam0, subparam1));
  HloComputation* called_computation =
      module_->AddEmbeddedComputation(subbuilder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto call1 = builder.AddInstruction(HloInstruction::CreateCall(
      scalar_shape_, {constant1, constant2}, called_computation));
  auto call2 = builder.AddInstruction(HloInstruction::CreateCall(
      scalar_shape_, {call1, constant2}, called_computation));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_THAT(analysis.GetUniqueBufferAt(constant1).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant1, {}},
                                   HloPosition{subparam0, {}}));
  EXPECT_THAT(analysis.GetUniqueBufferAt(constant2).ComputePositions(),
              UnorderedElementsAre(HloPosition{constant2, {}},
                                   HloPosition{subparam1, {}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(add).ComputePositions(),
      UnorderedElementsAre(HloPosition{add, {}}, HloPosition{call1, {}},
                           HloPosition{subparam0, {}}, HloPosition{call2, {}}));
  // subparam0 is fed by constant1 (call1) and by add (call2) -> two buffers.
  EXPECT_THAT(GetBuffersAt(subparam0),
              UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
                                   analysis.GetUniqueBufferAt(add)));
  EXPECT_THAT(GetBuffersAt(subparam1),
              UnorderedElementsAre(analysis.GetUniqueBufferAt(constant2)));
  EXPECT_TRUE(InstructionBuffersAreAmbiguous(subparam0));
  EXPECT_FALSE(InstructionBuffersAreAmbiguous(subparam1));
  EXPECT_TRUE(InstructionBuffersAreDistinct(subparam0));
  EXPECT_TRUE(InstructionBuffersAreDistinct(subparam1));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// A single while loop over a two-element tuple: init operands, body/condition
// parameters, body root, and the while result all share element-wise buffers;
// element 1 additionally carries the values produced inside the loop.
TEST_F(HloAliasAnalysisTest, SingleWhile) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Body: (e0, e1) -> (e0, e0 + e1).
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
  auto body_tuple = body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_0, add}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
  // Condition: always false.
  auto cond_builder = HloComputation::Builder("condition");
  auto cond_param = cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {}).ComputePositions(),
      UnorderedElementsAre(HloPosition{tuple, {}}, HloPosition{xla_while, {}},
                           HloPosition{body_param, {}},
                           HloPosition{body_tuple, {}},
                           HloPosition{cond_param, {}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {0}).ComputePositions(),
      UnorderedElementsAre(
          HloPosition{constant1, {}}, HloPosition{tuple, {0}},
          HloPosition{xla_while, {0}}, HloPosition{body_param, {0}},
          HloPosition{body_element_0, {}}, HloPosition{body_tuple, {0}},
          HloPosition{cond_param, {0}}));
  EXPECT_THAT(
      analysis.GetUniqueBufferAt(xla_while, {1}).ComputePositions(),
      UnorderedElementsAre(
          HloPosition{constant2, {}}, HloPosition{tuple, {1}},
          HloPosition{xla_while, {1}}, HloPosition{body_param, {1}},
          HloPosition{body_element_1, {}}, HloPosition{add, {}},
          HloPosition{body_tuple, {1}}, HloPosition{cond_param, {1}}));
  // Element 0 is loop-invariant: a single value flows through.
  EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {0}).values(),
              UnorderedElementsAre(&GetValueDefinedAt(constant1)));
  // Element 1 is updated each iteration: several values share the buffer.
  EXPECT_THAT(analysis.GetUniqueBufferAt(xla_while, {1}).values(),
              UnorderedElementsAre(&GetValueDefinedAt(constant2),
                                   &GetValueDefinedAt(xla_while, {1}),
                                   &GetValueDefinedAt(body_param, {1}),
                                   &GetValueDefinedAt(cond_param, {1}),
                                   &GetValueDefinedAt(add)));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// Three while loops chained back-to-back sharing one body/condition pair.
// After flattening the call graph, the loop-carried buffers must thread
// unchanged from the init tuple through all three loops.
TEST_F(HloAliasAnalysisTest, SequentialWhiles) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Body computes (a, b) -> (a, a + b), same as in SingleWhile.
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto add = body_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, body_element_0, body_element_1));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_0, add}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  // while0 -> while1 -> while2, each feeding the next.
  auto xla_while0 = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  auto xla_while1 = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while0));
  auto xla_while2 = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, xla_while1));
  module_->AddEntryComputation(builder.Build());
  // All three whiles initially call the same body/condition; flatten so
  // each while gets its own computation clones before analysis.
  FlattenCallGraph flattener;
  TF_ASSERT_OK(flattener.Run(module_.get()).status());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  // The same buffers flow from the init tuple all the way to while2.
  EXPECT_EQ(analysis.GetUniqueBufferAt(tuple, {}),
            analysis.GetUniqueBufferAt(xla_while2, {}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(xla_while2, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(xla_while2, {1}));
}
// A while loop whose body contains another while loop. Both tuple elements
// must alias from the entry constants through the outer loop into the
// nested loop's element accesses.
TEST_F(HloAliasAnalysisTest, NestedWhiles) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Each while needs its own condition computation; build two identical
  // always-false conditions.
  auto build_cond_computation = [&tuple_shape]() {
    auto cond_builder = HloComputation::Builder("condition");
    cond_builder.AddInstruction(
        HloInstruction::CreateParameter(0, tuple_shape, "param"));
    cond_builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
    return cond_builder.Build();
  };
  HloComputation* condition1 =
      module_->AddEmbeddedComputation(build_cond_computation());
  HloComputation* condition2 =
      module_->AddEmbeddedComputation(build_cond_computation());
  // Inner body: (a, b) -> (a, a + b).
  auto inner_builder = HloComputation::Builder("inner_body");
  auto inner_param = inner_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto inner_element_0 = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 0));
  auto inner_element_1 = inner_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, inner_param, 1));
  auto add = inner_builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, inner_element_0, inner_element_1));
  inner_builder.AddInstruction(
      HloInstruction::CreateTuple({inner_element_0, add}));
  HloComputation* inner_body =
      module_->AddEmbeddedComputation(inner_builder.Build());
  // Outer body: (a, b) -> inner_while((-a, b)). The negate forces a fresh
  // value for element 0 before entering the nested loop.
  auto outer_builder = HloComputation::Builder("outer_body");
  auto outer_param = outer_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto outer_element_0 = outer_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 0));
  auto negate = outer_builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, outer_element_0));
  auto outer_element_1 = outer_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, outer_param, 1));
  auto outer_tuple = outer_builder.AddInstruction(
      HloInstruction::CreateTuple({negate, outer_element_1}));
  auto nested_while = outer_builder.AddInstruction(HloInstruction::CreateWhile(
      tuple_shape, condition1, inner_body, outer_tuple));
  HloComputation* outer_body =
      module_->AddEmbeddedComputation(outer_builder.Build());
  // Entry: outer_while((1.0, 2.0)).
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto entry_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition2, outer_body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  // Element 0 of both loops shares constant1's buffer; element 1 shares
  // constant2's buffer, all the way down to the inner GTEs.
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(entry_while, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(nested_while, {0}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(inner_element_0));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(entry_while, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(nested_while, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant2),
            analysis.GetUniqueBufferAt(inner_element_1));
  EXPECT_FALSE(AnyValuesInSameBufferInterfere());
}
// A while whose body rotates the tuple elements: (a, b, c) -> (b, c, a).
// The rotation forces all three element buffers (and all three entry
// constants) to collapse into a single buffer, which then interferes.
TEST_F(HloAliasAnalysisTest, SwizzlingWhile) {
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_, scalar_shape_});
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto body_element_2 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 2));
  // The root swizzles the elements, creating cross-element aliasing on
  // the loop back-edge.
  body_builder.AddInstruction(HloInstruction::CreateTuple(
      {body_element_1, body_element_2, body_element_0}));
  HloComputation* body = module_->AddEmbeddedComputation(body_builder.Build());
  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "param"));
  auto cond_constant = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module_->AddEmbeddedComputation(cond_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2, constant3}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(tuple_shape, condition, body, tuple));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  const HloAliasAnalysis& analysis = RunAnalysis();
  // Only three buffers remain: the merged element buffer (rooted at
  // constant1), the top-level tuple, and the condition's bool constant.
  EXPECT_THAT(
      analysis.buffers(),
      UnorderedElementsAre(analysis.GetUniqueBufferAt(constant1),
                           analysis.GetUniqueBufferAt(tuple, {}),
                           analysis.GetUniqueBufferAt(cond_constant)))
;
  EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
            analysis.GetUniqueBufferAt(xla_while, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
            analysis.GetUniqueBufferAt(xla_while, {2}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(xla_while, {0}),
            analysis.GetUniqueBufferAt(constant1));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(constant2));
  EXPECT_EQ(analysis.GetUniqueBufferAt(constant1),
            analysis.GetUniqueBufferAt(constant3));
  // Distinct live values now share one buffer, so interference exists.
  EXPECT_TRUE(AnyValuesInSameBufferInterfere());
}
// A bitcast aliases its operand: the constant and the bitcast must share
// one and only one buffer.
TEST_F(HloAliasAnalysisTest, Bitcast) {
  auto entry = HloComputation::Builder(TestName());
  auto operand = entry.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto bitcast_inst = entry.AddInstruction(
      HloInstruction::CreateBitcast(scalar_shape_, operand));
  module_->AddEntryComputation(entry.Build());
  SCOPED_TRACE(module_->ToString());

  const HloAliasAnalysis& analysis = RunAnalysis();
  // Exactly one buffer exists, shared by the constant and its bitcast.
  EXPECT_EQ(analysis.buffers().size(), 1);
  EXPECT_EQ(analysis.GetUniqueBufferAt(operand),
            analysis.GetUniqueBufferAt(bitcast_inst));
}
// dynamic-update-slice updates its operand in place, so it must share a
// buffer with the copy it updates.
TEST_F(HloAliasAnalysisTest, DynamicUpdateSlice) {
  Shape shape = ShapeUtil::MakeShape(F32, {8});
  Shape update_shape = ShapeUtil::MakeShape(F32, {4});
  Shape index_shape = ShapeUtil::MakeShape(S32, {});
  auto builder = HloComputation::Builder(TestName());
  auto param0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "param0"));
  auto param1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, update_shape, "param1"));
  auto param2 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, index_shape, "param2"));
  // Copy param0 first so the in-place update has an operand the analysis
  // is free to alias (entry parameters may not be updated in place).
  auto copy0 = builder.AddInstruction(
      HloInstruction::CreateUnary(shape, HloOpcode::kCopy, param0));
  auto dynamic_update_slice = builder.AddInstruction(
      HloInstruction::CreateDynamicUpdateSlice(shape, copy0, param1, {param2}));
  module_->AddEntryComputation(builder.Build());
  SCOPED_TRACE(module_->ToString());
  HloAliasAnalysis& analysis = RunAnalysis();
  EXPECT_EQ(analysis.GetUniqueBufferAt(copy0),
            analysis.GetUniqueBufferAt(dynamic_update_slice));
}
// Multi-output fusion containing two dynamic-update-slices and one add.
// The DUS outputs (tuple indices 1 and 2) must share buffers with the
// fusion operands they update in place; the add output (index 0) must not
// alias its operand.
TEST_F(HloAliasAnalysisTest, DynamicUpdateSliceMultiOutputFusion) {
  absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
  param0 = f32[1280,1,128] parameter(0)
  param1 = f32[1280,1,128] parameter(1)
  param2 = f32[1280,1,128] parameter(2)
  constant.1 = f32[] constant(0)
  broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
  constant.3 = s32[] constant(0)
  add.1 = f32[1280,1,128] add(param0, param0)
  dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
  dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
  ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}
ENTRY main {
  param = f32[1280,1,128] parameter(0)
  negate0 = f32[1280,1,128] negate(param)
  negate1 = f32[1280,1,128] negate(param)
  negate2 = f32[1280,1,128] negate(param)
  ROOT fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
}
)";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
  SCOPED_TRACE(module_->ToString());
  HloAliasAnalysis& analysis = RunAnalysis();
  LOG(INFO) << analysis.ToString();
  const HloInstruction* fusion =
      module_->entry_computation()->GetInstructionWithName("fusion");
  const HloInstruction* negate0 =
      module_->entry_computation()->GetInstructionWithName("negate0");
  const HloInstruction* negate1 =
      module_->entry_computation()->GetInstructionWithName("negate1");
  const HloInstruction* negate2 =
      module_->entry_computation()->GetInstructionWithName("negate2");
  // DUS outputs alias their updated operands...
  EXPECT_EQ(analysis.GetUniqueBufferAt(negate1),
            analysis.GetUniqueBufferAt(fusion, {1}));
  EXPECT_EQ(analysis.GetUniqueBufferAt(negate2),
            analysis.GetUniqueBufferAt(fusion, {2}));
  // ...but the add output gets its own buffer.
  EXPECT_NE(analysis.GetUniqueBufferAt(negate0),
            analysis.GetUniqueBufferAt(fusion, {0}));
}
// Fusion whose root is a dynamic-update-slice that consumes another
// dynamic-update-slice of the fusion parameter. The test checks that the
// fusion result does NOT share a buffer with the fusion operand.
TEST_F(HloAliasAnalysisTest, ChainedDynamicUpdateSliceFusion) {
  absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
  param0 = f32[1280,1,128] parameter(0)
  constant.1 = f32[] constant(0)
  broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
  constant.3 = s32[] constant(0)
  dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
  ROOT dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
}
ENTRY main {
  param = f32[1280,1,128] parameter(0)
  negate0 = f32[1280,1,128] negate(param)
  ROOT fusion = f32[1280,1,128] fusion(negate0), kind=kLoop, calls=fused_computation
}
)";
  TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
  SCOPED_TRACE(module_->ToString());
  HloAliasAnalysis& analysis = RunAnalysis();
  LOG(INFO) << analysis.ToString();
  const HloInstruction* fusion =
      module_->entry_computation()->GetInstructionWithName("fusion");
  const HloInstruction* negate0 =
      module_->entry_computation()->GetInstructionWithName("negate0");
  // The chained update means the fusion output must not be aliased with
  // the operand buffer.
  EXPECT_NE(analysis.GetUniqueBufferAt(negate0),
            analysis.GetUniqueBufferAt(fusion));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_alias_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_alias_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f3262866-5969-4226-8165-e5f8fb7d8624 | cpp | abseil/abseil-cpp | time_zone_lookup | absl/time/internal/cctz/src/time_zone_lookup.cc | absl/time/internal/cctz/src/time_zone_lookup_test.cc | #include "absl/base/config.h"
#include "absl/time/internal/cctz/include/cctz/time_zone.h"
#if defined(__ANDROID__)
#include <sys/system_properties.h>
#endif
#if defined(__APPLE__)
#include <CoreFoundation/CFTimeZone.h>
#include <vector>
#endif
#if defined(__Fuchsia__)
#include <fuchsia/intl/cpp/fidl.h>
#include <lib/async-loop/cpp/loop.h>
#include <lib/fdio/directory.h>
#include <zircon/types.h>
#endif
#if defined(_WIN32)
#include <sdkddkver.h>
#if ((defined(_WIN32_WINNT_WIN10) && !defined(__MINGW32__)) || \
(defined(NTDDI_WIN10_NI) && NTDDI_VERSION >= NTDDI_WIN10_NI)) && \
(_WIN32_WINNT >= _WIN32_WINNT_WINXP)
#define USE_WIN32_LOCAL_TIME_ZONE
#include <roapi.h>
#include <tchar.h>
#include <wchar.h>
#include <windows.globalization.h>
#include <windows.h>
#include <winstring.h>
#endif
#endif
#include <cstdlib>
#include <cstring>
#include <string>
#include "time_zone_fixed.h"
#include "time_zone_impl.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
namespace {
#if defined(USE_WIN32_LOCAL_TIME_ZONE)
// Queries WinRT (Windows.Globalization.Calendar) for the local time-zone
// name and returns it as UTF-8. Returns an empty string on any failure.
// All combase.dll entry points are resolved at runtime via GetProcAddress
// so the code works even when the functions are unavailable.
std::string win32_local_time_zone(const HMODULE combase) {
  std::string result;
  const auto ro_activate_instance =
      reinterpret_cast<decltype(&RoActivateInstance)>(
          GetProcAddress(combase, "RoActivateInstance"));
  if (!ro_activate_instance) {
    return result;
  }
  const auto windows_create_string_reference =
      reinterpret_cast<decltype(&WindowsCreateStringReference)>(
          GetProcAddress(combase, "WindowsCreateStringReference"));
  if (!windows_create_string_reference) {
    return result;
  }
  const auto windows_delete_string =
      reinterpret_cast<decltype(&WindowsDeleteString)>(
          GetProcAddress(combase, "WindowsDeleteString"));
  if (!windows_delete_string) {
    return result;
  }
  const auto windows_get_string_raw_buffer =
      reinterpret_cast<decltype(&WindowsGetStringRawBuffer)>(
          GetProcAddress(combase, "WindowsGetStringRawBuffer"));
  if (!windows_get_string_raw_buffer) {
    return result;
  }
  // Build an HSTRING reference for the Calendar runtime-class name
  // (no allocation; the header provides the backing storage).
  HSTRING calendar_class_id;
  HSTRING_HEADER calendar_class_id_header;
  HRESULT hr = windows_create_string_reference(
      RuntimeClass_Windows_Globalization_Calendar,
      sizeof(RuntimeClass_Windows_Globalization_Calendar) / sizeof(wchar_t) - 1,
      &calendar_class_id_header, &calendar_class_id);
  if (FAILED(hr)) {
    return result;
  }
  // Instantiate the Calendar and ask it for its time-zone interface.
  IInspectable* calendar;
  hr = ro_activate_instance(calendar_class_id, &calendar);
  if (FAILED(hr)) {
    return result;
  }
  ABI::Windows::Globalization::ITimeZoneOnCalendar* time_zone;
  hr = calendar->QueryInterface(IID_PPV_ARGS(&time_zone));
  if (FAILED(hr)) {
    calendar->Release();
    return result;
  }
  HSTRING tz_hstr;
  hr = time_zone->GetTimeZone(&tz_hstr);
  if (SUCCEEDED(hr)) {
    // Convert the UTF-16 HSTRING to a UTF-8 std::string.
    UINT32 wlen;
    const PCWSTR tz_wstr = windows_get_string_raw_buffer(tz_hstr, &wlen);
    if (tz_wstr) {
      const int size =
          WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
                              nullptr, 0, nullptr, nullptr);
      result.resize(static_cast<size_t>(size));
      WideCharToMultiByte(CP_UTF8, 0, tz_wstr, static_cast<int>(wlen),
                          &result[0], size, nullptr, nullptr);
    }
    windows_delete_string(tz_hstr);
  }
  // Release COM references acquired above.
  time_zone->Release();
  calendar->Release();
  return result;
}
#endif
}
std::string time_zone::name() const { return effective_impl().Name(); }
// Breaks an absolute time point down into civil-time fields in this zone.
time_zone::absolute_lookup time_zone::lookup(
    const time_point<seconds>& tp) const {
  const time_zone::Impl& impl = effective_impl();
  return impl.BreakTime(tp);
}
// Maps a civil time in this zone to absolute time(s).
time_zone::civil_lookup time_zone::lookup(const civil_second& cs) const {
  const time_zone::Impl& impl = effective_impl();
  return impl.MakeTime(cs);
}
bool time_zone::next_transition(const time_point<seconds>& tp,
civil_transition* trans) const {
return effective_impl().NextTransition(tp, trans);
}
bool time_zone::prev_transition(const time_point<seconds>& tp,
civil_transition* trans) const {
return effective_impl().PrevTransition(tp, trans);
}
std::string time_zone::version() const { return effective_impl().Version(); }
std::string time_zone::description() const {
return effective_impl().Description();
}
// A default-constructed time_zone carries a null impl; treat it as UTC so
// every member function has a valid implementation to delegate to.
const time_zone::Impl& time_zone::effective_impl() const {
  return impl_ != nullptr ? *impl_ : *time_zone::Impl::UTC().impl_;
}
// Loads (and caches) the zone named `name` into *tz; returns success.
bool load_time_zone(const std::string& name, time_zone* tz) {
  const bool loaded = time_zone::Impl::LoadTimeZone(name, tz);
  return loaded;
}
// Returns the shared UTC zone singleton.
time_zone utc_time_zone() {
  time_zone utc = time_zone::Impl::UTC();
  return utc;
}
// Returns a zone at a fixed UTC offset. The offset is encoded into a
// synthetic zone name and loaded through the normal lookup path so it is
// cached like any other zone.
time_zone fixed_time_zone(const seconds& offset) {
  time_zone fixed;
  load_time_zone(FixedOffsetToName(offset), &fixed);
  return fixed;
}
// Determines the machine's local time zone. Platform-specific sources are
// consulted first (Android system property, Apple CoreFoundation, Fuchsia
// intl service, Windows WinRT), then the TZ environment variable
// overrides everything, with ":localtime"/LOCALTIME//etc/localtime as the
// final fallback.
time_zone local_time_zone() {
  const char* zone = ":localtime";
#if defined(__ANDROID__)
  // Android stores the device zone in a system property.
  char sysprop[PROP_VALUE_MAX];
  if (__system_property_get("persist.sys.timezone", sysprop) > 0) {
    zone = sysprop;
  }
#endif
#if defined(__APPLE__)
  // Ask CoreFoundation for the default zone name, converted to UTF-8.
  std::vector<char> buffer;
  CFTimeZoneRef tz_default = CFTimeZoneCopyDefault();
  if (CFStringRef tz_name = CFTimeZoneGetName(tz_default)) {
    CFStringEncoding encoding = kCFStringEncodingUTF8;
    CFIndex length = CFStringGetLength(tz_name);
    // +1 leaves room for the NUL terminator.
    CFIndex max_size = CFStringGetMaximumSizeForEncoding(length, encoding) + 1;
    buffer.resize(static_cast<size_t>(max_size));
    if (CFStringGetCString(tz_name, &buffer[0], max_size, encoding)) {
      zone = &buffer[0];
    }
  }
  CFRelease(tz_default);
#endif
#if defined(__Fuchsia__)
  // Query the fuchsia.intl PropertyProvider for the primary time zone,
  // with a bounded wait so a hung service cannot block us indefinitely.
  std::string primary_tz;
  [&]() {
    const zx::duration kTimeout = zx::msec(500);
    async::Loop loop(&kAsyncLoopConfigNeverAttachToThread);
    fuchsia::intl::PropertyProviderHandle handle;
    zx_status_t status = fdio_service_connect_by_name(
        fuchsia::intl::PropertyProvider::Name_,
        handle.NewRequest().TakeChannel().release());
    if (status != ZX_OK) {
      return;
    }
    fuchsia::intl::PropertyProviderPtr intl_provider;
    status = intl_provider.Bind(std::move(handle), loop.dispatcher());
    if (status != ZX_OK) {
      return;
    }
    intl_provider->GetProfile(
        [&loop, &primary_tz](fuchsia::intl::Profile profile) {
          if (!profile.time_zones().empty()) {
            primary_tz = profile.time_zones()[0].id;
          }
          loop.Quit();
        });
    loop.Run(zx::deadline_after(kTimeout));
  }();
  if (!primary_tz.empty()) {
    zone = primary_tz.c_str();
  }
#endif
#if defined(USE_WIN32_LOCAL_TIME_ZONE)
  // Resolve and initialize the WinRT runtime dynamically, then ask it for
  // the local zone name. RPC_E_CHANGED_MODE means WinRT was already
  // initialized in another mode, in which case we must not uninitialize.
  std::string winrt_tz;
  const HMODULE combase =
      LoadLibraryEx(_T("combase.dll"), nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
  if (combase) {
    const auto ro_initialize = reinterpret_cast<decltype(&::RoInitialize)>(
        GetProcAddress(combase, "RoInitialize"));
    const auto ro_uninitialize = reinterpret_cast<decltype(&::RoUninitialize)>(
        GetProcAddress(combase, "RoUninitialize"));
    if (ro_initialize && ro_uninitialize) {
      const HRESULT hr = ro_initialize(RO_INIT_MULTITHREADED);
      if (SUCCEEDED(hr) || hr == RPC_E_CHANGED_MODE) {
        winrt_tz = win32_local_time_zone(combase);
        if (SUCCEEDED(hr)) {
          ro_uninitialize();
        }
      }
    }
    FreeLibrary(combase);
  }
  if (!winrt_tz.empty()) {
    zone = winrt_tz.c_str();
  }
#endif
  // TZ, when set, overrides all platform detection above.
  char* tz_env = nullptr;
#if defined(_MSC_VER)
  // _dupenv_s allocates; the buffer is freed below after `name` is copied.
  _dupenv_s(&tz_env, nullptr, "TZ");
#else
  tz_env = std::getenv("TZ");
#endif
  if (tz_env) zone = tz_env;
  // Skip a leading ':' (as in TZ=":America/New_York").
  if (*zone == ':') ++zone;
  // "localtime" is resolved via LOCALTIME (or /etc/localtime on POSIX).
  char* localtime_env = nullptr;
  if (strcmp(zone, "localtime") == 0) {
#if defined(_MSC_VER)
    _dupenv_s(&localtime_env, nullptr, "LOCALTIME");
#else
    zone = "/etc/localtime";
    localtime_env = std::getenv("LOCALTIME");
#endif
    if (localtime_env) zone = localtime_env;
  }
  // Copy the name before freeing any _dupenv_s buffers it may point into.
  const std::string name = zone;
#if defined(_MSC_VER)
  free(localtime_env);
  free(tz_env);
#endif
  time_zone tz;
  load_time_zone(name, &tz);
  return tz;
}
}
}
ABSL_NAMESPACE_END
} | #include <chrono>
#include <cstddef>
#include <cstdlib>
#include <future>
#include <limits>
#include <string>
#include <thread>
#include <vector>
#include "absl/base/config.h"
#include "absl/time/internal/cctz/include/cctz/time_zone.h"
#if defined(__linux__)
#include <features.h>
#endif
#include "gtest/gtest.h"
#include "absl/time/internal/cctz/include/cctz/civil_time.h"
namespace chrono = std::chrono;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace time_internal {
namespace cctz {
namespace {
const char* const kTimeZoneNames[] = {"Africa/Abidjan",
"Africa/Accra",
"Africa/Addis_Ababa",
"Africa/Algiers",
"Africa/Asmara",
"Africa/Bamako",
"Africa/Bangui",
"Africa/Banjul",
"Africa/Bissau",
"Africa/Blantyre",
"Africa/Brazzaville",
"Africa/Bujumbura",
"Africa/Cairo",
"Africa/Casablanca",
"Africa/Ceuta",
"Africa/Conakry",
"Africa/Dakar",
"Africa/Dar_es_Salaam",
"Africa/Djibouti",
"Africa/Douala",
"Africa/El_Aaiun",
"Africa/Freetown",
"Africa/Gaborone",
"Africa/Harare",
"Africa/Johannesburg",
"Africa/Juba",
"Africa/Kampala",
"Africa/Khartoum",
"Africa/Kigali",
"Africa/Kinshasa",
"Africa/Lagos",
"Africa/Libreville",
"Africa/Lome",
"Africa/Luanda",
"Africa/Lubumbashi",
"Africa/Lusaka",
"Africa/Malabo",
"Africa/Maputo",
"Africa/Maseru",
"Africa/Mbabane",
"Africa/Mogadishu",
"Africa/Monrovia",
"Africa/Nairobi",
"Africa/Ndjamena",
"Africa/Niamey",
"Africa/Nouakchott",
"Africa/Ouagadougou",
"Africa/Porto-Novo",
"Africa/Sao_Tome",
"Africa/Timbuktu",
"Africa/Tripoli",
"Africa/Tunis",
"Africa/Windhoek",
"America/Adak",
"America/Anchorage",
"America/Anguilla",
"America/Antigua",
"America/Araguaina",
"America/Argentina/Buenos_Aires",
"America/Argentina/Catamarca",
"America/Argentina/Cordoba",
"America/Argentina/Jujuy",
"America/Argentina/La_Rioja",
"America/Argentina/Mendoza",
"America/Argentina/Rio_Gallegos",
"America/Argentina/Salta",
"America/Argentina/San_Juan",
"America/Argentina/San_Luis",
"America/Argentina/Tucuman",
"America/Argentina/Ushuaia",
"America/Aruba",
"America/Asuncion",
"America/Atikokan",
"America/Atka",
"America/Bahia",
"America/Bahia_Banderas",
"America/Barbados",
"America/Belem",
"America/Belize",
"America/Blanc-Sablon",
"America/Boa_Vista",
"America/Bogota",
"America/Boise",
"America/Cambridge_Bay",
"America/Campo_Grande",
"America/Cancun",
"America/Caracas",
"America/Cayenne",
"America/Cayman",
"America/Chicago",
"America/Chihuahua",
"America/Ciudad_Juarez",
"America/Coral_Harbour",
"America/Costa_Rica",
"America/Creston",
"America/Cuiaba",
"America/Curacao",
"America/Danmarkshavn",
"America/Dawson",
"America/Dawson_Creek",
"America/Denver",
"America/Detroit",
"America/Dominica",
"America/Edmonton",
"America/Eirunepe",
"America/El_Salvador",
"America/Ensenada",
"America/Fort_Nelson",
"America/Fortaleza",
"America/Glace_Bay",
"America/Godthab",
"America/Goose_Bay",
"America/Grand_Turk",
"America/Grenada",
"America/Guadeloupe",
"America/Guatemala",
"America/Guayaquil",
"America/Guyana",
"America/Halifax",
"America/Havana",
"America/Hermosillo",
"America/Indiana/Indianapolis",
"America/Indiana/Knox",
"America/Indiana/Marengo",
"America/Indiana/Petersburg",
"America/Indiana/Tell_City",
"America/Indiana/Vevay",
"America/Indiana/Vincennes",
"America/Indiana/Winamac",
"America/Inuvik",
"America/Iqaluit",
"America/Jamaica",
"America/Juneau",
"America/Kentucky/Louisville",
"America/Kentucky/Monticello",
"America/Kralendijk",
"America/La_Paz",
"America/Lima",
"America/Los_Angeles",
"America/Lower_Princes",
"America/Maceio",
"America/Managua",
"America/Manaus",
"America/Marigot",
"America/Martinique",
"America/Matamoros",
"America/Mazatlan",
"America/Menominee",
"America/Merida",
"America/Metlakatla",
"America/Mexico_City",
"America/Miquelon",
"America/Moncton",
"America/Monterrey",
"America/Montevideo",
"America/Montreal",
"America/Montserrat",
"America/Nassau",
"America/New_York",
"America/Nipigon",
"America/Nome",
"America/Noronha",
"America/North_Dakota/Beulah",
"America/North_Dakota/Center",
"America/North_Dakota/New_Salem",
"America/Nuuk",
"America/Ojinaga",
"America/Panama",
"America/Pangnirtung",
"America/Paramaribo",
"America/Phoenix",
"America/Port-au-Prince",
"America/Port_of_Spain",
"America/Porto_Acre",
"America/Porto_Velho",
"America/Puerto_Rico",
"America/Punta_Arenas",
"America/Rainy_River",
"America/Rankin_Inlet",
"America/Recife",
"America/Regina",
"America/Resolute",
"America/Rio_Branco",
"America/Santa_Isabel",
"America/Santarem",
"America/Santiago",
"America/Santo_Domingo",
"America/Sao_Paulo",
"America/Scoresbysund",
"America/Shiprock",
"America/Sitka",
"America/St_Barthelemy",
"America/St_Johns",
"America/St_Kitts",
"America/St_Lucia",
"America/St_Thomas",
"America/St_Vincent",
"America/Swift_Current",
"America/Tegucigalpa",
"America/Thule",
"America/Thunder_Bay",
"America/Tijuana",
"America/Toronto",
"America/Tortola",
"America/Vancouver",
"America/Virgin",
"America/Whitehorse",
"America/Winnipeg",
"America/Yakutat",
"America/Yellowknife",
"Antarctica/Casey",
"Antarctica/Davis",
"Antarctica/DumontDUrville",
"Antarctica/Macquarie",
"Antarctica/Mawson",
"Antarctica/McMurdo",
"Antarctica/Palmer",
"Antarctica/Rothera",
"Antarctica/Syowa",
"Antarctica/Troll",
"Antarctica/Vostok",
"Arctic/Longyearbyen",
"Asia/Aden",
"Asia/Almaty",
"Asia/Amman",
"Asia/Anadyr",
"Asia/Aqtau",
"Asia/Aqtobe",
"Asia/Ashgabat",
"Asia/Atyrau",
"Asia/Baghdad",
"Asia/Bahrain",
"Asia/Baku",
"Asia/Bangkok",
"Asia/Barnaul",
"Asia/Beirut",
"Asia/Bishkek",
"Asia/Brunei",
"Asia/Chita",
"Asia/Choibalsan",
"Asia/Chongqing",
"Asia/Colombo",
"Asia/Damascus",
"Asia/Dhaka",
"Asia/Dili",
"Asia/Dubai",
"Asia/Dushanbe",
"Asia/Famagusta",
"Asia/Gaza",
"Asia/Harbin",
"Asia/Hebron",
"Asia/Ho_Chi_Minh",
"Asia/Hong_Kong",
"Asia/Hovd",
"Asia/Irkutsk",
"Asia/Istanbul",
"Asia/Jakarta",
"Asia/Jayapura",
"Asia/Jerusalem",
"Asia/Kabul",
"Asia/Kamchatka",
"Asia/Karachi",
"Asia/Kashgar",
"Asia/Kathmandu",
"Asia/Khandyga",
"Asia/Kolkata",
"Asia/Krasnoyarsk",
"Asia/Kuala_Lumpur",
"Asia/Kuching",
"Asia/Kuwait",
"Asia/Macau",
"Asia/Magadan",
"Asia/Makassar",
"Asia/Manila",
"Asia/Muscat",
"Asia/Nicosia",
"Asia/Novokuznetsk",
"Asia/Novosibirsk",
"Asia/Omsk",
"Asia/Oral",
"Asia/Phnom_Penh",
"Asia/Pontianak",
"Asia/Pyongyang",
"Asia/Qatar",
"Asia/Qostanay",
"Asia/Qyzylorda",
"Asia/Riyadh",
"Asia/Sakhalin",
"Asia/Samarkand",
"Asia/Seoul",
"Asia/Shanghai",
"Asia/Singapore",
"Asia/Srednekolymsk",
"Asia/Taipei",
"Asia/Tashkent",
"Asia/Tbilisi",
"Asia/Tehran",
"Asia/Tel_Aviv",
"Asia/Thimphu",
"Asia/Tokyo",
"Asia/Tomsk",
"Asia/Ulaanbaatar",
"Asia/Urumqi",
"Asia/Ust-Nera",
"Asia/Vientiane",
"Asia/Vladivostok",
"Asia/Yakutsk",
"Asia/Yangon",
"Asia/Yekaterinburg",
"Asia/Yerevan",
"Atlantic/Azores",
"Atlantic/Bermuda",
"Atlantic/Canary",
"Atlantic/Cape_Verde",
"Atlantic/Faroe",
"Atlantic/Jan_Mayen",
"Atlantic/Madeira",
"Atlantic/Reykjavik",
"Atlantic/South_Georgia",
"Atlantic/St_Helena",
"Atlantic/Stanley",
"Australia/Adelaide",
"Australia/Brisbane",
"Australia/Broken_Hill",
"Australia/Canberra",
"Australia/Currie",
"Australia/Darwin",
"Australia/Eucla",
"Australia/Hobart",
"Australia/Lindeman",
"Australia/Lord_Howe",
"Australia/Melbourne",
"Australia/Perth",
"Australia/Sydney",
"Australia/Yancowinna",
"Etc/GMT",
"Etc/GMT+0",
"Etc/GMT+1",
"Etc/GMT+10",
"Etc/GMT+11",
"Etc/GMT+12",
"Etc/GMT+2",
"Etc/GMT+3",
"Etc/GMT+4",
"Etc/GMT+5",
"Etc/GMT+6",
"Etc/GMT+7",
"Etc/GMT+8",
"Etc/GMT+9",
"Etc/GMT-0",
"Etc/GMT-1",
"Etc/GMT-10",
"Etc/GMT-11",
"Etc/GMT-12",
"Etc/GMT-13",
"Etc/GMT-14",
"Etc/GMT-2",
"Etc/GMT-3",
"Etc/GMT-4",
"Etc/GMT-5",
"Etc/GMT-6",
"Etc/GMT-7",
"Etc/GMT-8",
"Etc/GMT-9",
"Etc/GMT0",
"Etc/Greenwich",
"Etc/UCT",
"Etc/UTC",
"Etc/Universal",
"Etc/Zulu",
"Europe/Amsterdam",
"Europe/Andorra",
"Europe/Astrakhan",
"Europe/Athens",
"Europe/Belfast",
"Europe/Belgrade",
"Europe/Berlin",
"Europe/Bratislava",
"Europe/Brussels",
"Europe/Bucharest",
"Europe/Budapest",
"Europe/Busingen",
"Europe/Chisinau",
"Europe/Copenhagen",
"Europe/Dublin",
"Europe/Gibraltar",
"Europe/Guernsey",
"Europe/Helsinki",
"Europe/Isle_of_Man",
"Europe/Istanbul",
"Europe/Jersey",
"Europe/Kaliningrad",
"Europe/Kirov",
"Europe/Kyiv",
"Europe/Lisbon",
"Europe/Ljubljana",
"Europe/London",
"Europe/Luxembourg",
"Europe/Madrid",
"Europe/Malta",
"Europe/Mariehamn",
"Europe/Minsk",
"Europe/Monaco",
"Europe/Moscow",
"Europe/Nicosia",
"Europe/Oslo",
"Europe/Paris",
"Europe/Podgorica",
"Europe/Prague",
"Europe/Riga",
"Europe/Rome",
"Europe/Samara",
"Europe/San_Marino",
"Europe/Sarajevo",
"Europe/Saratov",
"Europe/Simferopol",
"Europe/Skopje",
"Europe/Sofia",
"Europe/Stockholm",
"Europe/Tallinn",
"Europe/Tirane",
"Europe/Tiraspol",
"Europe/Ulyanovsk",
"Europe/Vaduz",
"Europe/Vatican",
"Europe/Vienna",
"Europe/Vilnius",
"Europe/Volgograd",
"Europe/Warsaw",
"Europe/Zagreb",
"Europe/Zurich",
"Factory",
"Indian/Antananarivo",
"Indian/Chagos",
"Indian/Christmas",
"Indian/Cocos",
"Indian/Comoro",
"Indian/Kerguelen",
"Indian/Mahe",
"Indian/Maldives",
"Indian/Mauritius",
"Indian/Mayotte",
"Indian/Reunion",
"Pacific/Apia",
"Pacific/Auckland",
"Pacific/Bougainville",
"Pacific/Chatham",
"Pacific/Chuuk",
"Pacific/Easter",
"Pacific/Efate",
"Pacific/Fakaofo",
"Pacific/Fiji",
"Pacific/Funafuti",
"Pacific/Galapagos",
"Pacific/Gambier",
"Pacific/Guadalcanal",
"Pacific/Guam",
"Pacific/Honolulu",
"Pacific/Johnston",
"Pacific/Kanton",
"Pacific/Kiritimati",
"Pacific/Kosrae",
"Pacific/Kwajalein",
"Pacific/Majuro",
"Pacific/Marquesas",
"Pacific/Midway",
"Pacific/Nauru",
"Pacific/Niue",
"Pacific/Norfolk",
"Pacific/Noumea",
"Pacific/Pago_Pago",
"Pacific/Palau",
"Pacific/Pitcairn",
"Pacific/Pohnpei",
"Pacific/Port_Moresby",
"Pacific/Rarotonga",
"Pacific/Saipan",
"Pacific/Samoa",
"Pacific/Tahiti",
"Pacific/Tarawa",
"Pacific/Tongatapu",
"Pacific/Wake",
"Pacific/Wallis",
"Pacific/Yap",
"UTC",
nullptr};
// Test helper: loads `name`, ignoring failure. On failure the returned
// zone is left default-constructed, keeping the tests deterministic.
time_zone LoadZone(const std::string& name) {
  time_zone result;
  if (!load_time_zone(name, &result)) {
    // Deliberately ignored; `result` keeps its default value.
  }
  return result;
}
// Checks that looking up absolute time `tp` in zone `tz` yields the civil
// time y-m-d hh:mm:ss with UTC offset `off` (seconds) and DST flag `isdst`.
// NOTE: the `zone` (abbreviation) argument is accepted but not checked by
// this macro body — the blank continuation line below is where an abbr
// check would go.  (No comments can be placed inside the macro body
// because // would swallow the line-continuation backslashes.)
#define ExpectTime(tp, tz, y, m, d, hh, mm, ss, off, isdst, zone) \
  do {                                                            \
    time_zone::absolute_lookup al = tz.lookup(tp);                \
    EXPECT_EQ(y, al.cs.year());                                   \
    EXPECT_EQ(m, al.cs.month());                                  \
    EXPECT_EQ(d, al.cs.day());                                    \
    EXPECT_EQ(hh, al.cs.hour());                                  \
    EXPECT_EQ(mm, al.cs.minute());                                \
    EXPECT_EQ(ss, al.cs.second());                                \
    EXPECT_EQ(off, al.offset);                                    \
    EXPECT_TRUE(isdst == al.is_dst);                              \
                                                                  \
  } while (0)
// Three-way comparison between the tzdata version of `tz` and `target`.
// An empty (unknown) version compares greater than any non-empty target,
// so version-gated tests still run when the version cannot be determined.
int VersionCmp(time_zone tz, const std::string& target) {
  const std::string actual = tz.version();
  if (actual.empty() && !target.empty()) return 1;
  return actual.compare(target);
}
}
#if !defined(__EMSCRIPTEN__)
// Loads every zone in kTimeZoneNames from 128 threads simultaneously to
// exercise the thread safety of the zone-loading/caching machinery.  A few
// load failures are tolerated (zones may be missing from the host tzdata).
TEST(TimeZones, LoadZonesConcurrently) {
  std::promise<void> ready_promise;
  std::shared_future<void> ready_future(ready_promise.get_future());
  auto load_zones = [ready_future](std::promise<void>* started,
                                   std::set<std::string>* failures) {
    started->set_value();
    ready_future.wait();  // block until all threads are ready, then race
    for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
      std::string zone = *np;
      time_zone tz;
      if (load_time_zone(zone, &tz)) {
        EXPECT_EQ(zone, tz.name());
      } else {
        failures->insert(zone);
      }
    }
  };
  // Start all threads, waiting for each to signal it has started; then
  // release them all at once via ready_promise.
  const std::size_t n_threads = 128;
  std::vector<std::thread> threads;
  std::vector<std::set<std::string>> thread_failures(n_threads);
  for (std::size_t i = 0; i != n_threads; ++i) {
    std::promise<void> started;
    threads.emplace_back(load_zones, &started, &thread_failures[i]);
    started.get_future().wait();
  }
  ready_promise.set_value();
  for (auto& thread : threads) {
    thread.join();
  }
  // Android bundles may lack more zones than a desktop zoneinfo install.
#if defined(__ANDROID__)
  const std::size_t max_failures = 20;
#else
  const std::size_t max_failures = 3;
#endif
  std::set<std::string> failures;
  for (const auto& thread_failure : thread_failures) {
    failures.insert(thread_failure.begin(), thread_failure.end());
  }
  EXPECT_LE(failures.size(), max_failures) << testing::PrintToString(failures);
}
#endif
// "UTC" and "UTC0" load as the canonical UTC zone; a failed load also
// falls back to UTC.
TEST(TimeZone, UTC) {
  const time_zone utc = utc_time_zone();
  time_zone loaded_utc;
  EXPECT_TRUE(load_time_zone("UTC", &loaded_utc));
  EXPECT_EQ(loaded_utc, utc);
  time_zone loaded_utc0;
  EXPECT_TRUE(load_time_zone("UTC0", &loaded_utc0));
  EXPECT_EQ(loaded_utc0, utc);
  time_zone loaded_bad;
  EXPECT_FALSE(load_time_zone("Invalid/TimeZone", &loaded_bad));
  EXPECT_EQ(loaded_bad, utc);  // failure leaves the UTC fallback
}
// time_zone::name() round-trips for named zones and formats fixed-offset
// zones as "Fixed/UTC±hh:mm:ss" (with a zero offset reported as "UTC").
TEST(TimeZone, NamedTimeZones) {
  const time_zone utc = utc_time_zone();
  EXPECT_EQ("UTC", utc.name());
  const time_zone nyc = LoadZone("America/New_York");
  EXPECT_EQ("America/New_York", nyc.name());
  const time_zone syd = LoadZone("Australia/Sydney");
  EXPECT_EQ("Australia/Sydney", syd.name());
  const time_zone fixed0 =
      fixed_time_zone(absl::time_internal::cctz::seconds::zero());
  EXPECT_EQ("UTC", fixed0.name());
  const time_zone fixed_pos = fixed_time_zone(
      chrono::hours(3) + chrono::minutes(25) + chrono::seconds(45));
  EXPECT_EQ("Fixed/UTC+03:25:45", fixed_pos.name());
  const time_zone fixed_neg = fixed_time_zone(
      -(chrono::hours(12) + chrono::minutes(34) + chrono::seconds(56)));
  EXPECT_EQ("Fixed/UTC-12:34:56", fixed_neg.name());
}
// A failed load_time_zone() must reset the output zone to the UTC
// fallback, even if it previously held a valid zone.
TEST(TimeZone, Failures) {
  time_zone tz;
  EXPECT_FALSE(load_time_zone(":America/Los_Angeles", &tz));
  tz = LoadZone("America/Los_Angeles");
  EXPECT_FALSE(load_time_zone("Invalid/TimeZone", &tz));
  EXPECT_EQ(chrono::system_clock::from_time_t(0),
            convert(civil_second(1970, 1, 1, 0, 0, 0), tz));
  tz = LoadZone("America/Los_Angeles");
  EXPECT_FALSE(load_time_zone("Invalid/TimeZone", &tz));
  EXPECT_EQ(chrono::system_clock::from_time_t(0),
            convert(civil_second(1970, 1, 1, 0, 0, 0), tz));
  tz = LoadZone("America/Los_Angeles");
  EXPECT_FALSE(load_time_zone("", &tz));
  EXPECT_EQ(chrono::system_clock::from_time_t(0),
            convert(civil_second(1970, 1, 1, 0, 0, 0), tz));
}
// Equality semantics: default-constructed == UTC; fixed zones compare by
// (clamped) offset; loaded zones compare by identity of the loaded data.
TEST(TimeZone, Equality) {
  const time_zone a;
  const time_zone b;
  EXPECT_EQ(a, b);
  EXPECT_EQ(a.name(), b.name());
  const time_zone implicit_utc;
  const time_zone explicit_utc = utc_time_zone();
  EXPECT_EQ(implicit_utc, explicit_utc);
  EXPECT_EQ(implicit_utc.name(), explicit_utc.name());
  const time_zone fixed_zero =
      fixed_time_zone(absl::time_internal::cctz::seconds::zero());
  EXPECT_EQ(fixed_zero, LoadZone(fixed_zero.name()));
  EXPECT_EQ(fixed_zero, explicit_utc);
  const time_zone fixed_utc = LoadZone("Fixed/UTC+00:00:00");
  EXPECT_EQ(fixed_utc, LoadZone(fixed_utc.name()));
  EXPECT_EQ(fixed_utc, explicit_utc);
  const time_zone fixed_pos = fixed_time_zone(
      chrono::hours(3) + chrono::minutes(25) + chrono::seconds(45));
  EXPECT_EQ(fixed_pos, LoadZone(fixed_pos.name()));
  EXPECT_NE(fixed_pos, explicit_utc);
  const time_zone fixed_neg = fixed_time_zone(
      -(chrono::hours(12) + chrono::minutes(34) + chrono::seconds(56)));
  EXPECT_EQ(fixed_neg, LoadZone(fixed_neg.name()));
  EXPECT_NE(fixed_neg, explicit_utc);
  // ±24h is the limit for a fixed zone; anything past it collapses to UTC.
  const time_zone fixed_lim = fixed_time_zone(chrono::hours(24));
  EXPECT_EQ(fixed_lim, LoadZone(fixed_lim.name()));
  EXPECT_NE(fixed_lim, explicit_utc);
  const time_zone fixed_ovfl =
      fixed_time_zone(chrono::hours(24) + chrono::seconds(1));
  EXPECT_EQ(fixed_ovfl, LoadZone(fixed_ovfl.name()));
  EXPECT_EQ(fixed_ovfl, explicit_utc);
  EXPECT_EQ(fixed_time_zone(chrono::seconds(1)),
            fixed_time_zone(chrono::seconds(1)));
  const time_zone local = local_time_zone();
  EXPECT_EQ(local, LoadZone(local.name()));
  time_zone la = LoadZone("America/Los_Angeles");
  time_zone nyc = LoadZone("America/New_York");
  EXPECT_NE(la, nyc);
}
// The system_clock epoch must be aligned to a whole second relative to
// time_t 0, which the conversion code relies on.
TEST(StdChronoTimePoint, TimeTAlignment) {
  auto diff =
      chrono::system_clock::time_point() - chrono::system_clock::from_time_t(0);
  EXPECT_EQ(chrono::system_clock::time_point::duration::zero(),
            diff % chrono::seconds(1));
}
// lookup() accepts time_points of any duration resolution, from
// nanoseconds up to hours, all breaking down to the same civil time.
TEST(BreakTime, TimePointResolution) {
  const time_zone utc = utc_time_zone();
  const auto t0 = chrono::system_clock::from_time_t(0);
  ExpectTime(chrono::time_point_cast<chrono::nanoseconds>(t0), utc, 1970, 1, 1,
             0, 0, 0, 0, false, "UTC");
  ExpectTime(chrono::time_point_cast<chrono::microseconds>(t0), utc, 1970, 1, 1,
             0, 0, 0, 0, false, "UTC");
  ExpectTime(chrono::time_point_cast<chrono::milliseconds>(t0), utc, 1970, 1, 1,
             0, 0, 0, 0, false, "UTC");
  ExpectTime(chrono::time_point_cast<chrono::seconds>(t0), utc, 1970, 1, 1, 0,
             0, 0, 0, false, "UTC");
  ExpectTime(chrono::time_point_cast<absl::time_internal::cctz::seconds>(t0),
             utc, 1970, 1, 1, 0, 0, 0, 0, false, "UTC");
  ExpectTime(chrono::time_point_cast<chrono::minutes>(t0), utc, 1970, 1, 1, 0,
             0, 0, 0, false, "UTC");
  ExpectTime(chrono::time_point_cast<chrono::hours>(t0), utc, 1970, 1, 1, 0, 0,
             0, 0, false, "UTC");
}
// The Unix epoch breaks down to 1970-01-01 00:00:00 UTC, a Thursday.
TEST(BreakTime, LocalTimeInUTC) {
  const time_zone tz = utc_time_zone();
  const auto tp = chrono::system_clock::from_time_t(0);
  ExpectTime(tp, tz, 1970, 1, 1, 0, 0, 0, 0, false, "UTC");
  EXPECT_EQ(weekday::thursday, get_weekday(convert(tp, tz)));
}
// A sub-second time before the epoch truncates toward the earlier second.
TEST(BreakTime, LocalTimeInUTCUnaligned) {
  const time_zone tz = utc_time_zone();
  const auto tp =
      chrono::system_clock::from_time_t(0) - chrono::milliseconds(500);
  ExpectTime(tp, tz, 1969, 12, 31, 23, 59, 59, 0, false, "UTC");
  EXPECT_EQ(weekday::wednesday, get_weekday(convert(tp, tz)));
}
// A specific positive time_t value (1986-12-31 23:59:59 UTC).
TEST(BreakTime, LocalTimePosix) {
  const time_zone tz = utc_time_zone();
  const auto tp = chrono::system_clock::from_time_t(536457599);
  ExpectTime(tp, tz, 1986, 12, 31, 23, 59, 59, 0, false, "UTC");
  EXPECT_EQ(weekday::wednesday, get_weekday(convert(tp, tz)));
}
// A fixed-offset zone applies its (negative, sub-hour-granular) offset.
TEST(TimeZoneImpl, LocalTimeInFixed) {
  const absl::time_internal::cctz::seconds offset =
      -(chrono::hours(8) + chrono::minutes(33) + chrono::seconds(47));
  const time_zone tz = fixed_time_zone(offset);
  const auto tp = chrono::system_clock::from_time_t(0);
  ExpectTime(tp, tz, 1969, 12, 31, 15, 26, 13, offset.count(), false,
             "-083347");
  EXPECT_EQ(weekday::wednesday, get_weekday(convert(tp, tz)));
}
// Epoch+45s in New York: EST, UTC-5.
TEST(BreakTime, LocalTimeInNewYork) {
  const time_zone tz = LoadZone("America/New_York");
  const auto tp = chrono::system_clock::from_time_t(45);
  ExpectTime(tp, tz, 1969, 12, 31, 19, 0, 45, -5 * 60 * 60, false, "EST");
  EXPECT_EQ(weekday::wednesday, get_weekday(convert(tp, tz)));
}
// A DST instant in Los Angeles: PDT, UTC-7.
TEST(BreakTime, LocalTimeInMTV) {
  const time_zone tz = LoadZone("America/Los_Angeles");
  const auto tp = chrono::system_clock::from_time_t(1380855729);
  ExpectTime(tp, tz, 2013, 10, 3, 20, 2, 9, -7 * 60 * 60, true, "PDT");
  EXPECT_EQ(weekday::thursday, get_weekday(convert(tp, tz)));
}
// A positive-offset zone: Sydney, AEST, UTC+10.
TEST(BreakTime, LocalTimeInSydney) {
  const time_zone tz = LoadZone("Australia/Sydney");
  const auto tp = chrono::system_clock::from_time_t(90);
  ExpectTime(tp, tz, 1970, 1, 1, 10, 1, 30, 10 * 60 * 60, false, "AEST");
  EXPECT_EQ(weekday::thursday, get_weekday(convert(tp, tz)));
}
// convert() can produce time_points of any resolution; coarse resolutions
// (minutes, hours) truncate the sub-unit part of the civil time.
TEST(MakeTime, TimePointResolution) {
  const time_zone utc = utc_time_zone();
  const time_point<chrono::nanoseconds> tp_ns =
      convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_ns, utc));
  const time_point<chrono::microseconds> tp_us =
      convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_us, utc));
  const time_point<chrono::milliseconds> tp_ms =
      convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_ms, utc));
  const time_point<chrono::seconds> tp_s =
      convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_s, utc));
  const time_point<absl::time_internal::cctz::seconds> tp_s64 =
      convert(civil_second(2015, 1, 2, 3, 4, 5), utc);
  EXPECT_EQ("04:05", absl::time_internal::cctz::format("%M:%E*S", tp_s64, utc));
  // Minute/hour resolution truncates the seconds (and minutes) fields.
  const time_point<chrono::minutes> tp_m =
      chrono::time_point_cast<chrono::minutes>(
          convert(civil_second(2015, 1, 2, 3, 4, 5), utc));
  EXPECT_EQ("04:00", absl::time_internal::cctz::format("%M:%E*S", tp_m, utc));
  const time_point<chrono::hours> tp_h = chrono::time_point_cast<chrono::hours>(
      convert(civil_second(2015, 1, 2, 3, 4, 5), utc));
  EXPECT_EQ("00:00", absl::time_internal::cctz::format("%M:%E*S", tp_h, utc));
}
// Out-of-range civil fields (month 14, day 44, hour 42, ...) normalize to
// the same absolute time as the canonical representation.
TEST(MakeTime, Normalization) {
  const time_zone tz = LoadZone("America/New_York");
  const auto tp = convert(civil_second(2009, 2, 13, 18, 31, 30), tz);
  EXPECT_EQ(chrono::system_clock::from_time_t(1234567890), tp);
  EXPECT_EQ(tp, convert(civil_second(2008, 14, 13, 18, 31, 30), tz));
  EXPECT_EQ(tp, convert(civil_second(2009, 1, 44, 18, 31, 30), tz));
  EXPECT_EQ(tp, convert(civil_second(2009, 2, 12, 42, 31, 30), tz));
  EXPECT_EQ(tp, convert(civil_second(2009, 2, 13, 17, 91, 30), tz));
  EXPECT_EQ(tp, convert(civil_second(2009, 2, 13, 18, 30, 90), tz));
}
// Conversions at the extreme representable times saturate to the
// time_point<seconds> min/max rather than overflowing, in UTC and in
// extreme fixed-offset (+/-14h) zones; then probes the libc tm_year limits.
TEST(MakeTime, SysSecondsLimits) {
  const char RFC3339[] = "%Y-%m-%d%ET%H:%M:%S%Ez";
  const time_zone utc = utc_time_zone();
  const time_zone east = fixed_time_zone(chrono::hours(14));
  const time_zone west = fixed_time_zone(-chrono::hours(14));
  time_point<absl::time_internal::cctz::seconds> tp;
  // Approaching the maximum in UTC: :07 is exactly max; :08 saturates.
  tp = convert(civil_second(292277026596, 12, 4, 15, 30, 6), utc);
  EXPECT_EQ("292277026596-12-04T15:30:06+00:00",
            absl::time_internal::cctz::format(RFC3339, tp, utc));
  tp = convert(civil_second(292277026596, 12, 4, 15, 30, 7), utc);
  EXPECT_EQ("292277026596-12-04T15:30:07+00:00",
            absl::time_internal::cctz::format(RFC3339, tp, utc));
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  tp = convert(civil_second(292277026596, 12, 4, 15, 30, 8), utc);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  tp = convert(civil_second::max(), utc);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  // Same boundary shifted by the +14:00 zone.
  tp = convert(civil_second(292277026596, 12, 5, 5, 30, 7), east);
  EXPECT_EQ("292277026596-12-05T05:30:07+14:00",
            absl::time_internal::cctz::format(RFC3339, tp, east));
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  tp = convert(civil_second(292277026596, 12, 5, 5, 30, 8), east);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  tp = convert(civil_second::max(), east);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  // And by the -14:00 zone.
  tp = convert(civil_second(292277026596, 12, 4, 1, 30, 7), west);
  EXPECT_EQ("292277026596-12-04T01:30:07-14:00",
            absl::time_internal::cctz::format(RFC3339, tp, west));
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  tp = convert(civil_second(292277026596, 12, 4, 7, 30, 8), west);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  tp = convert(civil_second::max(), west);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::max(), tp);
  // Approaching the minimum in UTC: :52 is exactly min; :51 saturates.
  tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 53), utc);
  EXPECT_EQ("-292277022657-01-27T08:29:53+00:00",
            absl::time_internal::cctz::format(RFC3339, tp, utc));
  tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 52), utc);
  EXPECT_EQ("-292277022657-01-27T08:29:52+00:00",
            absl::time_internal::cctz::format(RFC3339, tp, utc));
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second(-292277022657, 1, 27, 8, 29, 51), utc);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second::min(), utc);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second(-292277022657, 1, 27, 22, 29, 52), east);
  EXPECT_EQ("-292277022657-01-27T22:29:52+14:00",
            absl::time_internal::cctz::format(RFC3339, tp, east));
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second(-292277022657, 1, 27, 22, 29, 51), east);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second::min(), east);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second(-292277022657, 1, 26, 18, 29, 52), west);
  EXPECT_EQ("-292277022657-01-26T18:29:52-14:00",
            absl::time_internal::cctz::format(RFC3339, tp, west));
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second(-292277022657, 1, 26, 18, 29, 51), west);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  tp = convert(civil_second::min(), west);
  EXPECT_EQ(time_point<absl::time_internal::cctz::seconds>::min(), tp);
  // With a 64-bit time_t, probe the libc's struct tm year limits
  // (int tm_year + 1900).  Several platforms are exempted where the
  // libc cannot represent these extremes.
  if (sizeof(std::time_t) >= 8) {
#if defined(_WIN32) || defined(_WIN64)
#else
    const time_zone cut = LoadZone("libc:UTC");
    const year_t max_tm_year = year_t{std::numeric_limits<int>::max()} + 1900;
    tp = convert(civil_second(max_tm_year, 12, 31, 23, 59, 59), cut);
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__EMSCRIPTEN__)
#else
    EXPECT_EQ("2147485547-12-31T23:59:59+00:00",
              absl::time_internal::cctz::format(RFC3339, tp, cut));
#endif
    const year_t min_tm_year = year_t{std::numeric_limits<int>::min()} + 1900;
    tp = convert(civil_second(min_tm_year, 1, 1, 0, 0, 0), cut);
#if defined(__Fuchsia__) || defined(__EMSCRIPTEN__)
#else
    EXPECT_EQ("-2147481748-01-01T00:00:00+00:00",
              absl::time_internal::cctz::format(RFC3339, tp, cut));
#endif
#endif
  }
}
// Cross-checks cctz civil lookups against the glibc localtime
// implementation for every transition of every known zone, by pointing TZ
// at each zone in turn.  Linux/glibc only (setenv-driven; not Android).
TEST(MakeTime, LocalTimeLibC) {
#if defined(__linux__) && defined(__GLIBC__) && !defined(__ANDROID__)
  // Save TZ so it can be restored when the test finishes.
  const char* const ep = getenv("TZ");
  std::string tz_name = (ep != nullptr) ? ep : "";
  for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
    ASSERT_EQ(0, setenv("TZ", *np, 1));
    const auto zi = local_time_zone();
    const auto lc = LoadZone("libc:localtime");
    time_zone::civil_transition transition;
    // Walk every transition of the zone.
    for (auto tp = zi.lookup(civil_second()).trans;
         zi.next_transition(tp, &transition);
         tp = zi.lookup(transition.to).trans) {
      const auto fcl = zi.lookup(transition.from);
      const auto tcl = zi.lookup(transition.to);
      civil_second cs, us;  // skipped and repeated civil times
      if (fcl.kind == time_zone::civil_lookup::UNIQUE) {
        if (tcl.kind == time_zone::civil_lookup::UNIQUE) {
          // A zero-offset transition: only is_dst and/or abbr changed.
          ASSERT_EQ(transition.from, transition.to);
          const auto trans = fcl.trans;
          const auto tal = zi.lookup(trans);
          const auto tprev = trans - absl::time_internal::cctz::seconds(1);
          const auto pal = zi.lookup(tprev);
          if (pal.is_dst == tal.is_dst) {
            ASSERT_STRNE(pal.abbr, tal.abbr);
          }
          continue;
        }
        ASSERT_EQ(time_zone::civil_lookup::REPEATED, tcl.kind);
        cs = transition.to;
        us = transition.from;
      } else {
        ASSERT_EQ(time_zone::civil_lookup::UNIQUE, tcl.kind);
        ASSERT_EQ(time_zone::civil_lookup::SKIPPED, fcl.kind);
        cs = transition.from;
        us = transition.to;
      }
      // libc's extended-year handling is only reliable up to 2037.
      if (us.year() > 2037) break;
      const auto cl_zi = zi.lookup(cs);
      if (zi.lookup(cl_zi.pre).is_dst == zi.lookup(cl_zi.post).is_dst) {
        // glibc only handles abbreviation-only transitions correctly for
        // DST changes; skip the non-DST cases.
        continue;
      }
      if (cs == civil_second(2037, 10, 4, 2, 0, 0)) {
        // Known glibc disagreement for these Moroccan zones at this time.
        const std::string tzname = *np;
        if (tzname == "Africa/Casablanca" || tzname == "Africa/El_Aaiun") {
          continue;
        }
      }
      const auto cl_lc = lc.lookup(cs);
      SCOPED_TRACE(testing::Message() << "For " << cs << " in " << *np);
      EXPECT_EQ(cl_zi.kind, cl_lc.kind);
      EXPECT_EQ(cl_zi.pre, cl_lc.pre);
      EXPECT_EQ(cl_zi.trans, cl_lc.trans);
      EXPECT_EQ(cl_zi.post, cl_lc.post);
      const auto ucl_zi = zi.lookup(us);
      const auto ucl_lc = lc.lookup(us);
      SCOPED_TRACE(testing::Message() << "For " << us << " in " << *np);
      EXPECT_EQ(ucl_zi.kind, ucl_lc.kind);
      EXPECT_EQ(ucl_zi.pre, ucl_lc.pre);
      EXPECT_EQ(ucl_zi.trans, ucl_lc.trans);
      EXPECT_EQ(ucl_zi.post, ucl_lc.post);
    }
  }
  // Restore the original TZ environment.
  if (ep == nullptr) {
    ASSERT_EQ(0, unsetenv("TZ"));
  } else {
    ASSERT_EQ(0, setenv("TZ", tz_name.c_str(), 1));
  }
#endif
}
// UTC has no transitions in either direction.
TEST(NextTransition, UTC) {
  const auto tz = utc_time_zone();
  time_zone::civil_transition trans;
  auto tp = time_point<absl::time_internal::cctz::seconds>::min();
  EXPECT_FALSE(tz.next_transition(tp, &trans));
  tp = time_point<absl::time_internal::cctz::seconds>::max();
  EXPECT_FALSE(tz.next_transition(tp, &trans));
}
TEST(PrevTransition, UTC) {
  const auto tz = utc_time_zone();
  time_zone::civil_transition trans;
  auto tp = time_point<absl::time_internal::cctz::seconds>::max();
  EXPECT_FALSE(tz.prev_transition(tp, &trans));
  tp = time_point<absl::time_internal::cctz::seconds>::min();
  EXPECT_FALSE(tz.prev_transition(tp, &trans));
}
// next_transition() from a mid-year point finds the fall-back; from min()
// it finds the zone's earliest transition (tzdata-version dependent).
TEST(NextTransition, AmericaNewYork) {
  const auto tz = LoadZone("America/New_York");
  time_zone::civil_transition trans;
  auto tp = convert(civil_second(2018, 6, 30, 0, 0, 0), tz);
  EXPECT_TRUE(tz.next_transition(tp, &trans));
  EXPECT_EQ(civil_second(2018, 11, 4, 2, 0, 0), trans.from);
  EXPECT_EQ(civil_second(2018, 11, 4, 1, 0, 0), trans.to);
  tp = time_point<absl::time_internal::cctz::seconds>::max();
  EXPECT_FALSE(tz.next_transition(tp, &trans));
  tp = time_point<absl::time_internal::cctz::seconds>::min();
  EXPECT_TRUE(tz.next_transition(tp, &trans));
  // Older tzdata starts at the 1918 DST transition; newer data includes
  // the 1883 standardization of railway time.
  if (trans.from == civil_second(1918, 3, 31, 2, 0, 0)) {
    EXPECT_EQ(civil_second(1918, 3, 31, 3, 0, 0), trans.to);
  } else {
    EXPECT_EQ(civil_second(1883, 11, 18, 12, 3, 58), trans.from);
    EXPECT_EQ(civil_second(1883, 11, 18, 12, 0, 0), trans.to);
  }
}
TEST(PrevTransition, AmericaNewYork) {
  const auto tz = LoadZone("America/New_York");
  time_zone::civil_transition trans;
  auto tp = convert(civil_second(2018, 6, 30, 0, 0, 0), tz);
  EXPECT_TRUE(tz.prev_transition(tp, &trans));
  EXPECT_EQ(civil_second(2018, 3, 11, 2, 0, 0), trans.from);
  EXPECT_EQ(civil_second(2018, 3, 11, 3, 0, 0), trans.to);
  tp = time_point<absl::time_internal::cctz::seconds>::min();
  EXPECT_FALSE(tz.prev_transition(tp, &trans));
  tp = time_point<absl::time_internal::cctz::seconds>::max();
  EXPECT_TRUE(tz.prev_transition(tp, &trans));
}
// Walks every transition of every known zone, checking self-consistency
// between next_transition(), lookup() of civil times, and offsets.
TEST(NextTransition, Scan) {
  for (const char* const* np = kTimeZoneNames; *np != nullptr; ++np) {
    SCOPED_TRACE(testing::Message() << "In " << *np);
    time_zone tz;
    if (!load_time_zone(*np, &tz)) {
      continue;  // tolerate zones missing from the host tzdata
    }
    auto tp = time_point<absl::time_internal::cctz::seconds>::min();
    time_zone::civil_transition trans;
    while (tz.next_transition(tp, &trans)) {
      // trans.from is never repeated; trans.to is never skipped.
      time_zone::civil_lookup from_cl = tz.lookup(trans.from);
      EXPECT_NE(from_cl.kind, time_zone::civil_lookup::REPEATED);
      time_zone::civil_lookup to_cl = tz.lookup(trans.to);
      EXPECT_NE(to_cl.kind, time_zone::civil_lookup::SKIPPED);
      auto trans_tp = to_cl.trans;
      time_zone::absolute_lookup trans_al = tz.lookup(trans_tp);
      EXPECT_EQ(trans_al.cs, trans.to);
      auto pre_trans_tp = trans_tp - absl::time_internal::cctz::seconds(1);
      time_zone::absolute_lookup pre_trans_al = tz.lookup(pre_trans_tp);
      EXPECT_EQ(pre_trans_al.cs + 1, trans.from);
      // The civil-time jump must equal the offset change.
      auto offset_delta = trans_al.offset - pre_trans_al.offset;
      EXPECT_EQ(offset_delta, trans.to - trans.from);
      if (offset_delta == 0) {
        // Zero-offset transitions must still change is_dst or abbr.
        EXPECT_EQ(to_cl.kind, time_zone::civil_lookup::UNIQUE);
        if (trans_al.is_dst == pre_trans_al.is_dst) {
          EXPECT_STRNE(trans_al.abbr, pre_trans_al.abbr);
        }
      }
      tp = trans_tp;
    }
  }
}
// Spring-forward and fall-back boundaries in New York (EST/EDT).
TEST(TimeZoneEdgeCase, AmericaNewYork) {
  const time_zone tz = LoadZone("America/New_York");
  auto tp = convert(civil_second(2013, 3, 10, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 3, 10, 1, 59, 59, -5 * 3600, false, "EST");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 3, 10, 3, 0, 0, -4 * 3600, true, "EDT");
  tp = convert(civil_second(2013, 11, 3, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 11, 3, 1, 59, 59, -4 * 3600, true, "EDT");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 11, 3, 1, 0, 0, -5 * 3600, false, "EST");
}
// Same boundaries in Los Angeles (PST/PDT).
TEST(TimeZoneEdgeCase, AmericaLosAngeles) {
  const time_zone tz = LoadZone("America/Los_Angeles");
  auto tp = convert(civil_second(2013, 3, 10, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 3, 10, 1, 59, 59, -8 * 3600, false, "PST");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 3, 10, 3, 0, 0, -7 * 3600, true, "PDT");
  tp = convert(civil_second(2013, 11, 3, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 11, 3, 1, 59, 59, -7 * 3600, true, "PDT");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 11, 3, 1, 0, 0, -8 * 3600, false, "PST");
}
// Arizona observes no DST: the same instants cross no transition.
TEST(TimeZoneEdgeCase, ArizonaNoTransition) {
  const time_zone tz = LoadZone("America/Phoenix");
  auto tp = convert(civil_second(2013, 3, 10, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 3, 10, 1, 59, 59, -7 * 3600, false, "MST");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 3, 10, 2, 0, 0, -7 * 3600, false, "MST");
  tp = convert(civil_second(2013, 11, 3, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 11, 3, 1, 59, 59, -7 * 3600, false, "MST");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 11, 3, 2, 0, 0, -7 * 3600, false, "MST");
}
// Nepal's 1986 15-minute offset change (+05:30 -> +05:45).
TEST(TimeZoneEdgeCase, AsiaKathmandu) {
  const time_zone tz = LoadZone("Asia/Kathmandu");
  auto tp = convert(civil_second(1985, 12, 31, 23, 59, 59), tz);
  ExpectTime(tp, tz, 1985, 12, 31, 23, 59, 59, 5.5 * 3600, false, "+0530");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 1986, 1, 1, 0, 15, 0, 5.75 * 3600, false, "+0545");
}
// Chatham Islands: 45-minute-offset zone with DST.
TEST(TimeZoneEdgeCase, PacificChatham) {
  const time_zone tz = LoadZone("Pacific/Chatham");
  auto tp = convert(civil_second(2013, 4, 7, 3, 44, 59), tz);
  ExpectTime(tp, tz, 2013, 4, 7, 3, 44, 59, 13.75 * 3600, true, "+1345");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 4, 7, 2, 45, 0, 12.75 * 3600, false, "+1245");
  tp = convert(civil_second(2013, 9, 29, 2, 44, 59), tz);
  ExpectTime(tp, tz, 2013, 9, 29, 2, 44, 59, 12.75 * 3600, false, "+1245");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 9, 29, 3, 45, 0, 13.75 * 3600, true, "+1345");
}
// Lord Howe Island: 30-minute DST shift.
TEST(TimeZoneEdgeCase, AustraliaLordHowe) {
  const time_zone tz = LoadZone("Australia/Lord_Howe");
  auto tp = convert(civil_second(2013, 4, 7, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 4, 7, 1, 59, 59, 11 * 3600, true, "+11");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 4, 7, 1, 30, 0, 10.5 * 3600, false, "+1030");
  tp = convert(civil_second(2013, 10, 6, 1, 59, 59), tz);
  ExpectTime(tp, tz, 2013, 10, 6, 1, 59, 59, 10.5 * 3600, false, "+1030");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2013, 10, 6, 2, 30, 0, 11 * 3600, true, "+11");
}
// Samoa's 2011 dateline jump: Dec 30 was skipped entirely (-10 -> +14).
TEST(TimeZoneEdgeCase, PacificApia) {
  const time_zone tz = LoadZone("Pacific/Apia");
  auto tp = convert(civil_second(2011, 12, 29, 23, 59, 59), tz);
  ExpectTime(tp, tz, 2011, 12, 29, 23, 59, 59, -10 * 3600, true, "-10");
  EXPECT_EQ(363, get_yearday(convert(tp, tz)));
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2011, 12, 31, 0, 0, 0, 14 * 3600, true, "+14");
  EXPECT_EQ(365, get_yearday(convert(tp, tz)));
}
// Egypt's 2014 DST reinstatement (only present in tzdata >= 2014c).
TEST(TimeZoneEdgeCase, AfricaCairo) {
  const time_zone tz = LoadZone("Africa/Cairo");
  if (VersionCmp(tz, "2014c") >= 0) {
    auto tp = convert(civil_second(2014, 5, 15, 23, 59, 59), tz);
    ExpectTime(tp, tz, 2014, 5, 15, 23, 59, 59, 2 * 3600, false, "EET");
    tp += absl::time_internal::cctz::seconds(1);
    ExpectTime(tp, tz, 2014, 5, 16, 1, 0, 0, 3 * 3600, true, "EEST");
  }
}
// Liberia's 1972 -00:44:30 -> GMT change (tzdata >= 2017b).
TEST(TimeZoneEdgeCase, AfricaMonrovia) {
  const time_zone tz = LoadZone("Africa/Monrovia");
  if (VersionCmp(tz, "2017b") >= 0) {
    auto tp = convert(civil_second(1972, 1, 6, 23, 59, 59), tz);
    ExpectTime(tp, tz, 1972, 1, 6, 23, 59, 59, -44.5 * 60, false, "MMT");
    tp += absl::time_internal::cctz::seconds(1);
    ExpectTime(tp, tz, 1972, 1, 7, 0, 44, 30, 0 * 60, false, "GMT");
  }
}
// Jamaica: 1890 LMT->KMT change (tzdata >= 2018d) and the last DST year.
TEST(TimeZoneEdgeCase, AmericaJamaica) {
  const time_zone tz = LoadZone("America/Jamaica");
  if (!tz.version().empty() && VersionCmp(tz, "2018d") >= 0) {
    auto tp = convert(civil_second(1889, 12, 31, 0, 0, 0), tz);
    ExpectTime(tp, tz, 1889, 12, 31, 0, 0, 0, -18430, false,
               tz.lookup(tp).abbr);
    tp = convert(civil_second(1889, 12, 31, 23, 59, 59), tz);
    ExpectTime(tp, tz, 1889, 12, 31, 23, 59, 59, -18430, false,
               tz.lookup(tp).abbr);
    tp += absl::time_internal::cctz::seconds(1);
    ExpectTime(tp, tz, 1890, 1, 1, 0, 0, 0, -18430, false, "KMT");
  }
  auto tp = convert(civil_second(1983, 10, 30, 1, 59, 59), tz);
  ExpectTime(tp, tz, 1983, 10, 30, 1, 59, 59, -4 * 3600, true, "EDT");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 1983, 10, 30, 1, 0, 0, -5 * 3600, false, "EST");
  tp = convert(civil_second(1983, 12, 31, 23, 59, 59), tz);
  ExpectTime(tp, tz, 1983, 12, 31, 23, 59, 59, -5 * 3600, false, "EST");
}
// Lisbon 1981: a transition at midnight skips civil times around 00:15,
// exercising the SKIPPED civil_lookup fields (pre/trans/post).
TEST(TimeZoneEdgeCase, EuropeLisbon) {
  const time_zone tz = LoadZone("Europe/Lisbon");
  auto tp = convert(civil_second(1981, 3, 28, 23, 59, 59), tz);
  ExpectTime(tp, tz, 1981, 3, 28, 23, 59, 59, 0, false, "WET");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 1981, 3, 29, 1, 0, 0, 1 * 3600, true, "WEST");
  time_zone::civil_lookup cl1 = tz.lookup(civil_second(1981, 3, 29, 0, 15, 0));
  EXPECT_EQ(time_zone::civil_lookup::SKIPPED, cl1.kind);
  ExpectTime(cl1.pre, tz, 1981, 3, 29, 1, 15, 0, 1 * 3600, true, "WEST");
  ExpectTime(cl1.trans, tz, 1981, 3, 29, 1, 0, 0, 1 * 3600, true, "WEST");
  ExpectTime(cl1.post, tz, 1981, 3, 28, 23, 15, 0, 0 * 3600, false, "WET");
}
// Etc/GMT+N zones have POSIX-style inverted signs: GMT+5 is UTC-5.
TEST(TimeZoneEdgeCase, FixedOffsets) {
  const time_zone gmtm5 = LoadZone("Etc/GMT+5");
  auto tp = convert(civil_second(1970, 1, 1, 0, 0, 0), gmtm5);
  ExpectTime(tp, gmtm5, 1970, 1, 1, 0, 0, 0, -5 * 3600, false, "-05");
  EXPECT_EQ(chrono::system_clock::from_time_t(5 * 3600), tp);
  const time_zone gmtp5 = LoadZone("Etc/GMT-5");
  tp = convert(civil_second(1970, 1, 1, 0, 0, 0), gmtp5);
  ExpectTime(tp, gmtp5, 1970, 1, 1, 0, 0, 0, 5 * 3600, false, "+05");
  EXPECT_EQ(chrono::system_clock::from_time_t(-5 * 3600), tp);
}
// Years <= 0 (astronomical numbering): year 0 precedes year -1's Dec 31.
TEST(TimeZoneEdgeCase, NegativeYear) {
  const time_zone tz = utc_time_zone();
  auto tp = convert(civil_second(0, 1, 1, 0, 0, 0), tz);
  ExpectTime(tp, tz, 0, 1, 1, 0, 0, 0, 0 * 3600, false, "UTC");
  EXPECT_EQ(weekday::saturday, get_weekday(convert(tp, tz)));
  tp -= absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, -1, 12, 31, 23, 59, 59, 0 * 3600, false, "UTC");
  EXPECT_EQ(weekday::friday, get_weekday(convert(tp, tz)));
}
// Crossing the 32-bit time_t limit (2038-01-19 03:14:07) works fine.
TEST(TimeZoneEdgeCase, UTC32bitLimit) {
  const time_zone tz = utc_time_zone();
  auto tp = convert(civil_second(2038, 1, 19, 3, 14, 7), tz);
  ExpectTime(tp, tz, 2038, 1, 19, 3, 14, 7, 0 * 3600, false, "UTC");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 2038, 1, 19, 3, 14, 8, 0 * 3600, false, "UTC");
}
// Rolling over from year 9999 to 10000 (five-digit years).
TEST(TimeZoneEdgeCase, UTC5DigitYear) {
  const time_zone tz = utc_time_zone();
  auto tp = convert(civil_second(9999, 12, 31, 23, 59, 59), tz);
  ExpectTime(tp, tz, 9999, 12, 31, 23, 59, 59, 0 * 3600, false, "UTC");
  tp += absl::time_internal::cctz::seconds(1);
  ExpectTime(tp, tz, 10000, 1, 1, 0, 0, 0, 0 * 3600, false, "UTC");
}
}
}
ABSL_NAMESPACE_END
}
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;
namespace {
// Shared shape function for the sparse-sparse min/max ops: validates two
// (indices, values, shape) input triples and, since the output nnz is
// data-dependent, emits outputs of unknown size.
Status SparseSparseMinOrMaxShapeFn(InferenceContext* c) {
  ShapeHandle ignored;
  // Inputs arrive as two triples: indices (rank 2), values (rank 1),
  // dense shape (rank 1).
  static constexpr int kExpectedRanks[6] = {2, 1, 1, 2, 1, 1};
  for (int i = 0; i < 6; ++i) {
    TF_RETURN_IF_ERROR(c->WithRank(c->input(i), kExpectedRanks[i], &ignored));
  }
  c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
                             InferenceContext::kUnknownDim));
  c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
  return absl::OkStatus();
}
}
// Gradient of SparseAdd: routes the gradient of the sum's values back to
// the values of the two input sparse tensors.
REGISTER_OP("SparseAddGrad")
    .Input("backprop_val_grad: T")
    .Input("a_indices: int64")
    .Input("b_indices: int64")
    .Input("sum_indices: int64")
    .Output("a_val_grad: T")
    .Output("b_val_grad: T")
    .Attr("T: numbertype")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle a_indices;
      ShapeHandle b_indices;
      // Index inputs must be matrices ([nnz, ndims]).
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &a_indices));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &b_indices));
      // One gradient value per non-zero element of each input.
      c->set_output(0, c->Vector(c->Dim(a_indices, 0)));
      c->set_output(1, c->Vector(c->Dim(b_indices, 0)));
      return absl::OkStatus();
    });
// Element-wise sum of two sparse tensors, dropping result values whose
// magnitude is below `thresh`.
REGISTER_OP("SparseAdd")
    .Input("a_indices: int64")
    .Input("a_values: T")
    .Input("a_shape: int64")
    .Input("b_indices: int64")
    .Input("b_values: T")
    .Input("b_shape: int64")
    .Input("thresh: Treal")
    .Output("sum_indices: int64")
    .Output("sum_values: T")
    .Output("sum_shape: int64")
    .Attr("T: numbertype")
    .Attr("Treal: realnumbertype")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle a_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &a_shape));
      // Output nnz is data-dependent (thresholding), so only the rank
      // dimension of the index matrix is known.
      c->set_output(
          0, c->Matrix(InferenceContext::kUnknownDim, c->Dim(a_shape, 0)));
      c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
      // The dense shape of the sum matches the inputs' dense shape.
      c->set_output(2, a_shape);
      return absl::OkStatus();
    });
// Matrix product of a (possibly adjointed) sparse matrix A and a dense
// matrix B.
REGISTER_OP("SparseTensorDenseMatMul")
    .Input("a_indices: Tindices")
    .Input("a_values: T")
    .Input("a_shape: int64")
    .Input("b: T")
    .Output("product: T")
    .Attr("T: type")
    .Attr("Tindices: {int32,int64} = DT_INT64")
    .Attr("adjoint_a: bool = false")
    .Attr("adjoint_b: bool = false")
    .SetShapeFn([](InferenceContext* c) {
      DimensionHandle unused_dim;
      ShapeHandle unused;
      ShapeHandle b;
      ShapeHandle a_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
      // a_shape is a shape-carrying tensor; materialize it as a shape.
      TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &a_shape));
      TF_RETURN_IF_ERROR(c->WithRank(a_shape, 2, &a_shape));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &b));
      bool adjoint_a;
      bool adjoint_b;
      TF_RETURN_IF_ERROR(c->GetAttr("adjoint_a", &adjoint_a));
      TF_RETURN_IF_ERROR(c->GetAttr("adjoint_b", &adjoint_b));
      // With adjoints applied, the product is [left x right] and the two
      // inner dimensions must agree.
      DimensionHandle output_right = c->Dim(b, adjoint_b ? 0 : 1);
      DimensionHandle output_left = c->Dim(a_shape, adjoint_a ? 1 : 0);
      DimensionHandle inner_left = c->Dim(a_shape, adjoint_a ? 0 : 1);
      DimensionHandle inner_right = c->Dim(b, adjoint_b ? 1 : 0);
      TF_RETURN_IF_ERROR(c->Merge(inner_left, inner_right, &unused_dim));
      c->set_output(0, c->Matrix(output_left, output_right));
      return absl::OkStatus();
    });
// Serializes a single SparseTensor into a length-3 vector
// (indices, values, shape components).
REGISTER_OP("SerializeSparse")
    .Input("sparse_indices: int64")
    .Input("sparse_values: T")
    .Input("sparse_shape: int64")
    .Attr("T: type")
    .Output("serialized_sparse: out_type")
    .Attr("out_type: {string, variant} = DT_STRING")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle unused;
      // Validate the canonical SparseTensor component ranks: indices is a
      // matrix, values and shape are vectors.
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
      c->set_output(0, c->Vector(3));
      return absl::OkStatus();
    });
// Serializes an N-minibatch SparseTensor into an [N, 3] matrix, one
// serialized row per minibatch entry.
REGISTER_OP("SerializeManySparse")
    .Input("sparse_indices: int64")
    .Input("sparse_values: T")
    .Input("sparse_shape: int64")
    .Attr("T: type")
    .Output("serialized_sparse: out_type")
    .Attr("out_type: {string, variant} = DT_STRING")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused))
;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
      // N (the minibatch size) is unknown at graph-construction time.
      c->set_output(0, c->Matrix(InferenceContext::kUnknownDim, 3));
      return absl::OkStatus();
    });
// Inverse of SerializeSparse; accepts any rank >= 1 whose innermost
// dimension is 3.
REGISTER_OP("DeserializeSparse")
    .Input("serialized_sparse: Tserialized")
    .Output("sparse_indices: int64")
    .Output("sparse_values: dtype")
    .Output("sparse_shape: int64")
    .Attr("dtype: type")
    .Attr("Tserialized: {string, variant} = DT_STRING")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle unused_shape;
      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &unused_shape));
      DimensionHandle unused;
      // The innermost dimension must hold the 3 serialized components.
      TF_RETURN_IF_ERROR(c->WithValue(c->Dim(c->input(0), -1), 3, &unused));
      // Result shapes are fully data dependent.
      c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
                                 InferenceContext::kUnknownDim));
      c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
      c->set_output(2, c->Vector(InferenceContext::kUnknownDim));
      return absl::OkStatus();
    });
// Inverse of SerializeManySparse; requires an [N, 3] string matrix.
REGISTER_OP("DeserializeManySparse")
    .Input("serialized_sparse: string")
    .Output("sparse_indices: int64")
    .Output("sparse_values: dtype")
    .Output("sparse_shape: int64")
    .Attr("dtype: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle serialized_sparse;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &serialized_sparse));
      DimensionHandle unused;
      TF_RETURN_IF_ERROR(
          c->WithValue(c->Dim(serialized_sparse, 1), 3, &unused));
      c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
                                 InferenceContext::kUnknownDim));
      c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
      c->set_output(2, c->Vector(InferenceContext::kUnknownDim));
      return absl::OkStatus();
    });
// Scatters sparse values into a dense tensor of the requested shape.
REGISTER_OP("SparseToDense")
    .Input("sparse_indices: Tindices")
    .Input("output_shape: Tindices")
    .Input("sparse_values: T")
    .Input("default_value: T")
    .Attr("validate_indices: bool = true")
    .Attr("T: type")
    .Output("dense: T")
    .Attr("Tindices: {int32, int64}")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle out;
      // The output shape is whatever the (possibly constant) `output_shape`
      // tensor specifies.
      TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &out));
      c->set_output(0, out);
      return absl::OkStatus();
    });
// Concatenates N SparseTensors along `concat_dim`. Inputs are packed as
// N indices, then N values, then N shapes.
REGISTER_OP("SparseConcat")
    .Input("indices: N * int64")
    .Input("values: N * T")
    .Input("shapes: N * int64")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Output("output_shape: int64")
    .Attr("concat_dim: int")
    .Attr("N: int >= 2")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      // Output row count is the sum of every input's non-empty count.
      DimensionHandle output_row_count = c->MakeDim(0ll);
      // All inputs must agree on rank (indices column count) and on their
      // dense-shape vector shapes.
      DimensionHandle output_ind_cols = c->UnknownDim();
      ShapeHandle output_shape = c->UnknownShape();
      const int n = c->num_inputs() / 3;
      for (int i = 0; i < n; i++) {
        ShapeHandle ind;
        TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 2, &ind));
        ShapeHandle val;
        TF_RETURN_IF_ERROR(c->WithRank(c->input(i + n), 1, &val));
        ShapeHandle shape;
        TF_RETURN_IF_ERROR(c->WithRank(c->input(i + 2 * n), 1, &shape));
        DimensionHandle num_dim;
        // Each input's indices row count must match its values length.
        TF_RETURN_IF_ERROR(c->Merge(c->Dim(ind, 0), c->Dim(val, 0), &num_dim));
        TF_RETURN_IF_ERROR(
            c->Add(output_row_count, num_dim, &output_row_count));
        TF_RETURN_IF_ERROR(
            c->Merge(output_ind_cols, c->Dim(ind, 1), &output_ind_cols));
        TF_RETURN_IF_ERROR(c->Merge(output_shape, shape, &output_shape));
      }
      c->set_output(0, c->Matrix(output_row_count, output_ind_cols));
      c->set_output(1, c->Vector(output_row_count));
      c->set_output(2, output_shape);
      return absl::OkStatus();
    });
// Generates sparse feature crosses from sparse and dense inputs. The
// result is always a rank-2 SparseTensor, hence indices [?, 2] and a
// length-2 dense shape.
REGISTER_OP("SparseCross")
    .Input("indices: N * int64")
    .Input("values: sparse_types")
    .Input("shapes: N * int64")
    .Input("dense_inputs: dense_types")
    .Output("output_indices: int64")
    .Output("output_values: out_type")
    .Output("output_shape: int64")
    .Attr("N: int >= 0")
    .Attr("hashed_output: bool")
    .Attr("num_buckets: int >= 0")
    .Attr("hash_key: int")
    .Attr("sparse_types: list({int64, string}) >= 0")
    .Attr("dense_types: list({int64, string}) >= 0")
    .Attr("out_type: {int64, string}")
    .Attr("internal_type: {int64, string}")
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      c->set_output(0, c->Matrix(c->UnknownDim(), 2));
      c->set_output(1, c->Vector(c->UnknownDim()));
      c->set_output(2, c->Vector(2));
      return absl::OkStatus();
    });
// V2 of SparseCross with string outputs and a configurable separator.
REGISTER_OP("SparseCrossV2")
    .Input("indices: N * int64")
    .Input("values: sparse_types")
    .Input("shapes: N * int64")
    .Input("dense_inputs: dense_types")
    .Input("sep: string")
    .Output("output_indices: int64")
    .Output("output_values: string")
    .Output("output_shape: int64")
    .Attr("N: int >= 0")
    .Attr("sparse_types: list({int64, string}) >= 0")
    .Attr("dense_types: list({int64, string}) >= 0")
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      c->set_output(0, c->Matrix(c->UnknownDim(), 2));
      c->set_output(1, c->Vector(c->UnknownDim()));
      c->set_output(2, c->Vector(2));
      return absl::OkStatus();
    });
// Hashed variant of SparseCrossV2; values are int64 bucket ids.
REGISTER_OP("SparseCrossHashed")
    .Input("indices: N * int64")
    .Input("values: sparse_types")
    .Input("shapes: N * int64")
    .Input("dense_inputs: dense_types")
    .Input("num_buckets: int64")
    .Input("strong_hash: bool")
    .Input("salt: int64")
    .Output("output_indices: int64")
    .Output("output_values: int64")
    .Output("output_shape: int64")
    .Attr("N: int >= 0")
    .Attr("sparse_types: list({int64, string}) >= 0")
    .Attr("dense_types: list({int64, string}) >= 0")
    .SetShapeFn([](shape_inference::InferenceContext* c) {
      c->set_output(0, c->Matrix(c->UnknownDim(), 2));
      c->set_output(1, c->Vector(c->UnknownDim()));
      c->set_output(2, c->Vector(2));
      return absl::OkStatus();
    });
REGISTER_OP("SparseSplit")
    .Input("split_dim: int64")
    .Input("indices: int64")
    .Input("values: T")
    .Input("shape: int64")
    .Output("output_indices: num_split * int64")
    .Output("output_values: num_split * T")
    .Output("output_shape: num_split * int64")
    .Attr("num_split: int >= 1")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      // Each split's non-empty count is data dependent; only the rank (the
      // column count of the indices matrix) and the dense-shape vector are
      // known statically. Compute the three per-split shapes once.
      const ShapeHandle dense_shape = c->input(3);
      const ShapeHandle split_indices = c->Matrix(
          InferenceContext::kUnknownDim, c->NumElements(dense_shape));
      const ShapeHandle split_values = c->Vector(InferenceContext::kUnknownDim);
      // Outputs are laid out as [indices x num_split] then
      // [values x num_split] then [shape x num_split].
      const int num_splits = c->num_outputs() / 3;
      for (int i = 0; i < num_splits; ++i) {
        c->set_output(i, split_indices);
        c->set_output(num_splits + i, split_values);
        c->set_output(2 * num_splits + i, dense_shape);
      }
      return absl::OkStatus();
    });
// Gradient of SparseSlice: one gradient value per non-empty element of the
// (unsliced) input.
REGISTER_OP("SparseSliceGrad")
    .Input("backprop_val_grad: T")
    .Input("input_indices: int64")
    .Input("input_start: int64")
    .Input("output_indices: int64")
    .Output("val_grad: T")
    .Attr("T: numbertype")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle indices;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 2, &indices));
      // val_grad has the same length as input_indices has rows.
      c->set_output(0, c->Vector(c->Dim(indices, 0)));
      return absl::OkStatus();
    });
// Slices a SparseTensor. The number of surviving elements is data
// dependent, so only the rank carries over.
REGISTER_OP("SparseSlice")
    .Input("indices: int64")
    .Input("values: T")
    .Input("shape: int64")
    .Input("start: int64")
    .Input("size: int64")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Output("output_shape: int64")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle input_shape = c->input(2);
      // NumElements(input_shape) is the rank of the sparse tensor.
      ShapeHandle output_indices =
          c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape));
      ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim);
      ShapeHandle output_shape = input_shape;
      c->set_output(0, output_indices);
      c->set_output(1, output_values);
      c->set_output(2, output_shape);
      return absl::OkStatus();
    });
// Reorders a SparseTensor into canonical row-major order; component shapes
// are unchanged.
REGISTER_OP("SparseReorder")
    .Input("input_indices: int64")
    .Input("input_values: T")
    .Input("input_shape: int64")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle indices;
      ShapeHandle values;
      ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &indices));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &values));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
      // Reordering only permutes rows; shapes pass through.
      c->set_output(0, indices);
      c->set_output(1, values);
      return absl::OkStatus();
    });
// Reshapes a SparseTensor to a new dense shape; the non-empty count is
// preserved, the index width becomes the new rank.
REGISTER_OP("SparseReshape")
    .Input("input_indices: int64")
    .Input("input_shape: int64")
    .Input("new_shape: int64")
    .Output("output_indices: int64")
    .Output("output_shape: int64")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle indices;
      ShapeHandle unused;
      ShapeHandle new_shape;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &indices));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &new_shape));
      // [nnz, new_rank] where new_rank is the length of `new_shape`.
      c->set_output(0, c->Matrix(c->Dim(indices, 0), c->Dim(new_shape, 0)));
      c->set_output(1, new_shape);
      return absl::OkStatus();
    });
// Adds a SparseTensor to a dense tensor; the result has the dense
// operand's shape.
REGISTER_OP("SparseTensorDenseAdd")
    .Input("a_indices: Tindices")
    .Input("a_values: T")
    .Input("a_shape: Tindices")
    .Input("b: T")
    .Output("output: T")
    .Attr("T: numbertype")
    .Attr("Tindices: {int32, int64}")
    .SetShapeFn([](InferenceContext* c) {
      // Output shape equals the dense operand `b`'s shape.
      c->set_output(0, c->input(3));
      return absl::OkStatus();
    });
// Reduce-max over a SparseTensor, producing a dense output.
REGISTER_OP("SparseReduceMax")
    .Input("input_indices: int64")
    .Input("input_values: T")
    .Input("input_shape: int64")
    .Input("reduction_axes: int32")
    .Attr("keep_dims: bool = False")
    .Output("output: T")
    .Attr("T: realnumbertype")
    .SetShapeFn(shape_inference::SparseReduceShapeFn);
// Reduce-max over a SparseTensor, producing a sparse output; result
// shapes are fully data dependent.
REGISTER_OP("SparseReduceMaxSparse")
    .Input("input_indices: int64")
    .Input("input_values: T")
    .Input("input_shape: int64")
    .Input("reduction_axes: int32")
    .Attr("keep_dims: bool = False")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Output("output_shape: int64")
    .Attr("T: realnumbertype")
    .SetShapeFn(shape_inference::UnknownShape);
// Reduce-sum over a SparseTensor, producing a dense output.
REGISTER_OP("SparseReduceSum")
    .Input("input_indices: int64")
    .Input("input_values: T")
    .Input("input_shape: int64")
    .Input("reduction_axes: int32")
    .Attr("keep_dims: bool = False")
    .Output("output: T")
    .Attr("T: numbertype")
    .SetShapeFn(shape_inference::SparseReduceShapeFn);
// Reduce-sum over a SparseTensor, producing a sparse output.
REGISTER_OP("SparseReduceSumSparse")
    .Input("input_indices: int64")
    .Input("input_values: T")
    .Input("input_shape: int64")
    .Input("reduction_axes: int32")
    .Attr("keep_dims: bool = False")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Output("output_shape: int64")
    .Attr("T: numbertype")
    .SetShapeFn(shape_inference::UnknownShape);
// Shared signature + shape function for the element-wise sparse/dense cwise
// ops: the output is a vector with one entry per non-empty element of the
// sparse operand (sp_indices.dim(0)).
// Note: returns absl::OkStatus() fully qualified, consistent with every
// other shape function in this file (previously relied on the unqualified
// tensorflow::OkStatus alias).
#define SPARSE_DENSE_CWISE_SIGNATURE()                              \
  Input("sp_indices: int64")                                        \
      .Input("sp_values: T")                                        \
      .Input("sp_shape: int64")                                     \
      .Input("dense: T")                                            \
      .Output("output: T")                                          \
      .Attr("T: numbertype")                                        \
      .SetShapeFn([](InferenceContext* c) {                         \
        ShapeHandle input;                                          \
        TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input));    \
        c->set_output(0, c->Vector(c->Dim(input, 0)));              \
        return absl::OkStatus();                                    \
      })
REGISTER_OP("SparseDenseCwiseMul").SPARSE_DENSE_CWISE_SIGNATURE();
REGISTER_OP("SparseDenseCwiseDiv").SPARSE_DENSE_CWISE_SIGNATURE();
REGISTER_OP("SparseDenseCwiseAdd").SPARSE_DENSE_CWISE_SIGNATURE();
#undef SPARSE_DENSE_CWISE_SIGNATURE
// Softmax over the non-empty values of a SparseTensor; output has one
// value per non-empty element.
REGISTER_OP("SparseSoftmax")
    .Input("sp_indices: int64")
    .Input("sp_values: T")
    .Input("sp_shape: int64")
    .Output("output: T")
    .Attr("T: {half, float, double}")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle unused;
      ShapeHandle values;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &values));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
      // Output values align one-to-one with the input values vector.
      c->set_output(0, values);
      return absl::OkStatus();
    });
// Element-wise max of two SparseTensors; shape logic shared with the
// minimum variant below.
REGISTER_OP("SparseSparseMaximum")
    .Input("a_indices: int64")
    .Input("a_values: T")
    .Input("a_shape: int64")
    .Input("b_indices: int64")
    .Input("b_values: T")
    .Input("b_shape: int64")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Attr("T: realnumbertype")
    .SetShapeFn(SparseSparseMinOrMaxShapeFn);
// Element-wise min of two SparseTensors.
REGISTER_OP("SparseSparseMinimum")
    .Input("a_indices: int64")
    .Input("a_values: T")
    .Input("a_shape: int64")
    .Input("b_indices: int64")
    .Input("b_values: T")
    .Input("b_shape: int64")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Attr("T: numbertype")
    .SetShapeFn(SparseSparseMinOrMaxShapeFn);
// Stateful op: stores one SparseTensor in a container-scoped map and
// returns its scalar handle.
REGISTER_OP("AddSparseToTensorsMap")
    .Input("sparse_indices: int64")
    .Input("sparse_values: T")
    .Input("sparse_shape: int64")
    .Output("sparse_handle: int64")
    .Attr("T: type")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .SetIsStateful()
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused))
;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
      c->set_output(0, c->Scalar());
      return absl::OkStatus();
    });
// Stateful op: stores an N-minibatch SparseTensor and returns a vector of
// N handles (N unknown statically).
REGISTER_OP("AddManySparseToTensorsMap")
    .Input("sparse_indices: int64")
    .Input("sparse_values: T")
    .Input("sparse_shape: int64")
    .Output("sparse_handles: int64")
    .Attr("T: type")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .SetIsStateful()
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 1, &unused));
      c->set_output(0, c->Vector(InferenceContext::kUnknownDim));
      return absl::OkStatus();
    });
// Stateful op: retrieves and concatenates the SparseTensors referenced by
// a vector of handles. Result shapes are data dependent.
REGISTER_OP("TakeManySparseFromTensorsMap")
    .Input("sparse_handles: int64")
    .Output("sparse_indices: int64")
    .Output("sparse_values: dtype")
    .Output("sparse_shape: int64")
    .Attr("dtype: type")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .SetIsStateful()
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle sparse_handles;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &sparse_handles));
      c->set_output(0, c->Matrix(InferenceContext::kUnknownDim,
                                 InferenceContext::kUnknownDim));
      c->set_output(1, c->Vector(InferenceContext::kUnknownDim));
      c->set_output(2, c->Vector(InferenceContext::kUnknownDim));
      return absl::OkStatus();
    });
// Fills empty rows of a 2-D (or higher) SparseTensor with `default_value`.
REGISTER_OP("SparseFillEmptyRows")
    .Input("indices: int64")
    .Input("values: T")
    .Input("dense_shape: int64")
    .Input("default_value: T")
    .Output("output_indices: int64")
    .Output("output_values: T")
    .Output("empty_row_indicator: bool")
    .Output("reverse_index_map: int64")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle input_indices = c->input(0);
      TF_RETURN_IF_ERROR(c->WithRank(input_indices, 2, &input_indices));
      ShapeHandle input_values = c->input(1);
      TF_RETURN_IF_ERROR(c->WithRank(input_values, 1, &input_values));
      ShapeHandle input_shape = c->input(2);
      TF_RETURN_IF_ERROR(c->WithRank(input_shape, 1, &input_shape));
      // default_value must be a scalar.
      ShapeHandle default_value = c->input(3);
      TF_RETURN_IF_ERROR(c->WithRank(default_value, 0, &default_value));
      // N is the number of input non-empty elements; indices and values
      // must agree on it.
      DimensionHandle N = c->Dim(input_indices, 0);
      TF_RETURN_IF_ERROR(c->Merge(N, c->Dim(input_values, 0), &N));
      DimensionHandle unused_dim;
      // The indices column count must match the dense rank.
      TF_RETURN_IF_ERROR(c->Merge(c->Dim(input_indices, 1),
                                  c->Dim(input_shape, 0), &unused_dim));
      if (c->Value(c->NumElements(input_shape)) == 0)
        return errors::InvalidArgument("dense_shape must not be empty");
      // Output non-empty count is data dependent (rows may be added).
      ShapeHandle output_indices =
          c->Matrix(InferenceContext::kUnknownDim, c->NumElements(input_shape));
      ShapeHandle output_values = c->Vector(InferenceContext::kUnknownDim);
      // If dense_shape is a constant, empty_row_indicator's length (the row
      // count) can be inferred from its first element.
      ShapeHandle constant_input_shape;
      TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(2, &constant_input_shape));
      ShapeHandle empty_row_indicator =
          c->Vector(c->Dim(constant_input_shape, 0));
      // One reverse-map entry per original input element.
      ShapeHandle reverse_index_map = c->Vector(N);
      c->set_output(0, output_indices);
      c->set_output(1, output_values);
      c->set_output(2, empty_row_indicator);
      c->set_output(3, reverse_index_map);
      return absl::OkStatus();
    });
// Gradient of SparseFillEmptyRows: maps gradients back through
// reverse_index_map; d_default_value is a scalar.
REGISTER_OP("SparseFillEmptyRowsGrad")
    .Input("reverse_index_map: int64")
    .Input("grad_values: T")
    .Output("d_values: T")
    .Output("d_default_value: T")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle reverse_index_map = c->input(0);
      TF_RETURN_IF_ERROR(c->WithRank(reverse_index_map, 1, &reverse_index_map));
      ShapeHandle grad_values = c->input(1);
      TF_RETURN_IF_ERROR(c->WithRank(grad_values, 1, &grad_values));
      // One d_values entry per original input element.
      c->set_output(0, reverse_index_map);
      c->set_output(1, c->Scalar());
      return absl::OkStatus();
    });
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Output shape must equal the dense operand's shape (input 3).
TEST(SparseOpsTest, SparseTensorDenseAdd_ShapeFn) {
  ShapeInferenceTestOp op("SparseTensorDenseAdd");
  INFER_OK(op, "?;?;?;?", "in3");
}
// sum_shape mirrors a_shape; sum_indices columns come from a_shape's length.
TEST(SparseOpsTest, SparseAdd_ShapeFn) {
  ShapeInferenceTestOp op("SparseAdd");
  INFER_OK(op, "?;?;?;?;?;?;?", "[?,?];[?];[?]");
  INFER_OK(op, "?;?;[?];?;?;?;?", "[?,d2_0];[?];in2");
  INFER_OK(op, "?;?;[1];?;?;?;?", "[?,d2_0];[?];in2");
}
// Gradients are vectors sized by the respective indices' row counts.
TEST(SparseOpsTest, SparseAddGrad_ShapeFn) {
  ShapeInferenceTestOp op("SparseAddGrad");
  INFER_ERROR("must be rank 2", op, "?;?;[1];?");
  INFER_ERROR("must be rank 2", op, "?;[1];?;?");
  INFER_OK(op, "?;?;?;?", "[?];[?]");
  INFER_OK(op, "?;[?,?];[?,?];?", "[d1_0];[d2_0]");
}
// val_grad length equals input_indices' row count.
TEST(SparseOpsTest, SparseSliceGrad_ShapeFn) {
  ShapeInferenceTestOp op("SparseSliceGrad");
  INFER_ERROR("must be rank 2", op, "?;[1];?;?");
  INFER_OK(op, "?;?;?;?", "[?]");
  INFER_OK(op, "?;[?,?];?;?", "[d1_0]");
}
// Reordering passes indices/values shapes straight through.
TEST(SparseOpsTest, SparseReorder_ShapeFn) {
  ShapeInferenceTestOp op("SparseReorder");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[?,?];[?]");
  INFER_OK(op, "[?,?];[?];?", "in0;in1");
}
// Output indices: [nnz, new_rank]; output shape echoes new_shape.
TEST(SparseOpsTest, SparseReshape_ShapeFn) {
  ShapeInferenceTestOp op("SparseReshape");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[?,?];[?]");
  INFER_OK(op, "[?,?];?;[?]", "[d0_0,d2_0];in2");
}
// With num_split=2 there are 6 outputs; index width is the product of the
// dense-shape vector's elements (rank via NumElements).
TEST(SparseOpsTest, SparseSplit_ShapeFn) {
  ShapeInferenceTestOp op("SparseSplit");
  TF_ASSERT_OK(NodeDefBuilder("test", "SparseSplit")
                   .Input({"split_dim", 0, DT_INT64})
                   .Input({"indices", 1, DT_INT64})
                   .Input({"values", 2, DT_INT64})
                   .Input({"shape", 3, DT_INT64})
                   .Attr("num_split", 2)
                   .Finalize(&op.node_def));
  INFER_OK(op, "?;?;?;?", "[?,?];[?,?];[?];[?];in3;in3");
  INFER_OK(op, "?;?;?;[5,4,3,2,1]", "[?,120];[?,120];[?];[?];in3;in3");
}
// Output shape comes from the (possibly constant) output_shape tensor.
TEST(SparseOpsTest, SparseToDense_ShapeFn) {
  ShapeInferenceTestOp op("SparseToDense");
  op.input_tensors.resize(4);
  INFER_OK(op, "?;?;?;?", "?");
  INFER_OK(op, "?;[?];?;?", "?");
  INFER_OK(op, "?;[4];?;?", "[?,?,?,?]");
  // With a constant shape tensor the exact output shape is known.
  Tensor in_t = test::AsTensor<int32>({1, 2, 3, 4});
  op.input_tensors[1] = &in_t;
  INFER_OK(op, "?;[4];?;?", "[1,2,3,4]");
}
// SparseReduceShapeFn yields an unknown shape without constant inputs.
TEST(SparseOpsTest, SparseReduceSum_ShapeFn) {
  ShapeInferenceTestOp op("SparseReduceSum");
  TF_ASSERT_OK(NodeDefBuilder("test", "SparseReduceSum")
                   .Input({"input_indices", 0, DT_INT64})
                   .Input({"input_values", 1, DT_INT64})
                   .Input({"input_shape", 2, DT_INT64})
                   .Input({"reduction_axes", 3, DT_INT32})
                   .Attr("keep_dims", false)
                   .Finalize(&op.node_def));
  INFER_OK(op, "?;?;?;?", "?");
}
// Always serializes to a length-3 vector.
TEST(SparseOpsTest, SerializeSparse_ShapeFn) {
  ShapeInferenceTestOp op("SerializeSparse");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[3]");
}
// Always serializes to an [N, 3] matrix, N unknown.
TEST(SparseOpsTest, SerializeManySparse_ShapeFn) {
  ShapeInferenceTestOp op("SerializeManySparse");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[?,3]");
}
// Requires rank-2 input whose second dimension is exactly 3.
TEST(SparseOpsTest, DeserializeManySparse_ShapeFn) {
  ShapeInferenceTestOp op("DeserializeManySparse");
  INFER_ERROR("must be rank 2", op, "[1]");
  INFER_ERROR("must be 3", op, "[?,4]");
  INFER_OK(op, "?", "[?,?];[?];[?]");
  INFER_OK(op, "[?,3]", "[?,?];[?];[?]");
}
// Exercises the adjoint combinations and constant a_shape handling.
TEST(SparseOpsTest, SparseTensorDenseMatMul_ShapeFn) {
  ShapeInferenceTestOp op("SparseTensorDenseMatMul");
  auto set_adjoints = [&op](bool adjoint_a, bool adjoint_b) {
    TF_ASSERT_OK(NodeDefBuilder("test", "SparseTensorDenseMatMul")
                     .Input({"a_indices", 1, DT_INT64})
                     .Input({"a_values", 2, DT_INT64})
                     .Input({"a_shape", 3, DT_INT64})
                     .Input({"b", 3, DT_INT64})
                     .Attr("adjoint_a", adjoint_a)
                     .Attr("adjoint_b", adjoint_b)
                     .Finalize(&op.node_def));
  };
  set_adjoints(false, false);
  INFER_ERROR("must be rank 2", op, "[1];?;?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?;?");
  INFER_ERROR("must be rank 1", op, "?;?;[];?");
  INFER_ERROR("must be rank 2", op, "?;?;[3];?");
  INFER_ERROR("must be rank 2", op, "?;?;?;[]");
  INFER_OK(op, "?;?;?;?", "[?,?]");
  INFER_OK(op, "?;?;?;[?,?]", "[?,d3_1]");
  INFER_OK(op, "?;?;?;[1,2]", "[?,d3_1]");
  INFER_OK(op, "?;?;[2];[1,2]", "[?,d3_1]");
  // adjoint_b swaps which dimension of b feeds the output columns.
  set_adjoints(false, true);
  INFER_OK(op, "?;?;?;[?,?]", "[?,d3_0]");
  INFER_OK(op, "?;?;?;[1,2]", "[?,d3_0]");
  // A constant a_shape tensor pins the sparse operand's dimensions.
  Tensor a_shape_t = test::AsTensor<int64_t>(std::vector<int64_t>{3, 1});
  op.input_tensors.resize(4);
  op.input_tensors[2] = &a_shape_t;
  set_adjoints(false, false);
  INFER_OK(op, "?;?;[2];[1,2]", "[3,d3_1]");
  INFER_OK(op, "?;?;?;[1,2]", "[3,d3_1]");
  // With adjoint_a the inner dims (3 vs 1) no longer match.
  set_adjoints(true, false);
  INFER_ERROR("must be equal", op, "?;?;[2];[1,2]");
  // A rank-3 constant shape is rejected.
  a_shape_t = test::AsTensor<int64_t>(std::vector<int64_t>{3, 1, 2});
  INFER_ERROR("must be rank 2 but is rank 3", op, "?;?;[3];[1,2]");
}
// Output values align one-to-one with sp_values.
TEST(SparseOpsTest, SparseSoftmax_ShapeFn) {
  ShapeInferenceTestOp op("SparseSoftmax");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[?]");
  INFER_OK(op, "?;[?];?", "in1");
  INFER_OK(op, "?;[5];?", "in1");
}
// Shared shape fn for both variants; output sizes are data dependent.
// NOTE(review): test name says "MinAndMin" but it covers Maximum+Minimum.
TEST(SparseOpsTest, SparseSparseMinAndMin_ShapeFn) {
  for (const char* op_name : {"SparseSparseMaximum", "SparseSparseMinimum"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_ERROR("must be rank 2", op, "[1];?;?;?;?;?");
    INFER_ERROR("must be rank 1", op, "?;[];?;?;?;?");
    INFER_ERROR("must be rank 1", op, "?;?;[];?;?;?");
    INFER_ERROR("must be rank 2", op, "?;?;?;[];?;?");
    INFER_ERROR("must be rank 1", op, "?;?;?;?;[];?");
    INFER_ERROR("must be rank 1", op, "?;?;?;?;?;[]");
    INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?]");
    INFER_OK(op, "?;[?];?;?;?;?", "[?,?];[?]");
    INFER_OK(op, "?;[5];?;?;?;?", "[?,?];[?]");
  }
}
// Row counts add across inputs; index widths and shapes must merge.
TEST(SparseOpsTest, SparseConcat_ShapeFn) {
  ShapeInferenceTestOp op("SparseConcat");
  std::vector<NodeDefBuilder::NodeOut> src_list;
  int n = 2;
  src_list.reserve(n);
  for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_INT64);
  TF_ASSERT_OK(NodeDefBuilder("test", "SparseConcat")
                   .Input(src_list)
                   .Input(src_list)
                   .Input(src_list)
                   .Attr("N", n)
                   .Finalize(&op.node_def));
  INFER_ERROR("must be rank 2", op, "[1];?;?;?;?;?");
  INFER_ERROR("must be rank 2", op, "?;[1];?;?;?;?");
  INFER_ERROR("must be rank 1", op, "?;?;[];?;?;?");
  INFER_ERROR("must be rank 1", op, "?;?;?;[];?;?");
  INFER_ERROR("must be rank 1", op, "?;?;?;?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;?;?;?;[]");
  INFER_OK(op, "?;?;?;?;?;?", "[?,?];[?];[?]");
  INFER_OK(op, "?;?;?;?;[?];[?]", "[?,?];[?];in4|in5");
  INFER_OK(op, "?;?;?;?;[?];[5]", "[?,?];[?];in5");
  INFER_OK(op, "[4,5];[3,?];?;?;?;?", "[7,d0_1];[7];[?]");
  INFER_OK(op, "?;?;[4];[3];?;?", "[7,?];[7];[?]");
  INFER_OK(op, "[?,2];[3,?];[4];[?];?;?", "[7,d0_1];[7];[?]");
  // Mismatched per-input nnz, index width, or shape length is an error.
  INFER_ERROR("but are 100 and 200", op, "[100,?];[?,?];[200];[?];?;?");
  INFER_ERROR("but are 2 and 3", op, "[?,2];[?,3];[?];[?];?;?");
  INFER_ERROR("but are 4 and 5", op, "?;?;?;?;[4];[5]");
}
// All three cwise ops share one shape fn: vector sized by sp_indices rows.
TEST(SparseOpsTest, SparseDenseCwise_ShapeFn) {
  for (const char* op_name :
       {"SparseDenseCwiseMul", "SparseDenseCwiseDiv", "SparseDenseCwiseAdd"}) {
    ShapeInferenceTestOp op(op_name);
    INFER_OK(op, "?;?;?;?", "[?]");
    INFER_OK(op, "[?,?];?;?;?", "[d0_0]");
    INFER_ERROR("must be rank 2", op, "[1];?;?;?");
  }
}
// Single-tensor add yields a scalar handle.
TEST(SparseOpsTest, AddSparseToTensorsMap_ShapeFn) {
  ShapeInferenceTestOp op("AddSparseToTensorsMap");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[]");
}
// Minibatch add yields a vector of handles of unknown length.
TEST(SparseOpsTest, AddManySparseToTensorsMap_ShapeFn) {
  ShapeInferenceTestOp op("AddManySparseToTensorsMap");
  INFER_ERROR("must be rank 2", op, "[1];?;?");
  INFER_ERROR("must be rank 1", op, "?;[];?");
  INFER_ERROR("must be rank 1", op, "?;?;[]");
  INFER_OK(op, "?;?;?", "[?]");
}
// Handles must be a vector; result shapes are fully data dependent.
TEST(SparseOpsTest, TakeManySparseFromTensorsMap_ShapeFn) {
  ShapeInferenceTestOp op("TakeManySparseFromTensorsMap");
  INFER_ERROR("must be rank 1", op, "[?,1]");
  INFER_OK(op, "?", "[?,?];[?];[?]");
  INFER_OK(op, "[?]", "[?,?];[?];[?]");
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/sparse_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
14548089-c198-40c3-a2d1-e69867fb87bd | cpp | google/arolla | qtype_inference | arolla/expr/operator_loader/qtype_inference.cc | arolla/expr/operator_loader/qtype_inference_test.cc | #include "arolla/expr/operator_loader/qtype_inference.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/operator_loader/helper.h"
#include "arolla/expr/operator_loader/parameter_qtypes.h"
#include "arolla/expr/operator_loader/qtype_constraint.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
namespace {
using expr::ExprNodePtr;
using expr::GetLeafKeys;
using expr::PopulateQTypes;
using expr::ToDebugString;
// Converts a qtype inference expression to its executable form: replaces
// placeholders with leaves, and verifies that, with every leaf annotated as
// QTYPE, the expression itself evaluates to a QTYPE. Returns an
// InvalidArgument error otherwise.
absl::StatusOr<ExprNodePtr> NormalizeQTypeInferenceExpr(ExprNodePtr expr) {
  ASSIGN_OR_RETURN(auto result, ReplacePlaceholdersWithLeaves(expr));
  // All parameters of a qtype inference expression carry QTYPE values.
  absl::flat_hash_map<std::string, QTypePtr> leaf_qtypes;
  for (const auto& leaf_key : GetLeafKeys(result)) {
    leaf_qtypes[leaf_key] = GetQTypeQType();
  }
  const QType* output_qtype = nullptr;
  // Propagate qtypes through the expression; a failure leaves
  // output_qtype == nullptr and is reported below.
  if (const auto annotated_expr = PopulateQTypes(result, leaf_qtypes);
      annotated_expr.ok()) {
    output_qtype = (*annotated_expr)->qtype();
  }
  if (output_qtype == GetQType<QTypePtr>()) {
    return result;
  }
  if (output_qtype == nullptr) {
    return absl::InvalidArgumentError(
        "Error while computing output QType of a QType inference expression: " +
        ToDebugString(expr));
  }
  // The expression type-checked but yields something other than QTYPE.
  return absl::InvalidArgumentError(absl::StrFormat(
      "expected a qtype inference expression to return %s, got %s: %s",
      GetQTypeQType()->name(), output_qtype->name(), ToDebugString(expr)));
}
}
// Builds a QTypeInferenceFn that first evaluates `qtype_constraints` and,
// if they hold and every required parameter qtype is present, executes the
// normalized `qtype_inference_expr` to produce the output qtype. The
// returned fn yields nullptr when inference is not yet possible, and an
// error when the expression produces no qtype.
absl::StatusOr<QTypeInferenceFn> MakeQTypeInferenceFn(
    absl::Span<const QTypeConstraint> qtype_constraints,
    ExprNodePtr qtype_inference_expr) {
  ASSIGN_OR_RETURN(auto normalized_qtype_inference_expr,
                   NormalizeQTypeInferenceExpr(qtype_inference_expr));
  // Leaf keys of the normalized expression are the parameters the fn needs.
  std::vector<std::string> required_args =
      GetLeafKeys(normalized_qtype_inference_expr);
  ASSIGN_OR_RETURN(auto qtype_constraint_fn,
                   MakeQTypeConstraintFn(qtype_constraints));
  ASSIGN_OR_RETURN(auto executor, MakeParameterQTypeModelExecutor(std::move(
                                      normalized_qtype_inference_expr)));
  // The original (un-normalized) expression is kept only for error messages.
  return
      [qtype_constraint_fn = std::move(qtype_constraint_fn),
       executor = std::move(executor),
       qtype_inference_expr = std::move(qtype_inference_expr),
       required_args =
           std::move(required_args)](const ParameterQTypes& parameter_qtypes)
      -> absl::StatusOr<const QType* > {
    ASSIGN_OR_RETURN(bool constraints_result,
                     qtype_constraint_fn(parameter_qtypes));
    if (!constraints_result) {
      return nullptr;
    }
    // If any required parameter qtype is still unknown, defer inference.
    for (const std::string& name : required_args) {
      if (!parameter_qtypes.contains(name)) {
        return nullptr;
      }
    }
    ASSIGN_OR_RETURN(auto qtype_typed_value, executor(parameter_qtypes));
    DCHECK_EQ(
        qtype_typed_value.GetType(),
        GetQTypeQType());
    auto* qtype = qtype_typed_value.UnsafeAs<QTypePtr>();
    // NOTHING (or null) means the expression could not derive a qtype.
    if (qtype == nullptr || qtype == GetNothingQType()) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "qtype inference expression produced no qtype: %s, %s",
          ToDebugString(qtype_inference_expr),
          FormatParameterQTypes(parameter_qtypes)));
    }
    return qtype;
  };
}
} | #include "arolla/expr/operator_loader/qtype_inference.h"
#include <cstdint>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/expr.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::operator_loader {
namespace {
using expr::CallOp;
using expr::Literal;
using expr::Placeholder;
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
// Fixture providing a sample inference fn: requires scalar x and y, and
// infers the output as their common qtype.
class QTypeInferenceTest : public ::testing::Test {
 protected:
  static absl::StatusOr<QTypeInferenceFn> SampleInferenceFn() {
    ASSIGN_OR_RETURN(auto x_is_scalar_qtype_expr,
                     CallOp("qtype.is_scalar_qtype", {Placeholder("x")}));
    ASSIGN_OR_RETURN(auto y_is_scalar_qtype_expr,
                     CallOp("qtype.is_scalar_qtype", {Placeholder("y")}));
    ASSIGN_OR_RETURN(
        auto x_y_common_qtype_expr,
        CallOp("qtype.common_qtype", {Placeholder("x"), Placeholder("y")}));
    return MakeQTypeInferenceFn(
        {
            {x_is_scalar_qtype_expr, "expected `x` to be scalar, got {x}"},
            {y_is_scalar_qtype_expr, "expected `y` to be scalar, got {y}"},
        },
        x_y_common_qtype_expr);
  }
};
// Successful inference, plus nullptr when a parameter is missing or NOTHING.
TEST_F(QTypeInferenceTest, Ok) {
  ASSERT_OK_AND_ASSIGN(auto fn, SampleInferenceFn());
  EXPECT_THAT(fn({
                  {"x", GetQType<int64_t>()},
                  {"y", GetQType<int32_t>()},
              }),
              IsOkAndHolds(GetQType<int64_t>()));
  EXPECT_THAT(fn({
                  {"y", GetQType<int32_t>()},
              }),
              IsOkAndHolds(nullptr));
  EXPECT_THAT(fn({
                  {"x", GetQType<int64_t>()},
                  {"y", GetNothingQType()},
              }),
              IsOkAndHolds(nullptr));
  EXPECT_THAT(fn({}), IsOkAndHolds(nullptr));
}
// Constraint violations and no-common-qtype cases surface as errors.
TEST_F(QTypeInferenceTest, ErrorMessage) {
  ASSERT_OK_AND_ASSIGN(auto fn, SampleInferenceFn());
  EXPECT_THAT(
      fn({
          {"x", GetQType<int32_t>()},
          {"y", GetQType<ScalarShape>()},
      }),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr("expected `y` to be scalar, got SCALAR_SHAPE")));
  EXPECT_THAT(
      fn({
          {"x", GetQType<int32_t>()},
          {"y", GetQType<Bytes>()},
      }),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr(
                   "qtype inference expression produced no "
                   "qtype: M.qtype.common_qtype(P.x, P.y), x:INT32, y:BYTES")));
}
// An expression whose output qtype cannot be computed is rejected eagerly.
TEST_F(QTypeInferenceTest, NoOutputQType) {
  ASSERT_OK_AND_ASSIGN(
      auto expr, CallOp("core.get_nth", {Placeholder("x"), Placeholder("y")}));
  EXPECT_THAT(
      MakeQTypeInferenceFn({}, expr),
      StatusIs(
          absl::StatusCode::kInvalidArgument,
          HasSubstr("Error while computing output QType of a QType inference "
                    "expression: M.core.get_nth(P.x, P.y)")));
}
// An expression returning a non-QTYPE value is rejected eagerly.
TEST_F(QTypeInferenceTest, BadOutputQType) {
  auto x = Literal(1.f);
  EXPECT_THAT(MakeQTypeInferenceFn({}, x),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected a qtype inference expression to "
                                 "return QTYPE, got FLOAT32: 1.")));
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/qtype_inference.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_loader/qtype_inference_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c721652f-e92d-4952-917a-d4fcc38955ce | cpp | tensorflow/tensorflow | broadcast | tensorflow/compiler/tf2xla/lib/broadcast.cc | third_party/xla/xla/tests/broadcast_test.cc | #include "tensorflow/compiler/tf2xla/lib/broadcast.h"
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "xla/client/lib/broadcast.h"
#include "xla/client/xla_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/bcast.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
// Broadcasts `input` to the requested `output_dims` by delegating to the
// XLA client library's broadcast helper. Returns the broadcast op, or an
// error status when the shapes are incompatible.
absl::StatusOr<xla::XlaOp> BroadcastTo(xla::XlaOp input,
                                       absl::Span<int64_t const> output_dims) {
  auto broadcast_or = xla::BroadcastTo(input, output_dims);
  return broadcast_or;
}
// Broadcasts `*lhs` and `*rhs` in place to a common shape using NumPy-style
// broadcasting rules (via tensorflow::BCast). On success both ops have the
// same shape; returns InvalidArgument if the shapes cannot be broadcast.
Status BroadcastOpsToSame(xla::XlaOp* lhs, xla::XlaOp* rhs) {
  TF_ASSIGN_OR_RETURN(auto lhs_xla_shape, lhs->builder()->GetShape(*lhs));
  TF_ASSIGN_OR_RETURN(auto rhs_xla_shape, rhs->builder()->GetShape(*rhs));
  tensorflow::TensorShape lhs_tf_shape;
  tensorflow::TensorShape rhs_tf_shape;
  TF_RETURN_IF_ERROR(XLAShapeToTensorShape(lhs_xla_shape, &lhs_tf_shape));
  TF_RETURN_IF_ERROR(XLAShapeToTensorShape(rhs_xla_shape, &rhs_tf_shape));
  // Fast path: shapes already match, nothing to do.
  if (!lhs_tf_shape.IsSameSize(rhs_tf_shape)) {
    tensorflow::BCast bcast(tensorflow::BCast::FromShape(lhs_tf_shape),
                            tensorflow::BCast::FromShape(rhs_tf_shape));
    if (!bcast.IsValid()) {
      return tensorflow::errors::InvalidArgument(
          "Dimensions cannot be made to match through broadcasting");
    }
    // Broadcast each operand to the common output shape computed by BCast.
    TF_ASSIGN_OR_RETURN(*lhs, xla::BroadcastTo(*lhs, bcast.output_shape()));
    TF_ASSIGN_OR_RETURN(*rhs, xla::BroadcastTo(*rhs, bcast.output_shape()));
  }
  return absl::OkStatus();
}
} | #include <memory>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
// Test fixture for executing HLO broadcast instructions end to end.
class BroadcastTest : public HloTestBase {};

// Broadcasting a scalar to a scalar shape is the identity.
XLA_TEST_F(BroadcastTest, BroadcastScalarToScalar) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {}), input, {}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(LiteralUtil::CreateR0<float>(42.0), result,
                                    error_spec_));
}

// A scalar broadcast to 2x2 replicates the value into every element.
XLA_TEST_F(BroadcastTest, BroadcastScalarTo2D) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 2}), input, {}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR2<float>({{42.0, 42.0}, {42.0, 42.0}}), result,
      error_spec_));
}

// Broadcasting a vector along dimension 0 vs dimension 1 produces a column
// replication vs a row replication respectively.
XLA_TEST_F(BroadcastTest, BroadcastVectorTo2D) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
  auto element1 = builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {3, 2}), input, {0}));
  auto element2 = builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 3}), input, {1}));
  builder.AddInstruction(HloInstruction::CreateTuple({element1, element2}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR2<float>({{1.0, 1.0}, {2.0, 2.0}, {3.0, 3.0}}),
      LiteralSlice(result, {0}), error_spec_));
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR2<float>({{1.0, 2.0, 3.0}, {1.0, 2.0, 3.0}}),
      LiteralSlice(result, {1}), error_spec_));
}

// A same-rank broadcast with identity dimension mapping {0, 1} is a no-op.
XLA_TEST_F(BroadcastTest, Broadcast2DTo2D) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 2}), input, {0, 1}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}), result,
      error_spec_));
}

// A {1, 0} dimension mapping transposes the input (maps input dim 0 to
// output dim 1 and vice versa).
XLA_TEST_F(BroadcastTest, Broadcast2DTo2DTranspose) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 2}), input, {1, 0}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR2<float>({{1.0, 3.0}, {2.0, 4.0}}), result,
      error_spec_));
}

// Broadcasting 2x2 into 2x3x2 along dims {0, 2} replicates each row along
// the new middle dimension.
XLA_TEST_F(BroadcastTest, Broadcast2DTo3D) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}})));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 3, 2}), input, {0, 2}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR3<float>({{{1.0, 2.0}, {1.0, 2.0}, {1.0, 2.0}},
                                    {{3.0, 4.0}, {3.0, 4.0}, {3.0, 4.0}}}),
      result, error_spec_));
}
// A rank-1 input of size 2 broadcast into dim 1 of a 2x2x3x3 output.
TEST_F(BroadcastTest, Broadcast_R1_2_To_R4_2x2x3x3) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>({1.0, 2.0})));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 2, 3, 3}), input, {1}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  Array4D<float> expected(2, 2, 3, 3);
  Array2D<float> pz({{1, 2}, {1, 2}});
  expected.FillWithPZ(pz);
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}

// Large minor-dimension broadcast: a 1025-element vector into the last
// dimension of a 3x3x3x1025 output.
TEST_F(BroadcastTest, Broadcast_R1_1025_To_R4_3x3x3x1025) {
  auto builder = HloComputation::Builder(TestName());
  std::vector<float> input_data(1025);
  int64_t r1_size = input_data.size();
  std::iota(input_data.begin(), input_data.end(), 0.0f);
  auto input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(input_data)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {3, 3, 3, r1_size}), input, {3}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  Array4D<float> expected(3, 3, 3, 1025);
  Array2D<float> yx(3, r1_size);
  for (int64_t y = 0; y < 3; ++y) {
    for (int64_t x = 0; x < r1_size; ++x) {
      yx(y, x) = input_data[x];
    }
  }
  expected.FillWithYX(yx);
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}

// A 64-element vector broadcast into dim 1 of a 32x64x7x7 output
// (a shape typical of convolution bias addition).
XLA_TEST_F(BroadcastTest, Broadcast_R1_64_To_R4_32x64x7x7) {
  auto builder = HloComputation::Builder(TestName());
  Array4D<float> r4_array(32, 64, 7, 7);
  r4_array.Fill(42.0);
  std::vector<float> r1_array(64, 42.0);
  auto input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(r1_array)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {32, 64, 7, 7}), input, {1}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(LiteralUtil::CreateR4FromArray4D(r4_array),
                                    result, error_spec_));
}

// A scalar broadcast to a rank-4 output fills every element.
TEST_F(BroadcastTest, Broadcast_R0_to_R4_64x64x3x3) {
  auto builder = HloComputation::Builder(TestName());
  auto input = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {64, 64, 3, 3}), input, {}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  LOG(INFO) << hlo_module->ToString();
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  Array4D<float> expected(64, 64, 3, 3);
  expected.Fill(1.0f);
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}

// A 2x2 matrix broadcast into the two minor dimensions of a 3x3x2x2 output.
TEST_F(BroadcastTest, Broadcast_R2_2x2_To_R4_3x3x2x2) {
  auto builder = HloComputation::Builder(TestName());
  Array2D<float> to_broadcast({{1.0f, 2.0f}, {3.0f, 4.0f}});
  auto input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR2FromArray2D<float>(to_broadcast)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {3, 3, 2, 2}), input, {2, 3}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  Array4D<float> expected(3, 3, 2, 2);
  expected.FillWithYX(to_broadcast);
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}

// A rank-3 input broadcast along a new trailing dimension of size 5; the
// expected output is computed with explicit nested loops.
TEST_F(BroadcastTest, Broadcast_R3_2x3x4_to_R4_2x3x4x5) {
  auto builder = HloComputation::Builder(TestName());
  Array3D<float> input_vals(2, 3, 4);
  input_vals.FillRandom(1.0);
  Array4D<float> expected(2, 3, 4, 5);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      for (int k = 0; k < 4; ++k) {
        for (int m = 0; m < 5; ++m) {
          expected(i, j, k, m) = input_vals(i, j, k);
        }
      }
    }
  }
  auto input = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR3FromArray3D<float>(input_vals)));
  builder.AddInstruction(HloInstruction::CreateBroadcast(
      ShapeUtil::MakeShape(F32, {2, 3, 4, 5}), input, {0, 1, 2}));
  auto hlo_module = CreateNewVerifiedModule();
  hlo_module->AddEntryComputation(builder.Build());
  auto result = ExecuteAndTransfer(std::move(hlo_module), {});
  EXPECT_TRUE(LiteralTestUtil::Near(
      LiteralUtil::CreateR4FromArray4D<float>(expected), result, error_spec_));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/lib/broadcast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/broadcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a4c9281-b303-45c5-9292-6c8a9210ff7f | cpp | tensorflow/tensorflow | right_shift | tensorflow/lite/kernels/right_shift.cc | tensorflow/lite/kernels/right_shift_test.cc | #include <climits>
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace right_shift {
// Tensor indices for the RIGHT_SHIFT op.
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;

// Per-node state computed in Prepare and consumed in Eval.
struct OpData {
  // True when the two input shapes differ and the output requires
  // broadcasting.
  bool requires_broadcast = false;
};

// Allocates the per-node state; the TFLite runtime owns the returned
// buffer and releases it through Free.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* data = new OpData;
  return data;
}

// Releases the state allocated by Init.
void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}
// Validates the node (two inputs, one output, matching input element
// types), records whether broadcasting is needed, and resizes the output
// tensor to the broadcast (or shared) shape.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  // Both inputs must share an element type; the output inherits it.
  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  output->type = input1->type;
  // Broadcasting is needed whenever the two input shapes differ.
  data->requires_broadcast = !HaveSameShapes(input1, input2);
  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }
  // ResizeTensor takes ownership of output_size.
  return context->ResizeTensor(context, output, output_size);
}
// Shifts `x` right by `y` bits, clamping the shift amount into
// [0, bit-width of T - 1] so the shift is always well defined (matching
// the original behavior for negative and oversized shift counts).
template <typename T>
T RightShift(T x, T y) {
  constexpr T kMaxShift = static_cast<T>(sizeof(T) * CHAR_BIT - 1);
  const T shift = y < static_cast<T>(0)
                      ? static_cast<T>(0)
                      : (y > kMaxShift ? kMaxShift : y);
  return x >> shift;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteType type = output->type;
switch (type) {
case kTfLiteUInt8: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<uint8_t, uint8_t, uint8_t>(
GetTensorShape(input1), GetTensorData<uint8_t>(input1),
GetTensorShape(input2), GetTensorData<uint8_t>(input2),
GetTensorShape(output), GetTensorData<uint8_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<uint8_t, uint8_t, uint8_t>(
GetTensorShape(input1), GetTensorData<uint8_t>(input1),
GetTensorShape(input2), GetTensorData<uint8_t>(input2),
GetTensorShape(output), GetTensorData<uint8_t>(output), RightShift);
}
break;
}
case kTfLiteInt8: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), RightShift);
}
break;
}
case kTfLiteUInt16: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<uint16_t, uint16_t,
uint16_t>(
GetTensorShape(input1), GetTensorData<uint16_t>(input1),
GetTensorShape(input2), GetTensorData<uint16_t>(input2),
GetTensorShape(output), GetTensorData<uint16_t>(output),
RightShift);
} else {
reference_ops::BinaryFunction<uint16_t, uint16_t, uint16_t>(
GetTensorShape(input1), GetTensorData<uint16_t>(input1),
GetTensorShape(input2), GetTensorData<uint16_t>(input2),
GetTensorShape(output), GetTensorData<uint16_t>(output),
RightShift);
}
break;
}
case kTfLiteInt16: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), RightShift);
}
break;
}
case kTfLiteUInt32: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<uint32_t, uint32_t,
uint32_t>(
GetTensorShape(input1), GetTensorData<uint32_t>(input1),
GetTensorShape(input2), GetTensorData<uint32_t>(input2),
GetTensorShape(output), GetTensorData<uint32_t>(output),
RightShift);
} else {
reference_ops::BinaryFunction<uint32_t, uint32_t, uint32_t>(
GetTensorShape(input1), GetTensorData<uint32_t>(input1),
GetTensorShape(input2), GetTensorData<uint32_t>(input2),
GetTensorShape(output), GetTensorData<uint32_t>(output),
RightShift);
}
break;
}
case kTfLiteInt32: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), RightShift);
} else {
reference_ops::BinaryFunction<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), RightShift);
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"RightShift currently only supports "
"8-bit/16-bit/32-bit integer/unsigned integer, got %s",
TfLiteTypeGetName(type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
// Returns the registration for the built-in RIGHT_SHIFT operator. The
// static registration object lives for the lifetime of the program.
TfLiteRegistration* Register_RIGHT_SHIFT() {
  static TfLiteRegistration r = {right_shift::Init, right_shift::Free,
                                 right_shift::Prepare, right_shift::Eval};
  return &r;
}
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
// Single-op test harness wrapping a RIGHT_SHIFT node with two inputs and
// one output of the given tensor type.
class RightShiftOpModel : public SingleOpModel {
 public:
  RightShiftOpModel(std::initializer_list<int> input1_shape,
                    std::initializer_list<int> input2_shape,
                    TensorType tensor_type) {
    input1_ = AddInput(tensor_type);
    input2_ = AddInput(tensor_type);
    output_ = AddOutput(tensor_type);
    SetBuiltinOp(BuiltinOperator_RIGHT_SHIFT, BuiltinOptions_RightShiftOptions,
                 CreateRightShiftOptions(builder_).Union());
    BuildInterpreter({input1_shape, input2_shape});
  }

  int input1() const { return input1_; }
  int input2() const { return input2_; }

  template <typename T>
  std::vector<T> GetOutput() {
    return ExtractVector<T>(output_);
  }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  int input1_;
  int input2_;
  int output_;
};

// Signed arithmetic shifts: negative values keep their sign, and shift
// counts past the bit width are clamped (so -1 >> large == -1).
TEST(RightShiftOpTest, SimpleTestInt8) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT8);
  model.PopulateTensor<int8_t>(model.input1(), {-1, -5, -3, -14});
  model.PopulateTensor<int8_t>(model.input2(), {5, 0, 7, 11});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAreArray({-1, -5, -1, -1}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

TEST(RightShiftOpTest, SimpleTestInt16) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16);
  model.PopulateTensor<int16_t>(model.input1(), {-1, -5, -3, -14});
  model.PopulateTensor<int16_t>(model.input2(), {5, 0, 7, 11});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({-1, -5, -1, -1}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

TEST(RightShiftOpTest, SimpleTestInt32) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32);
  model.PopulateTensor<int32_t>(model.input1(), {-1, -5, -3, -14});
  model.PopulateTensor<int32_t>(model.input2(), {5, 0, 7, 11});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({-1, -5, -1, -1}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

// Unsigned logical shifts: bits shifted out are discarded.
TEST(RightShiftOpTest, SimpleTestUInt8) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT8);
  model.PopulateTensor<uint8_t>(model.input1(), {1, 5, 3, 14});
  model.PopulateTensor<uint8_t>(model.input2(), {5, 0, 7, 11});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<uint8_t>(), ElementsAreArray({0, 5, 0, 0}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

TEST(RightShiftOpTest, SimpleTestUInt16) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT16);
  model.PopulateTensor<uint16_t>(model.input1(), {1, 5, 3, 14});
  model.PopulateTensor<uint16_t>(model.input2(), {5, 0, 7, 11});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<uint16_t>(), ElementsAreArray({0, 5, 0, 0}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

TEST(RightShiftOpTest, SimpleTestUInt32) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT32);
  model.PopulateTensor<uint32_t>(model.input1(), {1, 5, 3, 14});
  model.PopulateTensor<uint32_t>(model.input2(), {5, 0, 7, 11});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({0, 5, 0, 0}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

// Broadcasting a scalar shift count across the lhs tensor.
TEST(BitwiseXorOpTest, BroadcastRhsInt) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_INT32);
  model.PopulateTensor<int32_t>(model.input1(), {-1, -5, -3, -14});
  model.PopulateTensor<int32_t>(model.input2(), {2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({-1, -2, -1, -4}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

// Broadcasting a scalar lhs across the shift-count tensor; negative shift
// counts are clamped to 0 (so 4 >> -2 == 4).
TEST(BitwiseXorOpTest, BroadcastLhsInt) {
  RightShiftOpModel model({1, 1, 1, 1}, {1, 1, 1, 4}, TensorType_INT32);
  model.PopulateTensor<int32_t>(model.input1(), {4});
  model.PopulateTensor<int32_t>(model.input2(), {1, -2, 3, -4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({2, 4, 0, 4}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

TEST(BitwiseXorOpTest, BroadcastRhsUInt) {
  RightShiftOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_UINT32);
  model.PopulateTensor<uint32_t>(model.input1(), {5, 0, 7, 11});
  model.PopulateTensor<uint32_t>(model.input2(), {2});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({1, 0, 1, 2}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}

TEST(BitwiseXorOpTest, BroadcastLhsUInt) {
  RightShiftOpModel model({1, 1, 1, 1}, {1, 1, 1, 4}, TensorType_UINT32);
  model.PopulateTensor<uint32_t>(model.input1(), {4});
  model.PopulateTensor<uint32_t>(model.input2(), {1, 2, 3, 4});
  ASSERT_EQ(model.Invoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({2, 1, 0, 0}));
  EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/right_shift.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/right_shift_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c05ab69-2780-41fa-a46f-a2a323fc8a0d | cpp | google/googletest | gmock-cardinalities | googlemock/src/gmock-cardinalities.cc | googlemock/test/gmock-cardinalities_test.cc | #include "gmock/gmock-cardinalities.h"
#include <limits.h>
#include <ostream>
#include <sstream>
#include <string>
#include "gmock/internal/gmock-internal-utils.h"
#include "gtest/gtest.h"
namespace testing {
namespace {
// Cardinality implementation describing a call count in the inclusive
// range [min, max].
class BetweenCardinalityImpl : public CardinalityInterface {
 public:
  // Clamps invalid arguments into a sane range (min >= 0, max >= min) and
  // reports a non-fatal Google Test failure describing the problem, so a
  // misuse doesn't crash the test binary.
  BetweenCardinalityImpl(int min, int max)
      : min_(min >= 0 ? min : 0), max_(max >= min_ ? max : min_) {
    std::stringstream ss;
    if (min < 0) {
      ss << "The invocation lower bound must be >= 0, " << "but is actually "
         << min << ".";
      internal::Expect(false, __FILE__, __LINE__, ss.str());
    } else if (max < 0) {
      ss << "The invocation upper bound must be >= 0, " << "but is actually "
         << max << ".";
      internal::Expect(false, __FILE__, __LINE__, ss.str());
    } else if (min > max) {
      ss << "The invocation upper bound (" << max
         << ") must be >= the invocation lower bound (" << min << ").";
      internal::Expect(false, __FILE__, __LINE__, ss.str());
    }
  }

  // Conservative range estimates used by gMock's expectation checking.
  int ConservativeLowerBound() const override { return min_; }
  int ConservativeUpperBound() const override { return max_; }

  // True when call_count lies within [min_, max_].
  bool IsSatisfiedByCallCount(int call_count) const override {
    return min_ <= call_count && call_count <= max_;
  }

  // True when no further calls are allowed (count reached the upper bound).
  bool IsSaturatedByCallCount(int call_count) const override {
    return call_count >= max_;
  }

  void DescribeTo(::std::ostream* os) const override;

 private:
  const int min_;
  const int max_;

  BetweenCardinalityImpl(const BetweenCardinalityImpl&) = delete;
  BetweenCardinalityImpl& operator=(const BetweenCardinalityImpl&) = delete;
};
// Renders a call count in English: "once", "twice", or "<n> times".
inline std::string FormatTimes(int n) {
  switch (n) {
    case 1:
      return "once";
    case 2:
      return "twice";
    default: {
      std::stringstream ss;
      ss << n << " times";
      return ss.str();
    }
  }
}
// Describes the cardinality range in human-readable English,
// special-casing the degenerate (never/exact) and unbounded (INT_MAX)
// endpoints.
void BetweenCardinalityImpl::DescribeTo(::std::ostream* os) const {
  if (min_ == 0) {
    if (max_ == 0) {
      *os << "never called";
    } else if (max_ == INT_MAX) {
      *os << "called any number of times";
    } else {
      *os << "called at most " << FormatTimes(max_);
    }
  } else if (min_ == max_) {
    *os << "called " << FormatTimes(min_);
  } else if (max_ == INT_MAX) {
    *os << "called at least " << FormatTimes(min_);
  } else {
    *os << "called between " << min_ << " and " << max_ << " times";
  }
}
}
// Describes an actual call count in English ("never called", "called
// once", "called twice", "called N times").
void Cardinality::DescribeActualCallCountTo(int actual_call_count,
                                            ::std::ostream* os) {
  if (actual_call_count > 0) {
    *os << "called " << FormatTimes(actual_call_count);
  } else {
    *os << "never called";
  }
}

// Factory functions: every cardinality is expressed as a [min, max] range
// backed by BetweenCardinalityImpl.
GTEST_API_ Cardinality AtLeast(int n) { return Between(n, INT_MAX); }

GTEST_API_ Cardinality AtMost(int n) { return Between(0, n); }

GTEST_API_ Cardinality AnyNumber() { return AtLeast(0); }

GTEST_API_ Cardinality Between(int min, int max) {
  return Cardinality(new BetweenCardinalityImpl(min, max));
}

GTEST_API_ Cardinality Exactly(int n) { return Between(n, n); }
} | #include <ostream>
#include "gmock/gmock.h"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
namespace {
using std::stringstream;
using testing::AnyNumber;
using testing::AtLeast;
using testing::AtMost;
using testing::Between;
using testing::Cardinality;
using testing::CardinalityInterface;
using testing::Exactly;
using testing::IsSubstring;
using testing::MakeCardinality;
// Minimal mock class used by the cardinality tests.
class MockFoo {
 public:
  MockFoo() = default;
  MOCK_METHOD0(Bar, int());

 private:
  MockFoo(const MockFoo&) = delete;
  MockFoo& operator=(const MockFoo&) = delete;
};

// Cardinality must be default-constructible.
TEST(CardinalityTest, IsDefaultConstructable) { Cardinality c; }

// Copy-assignment replaces the cardinality's behavior wholesale.
TEST(CardinalityTest, IsCopyable) {
  Cardinality c = Exactly(1);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));

  c = Exactly(2);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));
}

// Over-saturation means strictly more calls than the upper bound allows.
TEST(CardinalityTest, IsOverSaturatedByCallCountWorks) {
  const Cardinality c = AtMost(5);
  EXPECT_FALSE(c.IsOverSaturatedByCallCount(4));
  EXPECT_FALSE(c.IsOverSaturatedByCallCount(5));
  EXPECT_TRUE(c.IsOverSaturatedByCallCount(6));
}

// Checks English rendering of actual call counts (0, 1, 2, N).
TEST(CardinalityTest, CanDescribeActualCallCount) {
  stringstream ss0;
  Cardinality::DescribeActualCallCountTo(0, &ss0);
  EXPECT_EQ("never called", ss0.str());

  stringstream ss1;
  Cardinality::DescribeActualCallCountTo(1, &ss1);
  EXPECT_EQ("called once", ss1.str());

  stringstream ss2;
  Cardinality::DescribeActualCallCountTo(2, &ss2);
  EXPECT_EQ("called twice", ss2.str());

  stringstream ss3;
  Cardinality::DescribeActualCallCountTo(3, &ss3);
  EXPECT_EQ("called 3 times", ss3.str());
}

// AnyNumber() is satisfied by every count and never saturated.
TEST(AnyNumber, Works) {
  const Cardinality c = AnyNumber();
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(9));
  EXPECT_FALSE(c.IsSaturatedByCallCount(9));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called any number of times", ss.str());
}

// AnyNumber() is equivalent to the range [0, INT_MAX].
TEST(AnyNumberTest, HasCorrectBounds) {
  const Cardinality c = AnyNumber();
  EXPECT_EQ(0, c.ConservativeLowerBound());
  EXPECT_EQ(INT_MAX, c.ConservativeUpperBound());
}
// A negative lower bound triggers a non-fatal failure with a clear message.
TEST(AtLeastTest, OnNegativeNumber) {
  EXPECT_NONFATAL_FAILURE(
      {
        AtLeast(-1);
      },
      "The invocation lower bound must be >= 0");
}

// AtLeast(0) behaves like AnyNumber().
TEST(AtLeastTest, OnZero) {
  const Cardinality c = AtLeast(0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "any number of times", ss.str());
}

// AtLeast(n) is satisfied from n onward, never saturated, and described
// using "once"/"twice"/"N times" wording.
TEST(AtLeastTest, OnPositiveNumber) {
  const Cardinality c = AtLeast(2);
  EXPECT_FALSE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSaturatedByCallCount(2));

  stringstream ss1;
  AtLeast(1).DescribeTo(&ss1);
  EXPECT_PRED_FORMAT2(IsSubstring, "at least once", ss1.str());

  stringstream ss2;
  c.DescribeTo(&ss2);
  EXPECT_PRED_FORMAT2(IsSubstring, "at least twice", ss2.str());

  stringstream ss3;
  AtLeast(3).DescribeTo(&ss3);
  EXPECT_PRED_FORMAT2(IsSubstring, "at least 3 times", ss3.str());
}

// AtLeast(n) maps to the range [n, INT_MAX].
TEST(AtLeastTest, HasCorrectBounds) {
  const Cardinality c = AtLeast(2);
  EXPECT_EQ(2, c.ConservativeLowerBound());
  EXPECT_EQ(INT_MAX, c.ConservativeUpperBound());
}

// A negative upper bound triggers a non-fatal failure.
TEST(AtMostTest, OnNegativeNumber) {
  EXPECT_NONFATAL_FAILURE(
      {
        AtMost(-1);
      },
      "The invocation upper bound must be >= 0");
}

// AtMost(0) means the mock may never be called.
TEST(AtMostTest, OnZero) {
  const Cardinality c = AtMost(0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSaturatedByCallCount(0));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str());
}

// AtMost(n) is satisfied for counts 0..n and saturated at n.
TEST(AtMostTest, OnPositiveNumber) {
  const Cardinality c = AtMost(2);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(1));
  EXPECT_FALSE(c.IsSaturatedByCallCount(1));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));

  stringstream ss1;
  AtMost(1).DescribeTo(&ss1);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most once", ss1.str());

  stringstream ss2;
  c.DescribeTo(&ss2);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", ss2.str());

  stringstream ss3;
  AtMost(3).DescribeTo(&ss3);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most 3 times", ss3.str());
}

// AtMost(n) maps to the range [0, n].
TEST(AtMostTest, HasCorrectBounds) {
  const Cardinality c = AtMost(2);
  EXPECT_EQ(0, c.ConservativeLowerBound());
  EXPECT_EQ(2, c.ConservativeUpperBound());
}
// Invalid Between() arguments trigger non-fatal failures describing which
// bound is wrong.
TEST(BetweenTest, OnNegativeStart) {
  EXPECT_NONFATAL_FAILURE(
      {
        Between(-1, 2);
      },
      "The invocation lower bound must be >= 0, but is actually -1");
}

TEST(BetweenTest, OnNegativeEnd) {
  EXPECT_NONFATAL_FAILURE(
      {
        Between(1, -2);
      },
      "The invocation upper bound must be >= 0, but is actually -2");
}

TEST(BetweenTest, OnStartBiggerThanEnd) {
  EXPECT_NONFATAL_FAILURE(
      {
        Between(2, 1);
      },
      "The invocation upper bound (1) must be >= "
      "the invocation lower bound (2)");
}

// Between(0, 0) means never called.
TEST(BetweenTest, OnZeroStartAndZeroEnd) {
  const Cardinality c = Between(0, 0);

  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSaturatedByCallCount(0));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str());
}

// Between(0, n) is described as "at most" n.
TEST(BetweenTest, OnZeroStartAndNonZeroEnd) {
  const Cardinality c = Between(0, 2);

  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_FALSE(c.IsSaturatedByCallCount(0));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
  EXPECT_TRUE(c.IsSaturatedByCallCount(2));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(4));
  EXPECT_TRUE(c.IsSaturatedByCallCount(4));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called at most twice", ss.str());
}

// Between(n, n) is described as an exact count.
TEST(BetweenTest, OnSameStartAndEnd) {
  const Cardinality c = Between(3, 3);

  EXPECT_FALSE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSaturatedByCallCount(2));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(3));
  EXPECT_TRUE(c.IsSaturatedByCallCount(3));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(4));
  EXPECT_TRUE(c.IsSaturatedByCallCount(4));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", ss.str());
}

// A proper range is satisfied inside [start, end] and saturated at end.
TEST(BetweenTest, OnDifferentStartAndEnd) {
  const Cardinality c = Between(3, 5);

  EXPECT_FALSE(c.IsSatisfiedByCallCount(2));
  EXPECT_FALSE(c.IsSaturatedByCallCount(2));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(3));
  EXPECT_FALSE(c.IsSaturatedByCallCount(3));

  EXPECT_TRUE(c.IsSatisfiedByCallCount(5));
  EXPECT_TRUE(c.IsSaturatedByCallCount(5));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(6));
  EXPECT_TRUE(c.IsSaturatedByCallCount(6));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "called between 3 and 5 times", ss.str());
}

// Between(a, b) reports exactly [a, b] as its conservative bounds.
TEST(BetweenTest, HasCorrectBounds) {
  const Cardinality c = Between(3, 5);
  EXPECT_EQ(3, c.ConservativeLowerBound());
  EXPECT_EQ(5, c.ConservativeUpperBound());
}

// Exactly(-1) reports the lower-bound validation failure.
TEST(ExactlyTest, OnNegativeNumber) {
  EXPECT_NONFATAL_FAILURE(
      {
        Exactly(-1);
      },
      "The invocation lower bound must be >= 0");
}

// Exactly(0) means never called.
TEST(ExactlyTest, OnZero) {
  const Cardinality c = Exactly(0);
  EXPECT_TRUE(c.IsSatisfiedByCallCount(0));
  EXPECT_TRUE(c.IsSaturatedByCallCount(0));

  EXPECT_FALSE(c.IsSatisfiedByCallCount(1));
  EXPECT_TRUE(c.IsSaturatedByCallCount(1));

  stringstream ss;
  c.DescribeTo(&ss);
  EXPECT_PRED_FORMAT2(IsSubstring, "never called", ss.str());
}
TEST(ExactlyTest, OnPositiveNumber) {
const Cardinality c = Exactly(2);
EXPECT_FALSE(c.IsSatisfiedByCallCount(0));
EXPECT_FALSE(c.IsSaturatedByCallCount(0));
EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
EXPECT_TRUE(c.IsSaturatedByCallCount(2));
stringstream ss1;
Exactly(1).DescribeTo(&ss1);
EXPECT_PRED_FORMAT2(IsSubstring, "called once", ss1.str());
stringstream ss2;
c.DescribeTo(&ss2);
EXPECT_PRED_FORMAT2(IsSubstring, "called twice", ss2.str());
stringstream ss3;
Exactly(3).DescribeTo(&ss3);
EXPECT_PRED_FORMAT2(IsSubstring, "called 3 times", ss3.str());
}
TEST(ExactlyTest, HasCorrectBounds) {
const Cardinality c = Exactly(3);
EXPECT_EQ(3, c.ConservativeLowerBound());
EXPECT_EQ(3, c.ConservativeUpperBound());
}
class EvenCardinality : public CardinalityInterface {
public:
bool IsSatisfiedByCallCount(int call_count) const override {
return (call_count % 2 == 0);
}
bool IsSaturatedByCallCount(int ) const override {
return false;
}
void DescribeTo(::std::ostream* ss) const override {
*ss << "called even number of times";
}
};
TEST(MakeCardinalityTest, ConstructsCardinalityFromInterface) {
const Cardinality c = MakeCardinality(new EvenCardinality);
EXPECT_TRUE(c.IsSatisfiedByCallCount(2));
EXPECT_FALSE(c.IsSatisfiedByCallCount(3));
EXPECT_FALSE(c.IsSaturatedByCallCount(10000));
stringstream ss;
c.DescribeTo(&ss);
EXPECT_EQ("called even number of times", ss.str());
}
} | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/src/gmock-cardinalities.cc | https://github.com/google/googletest/blob/a1e255a582377e1006bb88a408ac3f933ba7c916/googlemock/test/gmock-cardinalities_test.cc | a1e255a582377e1006bb88a408ac3f933ba7c916 |
78b40b32-1ec5-4330-b01f-7f1832369dd2 | cpp | tensorflow/tensorflow | tensor_slice | tensorflow/core/framework/tensor_slice.cc | tensorflow/core/framework/tensor_slice_test.cc | #include "tensorflow/core/framework/tensor_slice.h"
#include <limits>
#include <vector>
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
TensorSlice::TensorSlice(int dim) { SetFullSlice(dim); }
TensorSlice::TensorSlice(const TensorSliceProto& proto) {
starts_.reserve(proto.extent_size());
lengths_.reserve(proto.extent_size());
for (const auto& e : proto.extent()) {
starts_.push_back(e.start());
lengths_.push_back(GetExtentLength(e));
}
}
TensorSlice::TensorSlice(
std::initializer_list<std::pair<int64_t, int64_t>> extents) {
starts_.reserve(extents.size());
lengths_.reserve(extents.size());
for (const auto& e : extents) {
starts_.push_back(e.first);
lengths_.push_back(e.second);
}
}
Status TensorSlice::BuildTensorSlice(const TensorSliceProto& proto,
TensorSlice* output) {
output->Clear();
output->starts_.reserve(proto.extent_size());
output->lengths_.reserve(proto.extent_size());
for (const auto& e : proto.extent()) {
int64_t l = GetExtentLength(e);
if (e.start() != 0 || l != kFullExtent) {
if (e.start() < 0 || l <= 0) {
return errors::InvalidArgument(
"Expected non-negative start and positive length but got start = ",
e.start(), ", length = ", l, ": extent = ", e.ShortDebugString());
}
if (static_cast<uint64_t>(e.start()) + static_cast<uint64_t>(e.length()) >
std::numeric_limits<int64_t>::max()) {
return errors::InvalidArgument(
"Extent end exceeds the maximum possible size: extent = ",
e.ShortDebugString());
}
}
output->starts_.push_back(e.start());
output->lengths_.push_back(l);
}
return absl::OkStatus();
}
Status TensorSlice::Parse(const string& str, TensorSlice* slice) {
std::vector<string> items = str_util::Split(str, ':', str_util::SkipEmpty());
slice->starts_.reserve(items.size());
slice->lengths_.reserve(items.size());
for (const string& x : items) {
int64_t s, l;
if (x == "-") {
s = 0;
l = kFullExtent;
} else {
std::vector<string> sl = str_util::Split(x, ',', str_util::SkipEmpty());
if (sl.size() != 2 || !strings::safe_strto64(sl[0], &s) ||
!strings::safe_strto64(sl[1], &l)) {
return errors::InvalidArgument(
"Expected a pair of numbers or '-' "
"but got '",
x, "': string = ", str);
}
if (s < 0 || l <= 0) {
return errors::InvalidArgument(
"Expected non-negative start and "
"positive length but got start = ",
s, ", length = ", l, ": string = ", str);
}
}
slice->starts_.push_back(s);
slice->lengths_.push_back(l);
}
return absl::OkStatus();
}
void TensorSlice::Clear() {
starts_.clear();
lengths_.clear();
}
bool TensorSlice::IsFull() const {
for (int d = 0; d < dims(); ++d) {
if (!IsFullAt(d)) return false;
}
return true;
}
void TensorSlice::SetFullSlice(int dim) {
Clear();
starts_.reserve(dim);
lengths_.reserve(dim);
for (int d = 0; d < dim; ++d) {
starts_.push_back(0);
lengths_.push_back(kFullExtent);
}
}
void TensorSlice::Extend(int dim) {
int old_dim = dims();
DCHECK_LE(old_dim, dim);
starts_.resize(dim);
lengths_.resize(dim);
for (int d = old_dim; d < dim; ++d) {
starts_[d] = 0;
lengths_[d] = kFullExtent;
}
}
void TensorSlice::AsProto(TensorSliceProto* proto) const {
for (int d = 0; d < dims(); ++d) {
TensorSliceProto::Extent* e = proto->add_extent();
if (!IsFullAt(d)) {
e->set_start(starts_[d]);
e->set_length(lengths_[d]);
}
}
}
string TensorSlice::DebugString() const {
string buffer;
bool first = true;
for (int d = 0; d < dims(); ++d) {
if (!first) {
buffer.append(":");
}
if (IsFullAt(d)) {
buffer.append("-");
} else {
strings::StrAppend(&buffer, starts_[d], ",", lengths_[d]);
}
first = false;
}
return buffer;
}
bool TensorSlice::Intersect(const TensorSlice& other,
TensorSlice* result) const {
if (dims() != other.dims()) {
return false;
}
if (result) {
result->SetFullSlice(dims());
}
for (int d = 0; d < dims(); ++d) {
if (IsFullAt(d)) {
if (result) {
result->set_start(d, other.start(d));
result->set_length(d, other.length(d));
}
} else if (other.IsFullAt(d)) {
if (result) {
result->set_start(d, start(d));
result->set_length(d, length(d));
}
} else {
int64_t s = std::max(start(d), other.start(d));
int64_t l = std::min(end(d), other.end(d)) - s;
if (l > 0) {
if (result) {
result->set_start(d, s);
result->set_length(d, l);
}
} else {
if (result) {
result->Clear();
}
return false;
}
}
}
return true;
}
bool TensorSlice::operator==(const TensorSlice& other) const {
return dims() == other.dims() && starts_ == other.starts_ &&
lengths_ == other.lengths_;
}
void TensorSlice::ComputeRelative(const TensorSlice& sub,
TensorSlice* relative) const {
DCHECK_EQ(dims(), sub.dims());
relative->SetFullSlice(dims());
for (int d = 0; d < dims(); ++d) {
if (IsFullAt(d)) {
relative->set_start(d, sub.start(d));
relative->set_length(d, sub.length(d));
} else {
relative->set_start(d, sub.start(d) - start(d));
relative->set_length(d, sub.length(d));
}
}
}
void TensorSlice::UpdateToCover(const TensorSlice& other) {
DCHECK_EQ(dims(), other.dims());
for (int d = 0; d < dims(); ++d) {
if (!IsFullAt(d)) {
if (other.IsFullAt(d)) {
starts_[d] = 0;
lengths_[d] = kFullExtent;
} else {
const auto new_end = std::max(end(d), other.end(d));
set_start(d, std::min(start(d), other.start(d)));
set_length(d, new_end - start(d));
}
}
}
}
bool TensorSlice::HasExtentLength(const TensorSliceProto::Extent& extent) {
return extent.has_length_case() == TensorSliceProto::Extent::kLength;
}
int64_t TensorSlice::GetExtentLength(const TensorSliceProto::Extent& extent) {
if (!HasExtentLength(extent)) return -1;
return extent.length();
}
Status TensorSlice::SliceTensorShape(const TensorShape& shape,
TensorShape* result_shape) const {
result_shape->Clear();
if (shape.dims() != dims()) {
return errors::Internal("Mismatching ranks: shape = ", shape.DebugString(),
", slice = ", DebugString());
}
for (int d = 0; d < dims(); ++d) {
if (IsFullAt(d)) {
result_shape->AddDim(shape.dim_size(d));
} else {
if (end(d) <= shape.dim_size(d)) {
result_shape->AddDim(length(d));
} else {
result_shape->Clear();
return errors::Internal("Extent in dimension ", d,
" out of bounds: shape = ", shape.DebugString(),
", slice = ", DebugString());
}
}
}
return absl::OkStatus();
}
const int64_t TensorSlice::kFullExtent = -1;
} | #include "tensorflow/core/framework/tensor_slice.h"
#include <limits>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
TEST(TensorSliceTest, Basic) {
{
TensorSlice s(3);
EXPECT_EQ("-:-:-", s.DebugString());
EXPECT_TRUE(s.IsFull());
s.SetFullSlice(4);
EXPECT_EQ("-:-:-:-", s.DebugString());
EXPECT_TRUE(s.IsFull());
}
}
TEST(TensorSliceTest, Serialization) {
{
TensorSlice s({{0, -1}, {0, 10}, {14, 1}, {0, -1}});
EXPECT_EQ("-:0,10:14,1:-", s.DebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSliceProto proto;
const char* ptxt = R"pb(
extent {}
extent { start: 0 length: 10 }
extent { start: 14 length: 1 }
extent {}
)pb";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(ptxt, &proto));
TensorSlice s(proto);
EXPECT_EQ("-:0,10:14,1:-", s.DebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSlice s = TensorSlice::ParseOrDie("-:-:1,3:4,5");
TensorSliceProto proto;
s.AsProto(&proto);
TensorSliceProto expected_slice_proto;
protobuf::TextFormat::ParseFromString(
"extent { } "
"extent { } "
"extent { start: 1 length: 3 } "
"extent { start: 4 length: 5 }",
&expected_slice_proto);
EXPECT_EQ(proto.ShortDebugString(),
expected_slice_proto.ShortDebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSlice slice;
Status s = TensorSlice::Parse("-:-:1,3:4:5", &slice);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(
absl::StrContains(s.message(),
"Expected a pair of numbers or '-' but got '4': "
"string = -:-:1,3:4:5"));
}
{
TensorSlice slice;
Status s = TensorSlice::Parse("-:-1,3", &slice);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(),
"Expected non-negative start and positive length but got "
"start = -1, length = 3: string = -:-1,3"));
}
{
TensorSlice s =
TensorSlice::ParseOrDie("9223372036854775807,9223372036854775807");
TensorSliceProto proto;
s.AsProto(&proto);
TensorSliceProto expected_slice_proto;
protobuf::TextFormat::ParseFromString(
"extent { start: 9223372036854775807 length: 9223372036854775807 }",
&expected_slice_proto);
EXPECT_EQ(proto.ShortDebugString(),
expected_slice_proto.ShortDebugString());
EXPECT_TRUE(!s.IsFull());
}
{
TensorSlice slice;
Status s =
TensorSlice::Parse("19223372036854775808,19223372036854775808", &slice);
EXPECT_EQ(s.code(), error::INVALID_ARGUMENT);
EXPECT_TRUE(absl::StrContains(
s.message(),
"Expected a pair of numbers or '-' but got "
"'19223372036854775808,19223372036854775808': string = "
"19223372036854775808,19223372036854775808"));
}
}
TEST(TensorSliceTest, BuildTensorSlice) {
TensorSliceProto proto;
TensorSlice({{0, -1}, {0, 10}, {14, 1}}).AsProto(&proto);
TensorSlice s;
{
TF_ASSERT_OK(TensorSlice::BuildTensorSlice(proto, &s));
EXPECT_EQ("-:0,10:14,1", s.DebugString());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(0)->set_start(-1);
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(2)->set_length(-1);
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(2)->clear_length();
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
{
TensorSliceProto invalid_proto = proto;
invalid_proto.mutable_extent(2)->set_length(
std::numeric_limits<int64_t>::max());
EXPECT_FALSE(TensorSlice::BuildTensorSlice(invalid_proto, &s).ok());
}
}
TEST(TensorSliceTest, Intersection) {
{
TensorSlice a = TensorSlice::ParseOrDie("-:-");
TensorSlice b = TensorSlice::ParseOrDie("1,2:3,4");
TensorSlice c;
EXPECT_TRUE(a.Intersect(b, &c));
EXPECT_EQ("1,2:3,4", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("-:-");
TensorSlice b = TensorSlice::ParseOrDie("1,2:3,4");
TensorSlice c;
EXPECT_TRUE(b.Intersect(a, &c));
EXPECT_EQ("1,2:3,4", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,5:2,6:3,7:5,10");
TensorSlice b = TensorSlice::ParseOrDie("1,2:3,4:9,10:12,1");
TensorSlice c;
EXPECT_TRUE(a.Intersect(b, &c));
EXPECT_EQ("1,2:3,4:9,1:12,1", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("-:1,1");
TensorSlice b = TensorSlice::ParseOrDie("-:0,2");
TensorSlice c;
EXPECT_TRUE(a.Intersect(b, &c));
EXPECT_EQ("-:1,1", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,2:3,1:5,6");
TensorSlice b = TensorSlice::ParseOrDie("1,3:4,5:1,6");
TensorSlice c;
EXPECT_FALSE(a.Intersect(b, &c));
EXPECT_EQ("", c.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,2:3,1:-");
TensorSlice b = TensorSlice::ParseOrDie("-:-");
TensorSlice c;
EXPECT_FALSE(a.Intersect(b, &c));
EXPECT_EQ("", c.DebugString());
}
}
TEST(TensorSliceTest, SliceTensorShape) {
{
TensorSlice a = TensorSlice::ParseOrDie("1,1:-:4,1:2,6");
TensorShape x({2, 4, 5, 8});
TensorShape y;
TF_EXPECT_OK(a.SliceTensorShape(x, &y));
EXPECT_EQ("[1,4,1,6]", y.DebugString());
}
{
TensorSlice a = TensorSlice::ParseOrDie("1,1:1,4:-:-");
TensorShape x({2, 4, 5, 8});
TensorShape y;
Status s = a.SliceTensorShape(x, &y);
EXPECT_EQ(s.code(), error::INTERNAL);
EXPECT_TRUE(absl::StrContains(s.message(),
"Extent in dimension 1 out of bounds: "
"shape = [2,4,5,8], slice = 1,1:1,4:-:-"));
EXPECT_EQ("[]", y.DebugString());
}
}
TEST(TensorSliceTest, ComputeRelative) {
{
TensorSlice base = TensorSlice::ParseOrDie("-:-:-:-");
TensorSlice sub = TensorSlice::ParseOrDie("-:1,2:-:3,4");
TensorSlice relative;
base.ComputeRelative(sub, &relative);
EXPECT_EQ("-:1,2:-:3,4", relative.DebugString());
}
{
TensorSlice base = TensorSlice::ParseOrDie("1,2:3,4:-:5,1");
TensorSlice sub = TensorSlice::ParseOrDie("1,1:4,2:3,3:5,1");
TensorSlice relative;
base.ComputeRelative(sub, &relative);
EXPECT_EQ("0,1:1,2:3,3:0,1", relative.DebugString());
}
}
TEST(TensorSliceTest, ExtentLength) {
TensorSliceProto proto;
const char* ptxt = R"pb(
extent {}
extent { start: 0 length: 10 }
extent { start: 14 length: 1 }
extent {}
)pb";
ASSERT_TRUE(protobuf::TextFormat::ParseFromString(ptxt, &proto));
EXPECT_FALSE(TensorSlice::HasExtentLength(proto.extent(0)));
EXPECT_TRUE(TensorSlice::HasExtentLength(proto.extent(1)));
EXPECT_TRUE(TensorSlice::HasExtentLength(proto.extent(2)));
EXPECT_FALSE(TensorSlice::HasExtentLength(proto.extent(3)));
EXPECT_EQ(-1, TensorSlice::GetExtentLength(proto.extent(0)));
EXPECT_EQ(10, TensorSlice::GetExtentLength(proto.extent(1)));
EXPECT_EQ(1, TensorSlice::GetExtentLength(proto.extent(2)));
EXPECT_EQ(-1, TensorSlice::GetExtentLength(proto.extent(3)));
}
TEST(TensorSliceTest, Deserialization) {
const char pb2[] =
"\x0A\x02\x10\x05\x0A\x04\x08\x00"
"\x10\x0A\x0A\x04\x08\x0E\x10\x01\x0A\x02\x08\x01\x0A\x00";
const char pb3[] =
"\x0A\x02\x10\x05\x0A\x02"
"\x10\x0A\x0A\x04\x08\x0E\x10\x01\x0A\x02\x08\x01\x0A\x00";
TensorSliceProto proto2;
ASSERT_TRUE(proto2.ParseFromArray(pb2, sizeof(pb2) - 1));
TensorSlice ts2(proto2);
TensorSliceProto proto3;
ASSERT_TRUE(proto3.ParseFromArray(pb3, sizeof(pb3) - 1));
TensorSlice ts3(proto3);
EXPECT_EQ("0,5:0,10:14,1:1,-1:-", ts2.DebugString());
EXPECT_EQ("0,5:0,10:14,1:1,-1:-", ts3.DebugString());
}
TEST(TensorSliceTest, UpdateToCover) {
TensorSlice s({{2, 2}, {0, -1}, {3, 7}});
TensorSlice other({{0, -1}, {1, 3}, {2, 2}});
s.UpdateToCover(other);
EXPECT_EQ("-:-:2,8", s.DebugString());
}
TEST(TensorSliceTest, IsFull) {
TensorSlice slice(3);
EXPECT_TRUE(slice.IsFull());
TensorSlice slice2({{0, -1}});
EXPECT_TRUE(slice2.IsFull());
TensorSlice slice3({{0, -1}, {0, -1}, {14, 1}});
EXPECT_TRUE(!slice3.IsFull());
}
TEST(TensorSliceTest, Equality) {
{
TensorSlice slice1(3);
TensorSlice slice2(2);
EXPECT_TRUE(slice1 != slice2);
EXPECT_TRUE(slice2 != slice1);
}
{
TensorSlice slice1(3);
TensorSlice slice2({{0, -1}, {0, -1}, {0, -1}});
EXPECT_TRUE(slice1 == slice2);
EXPECT_TRUE(slice2 == slice1);
}
{
TensorSlice slice1(3);
TensorSlice slice2({{0, -1}, {0, 1}, {0, -1}});
EXPECT_TRUE(slice1 != slice2);
EXPECT_TRUE(slice2 != slice1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_slice.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tensor_slice_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f515b3d7-ce2b-46d9-b0e6-1e32e8e5fac1 | cpp | abseil/abseil-cpp | wide_multiply | absl/random/internal/wide_multiply.h | absl/random/internal/wide_multiply_test.cc | #ifndef ABSL_RANDOM_INTERNAL_WIDE_MULTIPLY_H_
#define ABSL_RANDOM_INTERNAL_WIDE_MULTIPLY_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_IA64)
#include <intrin.h>
#pragma intrinsic(_umul128)
#define ABSL_INTERNAL_USE_UMUL128 1
#endif
#include "absl/base/config.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
template <typename UIntType>
struct wide_multiply {
static constexpr size_t kN = std::numeric_limits<UIntType>::digits;
using input_type = UIntType;
using result_type = typename random_internal::unsigned_bits<kN * 2>::type;
static result_type multiply(input_type a, input_type b) {
return static_cast<result_type>(a) * b;
}
static input_type hi(result_type r) {
return static_cast<input_type>(r >> kN);
}
static input_type lo(result_type r) { return static_cast<input_type>(r); }
static_assert(std::is_unsigned<UIntType>::value,
"Class-template wide_multiply<> argument must be unsigned.");
};
inline U256 MultiplyU128ToU256(uint128 a, uint128 b) {
const uint128 a00 = static_cast<uint64_t>(a);
const uint128 a64 = a >> 64;
const uint128 b00 = static_cast<uint64_t>(b);
const uint128 b64 = b >> 64;
const uint128 c00 = a00 * b00;
const uint128 c64a = a00 * b64;
const uint128 c64b = a64 * b00;
const uint128 c128 = a64 * b64;
const uint64_t carry =
static_cast<uint64_t>(((c00 >> 64) + static_cast<uint64_t>(c64a) +
static_cast<uint64_t>(c64b)) >>
64);
return {c128 + (c64a >> 64) + (c64b >> 64) + carry,
c00 + (c64a << 64) + (c64b << 64)};
}
template <>
struct wide_multiply<uint128> {
using input_type = uint128;
using result_type = U256;
static result_type multiply(input_type a, input_type b) {
return MultiplyU128ToU256(a, b);
}
static input_type hi(result_type r) { return r.hi; }
static input_type lo(result_type r) { return r.lo; }
};
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/internal/wide_multiply.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/numeric/int128.h"
using absl::random_internal::MultiplyU128ToU256;
using absl::random_internal::U256;
namespace {
U256 LeftShift(U256 v, int s) {
if (s == 0) {
return v;
} else if (s < 128) {
return {(v.hi << s) | (v.lo >> (128 - s)), v.lo << s};
} else {
return {v.lo << (s - 128), 0};
}
}
MATCHER_P2(Eq256, hi, lo, "") { return arg.hi == hi && arg.lo == lo; }
MATCHER_P(Eq256, v, "") { return arg.hi == v.hi && arg.lo == v.lo; }
TEST(WideMultiplyTest, MultiplyU128ToU256Test) {
using absl::uint128;
constexpr uint128 k1 = 1;
constexpr uint128 kMax = ~static_cast<uint128>(0);
EXPECT_THAT(MultiplyU128ToU256(0, 0), Eq256(0, 0));
EXPECT_THAT(MultiplyU128ToU256(kMax, kMax), Eq256(kMax << 1, 1));
EXPECT_THAT(MultiplyU128ToU256(kMax, 1), Eq256(0, kMax));
EXPECT_THAT(MultiplyU128ToU256(1, kMax), Eq256(0, kMax));
for (int i = 0; i < 64; ++i) {
SCOPED_TRACE(i);
EXPECT_THAT(MultiplyU128ToU256(kMax, k1 << i),
Eq256(LeftShift({0, kMax}, i)));
EXPECT_THAT(MultiplyU128ToU256(k1 << i, kMax),
Eq256(LeftShift({0, kMax}, i)));
}
for (int i = 0; i < 64; ++i) {
for (int j = 0; j < 64; ++j) {
EXPECT_THAT(MultiplyU128ToU256(k1 << i, k1 << j),
Eq256(LeftShift({0, 1}, i + j)));
}
}
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xc502da0d6ea99fe8, 0xfa3c9141a1f50912),
absl::MakeUint128(0x96bcf1ac37f97bd6, 0x27e2cdeb5fb2299e)),
Eq256(absl::MakeUint128(0x740113d838f96a64, 0x22e8cfa4d71f89ea),
absl::MakeUint128(0x19184a345c62e993, 0x237871b630337b1c)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x6f29e670cee07230, 0xc3d8e6c3e4d86759),
absl::MakeUint128(0x3227d29fa6386db1, 0x231682bb1e4b764f)),
Eq256(absl::MakeUint128(0x15c779d9d5d3b07c, 0xd7e6c827f0c81cbe),
absl::MakeUint128(0xf88e3914f7fa287a, 0x15b79975137dea77)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xafb77107215646e1, 0x3b844cb1ac5769e7),
absl::MakeUint128(0x1ff7b2d888b62479, 0x92f758ae96fcba0b)),
Eq256(absl::MakeUint128(0x15f13b70181f6985, 0x2adb36bbabce7d02),
absl::MakeUint128(0x6c470d72e13aad04, 0x63fba3f5841762ed)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xd85d5558d67ac905, 0xf88c70654dae19b1),
absl::MakeUint128(0x17252c6727db3738, 0x399ff658c511eedc)),
Eq256(absl::MakeUint128(0x138fcdaf8b0421ee, 0x1b465ddf2a0d03f6),
absl::MakeUint128(0x8f573ba68296860f, 0xf327d2738741a21c)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x46f0421a37ff6bee, 0xa61df89f09d140b1),
absl::MakeUint128(0x3d712ec9f37ca2e1, 0x9658a2cba47ef4b1)),
Eq256(absl::MakeUint128(0x11069cc48ee7c95d, 0xd35fb1c7aa91c978),
absl::MakeUint128(0xbe2f4a6de874b015, 0xd2f7ac1b76746e61)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x730d27c72d58fa49, 0x3ebeda7498f8827c),
absl::MakeUint128(0xa2c959eca9f503af, 0x189c687eb842bbd8)),
Eq256(absl::MakeUint128(0x4928d0ea356ba022, 0x1546d34a2963393),
absl::MakeUint128(0x7481531e1e0a16d1, 0xdd8025015cf6aca0)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x6ca41020f856d2f1, 0xb9b0838c04a7f4aa),
absl::MakeUint128(0x9cf41d28a8396f54, 0x1d681695e377ffe6)),
Eq256(absl::MakeUint128(0x429b92934d9be6f1, 0xea182877157c1e7),
absl::MakeUint128(0x7135c23f0a4a475, 0xc1adc366f4a126bc)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0x57472833797c332, 0x6c79272fdec4687a),
absl::MakeUint128(0xb5f022ea3838e46b, 0x16face2f003e27a6)),
Eq256(absl::MakeUint128(0x3e072e0962b3400, 0x5d9fe8fdc3d0e1f4),
absl::MakeUint128(0x7dc0df47cedafd62, 0xbe6501f1acd2551c)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xf0fb4198322eb1c2, 0xfe7f5f31f3885938),
absl::MakeUint128(0xd99012b71bb7aa31, 0xac7a6f9eb190789)),
Eq256(absl::MakeUint128(0xcccc998cf075ca01, 0x642d144322fb873a),
absl::MakeUint128(0xc79dc12b69d91ed4, 0xa83459132ce046f8)));
EXPECT_THAT(MultiplyU128ToU256(
absl::MakeUint128(0xb5c04120848cdb47, 0x8aa62a827bf52635),
absl::MakeUint128(0x8d07a359be2f1380, 0x467bb90d59da0dea)),
Eq256(absl::MakeUint128(0x64205019d139a9ce, 0x99425c5fb6e7a977),
absl::MakeUint128(0xd3e99628a9e5fca7, 0x9c7824cb7279d72)));
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/wide_multiply.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/internal/wide_multiply_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
07a76b80-b96d-4919-abec-99658cb925d4 | cpp | tensorflow/tensorflow | onednn_matmul | third_party/xla/xla/service/cpu/onednn_matmul.cc | third_party/xla/xla/service/cpu/tests/onednn_matmul_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_matmul.h"
#include <algorithm>
#include <cmath>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <utility>
#include <vector>
#include "absl/base/dynamic_annotations.h"
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "tsl/platform/logging.h"
#define EIGEN_USE_THREADS
namespace xla {
namespace cpu {
namespace {
using dnnl::engine;
using dnnl::matmul;
using dnnl::memory;
using dnnl::stream;
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const dnnl::memory::desc& input_md,
const dnnl::memory::desc& weights_md, const dnnl::memory::desc& bias_md,
const dnnl::memory::desc& output_md) {
auto weights_any_md =
memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
dnnl::memory::format_tag::any);
auto matmul_pd = matmul::primitive_desc(engine, input_md, weights_any_md,
bias_md, output_md);
return matmul_pd.weights_desc();
}
dnnl::memory::desc OneDnnMatMulOptWeightsDesc(
const dnnl::engine& engine, const Shape& input_shape,
const Shape& weights_shape, const Shape& bias_shape,
const Shape& output_shape, const OneDnnMatMulConfig* matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config->transpose_b(), weights_md);
auto bias_md = absl::c_count(matmul_config->fusions().ops(),
OneDnnFusionConfig::BIAS) > 0
? ShapeToMemDesc(bias_shape)
: dnnl::memory::desc{};
auto output_md = ShapeToMemDesc(output_shape);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (!bias_md.is_zero() && missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
return OneDnnMatMulOptWeightsDesc(engine, input_md, weights_md, bias_md,
output_md);
}
}
Shape OneDnnMatMulOptWeightsShape(const Shape& input_shape,
const Shape& weights_shape,
const Shape& bias_shape,
const Shape& output_shape,
const OneDnnMatMulConfig* matmul_config) {
engine cpu_engine(engine::kind::cpu, 0);
auto optimized_weights_md =
OneDnnMatMulOptWeightsDesc(cpu_engine, input_shape, weights_shape,
bias_shape, output_shape, matmul_config);
return MemDescToXlaShapeFlattened(optimized_weights_md);
}
struct FusedOperandsRef {
const std::vector<void*>& bufs;
std::vector<std::pair<int, dnnl::memory>>& postop_args;
};
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const engine& cpu_engine, const memory::desc& input_md,
const memory::desc& plain_weights_md, const memory::desc& output_md,
const std::vector<memory::desc>& fused_mds,
const OneDnnMatMulConfig& matmul_config,
FusedOperandsRef* fused_operands_ref = nullptr) {
auto bias_md = memory::desc();
bool weights_packed = matmul_config.optimization_config().weights_prepacked();
auto weights_md = plain_weights_md;
if (weights_packed) {
weights_md = memory::desc(weights_md.get_dims(), weights_md.get_data_type(),
memory::format_tag::any);
}
dnnl::post_ops post_ops;
int fused_operand_idx = 0;
for (auto& fused_op : matmul_config.fusions().ops()) {
switch (fused_op) {
case OneDnnFusionConfig::RELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_relu, 0.f, 0.f);
break;
case OneDnnFusionConfig::TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_TANH:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_tanh, 0.f, 0.f);
break;
case OneDnnFusionConfig::GELU_ERF:
post_ops.append_eltwise(dnnl::algorithm::eltwise_gelu_erf, 0.f, 0.f);
break;
case OneDnnFusionConfig::RELU6:
post_ops.append_eltwise(dnnl::algorithm::eltwise_clip_v2, 0.f, 6.0f);
break;
case OneDnnFusionConfig::SIGMOID:
post_ops.append_eltwise(dnnl::algorithm::eltwise_logistic, 0.f, 0.f);
break;
case OneDnnFusionConfig::BIAS: {
bias_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto bias_dims = bias_md.get_dims();
bias_dims.insert(bias_dims.begin(), missed_rank, 1);
bias_md = bias_md.reshape(bias_dims);
}
if (fused_operands_ref) {
fused_operands_ref->postop_args.emplace_back(
DNNL_ARG_BIAS,
dnnl::memory(bias_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
fused_operand_idx++;
} break;
case OneDnnFusionConfig::ELU:
post_ops.append_eltwise(dnnl::algorithm::eltwise_elu, 1.0f, 0.0f);
break;
case OneDnnFusionConfig::BINARY_ADD: {
auto binary_md = fused_mds.at(fused_operand_idx);
auto missed_rank = output_md.get_ndims() - binary_md.get_ndims();
XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
if (missed_rank > 0) {
auto binary_dims = binary_md.get_dims();
binary_dims.insert(binary_dims.begin(), missed_rank, 1);
binary_md = binary_md.reshape(binary_dims);
}
if (fused_operands_ref) {
auto arg_idx =
DNNL_ARG_ATTR_MULTIPLE_POST_OP(post_ops.len()) | DNNL_ARG_SRC_1;
fused_operands_ref->postop_args.emplace_back(
arg_idx,
dnnl::memory(binary_md, cpu_engine,
fused_operands_ref->bufs[fused_operand_idx]));
}
post_ops.append_binary(dnnl::algorithm::binary_add, binary_md);
fused_operand_idx++;
} break;
case OneDnnFusionConfig::LINEAR: {
float const_float;
*(reinterpret_cast<int32_t*>(&const_float)) =
matmul_config.fusions().alpha_typecast();
post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, const_float,
0.f);
} break;
default:
LOG(FATAL) << __FILE__ << ":" << __LINE__
<< " Attempt to call OneDNN MatMul runtime library with "
"unsupported post op."
<< std::endl;
}
}
dnnl::primitive_attr attrs;
if (matmul_config.optimization_config().user_scratchpad()) {
attrs.set_scratchpad_mode(dnnl::scratchpad_mode::user);
}
if (post_ops.len() > 0) {
attrs.set_post_ops(post_ops);
}
return std::make_unique<matmul::primitive_desc>(
cpu_engine, input_md, weights_md, bias_md, output_md, attrs);
}
std::unique_ptr<matmul::primitive_desc> CreateMatMulPrimDesc(
const Shape& input_shape, const Shape& weights_shape,
const Shape& output_shape, const std::vector<Shape>& fused_shapes,
const OneDnnMatMulConfig& matmul_config) {
auto input_md = ShapeToMemDesc(input_shape);
auto weights_md = ShapeToMemDesc(weights_shape);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weights_md);
auto output_md = ShapeToMemDesc(output_shape);
std::vector<memory::desc> fused_mds;
std::transform(fused_shapes.begin(), fused_shapes.end(),
std::back_inserter(fused_mds),
[](const Shape& shape) { return ShapeToMemDesc(shape); });
return CreateMatMulPrimDesc(engine(engine::kind::cpu, 0), input_md,
weights_md, output_md, fused_mds, matmul_config);
}
// Specialization of GetKernelConfig for the oneDNN MatMul kernel: returns
// the mutable MatMul sub-config stored inside the backend config.
// Precondition: *backend_config holds an OK value.
template <>
typename PrimitiveTrait<kOnednnMatmulConfig>::pointer_type
GetKernelConfig<kOnednnMatmulConfig>(
    absl::StatusOr<BackendConfig>* backend_config) {
  BackendConfig& config = **backend_config;
  return config.mutable_onednn_matmul_config();
}
// Builds a oneDNN MatMul primitive descriptor for a __onednn$matmul custom
// call. Returns nullptr when `instr` is not a custom call or carries no
// parseable backend config. Operand layout: [0] = lhs, [1] = rhs (weights),
// [2..] = fused operands; for tuple-shaped calls the real output is tuple
// element 0.
template <>
std::unique_ptr<dnnl::matmul::primitive_desc>
CreateOneDnnPrimDesc<dnnl::matmul::primitive_desc>(HloInstruction* instr) {
  if (instr->opcode() != HloOpcode::kCustomCall) {
    return nullptr;
  }
  auto* custom_call = Cast<xla::HloCustomCallInstruction>(instr);
  auto backend_config = custom_call->backend_config<BackendConfig>();
  if (!backend_config.ok()) {
    return nullptr;
  }
  const auto& matmul_config = backend_config.value().onednn_matmul_config();
  const auto& operands = custom_call->operands();
  const Shape& lhs_shape = operands[0]->shape();
  const Shape& rhs_shape = operands[1]->shape();
  const Shape& out_shape = custom_call->shape().IsTuple()
                               ? custom_call->shape().tuple_shapes(0)
                               : custom_call->shape();
  // Shapes of the fused (post-op) operands, i.e. everything past the first
  // two operands.
  std::vector<Shape> fused_shapes;
  fused_shapes.reserve(operands.size() - 2);
  for (auto it = operands.begin() + 2; it != operands.end(); ++it) {
    fused_shapes.push_back((*it)->shape());
  }
  return CreateMatMulPrimDesc(lhs_shape, rhs_shape, out_shape, fused_shapes,
                              matmul_config);
}
// Runtime entry point for the __onednn$matmul custom call.
// Positional layout of `args` (consumed in order via arg_indx):
//   [0] int64_t: total number of entries in `args`
//   [1] xla::ExecutableRunOptions* (may be null)
//   [2] serialized OneDnnMatMulConfig proto (char*)
//   [3] MemrefInfo of the lhs (input)
//   [4] MemrefInfo of the rhs (weights)
//   [5..] one MemrefInfo per fused operand (bias / binary-add inputs, ...)
// `result` wraps the output buffer; `scratch` is the caller-provided
// scratchpad and must be non-null when the config requests user scratchpad.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMul(
    void* result, void* scratch, void** args) {
  int arg_indx = 0;
  const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
  const xla::ExecutableRunOptions* run_options =
      static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
  // Route oneDNN's parallelism through XLA's intra-op thread pool when one
  // is available.
  auto thread_pool = CreateOneDnnThreadPool(
      run_options ? run_options->intra_op_thread_pool() : nullptr);
  engine cpu_engine(engine::kind::cpu, 0);
  auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
  std::string config_str(static_cast<const char*>(args[arg_indx++]));
  OneDnnMatMulConfig matmul_config;
  matmul_config.ParseFromString(config_str);
  MemrefInfo input_minfo(args[arg_indx++]);
  MemrefInfo weights_minfo(args[arg_indx++]);
  MemrefInfo output_minfo(result);
  auto input_md = input_minfo.GetOneDnnMemDesc();
  auto weights_md = weights_minfo.GetOneDnnMemDesc();
  // Transposes apply only to real matrices (ndims > 1): swap the trailing
  // two dimensions of the affected operand.
  TRANSPOSE_LAST_TWO_DIMS_IF(
      matmul_config.transpose_a() && input_md.get_ndims() > 1, input_md);
  TRANSPOSE_LAST_TWO_DIMS_IF(
      matmul_config.transpose_b() && weights_md.get_ndims() > 1, weights_md);
  auto output_md = output_minfo.GetOneDnnMemDesc();
  if (matmul_config.optimization_config().weights_prepacked()) {
    // Weights were reordered ahead of time (see
    // __xla_cpu_runtime_OneDnnMatMulReorder); rebuild the logical {K, N}
    // plain descriptor so primitive-descriptor creation sees the canonical
    // shape. The packed layout actually used for execution is taken from
    // matmul_pd->weights_desc() below.
    weights_md =
        memory::desc({input_md.get_dims().back(), output_md.get_dims().back()},
                     weights_md.get_data_type(), memory::format_tag::ab);
  }
  // Everything remaining in `args` is a fused (post-op) operand.
  const int64_t num_fused_operands = num_args - arg_indx;
  std::vector<memory::desc> fused_mds;
  std::vector<void*> fused_bufs;
  for (int64_t i = 0; i < num_fused_operands; ++i) {
    MemrefInfo operand_minfo(args[arg_indx++]);
    fused_mds.push_back(operand_minfo.GetOneDnnMemDesc());
    fused_bufs.push_back(operand_minfo.Data());
  }
  // {DNNL_ARG_*, memory} pairs for binary post-ops; populated by
  // CreateMatMulPrimDesc through fused_operands_ref.
  std::vector<std::pair<int, dnnl::memory>> postop_args;
  FusedOperandsRef fused_operands_ref{fused_bufs, postop_args};
  auto matmul_pd =
      CreateMatMulPrimDesc(cpu_engine, input_md, weights_md, output_md,
                           fused_mds, matmul_config, &fused_operands_ref);
  // Every entry of `args` must have been consumed exactly once.
  XLA_LIGHTWEIGHT_CHECK(num_args == arg_indx);
  auto lhs_mem = memory(input_md, cpu_engine, input_minfo.Data());
  // Bind the weights buffer with the descriptor the primitive selected
  // (possibly an opaque packed layout).
  auto rhs_mem =
      memory(matmul_pd->weights_desc(), cpu_engine, weights_minfo.Data());
  auto result_mem = memory(output_md, cpu_engine, output_minfo.Data());
  // An implementation name containing "ref" means oneDNN fell back to its
  // slow reference kernel; surface that for performance triage.
  if (std::strstr(matmul_pd->impl_info_str(), "ref") != nullptr) {
    LOG(WARNING) << "[Perf]: MatMul reference implementation being executed";
  }
  auto matmul_prim = matmul(*matmul_pd);
  std::unordered_map<int, memory> matmul_args{{DNNL_ARG_SRC, lhs_mem},
                                              {DNNL_ARG_WEIGHTS, rhs_mem},
                                              {DNNL_ARG_DST, result_mem}};
  if (matmul_config.optimization_config().user_scratchpad()) {
    XLA_LIGHTWEIGHT_CHECK(scratch != nullptr);
    MemrefInfo scratch_minfo(scratch);
    auto scratchpad_md = matmul_pd->scratchpad_desc();
    auto scratch_mem = memory(scratchpad_md, cpu_engine, scratch_minfo.Data());
    matmul_args.insert({DNNL_ARG_SCRATCHPAD, scratch_mem});
  }
  matmul_args.insert(postop_args.begin(), postop_args.end());
  matmul_prim.execute(onednn_stream, matmul_args);
}
// Runtime entry point that pre-packs (reorders) MatMul weights into the
// layout preferred by the oneDNN matmul primitive.
// Positional layout of `args`: arg count (int64_t), ExecutableRunOptions*,
// serialized OneDnnMatMulConfig, then MemrefInfos for input, weights and
// output, and optionally a bias MemrefInfo when a BIAS fusion is
// configured. `result` receives the reordered weights blob.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnMatMulReorder(
    void* result, void** args) {
  int arg_indx = 0;
  const int64_t num_args = *(static_cast<int64_t*>(args[arg_indx++]));
  const xla::ExecutableRunOptions* run_options =
      static_cast<const xla::ExecutableRunOptions*>(args[arg_indx++]);
  auto thread_pool = CreateOneDnnThreadPool(
      run_options ? run_options->intra_op_thread_pool() : nullptr);
  engine cpu_engine(engine::kind::cpu, 0);
  auto onednn_stream = MakeOneDnnStream(cpu_engine, thread_pool.get());
  std::string config_str(static_cast<const char*>(args[arg_indx++]));
  OneDnnMatMulConfig matmul_config;
  matmul_config.ParseFromString(config_str);
  MemrefInfo input_minfo(args[arg_indx++]);
  MemrefInfo weight_minfo(args[arg_indx++]);
  MemrefInfo output_minfo(args[arg_indx++]);
  MemrefInfo result_minfo(result);
  auto input_md = input_minfo.GetOneDnnMemDesc();
  auto weight_md = weight_minfo.GetOneDnnMemDesc();
  auto output_md = output_minfo.GetOneDnnMemDesc();
  auto bias_md = dnnl::memory::desc{};
  // The bias operand is only present when the fusion list contains BIAS.
  if (absl::c_count(matmul_config.fusions().ops(), OneDnnFusionConfig::BIAS) >
      0) {
    MemrefInfo bias_minfo(args[arg_indx++]);
    bias_md = bias_minfo.GetOneDnnMemDesc();
  }
  // The bias arg is optional, so only a lower-bound check is possible here.
  XLA_LIGHTWEIGHT_CHECK(num_args >= arg_indx);
  TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_a(), input_md);
  TRANSPOSE_LAST_TWO_DIMS_IF(matmul_config.transpose_b(), weight_md);
  if (!bias_md.is_zero()) {
    // Pad the bias rank with leading 1s so it matches the output rank
    // (broadcast over the missing leading dimensions).
    auto missed_rank = output_md.get_ndims() - bias_md.get_ndims();
    XLA_LIGHTWEIGHT_CHECK(missed_rank >= 0);
    if (missed_rank > 0) {
      auto bias_dims = bias_md.get_dims();
      bias_dims.insert(bias_dims.begin(), missed_rank, 1);
      bias_md = bias_md.reshape(bias_dims);
    }
  }
  // Query the optimal (packed) weights layout for this problem and verify
  // the destination buffer was sized for exactly that layout.
  auto result_md = OneDnnMatMulOptWeightsDesc(cpu_engine, input_md, weight_md,
                                              bias_md, output_md);
  XLA_LIGHTWEIGHT_CHECK(result_minfo.GetOneDnnMemDesc().get_size() ==
                        result_md.get_size());
  auto weight_mem = dnnl::memory{weight_md, cpu_engine, weight_minfo.Data()};
  auto result_mem = dnnl::memory{result_md, cpu_engine, result_minfo.Data()};
  dnnl::reorder rdr{weight_mem, result_mem};
  rdr.execute(onednn_stream, weight_mem, result_mem);
  // Block until the reorder finishes; the caller may use the packed buffer
  // immediately after this call returns.
  onednn_stream.wait();
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/cpu/onednn_contraction_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/cpu_info.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace cpu {
// Test fixture for oneDNN MatMul rewrites. Holds the FileCheck patterns
// that assert which __onednn$matmul fusion configuration the optimizer
// produced for a given HLO module.
class MatmulTest : public HloTestBase {
 protected:
  // Run these tests on the legacy (non-thunk) CPU runtime.
  // NOTE(review): presumably the thunk-runtime path is covered elsewhere —
  // confirm.
  DebugOptions GetDebugOptionsForTest() override {
    DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
    debug_options.set_xla_cpu_use_thunk_runtime(false);
    return debug_options;
  }
  // Pattern: matmul custom call with a BIAS fusion.
  const char* fused_matmul_bias_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: matmul custom call with a BINARY_ADD fusion (element-wise add
  // of a full-shape operand rather than a 1D bias).
  const char* fused_matmul_binary_add_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BINARY_ADD"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: bare matmul custom call with no fused post-ops.
  const char* matmul_rewrite_str_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: BIAS followed by tanh-approximation GELU.
  const char* fused_matmul_bias_gelu_tanh_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS","GELU_TANH"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: BIAS followed by exact (erf-based) GELU.
  const char* fused_matmul_bias_gelu_erf_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS","GELU_ERF"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: BIAS followed by ELU activation.
  const char* fused_matmul_bias_elu_rewrite_str_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS","ELU"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: BIAS followed by tanh activation.
  const char* fused_matmul_bias_tanh_rewrite_str_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS","TANH"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: BIAS followed by ReLU6 activation.
  const char* fused_matmul_bias_relu6_rewrite_str_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS","RELU6"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )";
  // Pattern: BIAS followed by sigmoid activation.
  // NOTE(review): this pattern has one fewer "; CHECK-DAG: }" line than its
  // siblings above. FileCheck only requires the listed lines to match, so
  // this is harmless, but confirm the asymmetry is intentional.
  const char* fused_matmul_bias_sigmoid_rewrite_str_ = R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["BIAS","SIGMOID"]
  ; CHECK-DAG: }
  ; CHECK: }
  )";
};
// A plain f32 batched dot must be rewritten to a __onednn$matmul custom
// call with no fused post-ops, with results matching the reference.
TEST_F(MatmulTest, SimpleTestF32) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
    arg.0 = f32[32,8,128,64] parameter(0), parameter_replication={false}
    arg.1 = f32[32,8,64,128] parameter(1), parameter_replication={false}
    ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
// bf16 variant of the plain batched dot rewrite; skipped on CPUs without
// BF16 support. Looser relative tolerance reflects bf16 precision.
TEST_F(MatmulTest, SimpleTestBF16) {
  if (!IsSupportedType(PrimitiveType::BF16)) {
    GTEST_SKIP() << "CPU does not support BF16.";
  }
  const char* matmul_module_str = R"(
  HloModule matmul.test.bf16
  ENTRY matmul.test.bf16 {
    arg.0 = bf16[32,8,128,64] parameter(0), parameter_replication={false}
    arg.1 = bf16[32,8,64,128] parameter(1), parameter_replication={false}
    ROOT onednn.matmul.0 = bf16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
// f16 variant of the plain batched dot rewrite; skipped on CPUs without
// F16 support.
TEST_F(MatmulTest, SimpleTestF16) {
  if (!IsSupportedType(PrimitiveType::F16)) {
    GTEST_SKIP() << "CPU does not support F16.";
  }
  const char* matmul_module_str = R"(
  HloModule matmul.test.f16
  ENTRY matmul.test.f16 {
    arg.0 = f16[32,8,128,64] parameter(0), parameter_replication={false}
    arg.1 = f16[32,8,64,128] parameter(1), parameter_replication={false}
    ROOT onednn.matmul.0 = f16[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
// Dot contracting on the rhs' last dim (i.e. B transposed) still rewrites
// to a plain __onednn$matmul custom call.
TEST_F(MatmulTest, SimpleTestF32TransposeB) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.1
  ENTRY matmul.test.1 {
    arg.0 = f32[32,8,128,64]{3,1,2,0} parameter(0), parameter_replication={false}
    arg.1 = f32[32,8,128,64]{3,1,2,0} parameter(1), parameter_replication={false}
    ROOT onednn.matmul.0 = f32[32,8,128,128] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
// Adding a constant broadcast to the full dot shape fuses as BINARY_ADD
// (full-shape addend, not a 1D bias).
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion1) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
    reshape.2 = f32[32,32,40,30] reshape(arg0.1)
    constant.3 = f32[] constant(1)
    broadcast.4 = f32[32,32,30,40] broadcast(constant.3), dimensions={}
    dot.7 = f32[32,32,40,40] dot(reshape.2, broadcast.4), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    constant.5 = f32[] constant(15)
    broadcast.6 = f32[40] broadcast(constant.5), dimensions={}
    broadcast.9 = f32[32,32,40,40] broadcast(broadcast.6), dimensions={3}
    add.10 = f32[32,32,40,40] add(dot.7, broadcast.9)
    reshape.11 = f32[32,32,40,40] reshape(add.10)
    tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
    ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
// An add that follows a reshape of the dot's result still fuses into the
// custom call as BINARY_ADD.
TEST_F(MatmulTest, SimpleTestF32WithBiasAddFusion2) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[400,300] parameter(0), parameter_replication={false}
    reshape.2 = f32[400,300] reshape(arg0.1)
    constant.3 = f32[] constant(1)
    broadcast.4 = f32[300,400] broadcast(constant.3), dimensions={}
    dot.7 = f32[400,400] dot(reshape.2, broadcast.4), lhs_batch_dims={}, lhs_contracting_dims={1}, rhs_batch_dims={}, rhs_contracting_dims={0}
    reshape.1 = f32[400,1,400] reshape(dot.7)
    constant.5 = f32[] constant(15)
    broadcast.6 = f32[400] broadcast(constant.5), dimensions={}
    broadcast.9 = f32[400,1,400] broadcast(broadcast.6), dimensions={2}
    add.10 = f32[400,1,400] add(reshape.1, broadcast.9)
    tuple.12 = (f32[400,1,400]) tuple(add.10)
    ROOT get-tuple-element.13 = f32[400,1,400] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
// A full-shape addend supplied as a parameter fuses as BINARY_ADD rather
// than BIAS.
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter1) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
    arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
    arg0.3 = f32[32,32,40,40] parameter(2), parameter_replication={false}
    dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    add.10 = f32[32,32,40,40] add(dot.7, arg0.3)
    reshape.11 = f32[32,32,40,40] reshape(add.10)
    tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
    ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
// A 1D parameter broadcast along the last dim qualifies as a proper BIAS
// fusion.
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
    arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
    arg0.3 = f32[40]{0} parameter(2), parameter_replication={false}
    dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    broad.1 = f32[32,32,40,40] broadcast(arg0.3), dimensions={3}
    add.10 = f32[32,32,40,40] add(dot.7, broad.1)
    reshape.11 = f32[32,32,40,40] reshape(add.10)
    tuple.12 = (f32[32,32,40,40]) tuple(reshape.11)
    ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
// A 2D bias broadcast along non-adjacent dims {0,3} cannot use the BIAS
// post-op, so it fuses as BINARY_ADD.
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[2,2,400,30] parameter(0), parameter_replication={false}
    arg0.2 = f32[2,2,30,400] parameter(1), parameter_replication={false}
    arg0.3 = f32[2,400] parameter(2), parameter_replication={false}
    dot.7 = f32[2,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    broad.1 = f32[2,2,400,400] broadcast(arg0.3), dimensions={0,3}
    add.10 = f32[2,2,400,400] add(dot.7, broad.1)
    reshape.11 = f32[2,2,400,400] reshape(add.10)
    tuple.12 = (f32[2,2,400,400]) tuple(reshape.11)
    ROOT get-tuple-element.13 = f32[2,2,400,400] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
// Same 2D-bias pattern but with a degenerate (size-1) leading dim; this
// variant does qualify for the BIAS fusion.
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter2D1B) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[1,2,400,30] parameter(0), parameter_replication={false}
    arg0.2 = f32[1,2,30,400] parameter(1), parameter_replication={false}
    arg0.3 = f32[1,400] parameter(2), parameter_replication={false}
    dot.7 = f32[1,2,400,400] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    broad.1 = f32[1,2,400,400] broadcast(arg0.3), dimensions={0,3}
    add.10 = f32[1,2,400,400] add(dot.7, broad.1)
    reshape.11 = f32[1,2,400,400] reshape(add.10)
    tuple.12 = (f32[1,2,400,400]) tuple(reshape.11)
    ROOT get-tuple-element.13 = f32[1,2,400,400] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
// A 1D bias routed through a reshape/broadcast/reshape chain is still
// recognized and fused as BIAS.
TEST_F(MatmulTest, SimpleTestF32WithBiasAsParameter3) {
  const char* matmul_module_str = R"(
  HloModule matmul.biasadd.test.f32
  ENTRY matmul.biasadd.test.f32 {
    arg0.1 = f32[16,128,768] parameter(0), sharding={replicated}
    arg0.2 = f32[768,768] parameter(1), sharding={replicated}
    dot.84 = f32[16,128,768] dot(arg0.1, arg0.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
    arg0.3 = f32[768]{0} parameter(2), sharding={replicated}
    reshape.85 = f32[1,1,768] reshape(arg0.3)
    broadcast.86 = f32[1,1,768] broadcast(reshape.85), dimensions={0,1,2}
    reshape.87 = f32[768]{0} reshape(broadcast.86)
    broadcast.88 = f32[16,128,768] broadcast(reshape.87), dimensions={2}
    ROOT add.89 = f32[16,128,768] add(dot.84, broadcast.88)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
// Transposed-B dot combined with a broadcast-constant add fuses as
// BINARY_ADD.
TEST_F(MatmulTest, SimpleTestF32TransposeBWithBiasAddFusion) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.1
  ENTRY matmul.test.1 {
    arg.0 = f32[32,8,4,16]{3,1,2,0} parameter(0), parameter_replication={false}
    arg.1 = f32[32,8,16,16]{3,1,2,0} parameter(1), parameter_replication={false}
    dot.7 = f32[32,8,4,16]{3,2,1,0} dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}
    constant.5 = f32[] constant(15)
    broadcast.6 = f32[16]{0} broadcast(constant.5), dimensions={}
    broadcast.9 = f32[32,8,4,16]{3,2,1,0} broadcast(broadcast.6), dimensions={3}
    add.10 = f32[32,8,4,16]{3,2,1,0} add(dot.7, broadcast.9)
    reshape.11 = f32[32,8,4,16]{3,2,1,0} reshape(add.10)
    tuple.12 = (f32[32,8,4,16]{3,2,1,0}) tuple(reshape.11)
    ROOT get-tuple-element.13 = f32[32,8,4,16]{3,2,1,0} get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_binary_add_);
}
// Negative case: the addend's broadcast shape is incompatible with the
// dot's (pre-reshape) output, so no add fusion happens — only the bare
// matmul rewrite.
TEST_F(MatmulTest, F32BiasAddFusionNonCompatibleBias) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.1 {
    arg.0 = f32[12288,2] parameter(0), parameter_replication={false}
    arg.1 = f32[2,1024] parameter(1), parameter_replication={false}
    dot.0 = f32[12288,1024] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
    reshape.0 = f32[32,384,1024] reshape(dot.0)
    constant.0 = f32[1,384,1024] constant(15)
    reshape.1 = f32[384,1024] reshape(constant.0)
    broadcast.0 = f32[32,384,1024] broadcast(reshape.1), dimensions={1,2}
    add.0 = f32[32,384,1024] add(reshape.0, broadcast.0)
    tuple.0 = (f32[32,384,1024]) tuple(add.0)
    ROOT get-tuple-element.0 = f32[32,384,1024] get-tuple-element(tuple.0), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, matmul_rewrite_str_);
}
// The tanh-approximation GELU pattern (0.5*x*(1+tanh(sqrt(2/pi)*(x +
// 0.044715*x^3)))) applied to the dot's output fuses as GELU_TANH.
TEST_F(MatmulTest, ApproxGELUTestF32) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
    arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
    arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
    onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, onednn.matmul.0)
    mul.1 = f32[32,32,4,32] multiply(onednn.matmul.0, mul.0)
    const.0 = f32[] constant(0.044715)
    bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
    mul.2 = f32[32,32,4,32] multiply(mul.1, bcast.0)
    add.0 = f32[32,32,4,32] add(onednn.matmul.0, mul.2)
    const.1 = f32[] constant(0.797884583)
    bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
    mul.3 = f32[32,32,4,32] multiply(add.0, bcast.1)
    tanh = f32[32,32,4,32] tanh(mul.3)
    const.2 = f32[] constant(1)
    bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
    add.2 = f32[32,32,4,32] add(tanh, bcast.2)
    const.3 = f32[] constant(0.5)
    bcast.3 = f32[32,32,4,32] broadcast(const.3), dimensions={}
    mul.4 = f32[32,32,4,32] multiply(add.2, bcast.3)
    ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.4)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str,
                    R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["GELU_TANH"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )");
}
// Bias add followed by the tanh-approximation GELU fuses as
// ["BIAS","GELU_TANH"].
TEST_F(MatmulTest, BiasAndApproxGELUTestF32) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
  Arg_5.6 = f32[32,32,64] parameter(0), sharding={replicated}
  Arg_7.8 = f32[64,256] parameter(1), sharding={replicated}
  dot.232 = f32[32,32,256] dot(Arg_5.6, Arg_7.8), lhs_contracting_dims={2}, rhs_contracting_dims={0}
  Arg_6.7 = f32[256] parameter(2), sharding={replicated}
  reshape.233 = f32[1,1,256] reshape(Arg_6.7)
  broadcast.234 = f32[1,1,256] broadcast(reshape.233), dimensions={0,1,2}
  reshape.235 = f32[256] reshape(broadcast.234)
  broadcast.236 = f32[32,32,256] broadcast(reshape.235), dimensions={2}
  add.237 = f32[32,32,256] add(dot.232, broadcast.236)
  multiply.238 = f32[32,32,256] multiply(add.237, add.237)
  multiply.239 = f32[32,32,256] multiply(add.237, multiply.238)
  constant.20 = f32[] constant(0.044715)
  broadcast.21 = f32[32,32,256] broadcast(constant.20), dimensions={}
  multiply.240 = f32[32,32,256] multiply(multiply.239, broadcast.21)
  add.241 = f32[32,32,256] add(add.237, multiply.240)
  constant.18 = f32[] constant(0.797884583)
  broadcast.19 = f32[32,32,256] broadcast(constant.18), dimensions={}
  multiply.242 = f32[32,32,256] multiply(add.241, broadcast.19)
  tanh.243 = f32[32,32,256] tanh(multiply.242)
  constant.16 = f32[] constant(1)
  broadcast.17 = f32[32,32,256] broadcast(constant.16), dimensions={}
  add.244 = f32[32,32,256] add(tanh.243, broadcast.17)
  constant.14 = f32[] constant(0.5)
  broadcast.15 = f32[32,32,256] broadcast(constant.14), dimensions={}
  multiply.245 = f32[32,32,256] multiply(add.244, broadcast.15)
  ROOT out = f32[32,32,256] multiply(add.237, multiply.245)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
// TF-style emission of bias + approximate GELU (different operand order in
// the multiplies) also fuses as ["BIAS","GELU_TANH"].
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF32) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
  arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
  arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
  dot.7 = f32[1024,256] dot(arg0.1, arg1.2), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
  arg2.3 = f32[256] parameter(2), parameter_replication={false}
  broadcast.9 = f32[1024,256] broadcast(arg2.3), dimensions={1}
  add.10 = f32[1024,256] add(dot.7, broadcast.9)
  constant.12 = f32[] constant(0.044715)
  broadcast.13 = f32[1024,256] broadcast(constant.12), dimensions={}
  multiply.14 = f32[1024,256] multiply(broadcast.13, add.10)
  multiply.11 = f32[1024,256] multiply(add.10, add.10)
  multiply.15 = f32[1024,256] multiply(multiply.14, multiply.11)
  add.16 = f32[1024,256] add(add.10, multiply.15)
  constant.17 = f32[] constant(0.797884583)
  broadcast.18 = f32[1024,256] broadcast(constant.17), dimensions={}
  multiply.19 = f32[1024,256] multiply(add.16, broadcast.18)
  tanh.20 = f32[1024,256] tanh(multiply.19)
  constant.21 = f32[] constant(1)
  broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
  add.23 = f32[1024,256] add(tanh.20, broadcast.22)
  constant.24 = f32[] constant(0.5)
  broadcast.25 = f32[1024,256] broadcast(constant.24), dimensions={}
  multiply.26 = f32[1024,256] multiply(add.23, broadcast.25)
  ROOT multiply.27 = f32[1024,256] multiply(add.10, multiply.26)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
// bf16 variant of the TF-style bias + approximate GELU fusion (dot runs in
// bf16, pattern math in f32); skipped on CPUs without BF16 support.
TEST_F(MatmulTest, BiasAndApproxTFGELUTestBF16) {
  if (!IsSupportedType(PrimitiveType::BF16)) {
    GTEST_SKIP() << "CPU does not support BF16.";
  }
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
  arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
  convert.8 = bf16[1024,512] convert(arg0.1)
  arg1.2 = f32[256,512] parameter(1), parameter_replication={false}
  convert.9 = bf16[256,512] convert(arg1.2)
  dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
  convert = f32[1024,256] convert(dot.10)
  arg2.3 = f32[256] parameter(2), parameter_replication={false}
  broadcast = f32[1024,256] broadcast(arg2.3), dimensions={1}
  add.13 = f32[1024,256] add(convert, broadcast)
  constant.16 = f32[] constant(0.044715)
  broadcast.17 = f32[1024,256] broadcast(constant.16), dimensions={}
  multiply.18 = f32[1024,256] multiply(broadcast.17, add.13)
  multiply.15 = f32[1024,256] multiply(add.13, add.13)
  multiply.19 = f32[1024,256] multiply(multiply.18, multiply.15)
  add.20 = f32[1024,256] add(add.13, multiply.19)
  constant.21 = f32[] constant(0.797884583)
  broadcast.22 = f32[1024,256] broadcast(constant.21), dimensions={}
  multiply.23 = f32[1024,256] multiply(add.20, broadcast.22)
  tanh.24 = f32[1024,256] tanh(multiply.23)
  constant.25 = f32[] constant(1)
  broadcast.26 = f32[1024,256] broadcast(constant.25), dimensions={}
  add.27 = f32[1024,256] add(tanh.24, broadcast.26)
  constant.1 = f32[] constant(0.5)
  broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
  multiply.30 = f32[1024,256] multiply(add.13, broadcast.2)
  ROOT multiply.32 = f32[1024,256] multiply(add.27, multiply.30)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
// f16 variant of bias + approximate GELU; the constants (0.044708,
// 0.79785) are the f16-rounded forms of the GELU coefficients and must
// still be recognized by the pattern matcher. Skipped without F16 support.
TEST_F(MatmulTest, BiasAndApproxTFGELUTestF16) {
  if (!IsSupportedType(PrimitiveType::F16)) {
    GTEST_SKIP() << "CPU does not support F16.";
  }
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
  arg0.1 = f16[1024,512] parameter(0), parameter_replication={false}
  reshape.4 = f16[1024,512] reshape(arg0.1)
  arg1.2 = f16[256,512] parameter(1), parameter_replication={false}
  reshape.5 = f16[256,512] reshape(arg1.2)
  dot.7 = f16[1024,256] dot(reshape.4, reshape.5), lhs_contracting_dims={1}, rhs_contracting_dims={1}, frontend_attributes={grad_x="false",grad_y="false"}
  transpose.8 = f16[1024,256] transpose(dot.7), dimensions={0,1}
  arg2.3 = f16[256] parameter(2), parameter_replication={false}
  reshape.6 = f16[256] reshape(arg2.3)
  broadcast.9 = f16[1024,256] broadcast(reshape.6), dimensions={1}
  add.10 = f16[1024,256] add(transpose.8, broadcast.9)
  constant.12 = f16[] constant(0.044708)
  broadcast.13 = f16[1024,256] broadcast(constant.12), dimensions={}
  multiply.14 = f16[1024,256] multiply(broadcast.13, add.10)
  multiply.11 = f16[1024,256] multiply(add.10, add.10)
  multiply.15 = f16[1024,256] multiply(multiply.14, multiply.11)
  add.16 = f16[1024,256] add(add.10, multiply.15)
  constant.17 = f16[] constant(0.79785)
  broadcast.18 = f16[1024,256] broadcast(constant.17), dimensions={}
  multiply.19 = f16[1024,256] multiply(add.16, broadcast.18)
  tanh.20 = f16[1024,256] tanh(multiply.19)
  constant.21 = f16[] constant(1)
  broadcast.22 = f16[1024,256] broadcast(constant.21), dimensions={}
  add.23 = f16[1024,256] add(tanh.20, broadcast.22)
  constant.24 = f16[] constant(0.5)
  broadcast.25 = f16[1024,256] broadcast(constant.24), dimensions={}
  multiply.26 = f16[1024,256] multiply(add.23, broadcast.25)
  ROOT multiply.27 = f16[1024,256] multiply(add.10, multiply.26)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_tanh_);
}
// The exact GELU pattern (0.5*x*(1+erf(x/sqrt(2)))) applied to the dot's
// output fuses as GELU_ERF.
TEST_F(MatmulTest, ExactGELUTestF32) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
    arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
    arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
    onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    const.0 = f32[] constant(0.707106769)
    bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={}
    mul.0 = f32[32,32,4,32] multiply(onednn.matmul.0, bcast.0)
    erf.0 = f32[32,32,4,32] erf(mul.0)
    const.1 = f32[] constant(1)
    bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
    add.0 = f32[32,32,4,32] add(erf.0, bcast.1)
    const.2 = f32[] constant(0.5)
    bcast.2 = f32[32,32,4,32] broadcast(const.2), dimensions={}
    mul.1 = f32[32,32,4,32] multiply(add.0, bcast.2)
    ROOT out = f32[32,32,4,32] multiply(onednn.matmul.0, mul.1)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str,
                    R"(
  ; CHECK: custom_call_target="__onednn$matmul",
  ; CHECK: backend_config={
  ; CHECK-DAG: "outer_dimension_partitions":[],
  ; CHECK-DAG: "onednn_matmul_config":{
  ; CHECK-DAG: "fusions":{
  ; CHECK-DAG: "ops":["GELU_ERF"]
  ; CHECK-DAG: }
  ; CHECK-DAG: }
  ; CHECK: }
  )");
}
// Bias (via constant broadcast) followed by exact GELU, with reshapes
// around the dot, fuses as ["BIAS","GELU_ERF"].
TEST_F(MatmulTest, BiasAndExactGELUTestF32) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
  arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
  arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
  dot.378 = f32[6304,3072] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  reshape.11 = f32[32,197,3072]reshape(dot.378)
  constant.381 = f32[3072] constant(0.3)
  broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
  add.383 = f32[32,197,3072] add(reshape.11, broadcast.382)
  constant.384 = f32[] constant(0.707106769)
  broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
  multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
  erf.387 = f32[32,197,3072] erf(multiply.386)
  constant.388 = f32[] constant(1)
  broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
  add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
  constant.391 = f32[] constant(0.5)
  broadcast.392 = f32[32,197,3072] broadcast(constant.391)
  multiply.393 = f32[32,197,3072] multiply(add.390, broadcast.392)
  multiply.394 = f32[32,197,3072] multiply(multiply.393, add.383)
  ROOT out = f32[6304,3072] reshape(multiply.394)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
// bf16 variant of bias + exact GELU fusion (["BIAS","GELU_ERF"]).
// Fix: this test was the only BF16 test in the file that did not guard on
// hardware support (cf. SimpleTestBF16, BiasAndApproxTFGELUTestBF16), so
// it would fail — rather than skip — on CPUs without BF16.
TEST_F(MatmulTest, BiasAndExactGELUTestBF16) {
  if (!IsSupportedType(PrimitiveType::BF16)) {
    GTEST_SKIP() << "CPU does not support BF16.";
  }
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
  arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
  convert.0 = bf16[6304,768] convert(arg.0)
  arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
  convert.1 = bf16[768,3072] convert(arg.1)
  dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
  convert.2 = f32[6304,3072] convert(dot.378)
  constant.381 = f32[3072] constant(0.3)
  broadcast.382 = f32[6304,3072] broadcast(constant.381), dimensions={1}
  add.383 = f32[6304,3072] add(convert.2, broadcast.382)
  constant.384 = f32[] constant(0.707106769)
  broadcast.385 = f32[6304,3072] broadcast(constant.384), dimensions={}
  multiply.386 = f32[6304,3072] multiply(broadcast.385, add.383)
  erf.387 = f32[6304,3072] erf(multiply.386)
  constant.388 = f32[] constant(1)
  broadcast.389 = f32[6304,3072] broadcast(constant.388), dimensions={}
  add.390 = f32[6304,3072] add(erf.387, broadcast.389)
  constant.391 = f32[] constant(0.5)
  broadcast.392 = f32[6304,3072] broadcast(constant.391)
  multiply.393 = f32[6304,3072] multiply(add.390, broadcast.392)
  ROOT out = f32[6304,3072] multiply(multiply.393, add.383)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
  MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
// Exact-GELU pattern as emitted by JAX: the 0.5 multiply is applied last and
// the 1/sqrt(2) constant is slightly off (0.707182348) — the rewriter is
// still expected to recognize and fuse it as GELU_ERF.
TEST_F(MatmulTest, BiasAndExactJaxGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[6304,768] parameter(0), parameter_replication={false}
convert.0 = bf16[6304,768] convert(arg.0)
arg.1 = f32[768,3072] parameter(1), parameter_replication={false}
convert.1 = bf16[768,3072] convert(arg.1)
dot.378 = bf16[6304,3072] dot(convert.0, convert.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
convert.2 = f32[6304,3072] convert(dot.378)
reshape.0 = f32[32,197,3072] reshape(convert.2)
constant.381 = f32[3072] constant(0.3)
broadcast.382 = f32[32,197,3072] broadcast(constant.381), dimensions={2}
add.383 = f32[32,197,3072] add(reshape.0, broadcast.382)
constant.384 = f32[] constant(0.707182348)
broadcast.385 = f32[32,197,3072] broadcast(constant.384), dimensions={}
multiply.386 = f32[32,197,3072] multiply(broadcast.385, add.383)
erf.387 = f32[32,197,3072] erf(multiply.386)
constant.388 = f32[] constant(1)
broadcast.389 = f32[32,197,3072] broadcast(constant.388), dimensions={}
add.390 = f32[32,197,3072] add(erf.387, broadcast.389)
multiply.393 = f32[32,197,3072] multiply(add.390, add.383)
constant.391 = f32[] constant(0.5)
broadcast.392 = f32[32,197,3072] broadcast(constant.391)
ROOT multiply.394 = f32[32,197,3072] multiply(multiply.393, broadcast.392)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
// Exact-GELU pattern as emitted by TensorFlow: bias comes from a parameter
// (not a constant), the 1/sqrt(2) constant is BF16-rounded (0.70703125), and
// the dot carries grad_x/grad_y frontend attributes.
TEST_F(MatmulTest, BiasAndExactTFGELUTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg0.1 = f32[1024,512] parameter(0), parameter_replication={false}
convert.8 = bf16[1024,512] convert(arg0.1)
arg1.2 = f32[512,256] parameter(1), parameter_replication={false}
convert.9 = bf16[512,256] convert(arg1.2)
dot.10 = bf16[1024,256] dot(convert.8, convert.9), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"}
convert = f32[1024,256] convert(dot.10)
arg2.3 = f32[256] parameter(2), parameter_replication={false}
broadcast = f32[1024,256] broadcast(arg2.3), dimensions={1}
add.13 = f32[1024,256] add(convert, broadcast)
constant.1 = f32[] constant(0.70703125)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
multiply.16 = f32[1024,256] multiply(add.13, broadcast.2)
erf.17 = f32[1024,256] erf(multiply.16)
constant.3 = f32[] constant(1)
broadcast.4 = f32[1024,256] broadcast(constant.3), dimensions={}
add.20 = f32[1024,256] add(erf.17, broadcast.4)
constant.5 = f32[] constant(0.5)
broadcast.6 = f32[1024,256] broadcast(constant.5), dimensions={}
multiply.23 = f32[1024,256] multiply(add.20, broadcast.6)
ROOT multiply.24 = f32[1024,256] multiply(add.13, multiply.23)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
// F16 variant of the bias + exact (erf-based) GELU fusion: the whole graph
// (dot and epilogue) is in F16 with no converts.
TEST_F(MatmulTest, BiasAndExactGELUTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg.0 = f16[6304,768] parameter(0), parameter_replication={false}
arg.1 = f16[768,3072] parameter(1), parameter_replication={false}
dot.378 = f16[6304,3072] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
constant.381 = f16[3072] constant(0.3)
broadcast.382 = f16[6304,3072] broadcast(constant.381), dimensions={1}
add.383 = f16[6304,3072] add(dot.378, broadcast.382)
constant.384 = f16[] constant(0.707106769)
broadcast.385 = f16[6304,3072] broadcast(constant.384), dimensions={}
multiply.386 = f16[6304,3072] multiply(broadcast.385, add.383)
erf.387 = f16[6304,3072] erf(multiply.386)
constant.388 = f16[] constant(1)
broadcast.389 = f16[6304,3072] broadcast(constant.388), dimensions={}
add.390 = f16[6304,3072] add(erf.387, broadcast.389)
constant.391 = f16[] constant(0.5)
broadcast.392 = f16[6304,3072] broadcast(constant.391)
multiply.393 = f16[6304,3072] multiply(add.390, broadcast.392)
ROOT out = f16[6304,3072] multiply(multiply.393, add.383)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_gelu_erf_);
}
// Negative test: multiplying the matmul output by a broadcast of a
// NON-scalar constant must NOT be fused as a LINEAR post-op — the CHECK-NOT
// lines assert the optimized custom call carries no such fusion.
TEST_F(MatmulTest, TestNonScalarConstantEltwiseLinearF32) {
const char* matmul_module_str = R"(
HloModule matmul.nonscalar.test.1
ENTRY matmul.nonscalar.test.f32 {
arg.0 = f32[16,400,500] parameter(0)
arg.1 = f32[16,500,3] parameter(1)
onednn.matmul.0 = f32[16,400,3] dot(arg.0, arg.1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
constant.0 = f32[3]{0} constant({0.625, 0.875, 0.375})
broadcast.0 = f32[16,400,3] broadcast(constant.0), dimensions={2}
ROOT mult.0 = f32[16,400,3] multiply(onednn.matmul.0, broadcast.0)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-4, 1e-4)));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-NOT: "fusions":{
; CHECK-NOT: "ops":["LINEAR"]
; CHECK-NOT: }
; CHECK-DAG: }
; CHECK: }
)");
}
// Verifies that a ReLU (max with broadcast 0) expressed as a called
// computation on the matmul output is fused as a RELU post-op on the oneDNN
// matmul custom call.
TEST_F(MatmulTest, ReLUTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
relu.1 {
Arg_0.3 = f32[32,32,4,32] parameter(0)
constant.4 = f32[] constant(0)
broadcast.5 = f32[32,32,4,32] broadcast(constant.4), dimensions={}
ROOT maximum.6 = f32[32,32,4,32] maximum(Arg_0.3, broadcast.5)
}
ENTRY matmul.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
ROOT call.7 = f32[32,32,4,32] call(onednn.matmul.0), to_apply=relu.1
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["RELU"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
// BF16 matmul whose bias arrives as an F32 parameter (converted to BF16 and
// reshaped/broadcast before the add); expects the fixture's fused bias
// pattern. NOTE(review): no IsSupportedType(BF16) guard here despite the
// sibling tests having one — confirm this is intentional.
TEST_F(MatmulTest, SimpleBiasTestBF16_PARAM_F32) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.bf16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = bf16[16,128,768] convert(Arg_2.3)
Arg_1.2 = f32[768,3072] parameter(1), sharding={replicated}
convert.5 = bf16[768,3072] convert(Arg_1.2)
dot.7 = bf16[16,128,3072] dot(convert.4, convert.5), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = f32[3072] parameter(0), sharding={replicated}
convert.6 = bf16[3072] convert(Arg_0.1)
reshape.8 = bf16[1,1,3072] reshape(convert.6)
broadcast.9 = bf16[1,1,3072] broadcast(reshape.8), dimensions={0,1,2}
reshape.10 = bf16[3072] reshape(broadcast.9)
broadcast.11 = bf16[16,128,3072] broadcast(reshape.10), dimensions={2}
ROOT add.12 = bf16[16,128,3072] add(dot.7, broadcast.11)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_)
}
// Same bias-fusion check as SimpleBiasTestBF16_PARAM_F32, but the weight and
// bias parameters are already BF16, so no converts are needed on them.
TEST_F(MatmulTest, SimpleBiasTestBF16_PARAM_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.bf16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = bf16[16,128,768] convert(Arg_2.3)
Arg_1.2 = bf16[768,3072] parameter(1), sharding={replicated}
dot.5 = bf16[16,128,3072] dot(convert.4, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = bf16[3072] parameter(0), sharding={replicated}
reshape.6 = bf16[1,1,3072] reshape(Arg_0.1)
broadcast.7 = bf16[1,1,3072] broadcast(reshape.6), dimensions={0,1,2}
reshape.8 = bf16[3072] reshape(broadcast.7)
broadcast.9 = bf16[16,128,3072] broadcast(reshape.8), dimensions={2}
ROOT add.10 = bf16[16,128,3072] add(dot.5, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
// Division of the matmul output by a broadcast SCALAR constant should be
// folded into a LINEAR post-op on the oneDNN matmul custom call (contrast
// with TestNonScalarConstantEltwiseLinearF32, where fusion must not happen).
TEST_F(MatmulTest, DivisionByConstantWithEltwiseLinearF32) {
const char* matmul_module_str = R"(
HloModule matmul.divide.test.1
ENTRY matmul.divide.test.f32 {
Arg_4.5 = f32[16,128,768] parameter(0), sharding={replicated}
Arg_2.3 = f32[768,12,64] parameter(1), sharding={replicated}
onednn.matmul.0 = f32[16,128,12,64] dot(Arg_4.5, Arg_2.3), lhs_contracting_dims={2}, rhs_contracting_dims={0}
constant.8 = f32[] constant(8)
broadcast.9 = f32[16,128,12,64] broadcast(constant.8), dimensions={}
ROOT divide.16 = f32[16,128,12,64] divide(onednn.matmul.0, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-4, 1e-4)));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
// F16 analogue of SimpleBiasTestBF16_PARAM_F32: all three F32 parameters are
// converted to F16, and the bias add is expected to fuse into the matmul.
TEST_F(MatmulTest, SimpleBiasTestF16_PARAM_F32) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.f16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = f16[16,128,768] convert(Arg_2.3)
Arg_1.2 = f32[768,3072] parameter(1), sharding={replicated}
convert.5 = f16[768,3072] convert(Arg_1.2)
dot.7 = f16[16,128,3072] dot(convert.4, convert.5), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = f32[3072] parameter(0), sharding={replicated}
convert.6 = f16[3072] convert(Arg_0.1)
reshape.8 = f16[1,1,3072] reshape(convert.6)
broadcast.9 = f16[1,1,3072] broadcast(reshape.8), dimensions={0,1,2}
reshape.10 = f16[3072] reshape(broadcast.9)
broadcast.11 = f16[16,128,3072] broadcast(reshape.10), dimensions={2}
ROOT add.12 = f16[16,128,3072] add(dot.7, broadcast.11)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
// F16 bias fusion where the weight and bias parameters are natively F16
// (only the activation parameter needs a convert).
TEST_F(MatmulTest, SimpleBiasTestF16_PARAM_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule jit_apply
ENTRY matmul.test.f16 {
Arg_2.3 = f32[16,128,768] parameter(2), sharding={replicated}
convert.4 = f16[16,128,768] convert(Arg_2.3)
Arg_1.2 = f16[768,3072] parameter(1), sharding={replicated}
dot.5 = f16[16,128,3072] dot(convert.4, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0}
Arg_0.1 = f16[3072] parameter(0), sharding={replicated}
reshape.6 = f16[1,1,3072] reshape(Arg_0.1)
broadcast.7 = f16[1,1,3072] broadcast(reshape.6), dimensions={0,1,2}
reshape.8 = f16[3072] reshape(broadcast.7)
broadcast.9 = f16[16,128,3072] broadcast(reshape.8), dimensions={2}
ROOT add.10 = f16[16,128,3072] add(dot.5, broadcast.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_);
}
// When the weights are a runtime parameter (not compile-time constant), the
// dot should still lower to the oneDNN matmul custom call, but no weight
// reorder custom call may be inserted for it.
TEST_F(MatmulTest, TestF32NonConstantWeights) {
const char* hlo_text = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[64,256,16] parameter(0), parameter_replication={false}
arg.1 = f32[16,32] parameter(1), parameter_replication={false}
ROOT onednn.matmul.0 = f32[64,256,32] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
// Numeric equivalence first, then the structural FileCheck pass.
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: %matmul.test.f32
; CHECK-NOT: custom_call_target="__onednn$matmul_reorder",
; CHECK: custom-call(%{{[a-z,A-Z,0-9,\.]*}}, %arg.1), custom_call_target="__onednn$matmul",
)");
}
// Companion to TestF32NonConstantWeights: here the weights are a broadcast
// constant, and the optimized module is still expected to contain the oneDNN
// matmul custom call taking the constant operand directly, with no separate
// reorder custom call emitted.
TEST_F(MatmulTest, TestF32ConstantWeights) {
const char* hlo_text = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[64,256,16] parameter(0), parameter_replication={false}
constant = f32[] constant(1)
arg.1 = f32[16,32] broadcast(constant), dimensions={}
ROOT onednn.matmul.0 = f32[64,256,32] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
// Numeric equivalence first, then the structural FileCheck pass.
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: %matmul.test.f32
; CHECK-NOT: custom_call_target="__onednn$matmul_reorder",
; CHECK: custom-call(%{{[a-z,A-Z,0-9,\.]*}}, %constant{{[a-z,A-Z,0-9,\.]*}}), custom_call_target="__onednn$matmul",
)");
}
// ELU expressed as select(x > 0, x, expm1(x)) on top of a bias add should be
// fused into the matmul; the expected pattern lives in the fixture's
// `fused_matmul_bias_elu_rewrite_str_`.
TEST_F(MatmulTest, BiasAddELUFusion_F32) {
const char* matmul_module_str = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg0.1 = f32[1024,1024] parameter(0)
arg1.2 = f32[1024,1024] parameter(1)
dot.3 = f32[1024,1024] dot(arg1.2, arg0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.4 = f32[1024] parameter(2)
broadcast.5 = f32[1024,1024] broadcast(arg2.4), dimensions={1}
add.6 = f32[1024,1024] add(dot.3, broadcast.5)
constant.7 = f32[] constant(0)
broadcast.8 = f32[1024,1024] broadcast(constant.7), dimensions={}
compare.9 = pred[1024,1024] compare(add.6, broadcast.8), direction=GT
exponential-minus-one.10 = f32[1024,1024] exponential-minus-one(add.6)
ROOT select.11 = f32[1024,1024] select(compare.9, add.6, exponential-minus-one.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
// BF16 ELU fusion: the select/compare/expm1 pattern is interleaved with
// bf16<->f32 converts, exercising the rewriter's ability to see through them.
TEST_F(MatmulTest, BiasAddELUFusion_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg0.1 = f32[1024,512] parameter(0)
convert.2 = bf16[1024,512] convert(arg0.1)
arg1.3 = f32[256,512] parameter(1)
convert.4 = bf16[256,512] convert(arg1.3)
dot.5 = bf16[1024,256] dot(convert.2, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={1}
convert.6 = f32[1024,256] convert(dot.5)
arg2.7 = f32[256] parameter(2)
broadcast.8 = f32[1024,256] broadcast(arg2.7), dimensions={1}
add.9 = f32[1024,256] add(convert.6, broadcast.8)
constant.10 = f32[] constant(0)
broadcast.11 = f32[1024,256] broadcast(constant.10), dimensions={}
compare.12 = pred[1024,256] compare(add.9, broadcast.11), direction=GT
convert.13 = bf16[1024,256] convert(add.9)
exponential-minus-one.14 = f32[1024,256] exponential-minus-one(add.9)
convert.15 = bf16[1024,256] convert(exponential-minus-one.14)
select.16 = bf16[1024,256] select(compare.12, convert.13, convert.15)
ROOT convert.17 = f32[1024,256] convert(select.16)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
// F16 ELU fusion with the whole graph natively in F16 (no converts).
TEST_F(MatmulTest, BiasAddELUFusion_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg0.1 = f16[1024,1024] parameter(0)
arg1.2 = f16[1024,1024] parameter(1)
dot.3 = f16[1024,1024] dot(arg1.2, arg0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.4 = f16[1024] parameter(2)
broadcast.5 = f16[1024,1024] broadcast(arg2.4), dimensions={1}
add.6 = f16[1024,1024] add(dot.3, broadcast.5)
constant.7 = f16[] constant(0)
broadcast.8 = f16[1024,1024] broadcast(constant.7), dimensions={}
compare.9 = pred[1024,1024] compare(add.6, broadcast.8), direction=GT
exponential-minus-one.10 = f16[1024,1024] exponential-minus-one(add.6)
ROOT select.11 = f16[1024,1024] select(compare.9, add.6, exponential-minus-one.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
// ELU fusion where the select result feeds a SECOND dot: the bias+ELU must
// still fuse into the first matmul even though its output is consumed by
// another matmul rather than being the module root.
TEST_F(MatmulTest, BiasAddELUFusion_F16_2) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.test.f16
ENTRY matmul.test.f16 {
arg0.1 = f32[1024,1024] parameter(0)
convert.2 = f16[1024,1024] convert(arg0.1)
arg1.3 = f32[1024,1024] parameter(2)
convert.4 = f16[1024,1024] convert(arg1.3)
dot.5 = f16[1024,1024] dot(convert.2, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.6 = f32[1024] parameter(1)
convert.7 = f16[1024] convert(arg2.6)
broadcast.8 = f16[1024,1024] broadcast(convert.7), dimensions={1}
add.9 = f16[1024,1024] add(dot.5, broadcast.8)
constant.10 = f16[] constant(0)
broadcast.11 = f16[1024,1024] broadcast(constant.10), dimensions={}
compare.12 = pred[1024,1024] compare(add.9, broadcast.11), direction=GT
exponential-minus-one.13 = f16[1024,1024] exponential-minus-one(add.9)
select.14 = f16[1024,1024] select(compare.12, add.9, exponential-minus-one.13)
dot.15 = f16[1024,1024] dot(select.14, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT convert.16 = f32[1024,1024] convert(dot.15)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_elu_rewrite_str_);
}
// Sigmoid spelled out as 1 / (1 + exp(-x)) on a biased matmul output should
// match the fixture's fused bias+sigmoid rewrite pattern. The tuple /
// get-tuple-element pair at the end exercises fusion through a tuple root.
TEST_F(MatmulTest, SIGMOIDTestF32) {
const char* matmul_module_str = R"(
HloModule matmul.bias.sigmoid.test.f32
ENTRY matmul.bias.sigmoid.test.f32 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
onednn.matmul.0 = f32[32,32,4,32] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[32]{0} constant(5)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={3}
add.0 = f32[32,32,4,32] add(onednn.matmul.0, bcast.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
negate.0 = f32[32,32,4,32] negate(add.0)
exponential.0 = f32[32,32,4,32] exponential(negate.0)
add.1 = f32[32,32,4,32] add(bcast.1, exponential.0)
divide.0 = f32[32,32,4,32] divide(bcast.1, add.1)
tuple.0 =(f32[32,32,4,32]) tuple(divide.0)
ROOT get-tuple-element.0 = f32[32,32,4,32] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_sigmoid_rewrite_str_);
}
// BF16 variant of SIGMOIDTestF32: the dot runs in BF16 and is converted back
// to F32 before the hand-written sigmoid (1 / (1 + exp(-x))) epilogue.
TEST_F(MatmulTest, SIGMOIDTestBF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.sigmoid.test.bf16
ENTRY matmul.bias.sigmoid.test.bf16 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
convert.0 = bf16[32,32,4,16] convert(arg.0)
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
convert.1 = bf16[32,32,16,32] convert(arg.1)
onednn.matmul.0 = bf16[32,32,4,32] dot(convert.0, convert.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
convert.2 = f32[32,32,4,32] convert(onednn.matmul.0)
const.0 = f32[32]{0} constant(5)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={3}
add.0 = f32[32,32,4,32] add(convert.2, bcast.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
negate.0 = f32[32,32,4,32] negate(add.0)
exponential.0 = f32[32,32,4,32] exponential(negate.0)
add.1 = f32[32,32,4,32] add(bcast.1, exponential.0)
divide.0 = f32[32,32,4,32] divide(bcast.1, add.1)
tuple.0 =(f32[32,32,4,32]) tuple(divide.0)
ROOT get-tuple-element.0 = f32[32,32,4,32] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_sigmoid_rewrite_str_);
}
// F16 variant of SIGMOIDTestF32: dot in F16, sigmoid epilogue in F32 after a
// convert, expected to match the fixture's fused bias+sigmoid pattern.
TEST_F(MatmulTest, SIGMOIDTestF16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.sigmoid.test.f16
ENTRY matmul.bias.sigmoid.test.f16 {
arg.0 = f32[32,32,4,16] parameter(0), parameter_replication={false}
convert.0 = f16[32,32,4,16] convert(arg.0)
arg.1 = f32[32,32,16,32] parameter(1), parameter_replication={false}
convert.1 = f16[32,32,16,32] convert(arg.1)
onednn.matmul.0 = f16[32,32,4,32] dot(convert.0, convert.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
convert.2 = f32[32,32,4,32] convert(onednn.matmul.0)
const.0 = f32[32]{0} constant(5)
bcast.0 = f32[32,32,4,32] broadcast(const.0), dimensions={3}
add.0 = f32[32,32,4,32] add(convert.2, bcast.0)
const.1 = f32[] constant(1)
bcast.1 = f32[32,32,4,32] broadcast(const.1), dimensions={}
negate.0 = f32[32,32,4,32] negate(add.0)
exponential.0 = f32[32,32,4,32] exponential(negate.0)
add.1 = f32[32,32,4,32] add(bcast.1, exponential.0)
divide.0 = f32[32,32,4,32] divide(bcast.1, add.1)
tuple.0 =(f32[32,32,4,32]) tuple(divide.0)
ROOT get-tuple-element.0 = f32[32,32,4,32] get-tuple-element(tuple.0), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_sigmoid_rewrite_str_);
}
// BF16 matrix-vector product (GEMV, matrix[1000,10000] x vector[10000]):
// even the rank-1 RHS case should be rewritten to the oneDNN matmul custom
// call matched by the fixture's `matmul_rewrite_str_` pattern.
TEST_F(MatmulTest, SimpleTestBF16Gemv1) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* hlo_text = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[1000,10000] parameter(0)
arg.1 = bf16[10000] parameter(1)
ROOT onednn.matmul.0 = bf16[1000] dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
// Wider absolute tolerance (2e-2) for the long BF16 reduction.
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2e-2, 1e-4}));
MatchOptimizedHlo(hlo_text, matmul_rewrite_str_);
}
// Batched BF16 GEMV: a rank-3 LHS contracted with a rank-1 RHS should also
// be rewritten to the oneDNN matmul custom call.
TEST_F(MatmulTest, SimpleTestBF16Gemv2) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* hlo_text = R"(
HloModule matmul.test.bf16
ENTRY matmul.test.bf16 {
arg.0 = bf16[100,300,300] parameter(0)
arg.1 = bf16[300] parameter(1)
ROOT onednn.matmul.0 = bf16[100,300] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
})";
// Wider absolute tolerance (2e-2) for the BF16 reduction.
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2e-2, 1e-4}));
MatchOptimizedHlo(hlo_text, matmul_rewrite_str_);
}
// Negative test: a dot contracting on dimension 1 of BOTH operands
// (i.e. an implicitly transposed RHS) must be left as a plain HLO dot —
// the CHECK-NOT asserts no oneDNN matmul custom call is produced.
TEST_F(MatmulTest, TestTransposeBNoRewriteF32) {
const char* hlo_text = R"(
HloModule matmul.test.f32
ENTRY matmul.test.f32 {
arg.0 = f32[384,1024]{1,0} parameter(0), parameter_replication={false}
arg.1 = f32[2,1024]{1,0} parameter(1), parameter_replication={false}
ROOT dot.2 = f32[384,2]{1,0} dot(arg.0, arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
})";
// Numeric equivalence, then verify the dot survived optimization unchanged.
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(hlo_text,
R"(
; CHECK: %matmul.test.f32
; CHECK-NOT: custom_call_target="__onednn$matmul",
; CHECK: f32[384,2]{1,0} dot(%arg.0, %arg.1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
)");
}
// Multiply by a broadcast scalar constant followed by an add of another
// broadcast scalar should fuse as LINEAR + BINARY_ADD post-ops. add.1 is a
// dead instruction (not reachable from the tuple root) — presumably there to
// check it does not confuse the rewriter.
TEST_F(MatmulTest, SimpleTestF32WithMulAndAddFusion) {
const char* matmul_module_str = R"(
ENTRY matmul.mul.add.test.f32 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
dot.7 = f32[32,32,40,40] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,40,40] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,40,40] multiply(dot.7,bcast.0)
const.1 = f32[] constant(0.65)
bcast.1 = f32[32,32,40,40] broadcast(const.1), dimensions={}
add.0 = f32[32,32,40,40] add(mul.0, bcast.1)
const.2 = f32[] constant(0.65)
bcast.2 = f32[32,32,40,40] broadcast(const.2), dimensions={}
add.1 = f32[32,32,40,40] add(bcast.2, bcast.1)
tuple.12 = (f32[32,32,40,40]) tuple(add.0)
ROOT get-tuple-element.13 = f32[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR","BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
// tanh applied to a biased batched matmul should match the fixture's fused
// bias+tanh rewrite pattern; the tuple root exercises fusion through a
// tuple/get-tuple-element pair.
TEST_F(MatmulTest, BiasAddTanhFusionTest_F32) {
const char* matmul_module_str = R"(
HloModule matmul.bias.tanh.test.f32
ENTRY matmul.bias.tanh.test.f32 {
arg.0 = f32[32,32,40,30] parameter(0)
arg.1 = f32[32,32,30,40] parameter(1)
dot.2 = f32[32,32,40,40] dot(arg.0, arg.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
const.3 = f32[40] constant(15)
bcast.4 = f32[32,32,40,40] broadcast(const.3), dimensions={3}
add.5 = f32[32,32,40,40] add(dot.2, bcast.4)
tanh.6 = f32[32,32,40,40] tanh(add.5)
tuple.7 = (f32[32,32,40,40]) tuple(tanh.6)
ROOT get-tuple-element.8 = f32[32,32,40,40] get-tuple-element(tuple.7), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_tanh_rewrite_str_);
}
// BF16 bias+tanh fusion: BF16 dot converted to F32 before the bias add and
// tanh; expected to match the fixture's fused bias+tanh pattern.
TEST_F(MatmulTest, BiasAddTanhFusionTest_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.tanh.test.f32
ENTRY matmul.bias.tanh.test.f32 {
arg0.1 = f32[1024,512] parameter(0)
convert.2 = bf16[1024,512] convert(arg0.1)
arg1.3 = f32[256,512] parameter(1)
convert.4 = bf16[256,512] convert(arg1.3)
dot.5 = bf16[1024,256] dot(convert.2, convert.4), lhs_contracting_dims={1}, rhs_contracting_dims={1}
convert.6 = f32[1024,256] convert(dot.5)
arg2.7 = f32[256] parameter(2)
broadcast.8 = f32[1024,256] broadcast(arg2.7), dimensions={1}
add.9 = f32[1024,256] add(convert.6, broadcast.8)
ROOT tanh.10 = f32[1024,256] tanh(add.9)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_tanh_rewrite_str_);
}
// F16 bias+tanh fusion with the whole graph natively in F16.
TEST_F(MatmulTest, BiasAddTanhFusionTest_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.tanh.test.f16
ENTRY matmul.bias.tanh.test.f16 {
arg0.1 = f16[1024,1024] parameter(0)
arg1.2 = f16[1024,1024] parameter(1)
dot.3 = f16[1024,1024] dot(arg1.2, arg0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.4 = f16[1024] parameter(2)
broadcast.5 = f16[1024,1024] broadcast(arg2.4), dimensions={1}
add.6 = f16[1024,1024] add(dot.3, broadcast.5)
ROOT tanh.7 = f16[1024,1024] tanh(add.6)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_tanh_rewrite_str_);
}
// ReLU6 expressed as clamp(0, bias_add, 6) should match the fixture's fused
// bias+relu6 rewrite pattern for F32.
TEST_F(MatmulTest, BiasAddRelu6Fusion_F32) {
const char* matmul_module_str = R"(
HloModule matmul.bias.relu6.test.f32
ENTRY matmul.bias.relu6.test.f32 {
constant.1 = f32[] constant(0)
broadcast.2 = f32[1024,1024] broadcast(constant.1), dimensions={}
arg1.3 = f32[1024,1024] parameter(1)
arg2.4 = f32[1024,1024] parameter(0)
dot.5 = f32[1024,1024] dot(arg1.3, arg2.4), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg3.6 = f32[1024] parameter(2)
broadcast.7 = f32[1024,1024] broadcast(arg3.6), dimensions={1}
add.8 = f32[1024,1024] add(dot.5, broadcast.7)
constant.9 = f32[] constant(6)
broadcast.10 = f32[1024,1024] broadcast(constant.9), dimensions={}
ROOT clamp.11 = f32[1024,1024] clamp(broadcast.2, add.8, broadcast.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_relu6_rewrite_str_);
}
// BF16 ReLU6 fusion: clamp(0, bias_add, 6) on an F32 epilogue following a
// BF16 dot, expected to match the fixture's fused bias+relu6 pattern.
TEST_F(MatmulTest, BiasAddRelu6Fusion_BF16) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.relu6.test.bf16
ENTRY matmul.bias.relu6.test.bf16 {
constant.1 = f32[] constant(0)
broadcast.2 = f32[1024,256] broadcast(constant.1), dimensions={}
arg0.3 = f32[1024,512] parameter(0)
convert.4 = bf16[1024,512] convert(arg0.3)
arg1.5 = f32[256,512] parameter(1)
convert.6 = bf16[256,512] convert(arg1.5)
dot.7 = bf16[1024,256] dot(convert.4, convert.6), lhs_contracting_dims={1}, rhs_contracting_dims={1}
convert.8 = f32[1024,256] convert(dot.7)
arg2.9 = f32[256] parameter(2)
broadcast.10 = f32[1024,256] broadcast(arg2.9), dimensions={1}
add.11 = f32[1024,256] add(convert.8, broadcast.10)
constant.12 = f32[] constant(6)
broadcast.13 = f32[1024,256] broadcast(constant.12), dimensions={}
ROOT clamp.14 = f32[1024,256] clamp(broadcast.2, add.11, broadcast.13)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_relu6_rewrite_str_);
}
// F16 ReLU6 fusion with the whole graph natively in F16.
TEST_F(MatmulTest, BiasAddRelu6Fusion_F16) {
if (!IsSupportedType(PrimitiveType::F16)) {
GTEST_SKIP() << "CPU does not support F16.";
}
const char* matmul_module_str = R"(
HloModule matmul.bias.relu6.test.f16
ENTRY matmul.bias.relu6.test.f16 {
constant.1 = f16[] constant(0)
broadcast.2 = f16[1024,1024] broadcast(constant.1), dimensions={}
arg0.3 = f16[1024,1024] parameter(0)
arg1.4 = f16[1024,1024] parameter(1)
dot.5 = f16[1024,1024] dot(arg1.4, arg0.3), lhs_contracting_dims={1}, rhs_contracting_dims={0}
arg2.6 = f16[1024] parameter(2)
broadcast.7 = f16[1024,1024] broadcast(arg2.6), dimensions={1}
add.8 = f16[1024,1024] add(dot.5, broadcast.7)
constant.9 = f16[] constant(6)
broadcast.10 = f16[1024,1024] broadcast(constant.9), dimensions={}
ROOT clamp.11 = f16[1024,1024] clamp(broadcast.2, add.8, broadcast.10)
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str, fused_matmul_bias_relu6_rewrite_str_);
}
// BF16 version of SimpleTestF32WithMulAndAddFusion: scalar multiply + scalar
// add around bf16<->f32 converts should still fuse as LINEAR + BINARY_ADD
// post-ops on the oneDNN matmul custom call.
TEST_F(MatmulTest, SimpleTestBF16WithMulAndAddFusion) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const char* matmul_module_str = R"(
ENTRY matmul.mul.add.test.bf16 {
arg0.1 = f32[32,32,40,30] parameter(0), parameter_replication={false}
convert0 = bf16[32,32,40,30] convert(arg0.1)
arg0.2 = f32[32,32,30,40] parameter(1), parameter_replication={false}
convert1 = bf16[32,32,30,40] convert(arg0.2)
dot.7 = bf16[32,32,40,40] dot(convert0, convert1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
convert2 = f32[32,32,40,40] convert(dot.7)
const.0 = f32[] constant(0.044715)
bcast.0 = f32[32,32,40,40] broadcast(const.0), dimensions={}
mul.0 = f32[32,32,40,40] multiply(convert2,bcast.0)
const.1 = f32[] constant(0.65)
bcast.1 = f32[32,32,40,40] broadcast(const.1), dimensions={}
add.0 = f32[32,32,40,40] add(mul.0, bcast.1)
convert3 = bf16[32,32,40,40] convert(add.0)
tuple.12 = (bf16[32,32,40,40]) tuple(convert3)
ROOT get-tuple-element.13 = bf16[32,32,40,40] get-tuple-element(tuple.12), index=0
})";
EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-2, 1e-2}));
MatchOptimizedHlo(matmul_module_str,
R"(
; CHECK: custom_call_target="__onednn$matmul",
; CHECK: backend_config={
; CHECK-DAG: "outer_dimension_partitions":[],
; CHECK-DAG: "onednn_matmul_config":{
; CHECK-DAG: "fusions":{
; CHECK-DAG: "ops":["LINEAR","BINARY_ADD"]
; CHECK-DAG: }
; CHECK-DAG: }
; CHECK: }
)");
}
// Checks that when the RHS of the dot is a compile-time constant (a broadcast
// of a scalar constant here), the rewriter enables weight pre-packing and a
// user-provided scratchpad in the oneDNN matmul backend config.
TEST_F(MatmulTest, WeightsPrepackAndScratch) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
    arg.0 = f32[64,256,16] parameter(0), parameter_replication={false}
    constant = f32[] constant(1)
    arg.1 = f32[16,32] broadcast(constant), dimensions={}
    ROOT onednn.matmul.0 = f32[64,256,32] dot(arg.0, arg.1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
  MatchOptimizedHlo(matmul_module_str,
                    R"(
  ; CHECK:        %matmul.test.f32
  ; CHECK:          custom_call_target="__onednn$matmul",
  ; CHECK-SAME:       backend_config={
  ; CHECK-SAME:         "outer_dimension_partitions":[],
  ; CHECK-SAME:         "onednn_matmul_config":{
  ; CHECK-SAME:           "weights_prepacked":true,"user_scratchpad":true
  ; CHECK-SAME:         }
  ; CHECK-SAME:     }
  )");
}
// Checks that a BF16 dot whose LHS carries a column-major layout ({0,1})
// before layout assignment is still rewritten to the oneDNN matmul custom
// call (returning the result plus a scratch buffer).
// NOTE(review): transpose.0 is dead — the dot consumes arg.1 directly, not
// the transpose. Presumably intentional to exercise layout handling; confirm.
TEST_F(MatmulTest, ColMajorBF16DotBeforeLayoutAssignment) {
  // Skip on CPUs without BF16 support.
  if (!IsSupportedType(PrimitiveType::BF16)) {
    GTEST_SKIP() << "CPU does not support BF16.";
  }
  const char* matmul_module_str = R"(
  HloModule matmul.colmajor.test
  ENTRY matmul.colmajor.test.bf16 {
    arg.0 = bf16[500,500]{0,1} parameter(0)
    arg.1 = bf16[500,500]{1,0} parameter(1)
    transpose.0 = bf16[500,500]{0,1} transpose(arg.1), dimensions={1,0}
    ROOT dot.0 = bf16[500,500]{1,0} dot(arg.0, arg.1), lhs_contracting_dims={1},
      rhs_contracting_dims={0}
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-2, 1e-2)));
  MatchOptimizedHlo(matmul_module_str,
                    R"(
  ; CHECK:     (bf16[500,500]{1,0}, u8[{{.*}}]{0})
  ; CHECK-SAME:  custom_call_target="__onednn$matmul"
  )");
}
// Numerical-only test: a batched dot followed by two consecutive adds of
// broadcast constants (candidates for two chained BINARY_ADD fusions). Only
// verifies that the rewritten module still computes the right values; no
// structural check on the optimized HLO.
TEST_F(MatmulTest, ConsecutiveBinaryAdd) {
  const char* matmul_module_str = R"(
  HloModule matmul.test.f32
  ENTRY matmul.test.f32 {
    arg0.1 = f32[128,32,4,4] parameter(0)
    arg0.2 = f32[128,32,4,4] parameter(1)
    dot.7 = f32[128,32,4,4] dot(arg0.1, arg0.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
    const.0 = f32[128,32] constant({...})
    bcast.1 = f32[128,32,4,4] broadcast(const.0), dimensions={0,1}
    add.0 = f32[128,32,4,4] add(dot.7,bcast.1)
    const.1 = f32[4] constant({1,2,3,4})
    bcast.2 = f32[128,32,4,4] broadcast(const.1), dimensions={3}
    add.1 = f32[128,32,4,4] add(add.0, bcast.2)
    tuple.12 = (f32[128,32,4,4]) tuple(add.1)
    ROOT get-tuple-element.13 = f32[128,32,4,4] get-tuple-element(tuple.12), index=0
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec{1e-4, 1e-4}));
}
// Checks that after fusing the scalar multiply as a LINEAR epilogue, the
// trailing add of a NON-scalar broadcast (per-column vector) is left outside
// the custom call: the fusions list must contain only "LINEAR".
TEST_F(MatmulTest, BroadcastedAddAfterFusion) {
  const char* matmul_module_str = R"(
  HloModule matmul.nonscalar.test
  ENTRY matmul.nonscalar.test.f32 {
    arg.0 = f32[16,400,500] parameter(0)
    arg.1 = f32[16,500,3] parameter(1)
    onednn.matmul.0 = f32[16,400,3] dot(arg.0, arg.1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1}
    constant.0 = f32[] constant(6)
    broadcast.0 = f32[16,400,3] broadcast(constant.0), dimensions={}
    mult.0 = f32[16,400,3] multiply(onednn.matmul.0, broadcast.0)
    constant.1 = f32[3]{0} constant({0.625, 0.875, 0.375})
    broadcast.2 = f32[16,400,3] broadcast(constant.1), dimensions={2}
    ROOT add.0 = f32[16,400,3] add(mult.0, broadcast.2)
  })";
  EXPECT_TRUE(RunAndCompare(matmul_module_str, ErrorSpec(1e-4, 1e-4)));
  MatchOptimizedHlo(matmul_module_str,
                    R"(
  ; CHECK:     custom_call_target="__onednn$matmul",
  ; CHECK:       backend_config={
  ; CHECK-DAG:     "outer_dimension_partitions":[],
  ; CHECK-DAG:     "onednn_matmul_config":{
  ; CHECK-DAG:       "fusions":{
  ; CHECK-DAG:         "ops":["LINEAR"]
  ; CHECK-DAG:     }
  ; CHECK-DAG:   }
  ; CHECK:     }
  )");
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_matmul.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_matmul_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7d1d8932-f15f-4fd7-a669-a3307f9aec2f | cpp | tensorflow/tensorflow | while_loop_concat_code_motion | third_party/xla/xla/service/while_loop_concat_code_motion.cc | third_party/xla/xla/service/while_loop_concat_code_motion_test.cc | #include "xla/service/while_loop_concat_code_motion.h"
#include <map>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
// A group of HLO instructions that the pass will represent as one combined
// (concatenated) instruction inside the while loop.
//
// `concat_dim` is the dimension of the combined shape along which elements
// are laid out. If `inserted_concat_dim` is true, the combined shape has a
// NEW dimension of size elements.size() inserted at `concat_dim` (each
// element occupies one slice of it); otherwise the elements' existing
// `concat_dim` extents are concatenated.
struct ConcatGroup {
  ConcatGroup(std::vector<HloInstruction*> elements, int64_t concat_dim,
              bool inserted_concat_dim)
      : elements(std::move(elements)),
        element_sizes(this->elements.size(), 1),
        element_offsets(this->elements.size(), 0),
        concat_dim(concat_dim),
        inserted_concat_dim(inserted_concat_dim) {
    if (inserted_concat_dim) {
      // Each element contributes a size-1 slice: offsets are 0, 1, ..., n-1.
      absl::c_iota(element_offsets, 0);
    } else {
      // Sizes/offsets follow each element's own extent along concat_dim.
      for (int64_t i = 0; i < element_sizes.size(); ++i) {
        element_sizes[i] = this->elements[i]->shape().dimensions(concat_dim);
        if (i > 0) {
          element_offsets[i] = element_offsets[i - 1] + element_sizes[i - 1];
        }
      }
    }
  }

  // Returns the shape of the combined instruction.
  Shape GetConcatShape() const {
    if (inserted_concat_dim) {
      // Insert a new dimension of size elements.size() at concat_dim.
      std::vector<int64_t> dims;
      const Shape& element_shape = elements.back()->shape();
      dims.reserve(element_shape.rank() + 1);
      for (int64_t i = 0; i < element_shape.rank(); ++i) {
        if (i == concat_dim) {
          dims.push_back(elements.size());
        }
        dims.push_back(element_shape.dimensions(i));
      }
      // The inserted dimension may be appended past the last existing one.
      if (dims.size() == concat_dim) {
        dims.push_back(elements.size());
      }
      return ShapeUtil::MakeShape(element_shape.element_type(), dims);
    } else {
      // Sum of the elements' extents along the existing concat_dim.
      int64_t dim_size = 0;
      for (int64_t size : element_sizes) {
        dim_size += size;
      }
      Shape shape = elements.back()->shape();
      shape.set_dimensions(concat_dim, dim_size);
      return shape;
    }
  }

  // Extracts element `element_index` from `full_data` (which has the combined
  // shape) via a slice along concat_dim; when the concat dimension was
  // inserted, the size-1 dimension is also reshaped away.
  HloInstruction* CreateSlice(HloInstruction* full_data, int64_t element_index,
                              HloComputation* comp) const {
    Shape shape = full_data->shape();
    shape.set_dimensions(concat_dim, element_sizes[element_index]);
    std::vector<int64_t> starts(shape.rank(), 0);
    std::vector<int64_t> limits(shape.dimensions().begin(),
                                shape.dimensions().end());
    starts[concat_dim] = element_offsets[element_index];
    limits[concat_dim] += starts[concat_dim];
    auto slice = comp->AddInstruction(
        HloInstruction::CreateSlice(shape, full_data, starts, limits,
                                    std::vector<int64_t>(shape.rank(), 1)));
    if (!inserted_concat_dim) {
      return slice;
    }
    // Drop the (size-1) inserted concat dimension.
    std::vector<int64_t> element_shape;
    element_shape.reserve(shape.rank() - 1);
    for (int64_t i = 0; i < shape.rank(); ++i) {
      if (i != concat_dim) {
        element_shape.push_back(shape.dimensions(i));
      }
    }
    return comp->AddInstruction(HloInstruction::CreateReshape(
        ShapeUtil::MakeShape(shape.element_type(), element_shape), slice));
  }

  // Builds the combined instruction by concatenating `input_elements`. When
  // the concat dimension is inserted, each input is first reshaped to carry a
  // size-1 dimension at concat_dim so the concatenate is well-formed.
  HloInstruction* CreateConcat(std::vector<HloInstruction*> input_elements,
                               HloComputation* comp) const {
    if (inserted_concat_dim) {
      for (int64_t i = 0; i < input_elements.size(); ++i) {
        std::vector<int64_t> element_shape;
        element_shape.reserve(input_elements[i]->shape().rank() + 1);
        for (int64_t j = 0; j < input_elements[i]->shape().rank(); ++j) {
          if (j == concat_dim) {
            element_shape.push_back(1);
          }
          element_shape.push_back(input_elements[i]->shape().dimensions(j));
        }
        // The inserted dimension may be appended past the last existing one.
        if (element_shape.size() == concat_dim) {
          element_shape.push_back(1);
        }
        input_elements[i] = comp->AddInstruction(HloInstruction::CreateReshape(
            ShapeUtil::MakeShape(input_elements[i]->shape().element_type(),
                                 element_shape),
            input_elements[i]));
      }
    }
    return comp->AddInstruction(HloInstruction::CreateConcatenate(
        GetConcatShape(), input_elements, concat_dim));
  }

  std::vector<HloInstruction*> elements;
  // Per-element extent along concat_dim in the combined shape (all 1 when the
  // concat dimension is inserted).
  std::vector<int64_t> element_sizes;
  // Per-element starting offset along concat_dim in the combined shape.
  std::vector<int64_t> element_offsets;
  int64_t concat_dim;
  bool inserted_concat_dim;
};
// Registry of ConcatGroups. Tracks, for every grouped instruction, which
// group it belongs to and its position within that group, and records
// instructions on which grouping has been explicitly disallowed.
class ConcatGroups {
 public:
  // Returns (group index, element index within the group) for `hlo`, or
  // nullopt if `hlo` is not part of any group.
  std::optional<std::pair<int64_t, int64_t>> GetGroupIndex(
      const HloInstruction* hlo) const {
    auto it = element_to_group_.find(hlo);
    if (it == element_to_group_.end()) {
      return std::nullopt;
    }
    return it->second;
  }

  const ConcatGroup& GetGroup(int64_t index) const { return groups_[index]; }

  // Registers `group` if it is new and consistent with existing groups.
  // Returns (whether a new group was created, group index). The index is -1
  // when grouping failed: an element is disallowed, or the elements conflict
  // with an existing group (different position, concat dim, or membership).
  // If the exact group already exists, returns (false, existing index).
  std::pair<bool, int64_t> MaybeCreateNewGroup(ConcatGroup group) {
    int64_t group_id = -1;
    absl::flat_hash_set<HloInstruction*> elements_dedup;
    for (int64_t i = 0; i < group.elements.size(); ++i) {
      if (!elements_dedup.insert(group.elements[i]).second) {
        VLOG(2) << "Duplicates in group. Element: "
                << group.elements[i]->ToString();
      }
      if (concat_disallowed_.contains(group.elements[i])) {
        VLOG(2) << "Failed creating group. Grouping disallowed on "
                << group.elements[i]->ToString();
        return std::pair<bool, int64_t>(false, -1);
      }
      auto existing = GetGroupIndex(group.elements[i]);
      if (existing.has_value() &&
          (i != existing->second ||
           groups_[existing->first].concat_dim != group.concat_dim)) {
        // Element already grouped, but at a different position or dim.
        VLOG(2)
            << "Failed creating group. Different than existing group. Element: "
            << group.elements[i]->ToString();
        return std::pair<bool, int64_t>(false, -1);
      }
      if (i == 0 && existing.has_value()) {
        group_id = existing->first;
      }
      if (i > 0) {
        // All elements must agree on (non-)membership in the same group.
        if (existing.has_value() && existing->first != group_id) {
          VLOG(2) << "Failed creating group. Different than existing group. "
                     "Element: "
                  << group.elements[i]->ToString();
          return std::pair<bool, int64_t>(false, -1);
        }
        if (!existing.has_value() && group_id >= 0) {
          VLOG(2) << "Failed creating group. Different than existing group. "
                     "Element: "
                  << group.elements[i]->ToString();
          return std::pair<bool, int64_t>(false, -1);
        }
      }
    }
    if (group_id >= 0) {
      VLOG(2) << "Group already exists at " << group_id << " for "
              << group.elements[0]->ToString();
      return std::pair<bool, int64_t>(false, group_id);
    }
    int64_t index = groups_.size();
    for (int64_t i = 0; i < group.elements.size(); ++i) {
      element_to_group_[group.elements[i]] =
          std::pair<int64_t, int64_t>(index, i);
    }
    VLOG(2) << "Created new group at " << index << " for "
            << group.elements[0]->ToString()
            << ", concat_dim: " << group.concat_dim
            << ", inserted: " << group.inserted_concat_dim;
    groups_.push_back(std::move(group));
    return std::pair<bool, int64_t>(true, index);
  }

  const std::vector<ConcatGroup>& Groups() const { return groups_; }

  // Index the next created group would receive; used by callers to roll back
  // groups created during a failed grouping attempt.
  int64_t NextGroupIndex() const { return groups_.size(); }

  // Removes all groups with index >= start_index (rollback support).
  void RemoveTailingGroups(int64_t start_index) {
    while (groups_.size() > start_index) {
      for (auto element : groups_.back().elements) {
        element_to_group_.erase(element);
      }
      groups_.pop_back();
    }
  }

  // Marks `hlo` as never groupable (e.g. shared operands, cond-used GTEs).
  void DisallowGroupingOn(const HloInstruction* hlo) {
    VLOG(2) << "Disallow grouping on " << hlo->ToString();
    concat_disallowed_.insert(hlo);
  }

 private:
  // Maps each grouped instruction to (group index, index within group).
  absl::flat_hash_map<const HloInstruction*, std::pair<int64_t, int64_t>>
      element_to_group_;
  std::vector<ConcatGroup> groups_;
  absl::flat_hash_set<const HloInstruction*> concat_disallowed_;
};
// Maps the concat dimension on `hlo`'s OUTPUT (hlo_concat_dim,
// hlo_inserted_concat_dim) to the corresponding concat dimension on its
// operand `operand_index`. Returns (operand_concat_dim,
// operand_inserted_concat_dim), or nullopt when the dimension cannot be
// traced through this op. When `combined_operand_group` is given, the
// operand's combined (group) shape is used instead of its own shape.
std::optional<std::pair<int64_t, bool>> GetOperandConcatDim(
    const HloInstruction* hlo, int64_t operand_index, int64_t hlo_concat_dim,
    bool hlo_inserted_concat_dim,
    const ConcatGroup* combined_operand_group = nullptr) {
  if (hlo->IsElementwise() || hlo->opcode() == HloOpcode::kAllReduce) {
    // Elementwise ops and all-reduce keep operand dims aligned 1:1 with the
    // output, so the concat dim passes through unchanged.
    return std::pair<int64_t, bool>(hlo_concat_dim, hlo_inserted_concat_dim);
  }
  int64_t operand_concat_dim = -1;
  bool operand_inserted_concat_dim = false;
  const Shape& operand_shape =
      combined_operand_group == nullptr
          ? hlo->operand(operand_index)->shape()
          : combined_operand_group->elements.back()->shape();
  if (hlo->opcode() == HloOpcode::kBroadcast) {
    operand_concat_dim = 0;
    operand_inserted_concat_dim = true;
    // If an operand dim maps exactly onto the output concat dim, use it;
    // otherwise insert a new operand dim at the position closest (in output
    // dimension distance) to the concat dim.
    int64_t min_dist_to_concat_dim = hlo->shape().rank();
    for (int64_t i = 0; i < operand_shape.rank(); ++i) {
      if (hlo->dimensions(i) == hlo_concat_dim) {
        operand_concat_dim = i;
        operand_inserted_concat_dim = hlo_inserted_concat_dim;
        break;
      }
      if (hlo->dimensions(i) < hlo_concat_dim &&
          min_dist_to_concat_dim > hlo_concat_dim - hlo->dimensions(i)) {
        operand_concat_dim = i + 1;
        min_dist_to_concat_dim = hlo_concat_dim - hlo->dimensions(i);
      }
      if (hlo->dimensions(i) > hlo_concat_dim &&
          min_dist_to_concat_dim > hlo->dimensions(i) - hlo_concat_dim) {
        operand_concat_dim = i;
        min_dist_to_concat_dim = hlo->dimensions(i) - hlo_concat_dim;
      }
    }
  } else if (hlo->opcode() == HloOpcode::kReduce) {
    if (operand_index != 0) {
      // Only the data operand (not the init value) is supported.
      return std::nullopt;
    }
    operand_concat_dim = hlo_concat_dim;
    operand_inserted_concat_dim = hlo_inserted_concat_dim;
    // Shift the concat dim up by one for every reduced dim at or before it.
    std::set<int64_t> sorted_reduce_dims;
    for (int64_t dim : hlo->dimensions()) {
      sorted_reduce_dims.insert(dim);
    }
    for (int64_t dim : sorted_reduce_dims) {
      if ((hlo_inserted_concat_dim && dim < operand_concat_dim) ||
          (!hlo_inserted_concat_dim && dim <= operand_concat_dim)) {
        operand_concat_dim++;
      }
    }
  } else if (hlo->opcode() == HloOpcode::kReshape) {
    // Walk operand dims (i) and output dims (j) in lockstep, matching equal
    // extents and skipping over size-1 dims, to locate the operand dim that
    // corresponds to the output concat dim.
    int64_t i = 0;
    int64_t j = 0;
    operand_inserted_concat_dim = false;
    while (i < operand_shape.rank() || j <= hlo_concat_dim) {
      if (i < operand_shape.rank() && j < hlo->shape().rank() &&
          operand_shape.dimensions(i) == hlo->shape().dimensions(j)) {
        if (j == hlo_concat_dim) {
          operand_inserted_concat_dim =
              hlo_inserted_concat_dim && operand_shape.dimensions(i) != 1;
          operand_concat_dim = i;
          break;
        }
        i++;
        j++;
        continue;
      }
      if (i < operand_shape.rank() && operand_shape.dimensions(i) == 1) {
        if (j == hlo_concat_dim && hlo_inserted_concat_dim) {
          operand_concat_dim = i;
          break;
        }
        i++;
        continue;
      }
      if (j == hlo_concat_dim) {
        operand_concat_dim = i;
        operand_inserted_concat_dim = true;
        break;
      }
      if (j < hlo->shape().rank() && hlo->shape().dimensions(j) == 1) {
        j++;
        continue;
      }
      // General reshapes (splits/merges of non-trivial dims) unsupported.
      return std::nullopt;
    }
  } else {
    return std::nullopt;
  }
  CHECK_GE(operand_concat_dim, 0);
  return std::pair<int64_t, bool>(operand_concat_dim,
                                  operand_inserted_concat_dim);
}
// Rewrites `hlo` in place so that it operates on the combined shape of
// `group`: installs the concat shape, and for opcode-specific attributes
// (broadcast dimension mapping, reduce dimensions) recomputes them to account
// for the (possibly inserted) concat dimension.
void ModifyHloPropertiesForConcatShape(const ConcatGroup& group,
                                       HloInstruction* hlo) {
  *hlo->mutable_shape() = group.GetConcatShape();
  if (hlo->opcode() == HloOpcode::kBroadcast) {
    // Locate the concat dimension on the broadcast's operand.
    auto operand_dim = GetOperandConcatDim(
        group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);
    CHECK(operand_dim.has_value());
    int64_t operand_concat_dim = operand_dim->first;
    bool operand_inserted_concat_dim = operand_dim->second;
    if (operand_inserted_concat_dim) {
      CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size() + 1)
          << hlo->ToString();
    } else {
      CHECK_EQ(hlo->operand(0)->shape().rank(), hlo->dimensions().size());
    }
    // Rebuild the operand->output dimension mapping for the combined shape.
    std::vector<int64_t> dims;
    const int64_t rank = hlo->operand(0)->shape().rank();
    dims.reserve(rank);
    for (int64_t i = 0; i < rank; ++i) {
      if (i == operand_concat_dim && operand_inserted_concat_dim) {
        // The inserted operand dim maps straight onto the output concat dim.
        dims.push_back(group.concat_dim);
      } else {
        if (i > operand_concat_dim && operand_inserted_concat_dim) {
          dims.push_back(hlo->dimensions(i - 1));
        } else {
          dims.push_back(hlo->dimensions(i));
        }
        if (group.inserted_concat_dim && dims.back() >= group.concat_dim) {
          // Make room for the newly inserted output dimension.
          dims.back()++;
        }
      }
    }
    *hlo->mutable_dimensions() = std::move(dims);
  } else if (hlo->opcode() == HloOpcode::kReduce) {
    auto operand_dim = GetOperandConcatDim(
        group.elements.back(), 0, group.concat_dim, group.inserted_concat_dim);
    // Fix: validate the optional BEFORE dereferencing it. The original code
    // read operand_dim->first/->second first and CHECKed has_value() after,
    // which is undefined behavior when the optional is empty.
    CHECK(operand_dim.has_value());
    int64_t operand_concat_dim = operand_dim->first;
    bool operand_inserted_concat_dim = operand_dim->second;
    if (operand_inserted_concat_dim) {
      // Reduced dims at or after the inserted operand dim shift up by one.
      auto dims = hlo->mutable_dimensions();
      for (int64_t i = 0; i < dims->size(); ++i) {
        if ((*dims)[i] >= operand_concat_dim) {
          (*dims)[i]++;
        }
      }
    }
  }
}
// Starting from `concat` (a concatenate in the while body), tries to group
// its operands and, transitively, the producers feeding them (and the loop
// outputs feeding grouped parameter GTEs), so that the whole subcomputation
// can later be rewritten to run on the concatenated shape. The worklist is
// ordered by reverse topological order so consumers are processed before
// their producers. On any unsupported pattern, every group created by this
// invocation is rolled back and false is returned.
// NOTE(review): `topological_order` is taken by value, copying the whole
// hash map per call; a const reference would avoid the copy — confirm no
// caller relies on the copy.
bool GroupHlosForConcat(
    HloComputation* body, HloInstruction* concat,
    absl::flat_hash_map<const HloInstruction*, int64_t> topological_order,
    ConcatGroups* groups) {
  const int64_t group_size = concat->operand_count();
  absl::flat_hash_set<int64_t> used_groups;
  auto root_tuple = body->root_instruction();
  CHECK_EQ(root_tuple->opcode(), HloOpcode::kTuple);
  absl::flat_hash_map<HloInstruction*, int64_t> root_tuple_element_use_count;
  for (auto operand : root_tuple->operands()) {
    root_tuple_element_use_count.emplace(operand, 0).first->second++;
  }
  // Worklist keyed by negated topological order: later instructions first.
  std::multimap<int64_t, ConcatGroup> pq;
  const int64_t first_group_id_to_create = groups->NextGroupIndex();
  // Rolls back all groups created during this call and reports failure.
  auto fail_and_cleanup = [&] {
    VLOG(1) << "Failed to get the subcomputation to optimize for "
            << concat->ToString() << ", clear groups starting at "
            << first_group_id_to_create;
    groups->RemoveTailingGroups(first_group_id_to_create);
    return false;
  };
  struct GroupUse {
    int64_t group_id;               // -1 on failure.
    bool newly_created;             // True if a new group was registered.
    bool already_used_by_subcomp;   // True if this call saw the group before.
  };
  auto maybe_create_group = [&](ConcatGroup group) {
    auto res = groups->MaybeCreateNewGroup(std::move(group));
    GroupUse use{res.second, false, false};
    if (res.second < 0) {
      return use;
    }
    use.newly_created = res.first;
    use.already_used_by_subcomp = !used_groups.insert(res.second).second;
    return use;
  };
  // Seed the worklist with the concat's own operands.
  std::vector<HloInstruction*> concat_operands(concat->operands().begin(),
                                               concat->operands().end());
  int64_t concat_operand_order = -topological_order[concat_operands[0]];
  pq.emplace(concat_operand_order,
             ConcatGroup(std::move(concat_operands),
                         concat->concatenate_dimension(), false));
  while (!pq.empty()) {
    auto group = std::move(pq.begin()->second);
    pq.erase(pq.begin());
    const auto& hlos = group.elements;
    VLOG(2) << "GroupHlosForConcat dequeued " << hlos[0]->ToString();
    bool group_is_param_gtes = false;
    if (absl::c_all_of(hlos, [&](const HloInstruction* element) {
          return element == hlos[0];
        })) {
      // All elements are the same instruction: a shared operand. It stays
      // ungrouped (it will be broadcast during rewriting instead).
      if (groups->GetGroupIndex(hlos[0]).has_value()) {
        VLOG(1) << "We do not support the case if a shared operand also part "
                   "of a group: "
                << hlos[0]->ToString();
        return fail_and_cleanup();
      }
      groups->DisallowGroupingOn(hlos[0]);
      continue;
    }
    if (absl::c_all_of(hlos, [&](const HloInstruction* element) {
          return element->opcode() == HloOpcode::kGetTupleElement &&
                 element->operand(0) == body->parameter_instruction(0);
        })) {
      // All elements are GTEs of the loop parameter: a loop-carried group.
      group_is_param_gtes = true;
    } else if (((hlos[0]->IsElementwise() ||
                 hlos[0]->opcode() == HloOpcode::kAllReduce) &&
                !hlos[0]->HasSideEffect()) ||
               hlos[0]->opcode() == HloOpcode::kBroadcast ||
               hlos[0]->opcode() == HloOpcode::kReduce ||
               hlos[0]->opcode() == HloOpcode::kReshape ||
               hlos[0]->IsCustomCall("Sharding")) {
      if (hlos[0]->opcode() == HloOpcode::kAllReduce &&
          (!hlos[0]->shape().IsArray() || hlos[0]->IsCrossModuleAllReduce())) {
        VLOG(2) << "Unsupported allreduce: " << hlos[0]->ToString();
        return fail_and_cleanup();
      }
      // All elements must be structurally identical ops (same opcode,
      // compatible shapes, equal subcomputations; reduces share an init).
      if (absl::c_any_of(hlos, [&](const HloInstruction* element) {
            auto eq_operand = [](const HloInstruction* a,
                                 const HloInstruction* b) {
              return ShapeUtil::Compatible(a->shape(), b->shape());
            };
            auto eq_computations = [](const HloComputation* lhs,
                                      const HloComputation* rhs) {
              return lhs->Equal(*rhs, false);
            };
            if (!hlos[0]->Identical(*element, eq_operand, eq_computations,
                                    false)) {
              return true;
            }
            if (element->opcode() == HloOpcode::kReduce &&
                (element->operand_count() != 2 ||
                 element->operand(1) != hlos[0]->operand(1))) {
              return true;
            }
            return false;
          })) {
        VLOG(2) << "Different types of elements. First element: "
                << hlos[0]->ToString();
        return fail_and_cleanup();
      }
      // Enqueue operand groups (skipping a reduce's init-value operand).
      int64_t input_count = hlos[0]->operand_count();
      if (hlos[0]->opcode() == HloOpcode::kReduce) {
        CHECK_EQ(input_count, 2);
        input_count = 1;
      }
      for (int64_t i = 0; i < input_count; ++i) {
        std::vector<HloInstruction*> elements(group_size);
        for (int64_t j = 0; j < group_size; ++j) {
          elements[j] = hlos[j]->mutable_operand(i);
        }
        auto maybe_new_concat_dim = GetOperandConcatDim(
            hlos[0], i, group.concat_dim, group.inserted_concat_dim);
        if (!maybe_new_concat_dim.has_value()) {
          VLOG(2) << "Cannot find operand concat dimension for operand " << i
                  << " of " << hlos[0]->ToString();
          return fail_and_cleanup();
        }
        int64_t new_group_concat_dim = maybe_new_concat_dim->first;
        bool inserted_concat_dim = maybe_new_concat_dim->second;
        int64_t element_order = -topological_order[elements[0]];
        pq.emplace(element_order,
                   ConcatGroup(std::move(elements), new_group_concat_dim,
                               inserted_concat_dim));
      }
    } else if (hlos[0]->opcode() == HloOpcode::kSlice) {
      // Slices are only supported when together they exactly tile the shared
      // operand along the concat dim, in order, with stride 1 and full
      // extents on every other dim.
      int64_t offset = 0;
      auto operand = hlos[0]->operand(0);
      if (group.inserted_concat_dim) {
        VLOG(2) << "Slices cannot be grouped on new dimension.";
        return fail_and_cleanup();
      }
      if (groups->GetGroupIndex(operand).has_value()) {
        return fail_and_cleanup();
      }
      groups->DisallowGroupingOn(operand);
      for (int64_t i = 0; i < group_size; ++i) {
        if (hlos[i]->operand(0) != operand) {
          VLOG(2) << "Slices of different operands.";
          return fail_and_cleanup();
        }
        for (int64_t j = 0; j < hlos[i]->shape().rank(); ++j) {
          if (hlos[i]->slice_strides(j) != 1) {
            VLOG(2) << "Slices with strides.";
            return fail_and_cleanup();
          }
          if (j == group.concat_dim) {
            if (hlos[i]->slice_starts(j) != offset) {
              VLOG(2) << "Slices with unsupported offsets.";
              return fail_and_cleanup();
            }
            offset += hlos[i]->shape().dimensions(j);
          } else {
            if (hlos[i]->slice_starts(j) != 0 ||
                hlos[i]->slice_limits(j) != operand->shape().dimensions(j)) {
              VLOG(2) << "Slice with unsupported offsets at dimension " << j
                      << ", " << hlos[i]->ToString();
              return fail_and_cleanup();
            }
          }
        }
      }
      if (offset != operand->shape().dimensions(group.concat_dim)) {
        VLOG(2) << "Slices with unsupported sizes.";
        return fail_and_cleanup();
      }
    } else {
      VLOG(2) << "Unsupported opcode: " << hlos[0]->ToString();
      return fail_and_cleanup();
    }
    auto guse = maybe_create_group(std::move(group));
    if (guse.group_id < 0) {
      VLOG(2) << "Failed to create group.";
      return fail_and_cleanup();
    }
    const auto& registered_group = groups->GetGroup(guse.group_id);
    if (!guse.already_used_by_subcomp && group_is_param_gtes) {
      // For a loop-carried group, also group the matching loop outputs so the
      // carried value stays concatenated across iterations.
      std::vector<HloInstruction*> new_outputs(group_size);
      for (int64_t i = 0; i < group_size; ++i) {
        new_outputs[i] = root_tuple->mutable_operand(
            registered_group.elements[i]->tuple_index());
      }
      int64_t new_output_order = -topological_order[new_outputs[0]];
      pq.emplace(
          new_output_order,
          ConcatGroup(std::move(new_outputs), registered_group.concat_dim,
                      registered_group.inserted_concat_dim));
    }
  }
  return groups->Groups().size() > first_group_id_to_create;
}
// Returns a mask over `loop`'s tuple elements marking which ones are read by
// the while condition. If the condition's parameter is consumed by anything
// other than a get-tuple-element, every element is conservatively marked.
std::vector<bool> TupleElementsUsedInCond(HloInstruction* loop) {
  const int64_t tuple_size = loop->shape().tuple_shapes_size();
  std::vector<bool> used_in_cond(tuple_size, false);
  HloInstruction* cond_param =
      loop->while_condition()->parameter_instruction(0);
  for (HloInstruction* user : cond_param->users()) {
    if (user->opcode() == HloOpcode::kGetTupleElement) {
      used_in_cond[user->tuple_index()] = true;
      continue;
    }
    // Opaque use of the whole parameter: assume everything is used.
    return std::vector<bool>(tuple_size, true);
  }
  return used_in_cond;
}
// Inserts a kCopy between each array-shaped operand of the body's root tuple
// and the root itself, so every loop output has a dedicated producer
// instruction. Then, for every parameter-GTE group, registers a mirroring
// group over the corresponding copies so loop outputs are grouped the same
// way as loop inputs.
absl::Status AddCopiesToRoot(HloComputation* body,
                             absl::Span<HloInstruction* const> param_gtes,
                             ConcatGroups* groups) {
  auto root = body->root_instruction();
  CHECK_EQ(root->opcode(), HloOpcode::kTuple);
  std::vector<HloInstruction*> copies(root->operand_count(), nullptr);
  for (int64_t i = 0; i < copies.size(); ++i) {
    auto element = root->mutable_operand(i);
    if (!element->shape().IsArray()) {
      // Non-array elements (e.g. tokens/tuples) are left as-is.
      continue;
    }
    copies[i] = body->AddInstruction(HloInstruction::CreateUnary(
        element->shape(), HloOpcode::kCopy, element));
    TF_RETURN_IF_ERROR(root->ReplaceOperandWith(i, copies[i]));
  }
  for (int64_t i = 0; i < copies.size(); ++i) {
    auto copy = copies[i];
    if (groups->GetGroupIndex(copy).has_value()) {
      // Already grouped as a non-leading element of some earlier group.
      continue;
    }
    auto param_group_index = groups->GetGroupIndex(param_gtes[i]);
    if (!param_group_index.has_value()) {
      continue;
    }
    // Build the copy group in the same element order as the param group.
    const auto& param_group = groups->GetGroup(param_group_index->first);
    std::vector<HloInstruction*> copy_group(param_group.elements.size());
    for (int64_t j = 0; j < copy_group.size(); ++j) {
      copy_group[j] = copies[param_group.elements[j]->tuple_index()];
    }
    // Freshly created copies can never conflict with existing groups.
    CHECK(groups
              ->MaybeCreateNewGroup(
                  ConcatGroup(std::move(copy_group), param_group.concat_dim,
                              param_group.inserted_concat_dim))
              .first);
  }
  return absl::OkStatus();
}
// Undoes AddCopiesToRoot: strips the kCopy wrappers from the root tuple's
// operands, reconnecting each tuple element directly to the copied value.
absl::Status RemoveCopiesFromRoot(HloComputation* body) {
  HloInstruction* root = body->root_instruction();
  CHECK_EQ(root->opcode(), HloOpcode::kTuple);
  const int64_t operand_count = root->operand_count();
  for (int64_t index = 0; index < operand_count; ++index) {
    HloInstruction* maybe_copy = root->mutable_operand(index);
    if (maybe_copy->opcode() != HloOpcode::kCopy) {
      continue;
    }
    TF_RETURN_IF_ERROR(
        root->ReplaceOperandWith(index, maybe_copy->mutable_operand(0)));
  }
  return absl::OkStatus();
}
// Rewrites `loop` and its body so that every registered group runs on its
// combined (concatenated) shape:
//  1. Concatenates the loop's init operands for each grouped tuple slot and
//     widens the corresponding parameter/root/condition/loop shapes.
//  2. Slices the grouped loop outputs back apart for the original users.
//  3. Rewrites each group's leading instruction in the body to the combined
//     shape, reshaping/broadcasting operands as needed (shared operands are
//     broadcast across the group).
//  4. Reconnects ungrouped users of grouped values through slices, and
//     removes slice instructions that the grouping made redundant.
absl::Status RewriteLoopWithConcatGroups(
    HloInstruction* loop, absl::Span<HloInstruction* const> param_gtes,
    ConcatGroups& groups) {
  VLOG(1) << "RewriteLoopWithConcatGroups with " << groups.Groups().size()
          << " groups.";
  // Only the group-leading element (index 0) is rewritten per group.
  absl::flat_hash_set<int64_t> processed_groups;
  auto body = loop->while_body();
  auto param = body->parameter_instruction(0);
  auto cond_param = loop->while_condition()->parameter_instruction(0);
  // Gather the loop's initial elements, then concatenate them for groups.
  std::vector<HloInstruction*> init_elements(loop->shape().tuple_shapes_size());
  for (int64_t i = 0; i < param_gtes.size(); ++i) {
    init_elements[i] =
        loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
            loop->shape().tuple_shapes(i), loop->mutable_operand(0), i));
  }
  for (int64_t i = 0; i < param_gtes.size(); ++i) {
    const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]);
    if (!group_and_index.has_value() || group_and_index->second != 0) {
      continue;
    }
    // Widen the leading tuple slot of this group to the combined shape on
    // the parameter, body root, condition parameter, and the loop itself.
    const auto& group = groups.GetGroup(group_and_index->first);
    *param_gtes[i]->mutable_shape() = group.GetConcatShape();
    *param->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();
    *body->root_instruction()->mutable_shape()->mutable_tuple_shapes(i) =
        param_gtes[i]->shape();
    *cond_param->mutable_shape()->mutable_tuple_shapes(i) =
        param_gtes[i]->shape();
    *loop->mutable_shape()->mutable_tuple_shapes(i) = param_gtes[i]->shape();
    processed_groups.insert(group_and_index->first);
    std::vector<HloInstruction*> input_concat_elements;
    input_concat_elements.reserve(group.elements.size());
    for (auto param_gte : group.elements) {
      input_concat_elements.push_back(init_elements[param_gte->tuple_index()]);
    }
    init_elements[i] =
        group.CreateConcat(std::move(input_concat_elements), loop->parent());
  }
  TF_RETURN_IF_ERROR(loop->ReplaceOperandWithDifferentShape(
      0, loop->parent()->AddInstruction(
             HloInstruction::CreateTuple(init_elements))));
  // Re-expose the loop's outputs in their original shapes by slicing the
  // combined outputs apart, and reroute all former users to the new tuple.
  auto original_loop_users = loop->users();
  const bool loop_is_root = loop == loop->parent()->root_instruction();
  std::vector<HloInstruction*> output_elements(
      loop->shape().tuple_shapes_size());
  for (int64_t i = 0; i < param_gtes.size(); ++i) {
    output_elements[i] =
        loop->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
            init_elements[i]->shape(), loop, i));
  }
  for (int64_t i = 0; i < param_gtes.size(); ++i) {
    const auto& group_and_index = groups.GetGroupIndex(param_gtes[i]);
    if (!group_and_index.has_value() || group_and_index->second != 0) {
      continue;
    }
    const auto& group = groups.GetGroup(group_and_index->first);
    auto concat_output = output_elements[group.elements[0]->tuple_index()];
    for (int64_t j = 0; j < group.elements.size(); ++j) {
      const auto param_gte = group.elements[j];
      output_elements[param_gte->tuple_index()] =
          group.CreateSlice(concat_output, j, loop->parent());
    }
  }
  auto new_output_tuple = loop->parent()->AddInstruction(
      HloInstruction::CreateTuple(output_elements));
  for (auto user : original_loop_users) {
    TF_RETURN_IF_ERROR(
        loop->ReplaceUseWithDifferentShape(user, new_output_tuple));
  }
  if (loop_is_root) {
    loop->parent()->set_root_instruction(new_output_tuple,
                                         /*accept_different_shape=*/true);
  }
  // Rewrite the leading instruction of each group inside the body.
  std::vector<HloInstruction*> slices_to_remove;
  absl::flat_hash_set<HloInstruction*> new_reshapes;
  for (auto hlo : body->MakeInstructionPostOrder()) {
    const auto& group_and_index = groups.GetGroupIndex(hlo);
    if (!group_and_index.has_value() || group_and_index->second != 0) {
      continue;
    }
    if (!processed_groups.insert(group_and_index->first).second) {
      // Already processed (e.g. a param-GTE group handled above).
      continue;
    }
    const auto& group = groups.GetGroup(group_and_index->first);
    if (hlo->opcode() == HloOpcode::kSlice) {
      // Grouped slices together cover the whole operand; drop them later.
      slices_to_remove.push_back(hlo);
    } else {
      int64_t operand_count_to_adjust = hlo->operand_count();
      if (hlo->opcode() == HloOpcode::kReduce) {
        // A reduce's init value (operand 1) is shared and stays untouched.
        CHECK_EQ(operand_count_to_adjust, 2);
        operand_count_to_adjust = 1;
      }
      for (int64_t i = 0; i < operand_count_to_adjust; ++i) {
        auto operand_group_index = groups.GetGroupIndex(hlo->operand(i));
        const ConcatGroup* operand_group =
            operand_group_index.has_value()
                ? &groups.GetGroup(operand_group_index->first)
                : nullptr;
        auto maybe_operand_concat_dim = GetOperandConcatDim(
            hlo, i, group.concat_dim, group.inserted_concat_dim, operand_group);
        CHECK(maybe_operand_concat_dim.has_value())
            << "Operand " << i << " of " << hlo->ToString();
        int64_t operand_concat_dim = maybe_operand_concat_dim->first;
        bool operand_inserted_concat_dim = maybe_operand_concat_dim->second;
        if (operand_group != nullptr) {
          CHECK_EQ(operand_concat_dim, operand_group->concat_dim);
          if (operand_inserted_concat_dim !=
              operand_group->inserted_concat_dim) {
            // Reconcile inserted-vs-merged concat dims with a reshape that
            // either splits the merged dim or merges the inserted one.
            std::vector<int64_t> new_dims;
            int64_t d = 0;
            for (; d < operand_concat_dim; ++d) {
              new_dims.push_back(hlo->operand(i)->shape().dimensions(d));
            }
            if (operand_inserted_concat_dim) {
              new_dims.push_back(group.elements.size());
              new_dims.push_back(
                  hlo->operand(i)->shape().dimensions(operand_concat_dim) /
                  group.elements.size());
              d = operand_concat_dim + 1;
            } else {
              new_dims.push_back(
                  group.elements.size() *
                  hlo->operand(i)->shape().dimensions(operand_concat_dim + 1));
              d = operand_concat_dim + 2;
            }
            for (; d < hlo->operand(i)->shape().rank(); ++d) {
              new_dims.push_back(hlo->operand(i)->shape().dimensions(d));
            }
            auto reshape = body->AddInstruction(HloInstruction::CreateReshape(
                ShapeUtil::MakeShape(hlo->operand(i)->shape().element_type(),
                                     new_dims),
                hlo->mutable_operand(i)));
            new_reshapes.insert(reshape);
            TF_RETURN_IF_ERROR(
                hlo->ReplaceOperandWithDifferentShape(i, reshape));
          }
          continue;
        }
        // Ungrouped operand: it must be shared by the whole group, and gets
        // broadcast across the group's concat dimension.
        CHECK(
            absl::c_all_of(group.elements, [&](const HloInstruction* element) {
              return element->operand(i) == hlo->operand(i);
            }));
        VLOG(2) << "Broadcasting shared operand "
                << hlo->operand(i)->ToString();
        Shape data_shape = hlo->operand(i)->shape();
        std::vector<int64_t> broadcast_dims;
        std::vector<int64_t> broadcast_shape;
        const int64_t data_shape_rank = data_shape.rank();
        broadcast_dims.reserve(data_shape_rank);
        broadcast_shape.reserve(data_shape_rank + 1);
        for (int64_t j = 0; j < data_shape_rank; ++j) {
          if (j < operand_concat_dim) {
            broadcast_dims.push_back(j);
          } else {
            broadcast_dims.push_back(j + 1);
          }
          if (j == operand_concat_dim) {
            broadcast_shape.push_back(group.elements.size());
          }
          broadcast_shape.push_back(data_shape.dimensions(j));
        }
        if (broadcast_shape.size() == data_shape.rank()) {
          // New dimension inserted at the end.
          broadcast_shape.push_back(group.elements.size());
        }
        auto broadcast = body->AddInstruction(HloInstruction::CreateBroadcast(
            ShapeUtil::MakeShape(data_shape.element_type(), broadcast_shape),
            hlo->mutable_operand(i), broadcast_dims));
        if (!operand_inserted_concat_dim) {
          // The group concatenates along an EXISTING dim: merge the inserted
          // broadcast dim into it.
          // Fix: the original indexed the dimension array with the bool
          // `operand_inserted_concat_dim` (always dimension 0), which is only
          // correct when operand_concat_dim == 0; the intended index is
          // `operand_concat_dim`.
          data_shape.set_dimensions(
              operand_concat_dim, data_shape.dimensions(operand_concat_dim) *
                                      group.elements.size());
          broadcast = body->AddInstruction(
              HloInstruction::CreateReshape(data_shape, broadcast));
        }
        TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, broadcast));
      }
    }
    VLOG(2) << "Modifying HLO to full shape " << hlo->ToString();
    ModifyHloPropertiesForConcatShape(group, hlo);
    VLOG(2) << "Modified HLO to full shape " << hlo->ToString();
  }
  // Reconnect ungrouped consumers of grouped values (and reduce init values)
  // through slices of the group's combined leading instruction.
  for (auto hlo : body->MakeInstructionPostOrder()) {
    if (new_reshapes.contains(hlo)) {
      continue;
    }
    const auto& group_and_index = groups.GetGroupIndex(hlo);
    if ((!group_and_index.has_value() || hlo->opcode() == HloOpcode::kReduce) &&
        hlo != body->root_instruction()) {
      auto operands = hlo->operands();
      if (group_and_index.has_value()) {
        // A grouped reduce: only its init value (operand 1) needs slicing.
        CHECK_EQ(operands.size(), 2);
        CHECK_EQ(hlo->opcode(), HloOpcode::kReduce);
        operands.erase(operands.begin());
      }
      for (int64_t i = 0; i < operands.size(); ++i) {
        auto operand = operands[i];
        auto operand_group_index = groups.GetGroupIndex(operand);
        if (!operand_group_index.has_value()) {
          continue;
        }
        const auto& operand_group = groups.GetGroup(operand_group_index->first);
        auto slice = operand_group.CreateSlice(
            operand_group.elements[0], operand_group_index->second, body);
        TF_RETURN_IF_ERROR(hlo->ReplaceOperandWithDifferentShape(i, slice));
      }
    }
  }
  for (auto slice : slices_to_remove) {
    TF_RETURN_IF_ERROR(slice->ReplaceAllUsesWith(slice->mutable_operand(0)));
    TF_RETURN_IF_ERROR(body->RemoveInstruction(slice));
  }
  return absl::OkStatus();
}
absl::StatusOr<bool> RunOnLoop(HloInstruction* loop,
int64_t min_operand_count_to_optimize) {
auto body = loop->while_body();
auto param = body->parameter_instruction(0);
auto root = body->root_instruction();
if (!param->shape().IsTuple() || root->opcode() != HloOpcode::kTuple) {
return false;
}
std::vector<HloInstruction*> gtes(param->shape().tuple_shapes_size(),
nullptr);
ConcatGroups groups;
auto indices_used_in_cond = TupleElementsUsedInCond(loop);
for (auto user : param->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
int64_t idx = user->tuple_index();
if (gtes[idx] != nullptr) {
return false;
}
gtes[idx] = user;
if (indices_used_in_cond[idx]) {
groups.DisallowGroupingOn(user);
}
}
std::vector<HloInstruction*> concats;
auto body_instructions = body->MakeInstructionPostOrder();
absl::flat_hash_map<const HloInstruction*, int64_t> topological_order;
for (int64_t i = 0; i < body_instructions.size(); ++i) {
auto hlo = body_instructions[i];
topological_order[hlo] = i;
if (hlo->opcode() == HloOpcode::kConcatenate &&
hlo->operand_count() >= min_operand_count_to_optimize) {
concats.push_back(hlo);
}
}
for (auto& concat : concats) {
if (!GroupHlosForConcat(body, concat, topological_order, &groups)) {
concat = nullptr;
}
}
if (groups.Groups().empty()) {
return false;
}
TF_RETURN_IF_ERROR(AddCopiesToRoot(body, gtes, &groups));
TF_RETURN_IF_ERROR(RewriteLoopWithConcatGroups(loop, gtes, groups));
for (auto concat : concats) {
if (concat == nullptr) {
continue;
}
auto new_slice = concat->mutable_operand(0);
CHECK_EQ(new_slice->opcode(), HloOpcode::kSlice);
TF_RETURN_IF_ERROR(
concat->ReplaceAllUsesWith(new_slice->mutable_operand(0)));
TF_RETURN_IF_ERROR(body->RemoveInstruction(concat));
}
TF_RETURN_IF_ERROR(RemoveCopiesFromRoot(body));
for (auto gte : gtes) {
auto group_index = groups.GetGroupIndex(gte);
if (group_index.has_value() && group_index->second > 0) {
TF_RETURN_IF_ERROR(root->ReplaceOperandWith(gte->tuple_index(), gte));
}
}
return true;
}
}
absl::StatusOr<bool> WhileLoopConcatCodeMotion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kWhile) {
TF_ASSIGN_OR_RETURN(bool loop_changed,
RunOnLoop(hlo, min_operand_count_to_optimize_));
changed |= loop_changed;
}
}
}
if (changed) {
HloPassPipeline pipeline("loop-concat-motion-cleanup");
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloDCE>();
pipeline.AddPass<WhileLoopSimplifier>();
pipeline.AddPass<TupleSimplifier>();
pipeline.AddPass<HloDCE>();
TF_RETURN_IF_ERROR(pipeline.Run(module, execution_threads).status());
}
return changed;
}
} | #include "xla/service/while_loop_concat_code_motion.h"
#include <algorithm>
#include <iterator>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class WhileLoopConcatCodeMotionTest : public HloTestBase {};
TEST_F(WhileLoopConcatCodeMotionTest, SimpleMotion) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%ccall2 = f32[1024,1024] custom-call(), custom_call_target="test2"
%add.0 = f32[1024,1024] add(%slice.0, %ccall2)
%add.1 = f32[1024,1024] add(%slice.1, %ccall2)
%t0 = token[] after-all()
%outfeed = token[] outfeed(%slice.1, %t0)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(0), op::Parameter(1)))));
ASSERT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
auto while_op =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
EXPECT_THAT(while_op->while_body()->root_instruction(),
op::Tuple(op::Add(),
op::Add(op::CustomCall(),
op::Reshape(op::Broadcast(op::CustomCall())))));
}
TEST_F(WhileLoopConcatCodeMotionTest, NoMotionWithChangedElementOrder) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %slice.1, %slice.0)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024]) tuple(%constant.0, %param.0, %param.1)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024]) while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_FALSE(changed);
}
TEST_F(WhileLoopConcatCodeMotionTest, CascadedConcats) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%add.0 = f32[1024,1024] add(%slice.0, %gte.3)
%add.1 = f32[1024,1024] add(%slice.1, %gte.4)
%add.2 = f32[1024,1024] add(%gte.3, %gte.3)
%add.3 = f32[1024,1024] add(%gte.4, %gte.4)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1, %add.2, %add.3)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024,1024] parameter(2)
%param.3 = f32[1024,1024] parameter(3)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(0), op::Parameter(1))),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(2), op::Parameter(3)))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsSharedGroups) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
%concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}
%ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test"
%slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}
%slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}
%add.0 = f32[1024,1024] add(%slice.0, %slice.2)
%add.1 = f32[1024,1024] add(%slice.1, %slice.3)
%sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)
%sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024,1024] parameter(2)
%param.3 = f32[1024,1024] parameter(3)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(0), op::Parameter(1))),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(2), op::Parameter(3)))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
TEST_F(WhileLoopConcatCodeMotionTest, TwoConcatsDifferentOrders) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%concat = f32[2048,1024] concatenate(%gte.1, %gte.2), dimensions={0}
%ccall = f32[2048,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1024,1024] slice(%ccall), slice={[0:1024], [0:1024]}
%slice.1 = f32[1024,1024] slice(%ccall), slice={[1024:2048], [0:1024]}
%gte.3 = f32[1024,1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024,1024] get-tuple-element(%param), index=4
%concat.1 = f32[2048,1024] concatenate(%gte.3, %gte.4), dimensions={0}
%ccall.1 = f32[2048,1024] custom-call(%concat.1), custom_call_target="test"
%slice.2 = f32[1024,1024] slice(%ccall.1), slice={[0:1024], [0:1024]}
%slice.3 = f32[1024,1024] slice(%ccall.1), slice={[1024:2048], [0:1024]}
%add.0 = f32[1024,1024] add(%slice.0, %slice.3)
%add.1 = f32[1024,1024] add(%slice.1, %slice.2)
%sub.0 = f32[1024,1024] subtract(%slice.0, %slice.2)
%sub.1 = f32[1024,1024] subtract(%slice.1, %slice.3)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%increment_iteration, %add.0, %add.1, %sub.0, %sub.1)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024,1024] parameter(2)
%param.3 = f32[1024,1024] parameter(3)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024,1024], f32[1024,1024])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
EXPECT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(), op::Parameter(0), op::Parameter(1),
AllOf(op::Shape("f32[2048,1024]"),
op::Concatenate(op::Parameter(2), op::Parameter(3)))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop), op::GetTupleElement(loop),
op::GetTupleElement(loop), op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
TEST_F(WhileLoopConcatCodeMotionTest, NonElementwiseOps) {
constexpr absl::string_view kHloModule = R"(
HloModule test
%cond {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%constant = s32[] constant(5)
ROOT result = pred[] compare(%gte.0, %constant), direction=LT
}
%sum {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(%a, %b)
}
%body {
%param = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1]) parameter(0)
%gte.0 = s32[] get-tuple-element(%param), index=0
%gte.1 = f32[1024,1024] get-tuple-element(%param), index=1
%gte.2 = f32[1024,1024] get-tuple-element(%param), index=2
%reshape.0 = f32[1,1024,1024] reshape(%gte.1)
%reshape.1 = f32[1,1024,1024] reshape(%gte.2)
%concat = f32[2,1024,1024] concatenate(%reshape.0, %reshape.1), dimensions={0}
%ccall = f32[2,1024,1024] custom-call(%concat), custom_call_target="test"
%slice.0 = f32[1,1024,1024] slice(%ccall), slice={[0:1], [0:1024], [0:1024]}
%slice.1 = f32[1,1024,1024] slice(%ccall), slice={[1:2], [0:1024], [0:1024]}
%reshape.2 = f32[1024,1024] reshape(%slice.0 )
%reshape.3 = f32[1024,1024] reshape(%slice.1)
%gte.3 = f32[1024] get-tuple-element(%param), index=3
%gte.4 = f32[1024] get-tuple-element(%param), index=4
%constant.0 = f32[] constant(0)
%reduce.0 = f32[1024] reduce(%reshape.0, %constant.0), to_apply=%sum, dimensions={0,1}
%reduce.1 = f32[1024] reduce(%reshape.1, %constant.0), to_apply=%sum, dimensions={0,1}
%add.0 = f32[1024] add(%reduce.0, %gte.3)
%add.1 = f32[1024] add(%reduce.1, %gte.4)
%br0 = f32[1024,1024] broadcast(%add.0), dimensions={1}
%br1 = f32[1024,1024] broadcast(%add.1), dimensions={1}
%sub.0 = f32[1024,1024] subtract(%reshape.2, %br0)
%sub.1 = f32[1024,1024] subtract(%reshape.3, %br1)
%gte.5 = f32[1] get-tuple-element(%param), index=5
%gte.6 = f32[1] get-tuple-element(%param), index=6
%reshape.4 = f32[] reshape(%gte.5)
%reshape.5 = f32[] reshape(%gte.6)
%br2 = f32[1024] broadcast(%reshape.4), dimensions={}
%br3 = f32[1024] broadcast(%reshape.5), dimensions={}
%add.2 = f32[1024] add(%add.0, %br2)
%add.3 = f32[1024] add(%add.1, %br3)
%inc0 = f32[] add(%constant.0, %reshape.4)
%inc1 = f32[] add(%constant.0, %reshape.5)
%reshape.6 = f32[1] reshape(%inc0)
%reshape.7 = f32[1] reshape(%inc1)
%constant = s32[] constant(1)
%increment_iteration = s32[] add(s32[] %gte.0, s32[] %constant)
ROOT %loop_result = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
tuple(%increment_iteration, %sub.0, %sub.1, %add.2, %add.3, %reshape.6, %reshape.7)
}
ENTRY test_main {
%param.0 = f32[1024,1024] parameter(0)
%param.1 = f32[1024,1024] parameter(1)
%param.2 = f32[1024] parameter(2)
%param.3 = f32[1024] parameter(3)
%param.4 = f32[1] parameter(4)
%param.5 = f32[1] parameter(5)
%constant.0 = s32[] constant(0)
%while_init = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
tuple(%constant.0, %param.0, %param.1, %param.2, %param.3, %param.4, %param.5)
ROOT %while = (s32[], f32[1024,1024], f32[1024,1024], f32[1024], f32[1024], f32[1], f32[1])
while(%while_init), condition=%cond, body=%body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloModule));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
WhileLoopConcatCodeMotion(2).Run(module.get()));
ASSERT_TRUE(changed);
VLOG(1) << module->ToString();
auto loop = op::While(
op::Tuple(op::Constant(),
AllOf(op::Shape("f32[2,1024,1024]"),
op::Concatenate(op::Reshape(op::Parameter(0)),
op::Reshape(op::Parameter(1)))),
AllOf(op::Shape("f32[2,1024]"),
op::Concatenate(op::Reshape(op::Parameter(2)),
op::Reshape(op::Parameter(3)))),
AllOf(op::Shape("f32[2]"),
op::Concatenate(op::Parameter(4), op::Parameter(5)))));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::GetTupleElement(loop),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Reshape(op::Slice(op::GetTupleElement(loop))),
op::Slice(op::GetTupleElement(loop)),
op::Slice(op::GetTupleElement(loop))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/while_loop_concat_code_motion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
603e6420-5efb-4b8e-9e71-70db41222d1d | cpp | tensorflow/tensorflow | cuda_kernel | third_party/xla/xla/stream_executor/cuda/cuda_kernel.cc | third_party/xla/xla/stream_executor/cuda/cuda_kernel_test.cc | #include "xla/stream_executor/cuda/cuda_kernel.h"
#include <cstddef>
#include <cstdint>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/launch_dim.h"
namespace stream_executor {
namespace gpu {
absl::StatusOr<int32_t> CudaKernel::GetMaxOccupiedBlocksPerCore(
ThreadDim threads, size_t dynamic_shared_memory_bytes) const {
int32_t threads_per_block = threads.x * threads.y * threads.z;
VLOG(3) << "Get kernel block occupancy: " << name()
<< "; threads_per_block: " << threads_per_block
<< "; dynamic_shared_memory_bytes: " << dynamic_shared_memory_bytes;
return GpuDriver::GetMaxOccupiedBlocksPerCore(
gpu_executor_->gpu_context(), gpu_function_, threads_per_block,
dynamic_shared_memory_bytes);
}
}
} | #include "xla/stream_executor/cuda/cuda_kernel.h"
#include <gtest/gtest.h>
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/stream_executor/cuda/cuda_runtime.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Ge;
using tsl::testing::IsOkAndHolds;
TEST(CudaKernelTest, GetMaxOccupiedBlocksPerCore) {
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("CUDA"));
TF_ASSERT_OK_AND_ASSIGN(StreamExecutor * executor,
platform->ExecutorForDevice(0));
GpuExecutor* gpu_executor = ExtractGpuExecutor(executor);
CudaKernel cuda_kernel(gpu_executor);
cuda_kernel.set_arity(3);
TF_ASSERT_OK_AND_ASSIGN(
CUfunction function,
CudaRuntime::GetFuncBySymbol(internal::GetAddI32Kernel()));
cuda_kernel.set_gpu_function(function);
EXPECT_EQ(cuda_kernel.Arity(), 3);
EXPECT_EQ(cuda_kernel.gpu_function(), function);
EXPECT_THAT(cuda_kernel.GetMaxOccupiedBlocksPerCore(
ThreadDim(1, 1, 1), 0),
IsOkAndHolds(Ge(1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/cuda/cuda_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1559345f-112d-4a23-82b6-d268bfab2164 | cpp | tensorflow/tensorflow | kernel_reuse_cache | third_party/xla/xla/service/gpu/kernel_reuse_cache.cc | third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc | #include "xla/service/gpu/kernel_reuse_cache.h"
#include <functional>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
std::string GetArgumentFingerprint(
absl::Span<const KernelArgument> kernel_arguments) {
return absl::StrJoin(
kernel_arguments, ",", [](std::string* s, const KernelArgument& arg) {
if (arg.first_with_same_slice().has_value()) {
absl::StrAppend(s, "=", arg.first_with_same_slice().value());
return;
}
absl::StrAppend(s, arg.alignment());
if (arg.aliased()) {
absl::StrAppend(s, "a");
}
if (arg.written()) {
absl::StrAppend(s, "w");
}
});
}
}
std::string GetComputationFingerprint(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator) {
auto print_options = HloPrintOptions::Fingerprint()
.set_print_only_essential_constants(false)
.set_print_operand_shape(false);
return absl::StrCat(discriminator, "(",
GetArgumentFingerprint(kernel_arguments), ")",
fused_computation->ToString(print_options));
}
absl::Status KernelReuseCache::Load(const CompilationCacheProto& proto) {
for (const auto& [name, entry] : proto.entries()) {
std::optional<se::ClusterDim> cluster_dim;
if (entry.has_cluster_dim()) {
cluster_dim =
se::ClusterDim{entry.cluster_dim().x(), entry.cluster_dim().y(),
entry.cluster_dim().z()};
}
TF_RET_CHECK(
cache_
.insert(
{entry.fingerprint(),
Entry{name,
LaunchDimensions{
entry.launch_dimensions().num_blocks(),
entry.launch_dimensions().num_threads_per_block()},
cluster_dim, entry.shmem_bytes(), entry.binary()}})
.second);
}
return absl::OkStatus();
}
CompilationCacheProto KernelReuseCache::Export() const {
CompilationCacheProto proto;
for (const auto& [fingerprint, cache_entry] : cache_) {
if (!hits_.contains(fingerprint)) {
VLOG(5) << "Not exporting unused " << cache_entry.kernel_name;
continue;
}
auto [it, inserted] = proto.mutable_entries()->emplace(
cache_entry.kernel_name, CompilationCacheEntryProto{});
CHECK(inserted) << cache_entry.kernel_name;
CompilationCacheEntryProto& proto_entry = it->second;
proto_entry.set_fingerprint(fingerprint);
LaunchDimensionsProto launch_dimensions_proto;
launch_dimensions_proto.set_num_blocks(
cache_entry.launch_dimensions.num_blocks());
launch_dimensions_proto.set_num_threads_per_block(
cache_entry.launch_dimensions.num_threads_per_block());
*proto_entry.mutable_launch_dimensions() = launch_dimensions_proto;
if (cache_entry.cluster_dim.has_value()) {
ClusterDimProto cluster_dim_proto;
cluster_dim_proto.set_x(cache_entry.cluster_dim->x);
cluster_dim_proto.set_y(cache_entry.cluster_dim->y);
cluster_dim_proto.set_z(cache_entry.cluster_dim->z);
*proto_entry.mutable_cluster_dim() = cluster_dim_proto;
}
proto_entry.set_shmem_bytes(cache_entry.shmem_bytes);
proto_entry.set_binary(cache_entry.binary);
}
return proto;
}
absl::Status UpdateDiskKernelCache(
absl::string_view path, const bool do_append,
const CompilationCacheProto& current_cache,
absl::Span<const KernelReuseCache::NamedBinary> binaries_to_cache) {
CompilationCacheProto disk_cache;
if (do_append) {
std::string serialized;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(),
std::string(path), &serialized));
if (!disk_cache.ParseFromString(std::string(serialized))) {
return Internal("Failed to parse serialized CompilationCacheProto.");
}
}
auto entries = disk_cache.mutable_entries();
int stored_kernel_count = 0;
for (const auto& [name, binary] : binaries_to_cache) {
auto it_current = current_cache.entries().find(name);
TF_RET_CHECK(it_current != current_cache.entries().end());
auto [it_disk, inserted] = entries->insert({name, it_current->second});
TF_RET_CHECK(inserted);
TF_RET_CHECK(!binary.empty());
it_disk->second.set_binary(reinterpret_cast<const char*>(binary.data()),
binary.size());
VLOG(5) << "Cached kernel: " << name << ": " << binary.size();
++stored_kernel_count;
}
if (stored_kernel_count > 0) {
TF_RETURN_IF_ERROR(tsl::WriteStringToFile(tsl::Env::Default(),
std::string(path),
disk_cache.SerializeAsString()));
VLOG(2) << "Stored " << stored_kernel_count << " / "
<< binaries_to_cache.size() << " kernels in the cache file.";
}
return absl::OkStatus();
}
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
const HloComputation* fused_computation,
absl::Span<const KernelArgument> kernel_arguments,
absl::string_view discriminator,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
std::string fingerprint = GetComputationFingerprint(
fused_computation, kernel_arguments, discriminator);
VLOG(4) << "Fingerprint: ";
XLA_VLOG_LINES(4, fingerprint);
return GetWithStatus(std::move(fingerprint), generator);
}
std::pair<absl::StatusOr<const KernelReuseCache::Entry*>, bool>
KernelReuseCache::GetWithStatus(
std::string fingerprint,
const std::function<absl::StatusOr<KernelReuseCache::Entry>()>& generator) {
hits_.insert(fingerprint);
auto it = cache_.find(fingerprint);
if (it != cache_.end()) {
return {&it->second, true};
}
absl::StatusOr<Entry> entry = generator();
if (entry.ok()) {
it =
cache_.insert({std::move(fingerprint), std::move(entry.value())}).first;
return {&it->second, false};
}
return {entry.status(), false};
}
}
} | #include "xla/service/gpu/kernel_reuse_cache.h"
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "xla/service/gpu/executable.pb.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
namespace xla {
namespace gpu {
namespace {
using KernelReuseTest = ::testing::Test;
TEST_F(KernelReuseTest, ExportAndLoadWork) {
KernelReuseCache cache;
EXPECT_TRUE(cache.IsEmpty());
auto [result, was_cached] = cache.GetWithStatus(
"fingerprint", []() { return KernelReuseCache::Entry{}; });
TF_EXPECT_OK(result);
EXPECT_NE(result.value(), nullptr);
EXPECT_FALSE(was_cached);
EXPECT_FALSE(cache.IsEmpty());
const CompilationCacheProto proto = cache.Export();
cache.Clear();
EXPECT_TRUE(cache.IsEmpty());
TF_EXPECT_OK(cache.Load(proto));
EXPECT_FALSE(cache.IsEmpty());
}
TEST_F(KernelReuseTest, UpdatingDiskKernelCacheWorks) {
std::string cache_file_path;
CHECK(tsl::Env::Default()->LocalTempFilename(&cache_file_path));
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k1");
TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, false,
proto,
{{.name = "k1", .binary = {5, 6}}}));
}
{
const CompilationCacheProto proto = [](std::string kernel_name) {
KernelReuseCache cache;
auto [result, was_cached] = cache.GetWithStatus("fingerprint", [&]() {
return KernelReuseCache::Entry{.kernel_name = kernel_name};
});
return cache.Export();
}("k2");
TF_EXPECT_OK(UpdateDiskKernelCache(cache_file_path, true,
proto,
{{.name = "k2", .binary = {7, 8}}}));
}
std::string serialized;
TF_EXPECT_OK(
tsl::ReadFileToString(tsl::Env::Default(), cache_file_path, &serialized));
CompilationCacheProto proto;
EXPECT_TRUE(proto.ParseFromString(std::string(serialized)));
EXPECT_EQ(proto.entries_size(), 2);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernel_reuse_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4b58cfbf-7b21-47ea-86db-812d0b5e7490 | cpp | tensorflow/tensorflow | simple_philox | third_party/xla/xla/tsl/lib/random/simple_philox.cc | third_party/xla/xla/tsl/lib/random/simple_philox_test.cc | #include "xla/tsl/lib/random/simple_philox.h"
#include "xla/tsl/lib/random/exact_uniform_int.h"
#include "tsl/platform/logging.h"
namespace tsl {
namespace random {
// Returns a uniformly distributed value in [0, n), driven by 32-bit draws
// from the underlying Philox generator.
uint32 SimplePhilox::Uniform(uint32 n) {
  const auto next = [this]() { return this->Rand32(); };
  return ExactUniformInt<uint32>(n, next);
}
// Returns a uniformly distributed value in [0, n), driven by 64-bit draws
// from the underlying Philox generator.
uint64 SimplePhilox::Uniform64(uint64 n) {
  const auto next = [this]() { return this->Rand64(); };
  return ExactUniformInt<uint64>(n, next);
}
// Returns a random value skewed toward small numbers: first picks a bit
// width `shift` uniformly in [0, max_log], then returns a uniform value in
// [0, 2^shift).
uint32 SimplePhilox::Skewed(int max_log) {
  CHECK(0 <= max_log && max_log <= 32);
  const int shift = Rand32() % (max_log + 1);
  // Shift an unsigned one: the original `(1 << shift)` shifted a signed int,
  // which is undefined behavior when shift == 31.
  const uint32 mask = shift == 32 ? ~static_cast<uint32>(0)
                                  : (static_cast<uint32>(1) << shift) - 1;
  return Rand32() & mask;
}
}
} | #include "xla/tsl/lib/random/simple_philox.h"
#include <set>
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace random {
namespace {
TEST(SimplePhiloxTest, FloatTest) {
PhiloxRandom philox(7, 7);
SimplePhilox gen(&philox);
static const int kIters = 1000000;
for (int i = 0; i < kIters; ++i) {
float f = gen.RandFloat();
EXPECT_LE(0.0f, f);
EXPECT_GT(1.0f, f);
}
for (int i = 0; i < kIters; ++i) {
double d = gen.RandDouble();
EXPECT_LE(0.0, d);
EXPECT_GT(1.0, d);
}
}
static void DifferenceTest(const char *names, SimplePhilox *gen1,
SimplePhilox *gen2) {
static const int kIters = 100;
bool different = false;
for (int i = 0; i < kIters; ++i) {
if (gen1->Rand32() != gen2->Rand32()) {
different = true;
break;
}
}
CHECK(different) << "different seeds but same output!";
}
TEST(SimplePhiloxTest, DifferenceTest) {
PhiloxRandom philox1(1, 1), philox2(17, 17);
SimplePhilox gen1(&philox1), gen2(&philox2);
DifferenceTest("SimplePhilox: different seeds", &gen1, &gen2);
}
TEST(SimplePhiloxTest, DifferenceTestCloseSeeds) {
PhiloxRandom philox1(1, 1), philox2(2, 1);
SimplePhilox gen1(&philox1), gen2(&philox2);
DifferenceTest("SimplePhilox: close seeds", &gen1, &gen2);
}
TEST(SimplePhiloxTest, Regression_CloseSeedsAreDifferent) {
const int kCount = 1000;
PhiloxRandom philox1(0, 1), philox2(1, 1);
SimplePhilox gen1(&philox1), gen2(&philox2);
std::set<uint32> first;
std::set<uint32> all;
for (int i = 0; i < kCount; ++i) {
uint32 v = gen1.Rand32();
first.insert(v);
all.insert(v);
all.insert(gen2.Rand32());
}
EXPECT_EQ(kCount, first.size());
EXPECT_EQ(2 * kCount, all.size());
}
TEST(SimplePhiloxTest, TestUniform) {
PhiloxRandom philox(17, 17);
SimplePhilox gen(&philox);
uint32 range = 3 * (1L << 29);
uint32 threshold = 1L << 30;
size_t count = 0;
static const int kTrials = 100000;
for (int i = 0; i < kTrials; ++i) {
uint32 rnd = gen.Uniform(range);
if (rnd < threshold) {
++count;
}
}
EXPECT_LT(fabs((threshold + 0.0) / range - (count + 0.0) / kTrials), 0.005);
}
TEST(SimplePhiloxTest, TestUniform64) {
PhiloxRandom philox(17, 17);
SimplePhilox gen(&philox);
uint64 range = 3 * (1LL << 59);
uint64 threshold = 1LL << 60;
size_t count = 0;
static const int kTrials = 100000;
for (int i = 0; i < kTrials; ++i) {
uint64 rnd = gen.Uniform64(range);
if (rnd < threshold) {
++count;
}
}
EXPECT_LT(fabs((threshold + 0.0) / range - (count + 0.0) / kTrials), 0.005);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/simple_philox.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/random/simple_philox_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ca03b724-c668-4b97-a31c-86e5d1ef4ee6 | cpp | google/quiche | settings_payload_decoder | quiche/http2/decoder/payload_decoders/settings_payload_decoder.cc | quiche/http2/decoder/payload_decoders/settings_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/settings_payload_decoder.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
// Entry point for decoding a SETTINGS frame payload. ACK frames must be
// empty; non-ACK frames are announced via OnSettingsStart and their settings
// decoded.
DecodeStatus SettingsPayloadDecoder::StartDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  const Http2FrameHeader& header = state->frame_header();
  const uint32_t payload_length = header.payload_length;
  QUICHE_DVLOG(2) << "SettingsPayloadDecoder::StartDecodingPayload: " << header;
  QUICHE_DCHECK_EQ(Http2FrameType::SETTINGS, header.type);
  QUICHE_DCHECK_LE(db->Remaining(), payload_length);
  QUICHE_DCHECK_EQ(0, header.flags & ~(Http2FrameFlag::ACK));
  if (!header.IsAck()) {
    // A plain SETTINGS frame: announce it, then decode its setting entries.
    state->InitializeRemainders();
    state->listener()->OnSettingsStart(header);
    return StartDecodingSettings(state, db);
  }
  // A SETTINGS ACK must carry no payload.
  if (payload_length != 0) {
    state->InitializeRemainders();
    return state->ReportFrameSizeError();
  }
  state->listener()->OnSettingsAck(header);
  return DecodeStatus::kDecodeDone;
}
// Resumes decoding after a setting entry was split across decode buffers:
// finishes the in-flight Http2SettingFields, reports it, then continues with
// the remaining settings.
DecodeStatus SettingsPayloadDecoder::ResumeDecodingPayload(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2) << "SettingsPayloadDecoder::ResumeDecodingPayload"
                  << " remaining_payload=" << state->remaining_payload()
                  << " db->Remaining=" << db->Remaining();
  QUICHE_DCHECK_EQ(Http2FrameType::SETTINGS, state->frame_header().type);
  QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
  const DecodeStatus status =
      state->ResumeDecodingStructureInPayload(&setting_fields_, db);
  if (status != DecodeStatus::kDecodeDone) {
    return HandleNotDone(state, db, status);
  }
  state->listener()->OnSetting(setting_fields_);
  return StartDecodingSettings(state, db);
}
// Decodes complete setting entries until the payload is exhausted, or until
// an entry straddles the end of the current decode buffer (in which case the
// in-progress status is forwarded via HandleNotDone).
DecodeStatus SettingsPayloadDecoder::StartDecodingSettings(
    FrameDecoderState* state, DecodeBuffer* db) {
  QUICHE_DVLOG(2) << "SettingsPayloadDecoder::StartDecodingSettings"
                  << " remaining_payload=" << state->remaining_payload()
                  << " db->Remaining=" << db->Remaining();
  while (state->remaining_payload() > 0) {
    const DecodeStatus status =
        state->StartDecodingStructureInPayload(&setting_fields_, db);
    if (status != DecodeStatus::kDecodeDone) {
      return HandleNotDone(state, db, status);
    }
    state->listener()->OnSetting(setting_fields_);
  }
  QUICHE_DVLOG(2) << "LEAVING SettingsPayloadDecoder::StartDecodingSettings"
                  << "\n\tdb->Remaining=" << db->Remaining()
                  << "\n\t remaining_payload=" << state->remaining_payload();
  state->listener()->OnSettingsEnd();
  return DecodeStatus::kDecodeDone;
}
// Forwards a non-kDecodeDone status from a structure decode, after asserting
// that the status is consistent with the amount of payload left: decoding may
// be in progress only while payload remains, and an error implies none does.
DecodeStatus SettingsPayloadDecoder::HandleNotDone(FrameDecoderState* state,
                                                   DecodeBuffer* db,
                                                   DecodeStatus status) {
  QUICHE_DCHECK(
      (status == DecodeStatus::kDecodeInProgress &&
       state->remaining_payload() > 0) ||
      (status == DecodeStatus::kDecodeError && state->remaining_payload() == 0))
      << "\n status=" << status
      << "; remaining_payload=" << state->remaining_payload()
      << "; db->Remaining=" << db->Remaining();
  return status;
}
} | #include "quiche/http2/decoder/payload_decoders/settings_payload_decoder.h"
#include <stddef.h>
#include <vector>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_constants_test_util.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class SettingsPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::SETTINGS;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() {
return Http2FrameFlag::ACK;
}
};
namespace {
struct Listener : public FramePartsCollector {
void OnSettingsStart(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnSettingsStart: " << header;
EXPECT_EQ(Http2FrameType::SETTINGS, header.type) << header;
EXPECT_EQ(Http2FrameFlag(), header.flags) << header;
StartFrame(header)->OnSettingsStart(header);
}
void OnSetting(const Http2SettingFields& setting_fields) override {
QUICHE_VLOG(1) << "Http2SettingFields: setting_fields=" << setting_fields;
CurrentFrame()->OnSetting(setting_fields);
}
void OnSettingsEnd() override {
QUICHE_VLOG(1) << "OnSettingsEnd";
EndFrame()->OnSettingsEnd();
}
void OnSettingsAck(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnSettingsAck: " << header;
StartAndEndFrame(header)->OnSettingsAck(header);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class SettingsPayloadDecoderTest
: public AbstractPayloadDecoderTest<SettingsPayloadDecoder,
SettingsPayloadDecoderPeer, Listener> {
protected:
Http2SettingFields RandSettingsFields() {
Http2SettingFields fields;
test::Randomize(&fields, RandomPtr());
return fields;
}
};
TEST_F(SettingsPayloadDecoderTest, SettingsWrongSize) {
auto approve_size = [](size_t size) {
return 0 != (size % Http2SettingFields::EncodedSize());
};
Http2FrameBuilder fb;
fb.Append(RandSettingsFields());
fb.Append(RandSettingsFields());
fb.Append(RandSettingsFields());
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
TEST_F(SettingsPayloadDecoderTest, SettingsAkcWrongSize) {
auto approve_size = [](size_t size) { return size != 0; };
Http2FrameBuilder fb;
fb.Append(RandSettingsFields());
fb.Append(RandSettingsFields());
fb.Append(RandSettingsFields());
EXPECT_TRUE(VerifyDetectsFrameSizeError(Http2FrameFlag::ACK, fb.buffer(),
approve_size));
}
TEST_F(SettingsPayloadDecoderTest, SettingsAck) {
for (int stream_id = 0; stream_id < 3; ++stream_id) {
Http2FrameHeader header(0, Http2FrameType::SETTINGS,
RandFlags() | Http2FrameFlag::ACK, stream_id);
set_frame_header(header);
FrameParts expected(header);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays("", expected));
}
}
TEST_F(SettingsPayloadDecoderTest, OneRealSetting) {
std::vector<uint32_t> values = {0, 1, 0xffffffff, Random().Rand32()};
for (auto param : AllHttp2SettingsParameters()) {
for (uint32_t value : values) {
Http2SettingFields fields(param, value);
Http2FrameBuilder fb;
fb.Append(fields);
Http2FrameHeader header(fb.size(), Http2FrameType::SETTINGS, RandFlags(),
RandStreamId());
set_frame_header(header);
FrameParts expected(header);
expected.AppendSetting(fields);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
TEST_F(SettingsPayloadDecoderTest, ManySettings) {
const size_t num_settings = 100;
const size_t size = Http2SettingFields::EncodedSize() * num_settings;
Http2FrameHeader header(size, Http2FrameType::SETTINGS,
RandFlags(),
RandStreamId());
set_frame_header(header);
FrameParts expected(header);
Http2FrameBuilder fb;
for (size_t n = 0; n < num_settings; ++n) {
Http2SettingFields fields(static_cast<Http2SettingsParameter>(n),
Random().Rand32());
fb.Append(fields);
expected.AppendSetting(fields);
}
ASSERT_EQ(size, fb.size());
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/settings_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/settings_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
5a463dc2-4f5c-449a-811a-81e0b7980903 | cpp | tensorflow/tensorflow | dilate | tensorflow/lite/kernels/dilate.cc | tensorflow/lite/kernels/dilate_test.cc | #include <algorithm>
#include <array>
#include <cstdint>
#include <cstring>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dilate {
namespace {
constexpr size_t kMaxDilateDims = 6;
using Array = std::array<int32_t, kMaxDilateDims>;
// Recursively copies `input` into `output`, inserting bytes from
// `padding_values` between consecutive elements along each dimension to
// realize the dilation.
//
// Parameters (all strides/sizes are in bytes):
// - size: number of (merged) dimensions to process.
// - shape: input extent per dimension.
// - input_strides / output_strides: per-dimension strides.
// - output_element_sizes: size of one dilated element per dimension.
// - depth: current dimension; the innermost dimension copies rows directly
//   with memcpy.
void DilateImpl(const char* input, char* output,
                const char* const padding_values, const int32_t size,
                const int32_t* const shape, const int32_t* const input_strides,
                const int32_t* const output_strides,
                const int32_t* const output_element_sizes, size_t depth = 0) {
  const int output_stride = output_strides[depth];
  const int input_stride = input_strides[depth];
  const int num_elts = shape[depth];
  // Gap between the end of one dilated element and the start of the next.
  const int padding_size = output_stride - output_element_sizes[depth];
  if (depth + 1 >= size) {
    // Innermost dimension: copy each element followed by its padding gap.
    // The last element gets no trailing padding.
    for (size_t i = 0; i + 1 < num_elts; ++i) {
      std::memcpy(output, input, input_stride);
      std::memcpy(output + input_stride, padding_values, padding_size);
      input += input_stride;
      output += output_stride;
    }
    std::memcpy(output, input, input_stride);
  } else {
    // Outer dimension: recurse per element, then fill the gap after the
    // dilated element. Again, no trailing padding after the last element.
    for (size_t i = 0; i + 1 < num_elts; ++i) {
      DilateImpl(input, output, padding_values, size, shape, input_strides,
                 output_strides, output_element_sizes, depth + 1);
      std::memcpy(output + output_element_sizes[depth], padding_values,
                  padding_size);
      input += input_stride;
      output += output_stride;
    }
    DilateImpl(input, output, padding_values, size, shape, input_strides,
               output_strides, output_element_sizes, depth + 1);
  }
}
// Precomputes everything DilateImpl needs (merged shape, strides, padding
// buffer) for one dilation, then runs it via Run().
class DilationRunner {
 public:
  // `shape`/`dilations` describe the input; `padding_value` points at one
  // element of `element_size` bytes used to fill the gaps.
  DilationRunner(const TfLiteIntArray& shape, const int32_t* const dilations,
                 const char* padding_value, const int element_size)
      : size_(shape.size), element_size_(element_size) {
    static_assert(sizeof(shape.data[0]) == sizeof(Array::value_type),
                  "Don't use memcpy here if you change the Array type.");
    std::memcpy(shape_.data(), shape.data, size_ * sizeof(shape.data[0]));
    static_assert(sizeof(dilations[0]) == sizeof(Array::value_type),
                  "Don't use memcpy here if you change the Array type.");
    std::memcpy(dilations_.data(), dilations, size_ * sizeof(dilations[0]));
    MergeTrailingDilations();
    ComputeInputStrides();
    ComputeOutputStridesAndElementSizes();
    FillPaddingValueBuffer(padding_value, element_size);
  }
  int size() const { return size_; }
  int element_size() const { return element_size_; }
  const char* padding_values() const { return padding_value_buffer_.data(); }
  const Array& shape() const { return shape_; }
  const Array& dilations() const { return dilations_; }
  const Array& input_strides() const { return input_strides_; }
  const Array& output_strides() const { return output_strides_; }
  const Array& output_element_sizes() const { return output_element_sizes_; }
  // Performs the dilation from `input` into `output` (both raw byte buffers).
  void Run(const char* const input, char* const output) const {
    DilateImpl(input, output, padding_values(), size(), shape().data(),
               input_strides().data(), output_strides().data(),
               output_element_sizes().data());
  }
 private:
  // Folds trailing dimensions whose dilation factor is 1 into the element
  // size, so contiguous runs are copied with a single memcpy.
  void MergeTrailingDilations() {
    for (int i = size_ - 2; i >= 0; --i) {
      if (dilations_[i + 1] == 1) {
        element_size_ *= shape_[i + 1];
        --size_;
      } else {
        break;
      }
    }
    // Degenerate case: no dilation at all collapses to one big element.
    if (size_ == 1 && dilations_[0] == 1) {
      element_size_ *= shape_[0];
      shape_[0] = 1;
    }
  }
  // Standard row-major byte strides of the (merged) input.
  void ComputeInputStrides() {
    input_strides_[size_ - 1] = element_size_;
    for (int i = size_ - 2; i >= 0; --i) {
      input_strides_[i] = shape_[i + 1] * input_strides_[i + 1];
    }
  }
  // For each dimension, the byte size of one dilated element and the stride
  // between consecutive dilated elements (element size * dilation factor).
  void ComputeOutputStridesAndElementSizes() {
    const int last = size_ - 1;
    output_element_sizes_[last] = element_size_;
    output_strides_[last] = dilations_[last] * output_element_sizes_[last];
    for (int i = size_ - 2; i >= 0; --i) {
      output_element_sizes_[i] = ((shape_[i + 1] - 1) * output_strides_[i + 1] +
                                  output_element_sizes_[i + 1]);
      output_strides_[i] = dilations_[i] * output_element_sizes_[i];
    }
  }
  // Builds a buffer of repeated padding elements large enough for the widest
  // gap (the first dilated dimension), filled by doubling memcpys.
  void FillPaddingValueBuffer(const char* padding_element,
                              const size_t padding_element_size) {
    int first_dilated_idx = 0;
    while (dilations_[first_dilated_idx] == 1 &&
           first_dilated_idx + 1 < size_) {
      ++first_dilated_idx;
    }
    const size_t size = output_strides_[first_dilated_idx] -
                        output_element_sizes_[first_dilated_idx];
    if (!size) {
      return;
    }
    padding_value_buffer_.resize(size);
    std::memcpy(padding_value_buffer_.data(), padding_element,
                padding_element_size);
    // Double the filled prefix until the whole buffer is covered.
    size_t sz = padding_element_size;
    while (sz < size) {
      const size_t bytes_to_copy = std::min(size - sz, sz);
      std::memcpy(padding_value_buffer_.data() + sz,
                  padding_value_buffer_.data(), bytes_to_copy);
      sz += bytes_to_copy;
    }
  }
  Array shape_;
  Array dilations_;
  Array output_strides_;
  Array output_element_sizes_;
  Array input_strides_;
  std::vector<char> padding_value_buffer_;
  int size_;          // Number of dimensions after merging.
  int element_size_;  // Bytes per (merged) element.
};
// Bundles the TfLite context/node with the resolved input and output tensors
// of a DILATE node.
struct DilationContext {
  // Indices of the node's input tensors.
  enum InputTensorId { kInput, kDilations, kPaddingValue, kNumInputTensors };
  // Indices of the node's output tensors.
  enum OutputTensorId { kOutput, kNumOutputTensors };
  DilationContext(TfLiteContext* context, TfLiteNode* node)
      : context(context),
        node(node),
        input_tensor(GetInput(context, node, kInput)),
        dilations_tensor(GetInput(context, node, kDilations)),
        padding_value_tensor(GetInput(context, node, kPaddingValue)),
        output_tensor(GetOutput(context, node, kOutput)) {}
  TfLiteContext* context;
  TfLiteNode* node;
  const TfLiteTensor* input_tensor;
  const TfLiteTensor* dilations_tensor;
  const TfLiteTensor* padding_value_tensor;
  TfLiteTensor* output_tensor;
};
// Extent of a dimension of size `dim` after dilation: (dilate_factor - 1)
// padding elements are inserted between each pair of neighbors.
int DilateDim(int dim, int dilate_factor) {
  return dilate_factor * (dim - 1) + 1;
}
// Computes the dilated output shape from the input shape and the dilation
// factors, and resizes the output tensor to it.
TfLiteStatus SetupOutputTensor(const DilationContext& ctx) {
  const TfLiteIntArray& input_shape = *(ctx.input_tensor->dims);
  const int32_t* dilations = ctx.dilations_tensor->data.i32;
  IntArrayUniquePtr output_shape = BuildTfLiteArray(input_shape.size);
  for (int i = 0; i < output_shape->size; ++i) {
    output_shape->data[i] = DilateDim(input_shape.data[i], dilations[i]);
  }
  // ResizeTensor takes ownership of the released shape array.
  return ctx.context->ResizeTensor(ctx.context, ctx.output_tensor,
                                   output_shape.release());
}
// Validates tensor counts, rank bounds and type agreement, and resizes the
// output when the dilations are constant; otherwise defers shape inference
// to Eval by marking the output dynamic.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node),
                    DilationContext::kNumInputTensors);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node),
                    DilationContext::kNumOutputTensors);
  const DilationContext ctx(context, node);
  TF_LITE_ENSURE(context, ctx.input_tensor->dims != nullptr);
  TF_LITE_ENSURE(context, ctx.input_tensor->dims->size > 0);
  TF_LITE_ENSURE(context, ctx.input_tensor->dims->size <= kMaxDilateDims);
  // Input, output and padding value must share one element type.
  TF_LITE_ENSURE_EQ(context, ctx.input_tensor->type, ctx.output_tensor->type);
  TF_LITE_ENSURE_EQ(context, ctx.input_tensor->type,
                    ctx.padding_value_tensor->type);
  if (!IsConstantTensor(ctx.dilations_tensor)) {
    SetTensorToDynamic(ctx.output_tensor);
    return kTfLiteOk;
  }
  return SetupOutputTensor(ctx);
}
// Validates the dilation tensor, resizes the output for dynamic dilations,
// and performs the dilation via DilationRunner.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const DilationContext ctx(context, node);
  TF_LITE_ENSURE_EQ(context, ctx.dilations_tensor->type, kTfLiteInt32);
  TF_LITE_ENSURE(context, ctx.dilations_tensor->dims != nullptr);
  TF_LITE_ENSURE_EQ(context, ctx.dilations_tensor->dims->size, 1);
  TF_LITE_ENSURE_EQ(context, ctx.dilations_tensor->dims->data[0],
                    ctx.input_tensor->dims->size);
  // Check every dilation factor, not just the first: the dilations tensor is
  // rank-1, so its element count is dims->data[0] (the original loop bound,
  // dims->size, is always 1 and skipped all but the first factor).
  for (int i = 0; i < ctx.dilations_tensor->dims->data[0]; ++i) {
    TF_LITE_ENSURE(context, ctx.dilations_tensor->data.i32[i] >= 1);
  }
  // Dynamic dilations: the output shape could not be fixed in Prepare.
  if (!IsConstantTensor(ctx.dilations_tensor)) {
    TF_LITE_ENSURE_OK(context, SetupOutputTensor(ctx));
  }
  size_t element_size;
  TF_LITE_ENSURE_OK(
      context, GetSizeOfType(context, ctx.input_tensor->type, &element_size));
  const DilationRunner runner(
      *ctx.input_tensor->dims, ctx.dilations_tensor->data.i32,
      ctx.padding_value_tensor->data.raw_const, element_size);
  runner.Run(ctx.input_tensor->data.raw_const, ctx.output_tensor->data.raw);
  return kTfLiteOk;
}
}
}
// Returns the kernel registration for the DILATE builtin. The init/free
// entries are null because the kernel keeps no per-node state.
TfLiteRegistration* Register_DILATE() {
  static TfLiteRegistration r = {nullptr, nullptr,
                                 dilate::Prepare,
                                 dilate::Eval};
  return &r;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
namespace tflite {
namespace {
template <class T>
std::vector<T> DilateReference(const std::vector<T>& input,
const std::vector<int32_t>& shape,
const std::vector<int32_t>& dilations,
const T padding_value) {
constexpr int kMaxDilateDims = 6;
std::vector<int> output_shape(kMaxDilateDims, 0);
for (size_t i = 0; i < shape.size(); ++i) {
output_shape[i] = (shape[i] - 1) * dilations[i] + 1;
}
std::vector<int> strides(kMaxDilateDims, 0);
strides[shape.size() - 1] = 1;
for (size_t i = shape.size() - 1; i > 0; --i) {
strides[i - 1] = shape[i] * strides[i];
}
std::vector<int> output_strides(kMaxDilateDims, 0);
output_strides[shape.size() - 1] = 1;
for (size_t i = shape.size() - 1; i > 0; --i) {
output_strides[i - 1] = output_shape[i] * output_strides[i];
}
std::vector<int> safe_dilations(kMaxDilateDims, 0);
absl::c_copy(dilations, safe_dilations.begin());
std::vector<int> safe_input_shape(kMaxDilateDims, 0);
absl::c_copy(shape, safe_input_shape.begin());
std::vector<T> output(
std::accumulate(output_shape.begin(), output_shape.begin() + shape.size(),
1, std::multiplies<>()),
padding_value);
int a = 0;
do {
int b = 0;
do {
int c = 0;
do {
int d = 0;
do {
int e = 0;
do {
int f = 0;
do {
const int i_idx = a * strides[0] + b * strides[1] +
c * strides[2] + d * strides[3] +
e * strides[4] + f * strides[5];
const int o_idx = a * safe_dilations[0] * output_strides[0] +
b * safe_dilations[1] * output_strides[1] +
c * safe_dilations[2] * output_strides[2] +
d * safe_dilations[3] * output_strides[3] +
e * safe_dilations[4] * output_strides[4] +
f * safe_dilations[5] * output_strides[5];
output[o_idx] = input[i_idx];
} while (++f < safe_input_shape[5]);
} while (++e < safe_input_shape[4]);
} while (++d < safe_input_shape[3]);
} while (++c < safe_input_shape[2]);
} while (++b < safe_input_shape[1]);
} while (++a < safe_input_shape[0]);
return output;
}
template <class T>
struct TensorTypeFor;
#define TENSOR_TYPE_ASSOC(CPP_TYPE, TENSORTYPE_VALUE) \
template <> \
struct TensorTypeFor<CPP_TYPE> { \
static constexpr TensorType value = TENSORTYPE_VALUE; \
};
TENSOR_TYPE_ASSOC(int8_t, TensorType_INT8);
TENSOR_TYPE_ASSOC(int16_t, TensorType_INT16);
TENSOR_TYPE_ASSOC(int32_t, TensorType_INT32);
TENSOR_TYPE_ASSOC(int64_t, TensorType_INT64);
TENSOR_TYPE_ASSOC(uint8_t, TensorType_UINT8);
TENSOR_TYPE_ASSOC(uint16_t, TensorType_UINT16);
TENSOR_TYPE_ASSOC(uint32_t, TensorType_UINT32);
TENSOR_TYPE_ASSOC(uint64_t, TensorType_UINT64);
TENSOR_TYPE_ASSOC(float, TensorType_FLOAT32);
static_assert(sizeof(float) == 4, "float type is expected to be 32 bit long");
TENSOR_TYPE_ASSOC(double, TensorType_FLOAT64);
static_assert(sizeof(double) == 8, "double type is expected to be 64 bit long");
template <class T, bool IsDilationTensorConst>
class DilateOpModel : public SingleOpModel {
static constexpr TensorType kTensorType = TensorTypeFor<T>::value;
public:
void SetInput(absl::Span<const int32_t> shape,
absl::Span<const T> data = {}) {
input_shape_.assign(shape.begin(), shape.end());
if (data.empty()) {
input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<int>()));
absl::c_iota(input_data_, 1);
} else {
input_data_.assign(data.begin(), data.end());
}
}
void SetDilations(absl::Span<const int32_t> dilations) {
dilations_shape_ = std::vector<int>(1, dilations.size());
dilations_data_.assign(dilations.begin(), dilations.end());
}
void SetPaddingValue(const T& val) { padding_value_data_ = val; }
void Build() {
input_ = AddInput({kTensorType, input_shape_});
if (IsDilationTensorConst) {
dilations_ = AddConstInput(TensorType_INT32, dilations_data_,
{static_cast<int>(dilations_data_.size())});
} else {
dilations_ = AddInput({TensorType_INT32, dilations_shape_});
}
padding_value_ = AddConstInput(kTensorType, &padding_value_data_, {1});
output_ = AddOutput(kTensorType);
SetBuiltinOp(BuiltinOperator_DILATE, BuiltinOptions2_DilateOptions,
CreateDilateOptions(builder_).Union());
BuildInterpreter({input_shape_});
PopulateTensor(input_, input_data_);
if (!IsDilationTensorConst) {
PopulateTensor(dilations_, dilations_data_);
}
}
TfLiteStatus BuildAndInvoke() {
Build();
return Invoke();
}
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_),
GetTensorSize(output_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape = *(interpreter_->tensor(output_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
const std::vector<T>& GetInput() const { return input_data_; }
const std::vector<int>& GetInputShape() const { return input_shape_; }
const std::vector<int>& GetDilations() const { return dilations_data_; }
const T& GetPaddingValue() const { return padding_value_data_; }
protected:
int input_ = -1;
int dilations_ = -1;
int padding_value_ = -1;
int output_ = -1;
std::vector<T> input_data_;
std::vector<int32_t> input_shape_;
std::vector<int32_t> dilations_data_;
std::vector<int32_t> dilations_shape_;
T padding_value_data_ = 0;
};
template <class Configuration>
class DilateTest;
template <class StorageType, class IsDilationTensorConst>
class DilateTest<testing::Types<StorageType, IsDilationTensorConst>>
: public testing::Test {
protected:
DilateOpModel<StorageType, IsDilationTensorConst::value> model_;
};
struct ConstantDilation : std::true_type {};
struct VariableDilation : std::false_type {};
using TestList = testing::Types<testing::Types<int8_t, ConstantDilation>,
testing::Types<int16_t, ConstantDilation>,
testing::Types<int32_t, ConstantDilation>,
testing::Types<int64_t, ConstantDilation>,
testing::Types<uint8_t, ConstantDilation>,
testing::Types<uint16_t, ConstantDilation>,
testing::Types<uint32_t, ConstantDilation>,
testing::Types<uint64_t, ConstantDilation>,
testing::Types<float, ConstantDilation>,
testing::Types<double, ConstantDilation>,
testing::Types<int8_t, VariableDilation>,
testing::Types<int16_t, VariableDilation>,
testing::Types<int32_t, VariableDilation>,
testing::Types<int64_t, VariableDilation>,
testing::Types<uint8_t, VariableDilation>,
testing::Types<uint16_t, VariableDilation>,
testing::Types<uint32_t, VariableDilation>,
testing::Types<uint64_t, VariableDilation>,
testing::Types<float, VariableDilation>,
testing::Types<double, VariableDilation>>;
TYPED_TEST_SUITE(DilateTest, TestList);
TYPED_TEST(DilateTest, DilationManualTest) {
this->model_.SetInput({2, 2});
this->model_.SetDilations({2, 3});
const std::vector<int> expected{
1, 0, 0, 2,
0, 0, 0, 0,
3, 0, 0, 4
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(3, 4));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, DilationManualTest2) {
this->model_.SetInput({2, 3});
this->model_.SetDilations({2, 3});
const std::vector<int> expected{
1, 0, 0, 2, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 5, 0, 0, 6
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(3, 7));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, DilationManualTest3) {
this->model_.SetInput({4, 2, 3});
this->model_.SetDilations({2, 3, 4});
const std::vector<int> expected{
1, 0, 0, 0, 2, 0, 0, 0, 3,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
4, 0, 0, 0, 5, 0, 0, 0, 6,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
7, 0, 0, 0, 8, 0, 0, 0, 9,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
10, 0, 0, 0, 11, 0, 0, 0, 12,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
13, 0, 0, 0, 14, 0, 0, 0, 15,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
16, 0, 0, 0, 17, 0, 0, 0, 18,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
19, 0, 0, 0, 20, 0, 0, 0, 21,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
22, 0, 0, 0, 23, 0, 0, 0, 24,
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(7, 4, 9));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, TrailingDilationOptimizationWorks) {
this->model_.SetInput({2, 2, 2, 2});
this->model_.SetDilations({2, 1, 1, 1});
const std::vector<int> expected{
1, 2, 3, 4, 5, 6, 7, 8,
0, 0, 0, 0, 0, 0, 0, 0,
9, 10, 11, 12, 13, 14, 15, 16
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(3, 2, 2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, TrailingDilationOptimizationDegenerateCaseWorks) {
this->model_.SetInput({2, 2, 2, 2});
this->model_.SetDilations({1, 1, 1, 1});
const std::vector<int> expected{
1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16
};
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2, 2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAreArray(expected));
}
TYPED_TEST(DilateTest, CheckAgainstReferenceImplementation) {
auto& model = this->model_;
model.SetInput({5, 4, 2});
model.SetDilations({2, 3, 5});
model.SetPaddingValue(-1);
const auto expected =
DilateReference(model.GetInput(), model.GetInputShape(),
model.GetDilations(), model.GetPaddingValue());
EXPECT_EQ(model.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dilate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/dilate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a56bb79d-f735-498e-9b14-cd281b406b7b | cpp | tensorflow/tensorflow | hlo_traversal | third_party/xla/xla/service/gpu/hlo_traversal.cc | third_party/xla/xla/service/gpu/hlo_traversal_test.cc | #include "xla/service/gpu/hlo_traversal.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <queue>
#include <sstream>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/memory/memory.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
namespace gpu {
namespace {
// Calls `add_user` for every instruction inside `fusion_adaptor` that
// effectively consumes `value` through `user`, looking through fusion
// boundaries:
// - if `user` is the root tuple of a fused computation, the walk continues
//   through the enclosing fusion's get-tuple-element users;
// - if `user` is a fusion instruction inside the adaptor, the walk continues
//   with the users of the corresponding fused parameter.
template <typename F>
void ResolveUsers(const HloInstruction* value, const HloInstruction* user,
                  const HloFusionAdaptor& fusion_adaptor, F&& add_user) {
  if (user->opcode() == HloOpcode::kTuple && user->IsRoot()) {
    if (auto* fusion = user->parent()->FusionInstruction()) {
      for (const auto* gte : fusion->users()) {
        if (gte->opcode() != HloOpcode::kGetTupleElement) {
          // The fusion result is consumed directly (not via GTE); report the
          // consumer if the produced value lies inside the adaptor.
          if (fusion_adaptor.ContainsInstruction(value)) {
            add_user(gte);
          }
          continue;
        }
        for (const auto* gte_user : gte->users()) {
          ResolveUsers(gte, gte_user, fusion_adaptor, add_user);
        }
      }
    }
  } else if (fusion_adaptor.ContainsInstruction(user) &&
             user->opcode() == HloOpcode::kFusion) {
    // Step into the fusion: the real consumers are the users of the fused
    // parameter matching `value`'s operand slot.
    auto* param = user->fused_parameter(user->operand_index(value));
    for (const auto* param_user : param->users()) {
      add_user(param_user);
    }
  } else if (fusion_adaptor.ContainsInstruction(user)) {
    add_user(user);
  }
}
// Resolves `operand` through fusion boundaries to the instruction that
// actually produces the value, as seen from inside `fusion_adaptor`:
//  - a GTE of a contained multi-output fusion resolves to the matching
//    root-tuple operand;
//  - a contained fusion resolves to its fused expression root;
//  - a contained fused parameter resolves (recursively) to the corresponding
//    operand of the enclosing fusion instruction.
// Instructions outside the adaptor are returned unchanged.
const HloInstruction* ResolveOperand(const HloInstruction* operand,
                                     const HloFusionAdaptor& fusion_adaptor) {
  if (operand->opcode() == HloOpcode::kGetTupleElement &&
      operand->operand(0)->opcode() == HloOpcode::kFusion &&
      operand->operand(0)->fused_expression_root()->opcode() ==
          HloOpcode::kTuple &&
      fusion_adaptor.ContainsInstruction(operand->operand(0))) {
    return operand->operand(0)->fused_expression_root()->operand(
        operand->tuple_index());
  }
  if (!fusion_adaptor.ContainsInstruction(operand)) {
    return operand;
  }
  if (operand->opcode() == HloOpcode::kFusion) {
    return operand->fused_expression_root();
  }
  if (operand->opcode() == HloOpcode::kParameter) {
    if (auto* fusion = operand->parent()->FusionInstruction()) {
      return ResolveOperand(fusion->operand(operand->parameter_number()),
                            fusion_adaptor);
    }
  }
  return operand;
}
}
// Adaptor for a single non-fusion instruction, treating it as a one-node
// "fusion": the instruction is both the only emitted op and the root, and
// its operands are the parameters.
class SingleInstructionFusion : public internal::HloFusionInstructionAdaptor {
 public:
  // `parent` is the owning HloFusionAdaptor; it must outlive this object.
  explicit SingleInstructionFusion(const HloInstruction* instruction,
                                   const HloFusionAdaptor* parent)
      : instruction_(instruction), parent_(parent) {
    // Fusion instructions must be wrapped via HloComputationFusion so that
    // their body is visible.
    CHECK_NE(instruction->opcode(), HloOpcode::kFusion)
        << "Use HloComputationFusion";
  }

  // Only the wrapped instruction itself is contained.
  bool ContainsInstruction(const HloInstruction* instruction) const override {
    return instruction == instruction_;
  }

  // The single instruction is the sole root.
  absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override {
    return {HloInstructionAdaptor{*instruction_, parent_}};
  }

  // The instruction's operands act as the fusion parameters.
  absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override {
    const auto& operands = instruction_->operands();
    return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(),
                                                         operands.end());
  }

  const HloInstruction& FusionInstruction() const override {
    return *instruction_;
  }

  // Post-order of a single node is just that node.
  absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
      const override {
    return {HloInstructionAdaptor{*instruction_, parent_}};
  }

  void ForEach(
      const std::function<void(HloInstructionAdaptor)>& fn) const override {
    fn(HloInstructionAdaptor{*instruction_, parent_});
  }

  std::string ToString() const override { return instruction_->ToString(); }

 private:
  const HloInstruction* instruction_;  // Not owned.
  const HloFusionAdaptor* parent_;     // Not owned.
};
// Adaptor for a real fusion instruction, exposing the fused computation's
// body. Roots are the non-tuple leaves of the root expression; parameters
// are the operands of the fusion instruction.
class HloComputationFusion : public internal::HloFusionInstructionAdaptor {
 public:
  // `parent` is the owning HloFusionAdaptor; it must outlive this object.
  explicit HloComputationFusion(const HloComputation* computation,
                                const HloFusionAdaptor* parent)
      : computation_(computation), parent_(parent) {
    CHECK(computation->IsFusionComputation());
    // Roots are computed eagerly; GetRoots() is hot.
    roots_ = FindRoots(computation);
  }

  // Flattens (possibly nested) root tuples into the list of value-producing
  // roots, preserving operand order. Duplicate operands yield duplicate
  // roots on purpose (multi-output fusion may return the same value twice).
  absl::InlinedVector<HloInstructionAdaptor, 2> FindRoots(
      const HloComputation* computation) {
    absl::InlinedVector<HloInstructionAdaptor, 2> roots;

    std::function<void(const HloInstruction*)> get_roots;
    get_roots = [&](const HloInstruction* instr) {
      if (instr->opcode() == HloOpcode::kTuple) {
        for (const auto* operand : instr->operands()) {
          get_roots(operand);
        }
      } else {
        HloInstructionAdaptor wrapped{*instr, parent_};
        roots.push_back(wrapped);
      }
    };
    get_roots(computation->root_instruction());

    return roots;
  }

  // The fusion instruction itself counts as contained, in addition to every
  // instruction of the fused computation.
  bool ContainsInstruction(const HloInstruction* instruction) const override {
    return instruction->parent() == computation_ ||
           instruction == computation_->FusionInstruction();
  }

  absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override {
    CHECK(!roots_.empty())
        << "No roots found in the computation. HloFusionAdaptor was likely "
           "created for a non-fusion computation: "
        << computation_->ToString();
    return roots_;
  }

  // Parameters are the operands of the enclosing fusion instruction (i.e.
  // values produced outside the fusion).
  absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override {
    const auto& operands = computation_->FusionInstruction()->operands();
    return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(),
                                                         operands.end());
  }

  const HloInstruction& FusionInstruction() const override {
    return *computation_->FusionInstruction();
  }

  // Post-order of the fused computation, excluding parameters and the root
  // tuple (they are bookkeeping, not emitted ops).
  absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder()
      const override {
    auto post_order = computation_->MakeInstructionPostOrder();

    absl::InlinedVector<HloInstructionAdaptor, 2> result;
    result.reserve(post_order.size() - computation_->num_parameters());

    for (auto* instr : post_order) {
      if (instr->opcode() == HloOpcode::kParameter ||
          (instr->opcode() == HloOpcode::kTuple && instr->IsRoot())) {
        continue;
      }
      result.emplace_back(*instr, parent_);
    }
    return result;
  }

  // Iterates all instructions in the computation's default order, skipping
  // parameters, tuples and get-tuple-elements. Note: unlike
  // MakeInstructionPostOrder, this skips ALL tuples/GTEs, not just the root.
  void ForEach(
      const std::function<void(HloInstructionAdaptor)>& fn) const override {
    for (const HloInstruction* instr : computation_->instructions()) {
      if (instr->opcode() == HloOpcode::kParameter ||
          instr->opcode() == HloOpcode::kTuple ||
          instr->opcode() == HloOpcode::kGetTupleElement) {
        continue;
      }
      fn(HloInstructionAdaptor{*instr, parent_});
    }
  }

  std::string ToString() const override { return computation_->ToString(); }

 private:
  const HloComputation* computation_;  // Not owned.
  absl::InlinedVector<HloInstructionAdaptor, 2> roots_;  // Cached by ctor.
  const HloFusionAdaptor* parent_;     // Not owned.
};
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForInstruction(
const HloInstruction* instruction) {
if (instruction->opcode() == HloOpcode::kFusion) {
return ForComputation(instruction->fused_instructions_computation());
}
auto fusion_adaptor = absl::WrapUnique(new HloFusionAdaptor);
fusion_adaptor->AddInstruction(instruction);
return fusion_adaptor;
}
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer) {
auto fusion_adaptor = absl::WrapUnique(new HloFusionAdaptor);
fusion_adaptor->AddInstruction(producer);
fusion_adaptor->AddInstruction(consumer);
return fusion_adaptor;
}
std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForComputation(
const HloComputation* computation) {
auto fusion_adaptor = absl::WrapUnique(new HloFusionAdaptor);
fusion_adaptor->AddComputation(computation);
return fusion_adaptor;
}
bool HloFusionAdaptor::ContainsInstruction(
HloInstructionAdaptor instruction) const {
return ContainsInstruction(&instruction.instruction());
}
bool HloFusionAdaptor::ContainsInstruction(
const HloInstruction* instruction) const {
for (const auto& fusion_instruction : fusion_instructions_) {
if (fusion_instruction->ContainsInstruction(instruction)) return true;
}
return false;
}
// Returns the roots of the adaptor. For a single fusion these are the roots
// of its computation. For a producer/consumer pair this stitches the two
// together:
//  - consumer roots that are parameters fed by the producer are replaced by
//    the corresponding producer root;
//  - for a multi-output producer, producer roots whose value is also used
//    outside the consumer are appended so they remain live after fusion.
absl::InlinedVector<HloInstructionAdaptor, 2> HloFusionAdaptor::GetRoots()
    const {
  auto roots = fusion_instructions_.back()->GetRoots();
  if (fusion_instructions_.size() == 1) {
    return roots;
  }
  // Exactly two entries: [0] = producer, back() = consumer.
  CHECK_EQ(fusion_instructions_.size(), 2);
  auto producer_roots = fusion_instructions_[0]->GetRoots();
  const HloInstruction& producer_fusion =
      fusion_instructions_[0]->FusionInstruction();
  const HloInstruction& consumer_fusion =
      fusion_instructions_.back()->FusionInstruction();
  // Replace consumer roots that are just pass-throughs of a producer output
  // with the producer root they forward.
  for (auto& root : roots) {
    if (root.opcode() != HloOpcode::kParameter) {
      continue;
    }
    const HloInstruction* operand =
        consumer_fusion.operand(root.instruction().parameter_number());
    int64_t root_index = 0;
    if (operand->opcode() == HloOpcode::kGetTupleElement) {
      // Multi-output producer: pick the tuple element the GTE selects.
      root_index = operand->tuple_index();
      operand = operand->operand(0);
    }
    if (operand == &producer_fusion) {
      root = producer_roots[root_index];
    }
  }
  if (!producer_fusion.IsMultiOutputFusion()) {
    return roots;
  }
  // Determine which producer outputs are consumed by someone other than the
  // consumer fusion; those must stay roots of the fused adaptor.
  absl::flat_hash_set<int64_t> root_indices_with_outside_usage;
  for (HloInstruction* instr : producer_fusion.users()) {
    bool has_outside_user = false;
    int64_t root_index = 0;
    if (instr->opcode() == HloOpcode::kGetTupleElement) {
      for (HloInstruction* user : instr->users()) {
        if (user != &consumer_fusion) {
          root_index = instr->tuple_index();
          has_outside_user = true;
          break;
        }
      }
    } else if (instr != &consumer_fusion) {
      // A non-GTE user consumes the whole tuple; recorded as index 0.
      has_outside_user = true;
    }
    if (has_outside_user) {
      root_indices_with_outside_usage.insert(root_index);
    }
  }
  for (int64_t i = 0; i < producer_roots.size(); ++i) {
    if (!root_indices_with_outside_usage.contains(i)) {
      continue;
    }
    // Parameter roots are pass-throughs of values available outside anyway,
    // so they need not be kept.
    if (producer_roots[i].opcode() != HloOpcode::kParameter) {
      roots.push_back(producer_roots[i]);
    }
  }
  return roots;
}
// Returns the external inputs of the adaptor. For a producer/consumer pair
// this is the union of both fusions' parameters, minus the edge from
// producer to consumer (which becomes internal after fusion), deduplicated
// and with pass-through producer parameters skipped when they are no longer
// needed.
absl::InlinedVector<const HloInstruction*, 2> HloFusionAdaptor::GetParameters()
    const {
  if (fusion_instructions_.size() == 1) {
    return fusion_instructions_.back()->GetParameters();
  }
  CHECK_EQ(fusion_instructions_.size(), 2);
  absl::InlinedVector<const HloInstruction*, 2> combined_parameters;
  const HloInstruction& producer_fusion =
      fusion_instructions_[0]->FusionInstruction();
  // Keep consumer parameters that do not come from the producer (looking
  // through a GTE for multi-output producers).
  for (const auto& param : fusion_instructions_.back()->GetParameters()) {
    const HloInstruction* operand = param;
    if (operand->opcode() == HloOpcode::kGetTupleElement) {
      operand = operand->operand(0);
    }
    if (operand != &producer_fusion) {
      combined_parameters.push_back(param);
    }
  }
  // Set of already-collected parameters, used for deduplication below.
  absl::flat_hash_set<const HloInstruction*> params(combined_parameters.begin(),
                                                    combined_parameters.end());
  auto producer_roots = fusion_instructions_[0]->GetRoots();
  absl::flat_hash_set<const HloInstruction*> parameters_to_skip;
  // Producer roots that merely forward a parameter: if that value is only
  // used via the producer, it need not appear as a parameter of the fused
  // adaptor.
  for (const auto& root : producer_roots) {
    if (root.opcode() == HloOpcode::kParameter) {
      if (&root.instruction() == &producer_fusion) {
        // The producer is itself a parameter pass-through of the consumer.
        parameters_to_skip.insert(&producer_fusion);
      } else if (root.instruction().user_count() <= 1) {
        parameters_to_skip.insert(
            producer_fusion.operand(root.instruction().parameter_number()));
      }
    }
  }
  for (auto param : fusion_instructions_[0]->GetParameters()) {
    if (!parameters_to_skip.contains(param) && params.insert(param).second) {
      combined_parameters.push_back(param);
    }
  }
  return combined_parameters;
}
// Concatenates the post-orders of the wrapped fusions. Producers precede
// consumers because fusion_instructions_ is stored in that order.
absl::InlinedVector<HloInstructionAdaptor, 2>
HloFusionAdaptor::MakeInstructionPostOrder() const {
  absl::InlinedVector<HloInstructionAdaptor, 2> result;
  for (const auto& fusion_instruction : fusion_instructions_) {
    auto partial_order = fusion_instruction->MakeInstructionPostOrder();
    result.insert(result.end(), std::make_move_iterator(partial_order.begin()),
                  std::make_move_iterator(partial_order.end()));
  }
  return result;
}
void HloFusionAdaptor::ForEach(
const std::function<void(HloInstructionAdaptor)>& fn) const {
for (const auto& fusion_instruction : fusion_instructions_) {
fusion_instruction->ForEach(fn);
}
}
std::string HloFusionAdaptor::ToString() const {
std::ostringstream ss;
for (const auto& fusion_instruction : fusion_instructions_) {
ss << fusion_instruction->ToString() << "\n";
}
return ss.str();
}
// Appends an instruction to the adaptor. A fusion op is expanded into its
// computation; anything else is wrapped individually.
void HloFusionAdaptor::AddInstruction(const HloInstruction* instruction) {
  if (instruction->opcode() != HloOpcode::kFusion) {
    fusion_instructions_.push_back(
        std::make_unique<SingleInstructionFusion>(instruction, this));
    return;
  }
  AddComputation(instruction->fused_instructions_computation());
}

// Appends a fusion computation to the adaptor.
void HloFusionAdaptor::AddComputation(const HloComputation* computation) {
  fusion_instructions_.emplace_back(
      std::make_unique<HloComputationFusion>(computation, this));
}
// Returns the operands of the wrapped instruction, resolved through fusion
// boundaries. A parameter's "operand" is the value feeding it from outside
// the fusion; if resolution yields the parameter itself (nothing to look
// through), the parameter has no operands.
absl::InlinedVector<HloInstructionAdaptor, 2>
HloInstructionAdaptor::GetOperands() const {
  absl::InlinedVector<HloInstructionAdaptor, 2> operands;
  if (instruction_->opcode() == HloOpcode::kParameter) {
    // The only operand is the corresponding operand of the enclosing fusion.
    auto operand = ResolveOperand(instruction_, *parent_);
    if (operand != instruction_) {
      operands.emplace_back(*operand, parent_);
    }
  } else {
    for (const auto* operand : instruction_->operands()) {
      operands.emplace_back(*ResolveOperand(operand, *parent_), parent_);
    }
  }
  return operands;
}
// Wraps `instruction` inside `parent`. Both referents must outlive the
// adaptor; a null parent would break operand/user resolution, hence the
// CHECK.
HloInstructionAdaptor::HloInstructionAdaptor(const HloInstruction& instruction,
                                             const HloFusionAdaptor* parent)
    : instruction_(&instruction), parent_(parent) {
  CHECK_NE(parent, nullptr) << "Parent fusion adaptor must not be null";
}
// Returns operand `index`, resolved through fusion boundaries.
HloInstructionAdaptor HloInstructionAdaptor::GetOperand(int index) const {
  const HloInstruction* resolved =
      ResolveOperand(instruction_->operand(index), *parent_);
  return HloInstructionAdaptor{*resolved, parent_};
}
// Returns the users of the wrapped instruction, resolved through fusion
// boundaries. If the instruction is a fusion root, the users of the
// enclosing fusion instruction are included as well.
absl::InlinedVector<HloInstructionAdaptor, 2> HloInstructionAdaptor::GetUsers()
    const {
  absl::InlinedVector<HloInstructionAdaptor, 2> users;
  auto add_user = [&](const HloInstruction* instr) {
    users.emplace_back(*instr, parent_);
  };

  if (instruction_->IsRoot()) {
    // The value escapes the fusion; collect users of the fusion op itself.
    if (auto* fusion = instruction_->parent()->FusionInstruction()) {
      for (auto* user : fusion->users()) {
        ResolveUsers(fusion, user, *parent_, add_user);
      }
    }
  }

  for (auto* user : instruction_->users()) {
    ResolveUsers(instruction_, user, *parent_, add_user);
  }

  return users;
}
// Two adaptors are equal iff they wrap the same instruction; an instruction
// is identified by its module plus its module-unique id.
bool operator==(const HloInstructionAdaptor& lhs,
                const HloInstructionAdaptor& rhs) {
  if (lhs.instruction_->GetModule() != rhs.instruction_->GetModule()) {
    return false;
  }
  return lhs.instruction_->unique_id() == rhs.instruction_->unique_id();
}

bool operator!=(const HloInstructionAdaptor& lhs,
                const HloInstructionAdaptor& rhs) {
  return !(lhs == rhs);
}
namespace {
// Breadth-first traversal restricted to instructions contained in `fusion`.
// Starts from `roots` (which are visited even if outside `fusion`) and
// follows operands when `visit_operands` is true, users otherwise.
// `visit_node` controls expansion per node:
//   kAdvance   - enqueue the node's neighbors,
//   kSkip      - visit but do not expand,
//   kInterrupt - stop the whole traversal immediately.
// Each node is visited at most once; visitation order is observable by
// callers and must be preserved.
void HloBfsTraversal(
    absl::Span<const HloInstructionAdaptor> roots,
    const HloFusionAdaptor& fusion,
    const std::function<TraversalResult(HloInstructionAdaptor node)>&
        visit_node,
    bool visit_operands) {
  absl::flat_hash_set<HloInstructionAdaptor> visited;
  std::queue<HloInstructionAdaptor> q;
  auto enqueue = [&](const HloInstructionAdaptor& node) {
    const auto& adjacent_nodes =
        visit_operands ? node.GetOperands() : node.GetUsers();
    for (const auto& node : adjacent_nodes) {
      // Only expand into instructions owned by the fusion adaptor.
      if (fusion.ContainsInstruction(node) && visited.insert(node).second) {
        q.push(node);
      }
    }
  };
  for (auto root : roots) {
    if (visited.insert(root).second) {
      q.push(root);
    }
  }
  while (!q.empty()) {
    HloInstructionAdaptor node = q.front();
    q.pop();
    switch (visit_node(node)) {
      case TraversalResult::kAdvance:
        enqueue(node);
        break;
      case TraversalResult::kInterrupt:
        return;
      case TraversalResult::kSkip:
        break;
    }
  }
}
}
// BFS from `roots` towards operands (consumers are visited before their
// producers).
void HloBfsConsumersFirstTraversal(
    absl::Span<const HloInstructionAdaptor> roots,
    const HloFusionAdaptor& fusion,
    const std::function<TraversalResult(HloInstructionAdaptor node)>&
        visit_node) {
  HloBfsTraversal(roots, fusion, visit_node,
                  true);
}

// BFS from `producers` towards users (producers are visited before their
// consumers).
void HloBfsProducersFirstTraversal(
    absl::Span<const HloInstructionAdaptor> producers,
    const HloFusionAdaptor& fusion,
    const std::function<TraversalResult(HloInstructionAdaptor node)>&
        visit_node) {
  HloBfsTraversal(producers, fusion, visit_node,
                  false);
}
// True iff any instruction reachable from `roots` (within `fusion`)
// satisfies `visit`.
bool HloBfsAnyOf(absl::Span<const HloInstructionAdaptor> roots,
                 const HloFusionAdaptor& fusion,
                 const std::function<bool(HloInstructionAdaptor node)>& visit,
                 bool visit_operands) {
  return HloBfsFindIf(roots, fusion, visit, visit_operands).has_value();
}

// Raw-instruction overload of the above; no fusion restriction.
bool HloBfsAnyOf(absl::Span<const HloInstruction* const> roots,
                 const std::function<bool(const HloInstruction* node)>& visit,
                 bool visit_operands) {
  return HloBfsFindIf(roots, visit, visit_operands).has_value();
}
// Returns the first instruction (in BFS order from `roots`, restricted to
// `fusion`) satisfying `visit`, or nullopt if none does.
std::optional<HloInstructionAdaptor> HloBfsFindIf(
    absl::Span<const HloInstructionAdaptor> roots,
    const HloFusionAdaptor& fusion,
    const std::function<bool(HloInstructionAdaptor node)>& visit,
    bool visit_operands) {
  std::optional<HloInstructionAdaptor> match;
  auto visitor = [&](HloInstructionAdaptor node) {
    if (!visit(node)) {
      return TraversalResult::kAdvance;
    }
    match = node;
    return TraversalResult::kInterrupt;
  };
  HloBfsTraversal(roots, fusion, visitor, visit_operands);
  return match;
}
// BFS over raw instructions starting at `roots`, following operands when
// `visit_operands` is true and users otherwise. Collects every node for
// which `visit` returns true, in visitation order; with `find_first_only`
// the search stops at the first match. Roots themselves are tested too.
std::vector<const HloInstruction*> HloFindAllImpl(
    absl::Span<const HloInstruction* const> roots,
    const std::function<bool(const HloInstruction* node)>& visit,
    bool visit_operands, bool find_first_only = false) {
  std::vector<const HloInstruction*> result;
  absl::flat_hash_set<const HloInstruction*> visited;
  std::queue<const HloInstruction*> q;
  // Pushes the not-yet-visited neighbors of `node` onto the queue.
  auto enqueue = [&](const HloInstruction* node) {
    if (visit_operands) {
      for (const HloInstruction* operand : node->operands()) {
        if (visited.insert(operand).second) {
          q.push(operand);
        }
      }
    } else {
      // Fixed: the loop variable was previously named `operand` although it
      // iterates over users.
      for (const HloInstruction* user : node->users()) {
        if (visited.insert(user).second) {
          q.push(user);
        }
      }
    }
  };
  for (auto root : roots) {
    if (visited.insert(root).second) {
      q.push(root);
    }
  }
  while (!q.empty()) {
    const HloInstruction* node = q.front();
    q.pop();
    if (visit(node)) {
      result.push_back(node);
      if (find_first_only) {
        return result;
      }
    }
    enqueue(node);
  }
  return result;
}
// Returns the first raw instruction reachable from `roots` that satisfies
// `visit`, or nullopt if there is none.
std::optional<const HloInstruction*> HloBfsFindIf(
    absl::Span<const HloInstruction* const> roots,
    const std::function<bool(const HloInstruction* node)>& visit,
    bool visit_operands) {
  std::vector<const HloInstruction*> matches =
      HloFindAllImpl(roots, visit, visit_operands,
                     /*find_first_only=*/true);
  if (matches.empty()) {
    return std::nullopt;
  }
  return matches.front();
}
// Returns all instructions reachable from `roots` (following operands when
// `visit_operands` is true, users otherwise) that satisfy `visit`, in BFS
// order.
std::vector<const HloInstruction*> HloBfsFindAll(
    absl::Span<const HloInstruction* const> roots,
    const std::function<bool(const HloInstruction* node)>& visit,
    bool visit_operands) {
  // Fixed: removed an unused local `result` that was declared but never
  // populated; HloFindAllImpl already returns the collected vector.
  return HloFindAllImpl(roots, visit, visit_operands);
}
// Finds a use-def chain from `parent` to `root` via DFS over users. Returns
// the chain ordered from `parent` to `root` inclusive, or an empty vector if
// `root` is not reachable. The chain is assembled on recursion unwind
// (deepest node pushed first) and reversed at the end.
std::vector<HloInstructionAdaptor> HloFindUseChain(HloInstructionAdaptor parent,
                                                   HloInstructionAdaptor root) {
  absl::flat_hash_set<HloInstructionAdaptor> visited;
  std::vector<HloInstructionAdaptor> result;
  std::function<bool(HloInstructionAdaptor)> visit;
  visit = [&](HloInstructionAdaptor node) {
    if (node == root) return true;
    for (const auto& user : node.GetUsers()) {
      // Each user is explored at most once across the whole search.
      if (visited.insert(user).second && visit(user)) {
        result.push_back(user);
        return true;
      }
    }
    return false;
  };
  if (visit(parent)) {
    result.push_back(parent);
    std::reverse(result.begin(), result.end());
  } else {
    result.clear();
  }
  return result;
}
}
} | #include "xla/service/gpu/hlo_traversal.h"
#include <optional>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
// Matches an adaptor (or instruction) whose name() equals `name`.
MATCHER_P(InstructionAdaptorName, name, "") { return arg.name() == name; }
// Fixture; HloTestBase provides ParseAndReturnVerifiedModule.
class HloTraversalTest : public HloTestBase {};
// Module with a reduce fusion ("fusion") feeding a two-output fusion
// ("fusion2"); shared by most tests below.
const char kTestModule[] = R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
}
fused_computation_1 {
p0.2 = f32[] parameter(0)
zero = f32[] constant(0.0)
is_positive = pred[] compare(p0.2, zero), direction=GE
not = pred[] not(is_positive)
ROOT tuple = (pred[], pred[]) tuple(is_positive, not)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
log = f32[128] log(sum)
negate = f32[128] negate(log)
fusion = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation
fusion2 = (pred[], pred[]) fusion(fusion), kind=kLoop, calls=fused_computation_1
gte = pred[] get-tuple-element(fusion2), index=0
ROOT select = f32[] select(gte, fusion, p0)
})";
// Verifies that GetOperands() resolves through fusion boundaries: the
// consumer root's operands include the producer root and the GTE source.
TEST_F(HloTraversalTest, AdaptorOperands) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(
      module->entry_computation()->GetInstructionWithName("fusion2"),
      module->entry_computation()->GetInstructionWithName("select"));
  HloInstructionAdaptor instr = fusion_adaptor->GetRoots()[0];
  EXPECT_THAT(instr.GetOperands(),
              ElementsAre(InstructionAdaptorName("is_positive"),
                          InstructionAdaptorName("fusion"),
                          InstructionAdaptorName("p0")));
}
// Verifies that GetUsers() resolves users across fusion boundaries,
// including multi-output-fusion GTE users and outside-the-adaptor users.
TEST_F(HloTraversalTest, AdaptorUsers) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule test
    fused_computation {
      p0 = f32[] parameter(0)
      neg = f32[] negate(p0)
      add = f32[] add(p0, neg)
      ROOT t = (f32[], f32[]) tuple(neg, add)
    }
    fused_computation_1 {
      p0.0 = f32[] parameter(0)
      mul = f32[] multiply(p0.0, p0.0)
      ROOT neg.1 = f32[] negate(mul)
    }
    ENTRY entry {
      p0 = f32[] parameter(0)
      fusion = (f32[], f32[]) fusion(p0), kind=kLoop, calls=fused_computation
      gte = f32[] get-tuple-element(fusion), index=0
      add.1 = f32[] add(p0, gte)
      fusion2 = f32[] fusion(gte), kind=kLoop, calls=fused_computation_1
      exp.1 = f32[] exponential(fusion2)
      ROOT res = (f32[], (f32[], f32[]), f32[], f32[]) tuple(add.1, fusion, fusion2, exp.1)
    }
  )")
                    .value();
  auto fusion_adaptor1 = HloFusionAdaptor::ForProducerConsumer(
      module->entry_computation()->GetInstructionWithName("fusion"),
      module->entry_computation()->GetInstructionWithName("fusion2"));
  HloInstructionAdaptor add{*module->GetComputationWithName("fused_computation")
                                 ->GetInstructionWithName("add"),
                            fusion_adaptor1.get()};
  EXPECT_THAT(add.GetUsers(), ElementsAre(InstructionAdaptorName("mul"),
                                          InstructionAdaptorName("res")));
  auto fusion_adaptor2 = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion2"));
  HloInstructionAdaptor mul{
      *module->GetComputationWithName("fused_computation_1")
           ->GetInstructionWithName("mul"),
      fusion_adaptor2.get()};
  EXPECT_THAT(mul.GetUsers(), ElementsAre(InstructionAdaptorName("neg.1")));
  HloInstructionAdaptor neg{
      *module->GetComputationWithName("fused_computation_1")
           ->GetInstructionWithName("neg.1"),
      fusion_adaptor2.get()};
  EXPECT_TRUE(neg.GetUsers().empty());
}
// Verifies consumers-first BFS order inside a single fusion.
TEST_F(HloTraversalTest, TraverseFusionConsumerFirst) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  std::vector<std::string> visited_nodes;
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  visited_nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
}
// Duplicate start nodes (root plus an inner node) must not be visited twice.
TEST_F(HloTraversalTest,
       TraverseFusionConsumerFirstFromFusionRootAndInnerNode) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  std::vector<std::string> visited_nodes;
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  auto root = fusion->GetRoots()[0];
  HloBfsConsumersFirstTraversal({root, root.GetOperand(0)}, *fusion,
                                [&](HloInstructionAdaptor node) {
                                  visited_nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
}
// Verifies producers-first BFS order inside a single fusion.
TEST_F(HloTraversalTest, TraverseFusionProducerFirst) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  std::vector<std::string> visited_nodes;
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  auto root = fusion->GetRoots()[0];
  HloBfsProducersFirstTraversal({root.GetOperand(0)}, *fusion,
                                [&](HloInstructionAdaptor node) {
                                  visited_nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  EXPECT_THAT(visited_nodes, ElementsAre("mul", "reduce.1"));
}
// kInterrupt must stop the traversal right after the interrupting node.
TEST_F(HloTraversalTest, AbortTraversal) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  std::vector<std::string> visited_nodes;
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  visited_nodes.emplace_back(node.name());
                                  return node.opcode() == HloOpcode::kReduce
                                             ? TraversalResult::kAdvance
                                             : TraversalResult::kInterrupt;
                                });
  EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul"));
}
// GetParameters of a single fusion returns the fusion op's operands.
TEST_F(HloTraversalTest, FindArguments) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  std::vector<std::string> producers;
  absl::c_for_each(fusion->GetParameters(),
                   [&](const HloInstruction* producer) {
                     producers.emplace_back(producer->name());
                   });
  EXPECT_THAT(producers, ElementsAre("p0", "negate"));
}
// For a producer/consumer pair the producer's inputs replace the internal
// edge.
TEST_F(HloTraversalTest, FindArgumentsAfterFusion) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForProducerConsumer(
      module->entry_computation()->GetInstructionWithName("negate"),
      module->entry_computation()->GetInstructionWithName("fusion"));
  std::vector<std::string> producers;
  absl::c_for_each(fusion->GetParameters(),
                   [&](const HloInstruction* producer) {
                     producers.emplace_back(producer->name());
                   });
  EXPECT_THAT(producers, ElementsAre("p0", "log"));
}
// HloBfsFindIf returns the first matching node.
TEST_F(HloTraversalTest, HloBfsFindIf_Found) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  auto result = HloBfsFindIf(fusion->GetRoots(), *fusion,
                             [&](HloInstructionAdaptor node) {
                               return node.opcode() == HloOpcode::kMultiply;
                             });
  ASSERT_NE(result, std::nullopt);
  ASSERT_EQ(result->name(), "mul");
}
// HloBfsFindIf returns nullopt when no node matches.
TEST_F(HloTraversalTest, HloBfsFindIf_NotFound) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  auto result = HloBfsFindIf(fusion->GetRoots(), *fusion,
                             [&](HloInstructionAdaptor node) { return false; });
  ASSERT_EQ(result, std::nullopt);
}
// HloAnyOf over a fusion adaptor: match present.
TEST_F(HloTraversalTest, HloAnyOf_Found) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  EXPECT_TRUE(HloAnyOf(*fusion, [&](HloInstructionAdaptor node) {
    return node.opcode() == HloOpcode::kMultiply;
  }));
}
// HloAnyOf over a fusion adaptor: no match.
TEST_F(HloTraversalTest, HloAnyOf_NotFound) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto fusion = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("fusion"));
  EXPECT_FALSE(
      HloAnyOf(*fusion, [&](HloInstructionAdaptor node) { return false; }));
}
// HloBfsFindAll collects every match in BFS order.
TEST_F(HloTraversalTest, FindAllMultiple) {
  const char kConverts[] = R"(
  HloModule test
  ENTRY entry {
    p0 = s8[128] parameter(0)
    p1 = pred[128] parameter(1)
    p1c = s8[128] convert(p1)
    p1c1 = f16[128] convert(p1c)
    p0c = f16[128] convert(p0)
    ROOT diff = f16[128] subtract(p0c, p1c1)
  })";
  auto module = ParseAndReturnVerifiedModule(kConverts).value();
  auto root = module->entry_computation()->GetInstructionWithName("diff");
  std::vector<const HloInstruction*> converts =
      HloBfsFindAll({root}, [&](const HloInstruction* node) {
        return node->opcode() == HloOpcode::kConvert;
      });
  auto get = [&](absl::string_view name) {
    return module->entry_computation()->GetInstructionWithName(name);
  };
  EXPECT_THAT(converts, ElementsAre(get("p0c"), get("p1c1"), get("p1c")));
}
// HloBfsFindAll returns an empty vector when nothing matches.
TEST_F(HloTraversalTest, FindAllNotFound) {
  const char kConverts[] = R"(
  HloModule test
  ENTRY entry {
    p0 = s8[128] parameter(0)
    p1 = f16[128] parameter(1)
    p0c = f16[128] convert(p0)
    ROOT diff = f16[128] subtract(p0c, p1)
  })";
  auto module = ParseAndReturnVerifiedModule(kConverts).value();
  auto root = module->entry_computation()->GetInstructionWithName("diff");
  std::vector<const HloInstruction*> converts =
      HloBfsFindAll({root}, [&](const HloInstruction* node) {
        return node->opcode() == HloOpcode::kAdd;
      });
  EXPECT_THAT(converts, IsEmpty());
}
// Module with two chained reduce fusions; shared by the fusion-pair tests.
const char kTwoFusions[] = R"(
  HloModule test
  scalar_add_computation {
    scalar_lhs.0 = f32[] parameter(0)
    scalar_rhs.0 = f32[] parameter(1)
    ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
  }
  fused_computation_1 {
    p0.1 = f32[] parameter(0)
    p1.1 = f32[128] parameter(1)
    mul = f32[128] multiply(p1.1, p1.1)
    ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
  }
  fused_computation_2 {
    p0.2 = f32[] parameter(0)
    p1.2 = f32[128] parameter(1)
    ROOT reduce.2 = f32[] reduce(p1.2, p0.2), dimensions={0}, to_apply=scalar_add_computation
  }
  ENTRY entry {
    p0 = f32[] parameter(0)
    p1 = f32[128] parameter(1)
    sum = f32[128] add(p1, p1)
    negate = f32[128] negate(sum)
    fusion.1 = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation_1
    fusion.2 = f32[] fusion(fusion.1, negate), kind=kLoop, calls=fused_computation_2
    ROOT difference = f32[] subtract(fusion.2, p0)
  })";
// Producer (plain op) + consumer (fusion): traversal crosses the boundary
// into the producer.
TEST_F(HloTraversalTest, FuseFusionConsumer) {
  auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
  auto producer = module->entry_computation()->GetInstructionWithName("negate");
  auto consumer =
      module->entry_computation()->GetInstructionWithName("fusion.1");
  auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
  HloInstructionAdaptor reduce_1(
      *module->GetComputationWithName("fused_computation_1")
           ->GetInstructionWithName("reduce.1"),
      fusion.get());
  EXPECT_TRUE(reduce_1.GetUsers().empty());
  std::vector<std::string> nodes;
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul", "negate"));
}
// Producer (fusion) + consumer (plain op): operands of the producer root
// resolve to values outside the pair.
TEST_F(HloTraversalTest, FuseFusionProducer) {
  auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
  auto producer =
      module->entry_computation()->GetInstructionWithName("fusion.2");
  auto consumer =
      module->entry_computation()->GetInstructionWithName("difference");
  auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
  HloInstructionAdaptor reduce_2(
      *module->GetComputationWithName("fused_computation_2")
           ->GetInstructionWithName("reduce.2"),
      fusion.get());
  EXPECT_THAT(reduce_2.GetOperands(),
              ElementsAre(InstructionAdaptorName("negate"),
                          InstructionAdaptorName("fusion.1")));
  std::vector<std::string> nodes;
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  EXPECT_THAT(nodes, ElementsAre("difference", "reduce.2"));
}
// Two fusions fused together: traversal and parameters span both bodies.
TEST_F(HloTraversalTest, FuseFusionConsumerAndProducer) {
  auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
  auto producer =
      module->entry_computation()->GetInstructionWithName("fusion.1");
  auto consumer =
      module->entry_computation()->GetInstructionWithName("fusion.2");
  auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
  std::vector<std::string> nodes;
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  std::vector<std::string> params;
  absl::c_for_each(fusion->GetParameters(), [&](const HloInstruction* param) {
    params.emplace_back(param->name());
  });
  EXPECT_THAT(nodes, ElementsAre("reduce.2", "reduce.1", "mul"));
  EXPECT_THAT(params, ElementsAre("negate", "p0"));
}
// Two plain (non-fusion) instructions fused as producer/consumer.
TEST_F(HloTraversalTest, FuseNonFusionConsumerAndProducer) {
  auto module = ParseAndReturnVerifiedModule(kTestModule).value();
  auto producer = module->entry_computation()->GetInstructionWithName("log");
  auto consumer = module->entry_computation()->GetInstructionWithName("negate");
  auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
  std::vector<std::string> nodes;
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  nodes.emplace_back(node.name());
                                  return TraversalResult::kAdvance;
                                });
  EXPECT_THAT(nodes, ElementsAre("negate", "log"));
}
TEST_F(HloTraversalTest, SingleInstructionFusionOfFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion.1"));
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul"));
}
TEST_F(HloTraversalTest, SingleInstructionFusionOfInstruction) {
auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("negate"));
std::vector<std::string> nodes;
HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
[&](HloInstructionAdaptor node) {
nodes.emplace_back(node.name());
return TraversalResult::kAdvance;
});
EXPECT_THAT(nodes, ElementsAre("negate"));
}
TEST_F(HloTraversalTest, MultiOutputFusionDuplicateRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
fused_computation {
p0.1 = f32[128] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p0.1, p1.1)
ROOT res = (f32[128], f32[128]) tuple(mul, mul)
}
ENTRY entry {
p0 = f32[128] parameter(0)
p1 = f32[128] parameter(1)
ROOT fusion = (f32[128], f32[128]) fusion(p0, p1), kind=kLoop, calls=fused_computation
})")
.value();
auto fusion = HloFusionAdaptor::ForInstruction(
module->entry_computation()->GetInstructionWithName("fusion"));
EXPECT_THAT(fusion->GetRoots(), ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("mul")));
}
// Post-order of a single-instruction adaptor is just that one instruction.
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_SingleInstruction) {
  auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
  auto adaptor = HloFusionAdaptor::ForInstruction(
      module->entry_computation()->GetInstructionWithName("negate"));
  EXPECT_THAT(adaptor->MakeInstructionPostOrder(),
              ElementsAre(InstructionAdaptorName("negate")));
}
// Post-order over a producer/consumer pair of fusions interleaves both fused
// computations: producers come before their consumers.
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoFusions) {
  auto module = ParseAndReturnVerifiedModule(kTwoFusions).value();
  auto* entry = module->entry_computation();
  auto adaptor = HloFusionAdaptor::ForProducerConsumer(
      entry->GetInstructionWithName("fusion.1"),
      entry->GetInstructionWithName("fusion.2"));
  EXPECT_THAT(adaptor->MakeInstructionPostOrder(),
              ElementsAre(InstructionAdaptorName("mul"),
                          InstructionAdaptorName("reduce.1"),
                          InstructionAdaptorName("reduce.2")));
}
TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoMultiOutputFusions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
fused_computation_1 {
p0.1 = f32[] parameter(0)
p1.1 = f32[128] parameter(1)
mul = f32[128] multiply(p1.1, p1.1)
reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation
ROOT t = (f32[128], f32[]) tuple(mul, reduce.1)
}
fused_computation_2 {
p0.2 = f32[] parameter(0)
p1.2 = f32[128] parameter(1)
neg = f32[128] negate(p1.2)
reduce.2 = f32[] reduce(neg, p0.2), dimensions={0}, to_apply=scalar_add_computation
ROOT t2 = (f32[], f32[128]) tuple(reduce.2, neg)
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128] parameter(1)
sum = f32[128] add(p1, p1)
negate = f32[128] negate(sum)
fusion.1 = (f32[128], f32[]) fusion(p0, negate), kind=kLoop, calls=fused_computation_1
gte1 = f32[128] get-tuple-element(fusion.1), index=0
gte2 = f32[] get-tuple-element(fusion.1), index=1
fusion.2 = (f32[], f32[128]) fusion(p0, gte1), kind=kLoop, calls=fused_computation_2
gte3 = f32[] get-tuple-element(fusion.2), index=0
gte4 = f32[128] get-tuple-element(fusion.2), index=1
difference = f32[] subtract(gte3, p0)
ROOT res = (f32[], f32[128]) tuple(difference, gte4)
})")
.value();
auto fusion = HloFusionAdaptor::ForProducerConsumer(
module->entry_computation()->GetInstructionWithName("fusion.1"),
module->entry_computation()->GetInstructionWithName("fusion.2"));
auto nodes = fusion->MakeInstructionPostOrder();
EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"),
InstructionAdaptorName("reduce.1"),
InstructionAdaptorName("neg"),
InstructionAdaptorName("reduce.2")));
}
const char kTwoMultiOutputFusions[] = R"(
HloModule mof
mof_producer {
param0 = f32[10]{0} parameter(0)
param1 = f32[10]{0} parameter(1)
param2 = f32[10]{0} parameter(2)
add = f32[10]{0} add(param0, param1)
sub = f32[10]{0} subtract(param0, param1)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(param1, add, sub, param0, param2)
}
mof_consumer {
param0.0 = f32[10]{0} parameter(0)
param1.0 = f32[10]{0} parameter(1)
param2.0 = f32[10]{0} parameter(2)
mul = f32[10]{0} multiply(param0.0, param1.0)
div = f32[10]{0} divide(param0.0, param1.0)
ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param2.0)
}
ENTRY main {
p0 = f32[10]{0} parameter(0)
p1 = f32[10]{0} parameter(1)
p2 = f32[10]{0} parameter(2)
producer = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(p0, p1, p2), kind=kLoop, calls=mof_producer
gte0 = f32[10]{0} get-tuple-element(producer), index=0
gte1 = f32[10]{0} get-tuple-element(producer), index=1
gte2 = f32[10]{0} get-tuple-element(producer), index=2
gte3 = f32[10]{0} get-tuple-element(producer), index=3
gte4 = f32[10]{0} get-tuple-element(producer), index=4
consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2, gte3), kind=kLoop, calls=mof_consumer
gte5 = f32[10]{0} get-tuple-element(consumer), index=0
gte6 = f32[10]{0} get-tuple-element(consumer), index=1
gte7 = f32[10]{0} get-tuple-element(consumer), index=2
ROOT res = tuple(gte0, gte1, gte3, gte4, gte5, gte6, gte7)
})";
TEST_F(HloTraversalTest, GetParametersMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value();
auto producer =
module->entry_computation()->GetInstructionWithName("producer");
auto consumer =
module->entry_computation()->GetInstructionWithName("consumer");
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(producer, consumer);
auto p0 = module->entry_computation()->GetInstructionWithName("p0");
auto p1 = module->entry_computation()->GetInstructionWithName("p1");
EXPECT_THAT(fusion_adaptor->GetParameters(), ElementsAre(p0, p1));
consumer->MergeFusionInstructionIntoMultiOutput(producer);
EXPECT_THAT(consumer->operands(), ElementsAre(p0, p1));
}
TEST_F(HloTraversalTest, GetRootsMultiOutputFusion) {
auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value();
auto consumer_fusion_instr =
module->entry_computation()->GetInstructionWithName("consumer");
auto producer_fusion_instr =
module->entry_computation()->GetInstructionWithName("producer");
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(
producer_fusion_instr, consumer_fusion_instr);
auto producer_computation = module->GetComputationWithName("mof_producer");
auto producer = HloFusionAdaptor::ForComputation(producer_computation);
auto consumer_computation = module->GetComputationWithName("mof_consumer");
auto consumer = HloFusionAdaptor::ForComputation(consumer_computation);
EXPECT_THAT(fusion_adaptor->GetRoots(),
ElementsAre(
HloInstructionAdaptor{
*consumer_computation->GetInstructionWithName("mul"),
consumer.get()},
HloInstructionAdaptor{
*consumer_computation->GetInstructionWithName("div"),
consumer.get()},
HloInstructionAdaptor{
*producer_computation->GetInstructionWithName("param0"),
producer.get()},
HloInstructionAdaptor{
*producer_computation->GetInstructionWithName("add"),
producer.get()}));
consumer_fusion_instr->MergeFusionInstructionIntoMultiOutput(
producer_fusion_instr);
EXPECT_THAT(consumer_fusion_instr->fused_expression_root(),
GmockMatch(m::Tuple(
m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Divide(m::Add(m::Parameter(0), m::Parameter(1)),
m::Subtract(m::Parameter(0), m::Parameter(1))),
m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1)))));
}
TEST_F(HloTraversalTest, HloFindUseChain) {
auto module = ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
negate = f32[] negate(p0)
log = f32[] log(p0)
sum = f32[] add(p0, log)
exp = f32[] exponential(p1)
ROOT call = f32[] custom-call(negate, exp, sum), custom_call_target="it"
}
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT fusion = f32[] fusion(p0, p1), kind=kLoop, calls=fusion
}
)")
.value();
auto* fusion_computation = module->GetComputationWithName("fusion");
auto fusion = HloFusionAdaptor::ForComputation(fusion_computation);
auto get = [&](absl::string_view name) {
return HloInstructionAdaptor{
*fusion_computation->GetInstructionWithName(name), fusion.get()};
};
auto p0 = get("p0");
auto p1 = get("p1");
auto log = get("log");
auto sum = get("sum");
auto negate = get("negate");
auto exp = get("exp");
auto call = get("call");
EXPECT_THAT(HloFindUseChain(p0, p0), ElementsAre(p0));
EXPECT_THAT(HloFindUseChain(p0, p1), IsEmpty());
EXPECT_THAT(HloFindUseChain(p0, call), ElementsAre(p0, negate, call));
EXPECT_THAT(HloFindUseChain(p0, sum), ElementsAre(p0, log, sum));
EXPECT_THAT(HloFindUseChain(p1, exp), ElementsAre(p1, exp));
EXPECT_THAT(HloFindUseChain(negate, exp), IsEmpty());
EXPECT_THAT(HloFindUseChain(call, p0), IsEmpty());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_traversal.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_traversal_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2ff66a8f-ddcf-4b7d-80b1-e7448a66996e | cpp | tensorflow/tensorflow | kernel_def_builder | tensorflow/core/framework/kernel_def_builder.cc | tensorflow/core/framework/kernel_def_builder_test.cc | #include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
namespace tensorflow {
// Starts building a KernelDef for the op named `op_name`. The builder owns
// the KernelDef until Build() transfers it to the caller.
KernelDefBuilder::KernelDefBuilder(const char* op_name) {
  kernel_def_ = new KernelDef;
  kernel_def_->set_op(op_name);
}

// The destructor enforces the builder protocol: Build() must have been
// called (which nulls out kernel_def_), otherwise the KernelDef would leak.
KernelDefBuilder::~KernelDefBuilder() {
  DCHECK(kernel_def_ == nullptr) << "Did not call Build()";
}

// Sets the device type (e.g. "CPU", "GPU") this kernel is registered for.
// Returns *this so calls can be chained.
KernelDefBuilder& KernelDefBuilder::Device(const char* device_type) {
  kernel_def_->set_device_type(device_type);
  return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<int64_t>(
const char* attr_name, absl::Span<const int64_t> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (const int64_t integer : allowed) {
allowed_values->add_i(integer);
}
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<int64_t>(
const char* attr_name, int64_t allowed) {
return AttrConstraint(
attr_name,
absl::Span<const int64_t>(std::initializer_list<int64_t>({allowed})));
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<string>(
const char* attr_name, absl::Span<const string> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (const auto& str : allowed) {
allowed_values->add_s(str);
}
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<string>(
const char* attr_name, string allowed) {
return AttrConstraint(
attr_name,
absl::Span<const string>(std::initializer_list<string>({allowed})));
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<const char*>(
const char* attr_name, absl::Span<const char* const> allowed) {
auto* constraint = kernel_def_->add_constraint();
constraint->set_name(attr_name);
auto* allowed_values = constraint->mutable_allowed_values()->mutable_list();
for (const auto& str : allowed) {
allowed_values->add_s(str);
}
return *this;
}
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<const char*>(
const char* attr_name, const char* allowed) {
return AttrConstraint(attr_name,
absl::Span<const char* const>(
std::initializer_list<const char*>({allowed})));
}
// Restricts the bool attr `attr_name` to the single value `allowed`.
// Returns *this so calls can be chained.
template <>
KernelDefBuilder& KernelDefBuilder::AttrConstraint<bool>(const char* attr_name,
                                                         bool allowed) {
  auto* constraint = kernel_def_->add_constraint();
  constraint->set_name(attr_name);
  constraint->mutable_allowed_values()->mutable_list()->add_b(allowed);
  return *this;
}
// Restricts the type attr `attr_name` to the data types in `allowed`.
// Returns *this so calls can be chained.
KernelDefBuilder& KernelDefBuilder::TypeConstraint(
    const char* attr_name, absl::Span<const DataType> allowed) {
  auto* constraint = kernel_def_->add_constraint();
  constraint->set_name(attr_name);
  auto* type_list = constraint->mutable_allowed_values()->mutable_list();
  for (const DataType dt : allowed) type_list->add_type(dt);
  return *this;
}

// Single-type convenience overload of the span version above.
KernelDefBuilder& KernelDefBuilder::TypeConstraint(const char* attr_name,
                                                   DataType allowed) {
  auto* constraint = kernel_def_->add_constraint();
  constraint->set_name(attr_name);
  auto* type_list = constraint->mutable_allowed_values()->mutable_list();
  type_list->add_type(allowed);
  return *this;
}
// Marks the input/output argument `arg_name` as resident in host memory,
// even when the kernel runs on a device. Returns *this for chaining.
KernelDefBuilder& KernelDefBuilder::HostMemory(const char* arg_name) {
  kernel_def_->add_host_memory_arg(arg_name);
  return *this;
}

// Assigns a label to the kernel so multiple kernels can be registered for
// the same op/device pair. May be called at most once per builder; a second
// call trips the CHECK below. Returns *this for chaining.
KernelDefBuilder& KernelDefBuilder::Label(const char* label) {
  CHECK_EQ(kernel_def_->label(), "")
      << "Trying to set a kernel's label a second time: '" << label
      << "' in: " << kernel_def_->DebugString();
  kernel_def_->set_label(label);
  return *this;
}

// Sets the kernel's registration priority; higher values win when several
// kernels match. Returns *this for chaining.
KernelDefBuilder& KernelDefBuilder::Priority(int32_t priority) {
  kernel_def_->set_priority(priority);
  return *this;
}
// Finalizes the builder: transfers ownership of the accumulated KernelDef to
// the caller and resets the internal pointer so the destructor's DCHECK
// (which verifies Build() was called) passes.
const KernelDef* KernelDefBuilder::Build() {
  const KernelDef* result = kernel_def_;
  kernel_def_ = nullptr;
  return result;
}
} | #include "tensorflow/core/framework/kernel_def_builder.h"
#include "tensorflow/core/framework/kernel_def.pb.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(KernelDefBuilderTest, Basic) {
const KernelDef* def = KernelDefBuilder("A").Device(DEVICE_CPU).Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString("op: 'A' device_type: 'CPU'",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, TypeConstraint) {
const KernelDef* def = KernelDefBuilder("B")
.Device(DEVICE_GPU)
.TypeConstraint<float>("T")
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(R"proto(
op: 'B' device_type: 'GPU'
constraint { name: 'T' allowed_values { list { type: DT_FLOAT } } } )proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("C")
.Device(DEVICE_GPU)
.TypeConstraint<int32>("U")
.TypeConstraint<bool>("V")
.Build();
protobuf::TextFormat::ParseFromString(R"proto(
op: 'C' device_type: 'GPU'
constraint { name: 'U' allowed_values { list { type: DT_INT32 } } }
constraint { name: 'V' allowed_values { list { type: DT_BOOL } } } )proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("D")
.Device(DEVICE_CPU)
.TypeConstraint("W", {DT_DOUBLE, DT_STRING})
.Build();
protobuf::TextFormat::ParseFromString(R"proto(
op: 'D' device_type: 'CPU'
constraint { name: 'W'
allowed_values { list { type: [DT_DOUBLE, DT_STRING] } } } )proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, Int64Constraint) {
const KernelDef* def = KernelDefBuilder("B")
.Device(DEVICE_GPU)
.AttrConstraint("T", int64_t{5})
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(R"proto(
op: 'B'
device_type: 'GPU'
constraint {
name: 'T'
allowed_values { list { i: 5 } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("C")
.Device(DEVICE_GPU)
.AttrConstraint("U",
absl::Span<const int64_t>{int64_t{5}, int64_t{17}})
.AttrConstraint("V", string("proto"))
.Build();
protobuf::TextFormat::ParseFromString(
R"proto(
op: 'C'
device_type: 'GPU'
constraint {
name: 'U'
allowed_values { list { i: [ 5, 17 ] } }
}
constraint {
name: 'V'
allowed_values { list { s: 'proto' } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, StringConstraint) {
const KernelDef* def = KernelDefBuilder("B")
.Device(DEVICE_GPU)
.AttrConstraint("T", "hi")
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(R"proto(
op: 'B'
device_type: 'GPU'
constraint {
name: 'T'
allowed_values { list { s: 'hi' } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
def = KernelDefBuilder("C")
.Device(DEVICE_GPU)
.AttrConstraint("U", absl::Span<const char* const>{"boo", "ya"})
.AttrConstraint("V", string("proto"))
.Build();
protobuf::TextFormat::ParseFromString(
R"proto(
op: 'C'
device_type: 'GPU'
constraint {
name: 'U'
allowed_values { list { s: [ 'boo', 'ya' ] } }
}
constraint {
name: 'V'
allowed_values { list { s: 'proto' } }
})proto",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
TEST(KernelDefBuilderTest, HostMemory) {
const KernelDef* def = KernelDefBuilder("E")
.Device(DEVICE_GPU)
.HostMemory("in")
.HostMemory("out")
.Build();
KernelDef expected;
protobuf::TextFormat::ParseFromString(
"op: 'E' device_type: 'GPU' "
"host_memory_arg: ['in', 'out']",
&expected);
EXPECT_EQ(def->DebugString(), expected.DebugString());
delete def;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/kernel_def_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8bb9de31-2dae-4bbc-8191-b301b7399a6e | cpp | tensorflow/tensorflow | plugin_c_api | tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api.h | tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api_test.cc | #ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_PLUGIN_C_API_H_
#define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_C_PLUGIN_C_API_H_
#include <cstddef>
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_macros.h"
#include "tensorflow/c/tf_status.h"
#include "tensorflow/c/tf_tensor.h"
#include "xla/c/c_api_decl.h"
#include "xla/pjrt/c/pjrt_c_api.h"
#include "xla/stream_executor/tpu/c_api_decl.h"
#define TFNPD_MAJOR 0
#define TFNPD_MINOR 0
#define TFNPD_PATCH 1
#ifdef __cplusplus
extern "C" {
#endif
typedef struct TFNPD_DeviceEvent TFNPD_DeviceEvent;
typedef TFNPD_DeviceEvent* TFNPD_NewDeviceEvent();
typedef void TFNPD_DeviceEventAwait(TFNPD_DeviceEvent* event,
TF_Status* status);
typedef bool TFNPD_DeviceEventIsReady(TFNPD_DeviceEvent* event);
typedef void TFNPD_DeviceEventAndThen(TFNPD_DeviceEvent* event,
void (*callback)(void*),
void* callback_arg);
typedef void TFNPD_DeviceEventDelete(TFNPD_DeviceEvent* event);
typedef struct TFNPD_DeviceAllocator TFNPD_DeviceAllocator;
typedef TFNPD_DeviceAllocator* TFNPD_DeviceAllocatorCreate(int device_ordinal);
typedef void* TFNPD_DeviceAllocateRaw(TFNPD_DeviceAllocator* allocator,
size_t alignment, size_t num_bytes);
typedef void TFNPD_DeviceDeallocateRaw(TFNPD_DeviceAllocator* allocator,
void* ptr);
typedef TF_StringView TFNPD_DeviceAllocatorName(
TFNPD_DeviceAllocator* allocator);
typedef bool TFNPD_DeviceAllocatorAllocatesOpaqueHandle(
TFNPD_DeviceAllocator* allocator);
typedef void TFNPD_DeviceAllocatorDelete(TFNPD_DeviceAllocator* allocator);
typedef struct TFNPD_DeviceContext TFNPD_DeviceContext;
typedef TFNPD_DeviceContext* TFNPD_DeviceContextCreate(int device_ordinal);
typedef TFNPD_DeviceEvent* TFNPD_DeviceTensorToHostTensor(
TFNPD_DeviceContext* device_context, const TF_Tensor* device_tensor,
TF_Tensor* cpu_tensor, TF_Status* status);
typedef TFNPD_DeviceEvent* TFNPD_HostTensorToDeviceTensor(
TFNPD_DeviceContext* device_context, const TF_Tensor* cpu_tensor,
TF_Tensor* device_tensor, TF_Status* status);
typedef TFNPD_DeviceEvent* TFNPD_SameDeviceTensorCopy(
TFNPD_DeviceContext* context);
typedef PJRT_Buffer* TFNPD_SameDevicePjRtBufferCopy(PJRT_Buffer* src_buffer,
PJRT_Client* c_client,
TF_Status* status);
typedef void TFNPD_DeviceContextDelete(TFNPD_DeviceContext* context);
typedef void TFNPD_XlaShapeToDeviceShapeRepresentation(
XLA_Shape* serialized_xla_shape, int data_type, bool use_fast_memory,
XLA_LayoutPreference layout_preference, XLA_Shape* serialized_device_shape,
TF_Status* tf_status);
typedef int32_t TFNPD_GetDeviceCount(TF_Status* status);
typedef void TFNPD_InitPluginInternalDeviceStates(TF_Status* status);
#define TFNPD_API_STRUCT_FN(fn_type) fn_type* fn_type
typedef struct {
size_t struct_size;
void* priv;
TFNPD_API_STRUCT_FN(TFNPD_NewDeviceEvent);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventAwait);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventIsReady);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventAndThen);
TFNPD_API_STRUCT_FN(TFNPD_DeviceEventDelete);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorCreate);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocateRaw);
TFNPD_API_STRUCT_FN(TFNPD_DeviceDeallocateRaw);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorName);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorAllocatesOpaqueHandle);
TFNPD_API_STRUCT_FN(TFNPD_DeviceAllocatorDelete);
TFNPD_API_STRUCT_FN(TFNPD_DeviceContextCreate);
TFNPD_API_STRUCT_FN(TFNPD_DeviceContextDelete);
TFNPD_API_STRUCT_FN(TFNPD_DeviceTensorToHostTensor);
TFNPD_API_STRUCT_FN(TFNPD_HostTensorToDeviceTensor);
TFNPD_API_STRUCT_FN(TFNPD_SameDeviceTensorCopy);
TFNPD_API_STRUCT_FN(TFNPD_SameDevicePjRtBufferCopy);
TFNPD_API_STRUCT_FN(TFNPD_XlaShapeToDeviceShapeRepresentation);
TFNPD_API_STRUCT_FN(TFNPD_GetDeviceCount);
TFNPD_API_STRUCT_FN(TFNPD_InitPluginInternalDeviceStates);
} TFNPD_Api;
const size_t TFNPD_Api_STRUCT_SIZE =
TF_OFFSET_OF_END(TFNPD_Api, TFNPD_InitPluginInternalDeviceStates);
#undef TFNPD_API_STRUCT_FN
// Parameters filled in by the plugin during TFNPD_InitPlugin to describe the
// device it provides.
typedef struct TFNPD_PluginParams {
  size_t struct_size;  // Set to TFNPD_PLUGIN_PARAMS_STRUCT_SIZE by the caller.
  void* ext;           // Reserved for future extension; currently unused.
  const char* device_type;              // e.g. the registered device name.
  const char* compilation_device_name;  // XLA compilation device to use.
  int32_t priority;                     // Device registration priority.
  bool is_pluggable_device;
  bool use_pjrt_on_demand_compile;
} TFNPD_PluginParams;
// NOTE(review): this size constant ends at `is_pluggable_device`, so it does
// NOT cover the trailing `use_pjrt_on_demand_compile` field. This may be
// deliberate ABI-versioning (older plugins predate the new field), but it
// differs from TFNPD_Api_STRUCT_SIZE above, which uses the last member —
// confirm intent before relying on the newer field across the C boundary.
const size_t TFNPD_PLUGIN_PARAMS_STRUCT_SIZE =
    TF_OFFSET_OF_END(TFNPD_PluginParams, is_pluggable_device);

// Entry point the plugin shared library must export. Fills `params` and
// returns the plugin's function table (or nullptr with `tf_status` set).
const TFNPD_Api* TFNPD_InitPlugin(TFNPD_PluginParams* params,
                                  TF_Status* tf_status);
#if defined(__cplusplus)
}
#endif
#endif | #include "tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api.h"
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/c/tf_status.h"
#include "tensorflow/core/common_runtime/next_pluggable_device/c/example_plugin.h"
#include "tensorflow/core/platform/status.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace {
// Bundles everything InvokeCallbackFn needs, since the C event API only
// accepts a `void(*)(void*)` callback plus an opaque argument. Ownership
// protocol: the params object is heap-allocated by the caller of
// TFNPD_DeviceEventAndThen and deleted by InvokeCallbackFn after the
// callback runs; the destructor in turn releases the wrapped device event
// through the plugin API.
struct CallbackParams {
  std::function<void(const tensorflow::Status&)> callback;
  tensorflow::Status status;  // Status handed to `callback` when invoked.
  const TFNPD_Api* api;
  TFNPD_DeviceEvent* event;

  ~CallbackParams() {
    // The event is plugin-owned memory; it must be freed via the plugin API.
    api->TFNPD_DeviceEventDelete(event);
  }
};

// Trampoline with C linkage-compatible signature: unpacks the params,
// invokes the user callback, then frees the params (and, via the
// destructor, the event).
void InvokeCallbackFn(void* arg) {
  CallbackParams* params = reinterpret_cast<CallbackParams*>(arg);
  params->callback(params->status);
  delete params;
}
// Test fixture that wires up the example plugin's API table, a TFRT host
// context (4-thread work queue + malloc allocator) for running async work,
// and a TF_Status scratch object shared by the tests.
class PluginEventTestFixture : public testing::Test {
 protected:
  PluginEventTestFixture() {
    api_ = GetExamplePluginApi();
    // Diagnostics from the TFRT host context are just logged.
    auto diag_handler = [](const tfrt::DecodedDiagnostic& diag) {
      LOG(ERROR) << diag.message();
    };
    std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue =
        tfrt::CreateMultiThreadedWorkQueue(
            /*num_threads=*/4, /*num_blocking_threads=*/4);
    std::unique_ptr<tfrt::HostAllocator> host_allocator =
        tfrt::CreateMallocAllocator();
    host_ = std::make_unique<tfrt::HostContext>(
        diag_handler, std::move(host_allocator), std::move(work_queue));
    // Owned C object; released in the destructor below.
    status_ = TF_NewStatus();
  }

  ~PluginEventTestFixture() override { TF_DeleteStatus(status_); }

  std::unique_ptr<tfrt::HostContext> host_;
  const TFNPD_Api* api_;  // Not owned; function table from the plugin.
  TF_Status* status_;     // Owned; freed in the destructor.
};
// Awaiting an event blocks until it is ready and reports success.
TEST_F(PluginEventTestFixture, TestAwait) {
  std::unique_ptr<TFNPD_DeviceEvent> event(
      example_plugin::CreateDeviceEventAndSetAvailable(host_.get()));
  // Not ready immediately after creation...
  EXPECT_FALSE(api_->TFNPD_DeviceEventIsReady(event.get()));
  api_->TFNPD_DeviceEventAwait(event.get(), status_);
  // ...but ready (with an OK status) once Await returns.
  EXPECT_TRUE(api_->TFNPD_DeviceEventIsReady(event.get()));
  EXPECT_EQ(TF_GetCode(status_), TF_OK);
}
// Awaiting an event that completes with an error surfaces that error
// through the TF_Status out-parameter.
TEST_F(PluginEventTestFixture, TestAwaitWithError) {
  std::unique_ptr<TFNPD_DeviceEvent> event(
      example_plugin::CreateDeviceEventAndSetAvailable(host_.get(),
                                                       /*set_as_error=*/true));
  EXPECT_FALSE(api_->TFNPD_DeviceEventIsReady(event.get()));
  api_->TFNPD_DeviceEventAwait(event.get(), status_);
  EXPECT_TRUE(api_->TFNPD_DeviceEventIsReady(event.get()));
  EXPECT_EQ(TF_GetCode(status_), TF_INTERNAL);
  EXPECT_STREQ(TF_Message(status_), "ERROR");
}
TEST_F(PluginEventTestFixture, TestInvokeCallback) {
auto result_avref = tfrt::MakeUnconstructedAsyncValueRef<int>();
std::string tennis_goat = "Sampras";
auto done = [result_avref = result_avref.CopyRef(),
&tennis_goat](const tensorflow::Status& status) {
result_avref.emplace(42);
LOG(INFO) << "Invoking status callback. Tennis goat is: "
<< status.message();
tennis_goat = status.message();
};
TFNPD_DeviceEvent* event =
example_plugin::CreateDeviceEventAndSetAvailable(host_.get());
tensorflow::Status status(absl::StatusCode::kInternal, "Federer");
CallbackParams* params =
new CallbackParams{std::move(done), status, api_, event};
api_->TFNPD_DeviceEventAndThen(event, &InvokeCallbackFn,
params);
result_avref.AndThen([result_avref = result_avref.CopyRef(), tennis_goat,
host = std::move(host_)] {
EXPECT_EQ(result_avref.get(), 42);
LOG(INFO) << "Tennis goat: " << tennis_goat;
EXPECT_EQ(tennis_goat, "Federer");
});
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/next_pluggable_device/c/plugin_c_api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d9291186-f644-4803-84a1-9f17601d1838 | cpp | tensorflow/tensorflow | threadpool_async_executor | third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor.h | third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_THREADPOOL_ASYNC_EXECUTOR_H_
#define TENSORFLOW_TSL_PLATFORM_THREADPOOL_ASYNC_EXECUTOR_H_
#include <memory>
#include <utility>

#include "xla/tsl/concurrency/async_value.h"
#include "tsl/platform/threadpool.h"
namespace tsl::thread {
// Adapts a tsl::thread::ThreadPool to the AsyncValue::Executor interface so
// async-value continuations can be scheduled on the pool.
class ThreadPoolAsyncExecutor : public AsyncValue::Executor {
 public:
  // `thread_pool` is not owned and must outlive this executor.
  explicit ThreadPoolAsyncExecutor(ThreadPool* thread_pool)
      : thread_pool_(thread_pool) {}

  // Schedules `task` on the thread pool. The task is moved into a
  // shared_ptr because ThreadPool::Schedule takes a copyable std::function,
  // while Task may be move-only. Unlike the raw new/delete pattern, the
  // shared_ptr also guarantees the task is destroyed even if the pool drops
  // the closure without running it (e.g. during shutdown), so it cannot
  // leak.
  void Execute(Task task) final {
    auto task_ptr = std::make_shared<Task>(std::move(task));
    thread_pool_->Schedule([task_ptr] { (*task_ptr)(); });
  }

 private:
  ThreadPool* thread_pool_;  // Not owned.
};
}
#endif | #include "tsl/platform/threadpool_async_executor.h"
#include "absl/synchronization/notification.h"
#include "tsl/platform/env.h"
#include "tsl/platform/test.h"
#include "tsl/platform/threadpool.h"
namespace tsl::thread {
namespace {
// A task handed to the executor actually runs on the underlying pool.
TEST(ThreadPoolAsyncExecutorTest, ExecuteTasks) {
  ThreadPool pool(Env::Default(), "test", 4);
  ThreadPoolAsyncExecutor executor(&pool);
  absl::Notification done;
  executor.Execute([&done] { done.Notify(); });
  // Blocks until the scheduled task has signalled completion.
  done.WaitForNotification();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/threadpool_async_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e6003ff8-dfe3-42fc-a3ac-46631b67e34d | cpp | tensorflow/tensorflow | tf2xla | tensorflow/compiler/tf2xla/tf2xla.cc | tensorflow/compiler/tf2xla/tf2xla_test.cc | #include "tensorflow/compiler/tf2xla/tf2xla.h"
#include <map>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/aot/aot_only_var_handle_op.h"
#include "tensorflow/compiler/tf2xla/graph_compiler_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/tf2xla_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_computation.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
// Compiles `graph` into an XLA computation via the CPU XLA-JIT backend.
// On success, `*computation` holds the result. Fails if any fetch compiles
// to a compile-time constant (the tfcompile output contract has no slot for
// constants) or if a variable's readonly flag in `config` disagrees with
// whether the computation actually updates it.
Status ConvertGraphToXla(std::unique_ptr<Graph> graph,
                         const tf2xla::Config& config, xla::Client* client,
                         xla::XlaComputation* computation) {
  XlaOpRegistry::RegisterCompilationKernels();
  // Force every node onto the XLA CPU JIT device so the compiler claims the
  // whole graph.
  for (Node* node : graph->nodes()) {
    node->set_assigned_device_name(
        absl::StrCat("/device:", DEVICE_CPU_XLA_JIT));
  }
  // Feed arguments come from the graph; variable arguments are appended
  // from the config (order matters for the readonly check below).
  std::vector<XlaCompiler::Argument> xla_args;
  TF_RETURN_IF_ERROR(CreateXlaArgs(*graph, &xla_args));
  PopulateXlaArgs(config, &xla_args);
  XlaCompiler::Options compiler_options;
  compiler_options.client = client;
  compiler_options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
  compiler_options.flib_def = &graph->flib_def();
  compiler_options.graph_def_version = graph->versions().producer();
  compiler_options.allow_cpu_custom_calls = true;
  XlaCompiler compiler(compiler_options);
  XlaCompiler::CompilationResult result;
  XlaCompiler::CompileOptions options;
  // Alias variable inputs with their updated outputs to avoid copies.
  options.alias_resource_update = true;
  TF_RETURN_IF_ERROR(compiler.CompileGraph(
      options, "tfcompile", std::move(graph), xla_args, &result));
  *computation = std::move(*result.computation);
  // Reject constant results: they indicate misconfigured fetch ids.
  int num_const_results = 0;
  for (int i = 0, end = result.outputs.size(); i < end; ++i) {
    if (result.outputs[i].is_constant) {
      ++num_const_results;
      LOG(ERROR) << "ConstRetVal index:" << i
                 << " value:" << result.outputs[i].constant_value.DebugString();
    }
  }
  if (num_const_results > 0) {
    return errors::Unimplemented(
        "Conversion from TensorFlow graph to XLA resulted in ",
        num_const_results,
        " constant results. The configuration of "
        "the output args (i.e. fetch ids) is probably wrong.");
  }
  {
    // Cross-check config.variable readonly flags against the resource
    // updates the compiler actually produced.
    std::vector<bool> updated_inputs(xla_args.size());
    for (const XlaCompiler::ResourceUpdate& update : result.resource_updates) {
      updated_inputs[update.input_index] = true;
    }
    // Variables occupy the trailing positions of xla_args (appended by
    // PopulateXlaArgs above), so start the index there.
    int64_t input_index = xla_args.size() - config.variable_size();
    for (const tf2xla::Variable& variable : config.variable()) {
      // readonly==true must pair with "not updated" and vice versa;
      // equality means the flag contradicts the computation.
      if (variable.readonly() == updated_inputs[input_index]) {
        return errors::InvalidArgument(
            "Variable \"", variable.node_name(), "\" is marked as ",
            variable.readonly() ? "" : "not ", "readonly, but is ",
            updated_inputs[input_index] ? "" : "not ",
            "modified by the computation.");
      }
      ++input_index;
    }
  }
  return absl::OkStatus();
}
// Rewrites every VarHandleOp in the graph (including nodes inside function
// library definitions) to the AOT-only variant, dropping an empty
// `allowed_devices` attr. Fails if any VarHandleOp restricts its devices,
// which AOT compilation cannot honor.
Status ConvertVarHandlesToAotVarHandles(GraphDef* graph_def) {
  const auto rewrite_node = [](NodeDef& node) -> Status {
    if (node.op() != "VarHandleOp") return absl::OkStatus();
    node.set_op(tfcompile::kXlaAotOnlyVarHandleOp);
    auto it = node.attr().find("allowed_devices");
    if (it == node.attr().end()) return absl::OkStatus();
    if (!it->second.list().s().empty()) {
      return errors::InvalidArgument(
          "VarHandleOp with non-empty allowed devices is not supported.");
    }
    node.mutable_attr()->erase("allowed_devices");
    return absl::OkStatus();
  };
  // Top-level graph nodes.
  for (NodeDef& node : *graph_def->mutable_node()) {
    TF_RETURN_IF_ERROR(rewrite_node(node));
  }
  // Nodes nested inside function library definitions.
  for (auto& fn : *graph_def->mutable_library()->mutable_function()) {
    for (NodeDef& node : *fn.mutable_node_def()) {
      TF_RETURN_IF_ERROR(rewrite_node(node));
    }
  }
  return absl::OkStatus();
}
}
// Public entry point: converts a GraphDef (plus feed/fetch/variable config)
// into an XLA computation. Pipeline: rewrite VarHandleOps for AOT, build a
// pruned Graph from the GraphDef per `config`, then compile it to XLA.
// `graph_def` is taken by value because the VarHandleOp rewrite mutates it.
Status ConvertGraphDefToXla(GraphDef graph_def, const tf2xla::Config& config,
                            xla::Client* client,
                            xla::XlaComputation* computation) {
  std::unique_ptr<Graph> graph;
  TF_RETURN_IF_ERROR(ConvertVarHandlesToAotVarHandles(&graph_def));
  TF_RETURN_IF_ERROR(InitGraph(graph_def, config, &graph));
  TF_RETURN_IF_ERROR(
      ConvertGraphToXla(std::move(graph), config, client, computation));
  return absl::OkStatus();
}
} | #include "tensorflow/compiler/tf2xla/tf2xla.h"
#include <vector>
#include "tensorflow/compiler/tf2xla/tf2xla.pb.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/tensor_float_32_utils.h"
namespace tensorflow {
namespace {
// Test fixture that disables TensorFloat-32 execution for the duration of
// each test and re-enables it afterwards.  The tests below use it to verify
// that dot/convolution ops get HIGHEST operand precision when TF32 is off,
// versus DEFAULT when it is on.
class ConvertGraphDefToXlaWithTF32Disabled : public ::testing::Test {
 public:
  ConvertGraphDefToXlaWithTF32Disabled() {
    tsl::enable_tensor_float_32_execution(false);
  }
  ~ConvertGraphDefToXlaWithTF32Disabled() override {
    tsl::enable_tensor_float_32_execution(true);
  }
};
// Small helpers for building AttrValue protos from plain C++ values.
AttrValue TypeAttrValue(DataType type) {
  AttrValue attr_value;
  SetAttrValue(type, &attr_value);
  return attr_value;
}
AttrValue StringAttrValue(StringPiece str) {
  AttrValue attr_value;
  SetAttrValue(str, &attr_value);
  return attr_value;
}
AttrValue IntAttrValue(int i) {
  AttrValue attr_value;
  SetAttrValue(i, &attr_value);
  return attr_value;
}
AttrValue IntVectorAttrValue(const std::vector<int>& ints) {
  AttrValue attr_value;
  SetAttrValue(ints, &attr_value);
  return attr_value;
}
// Builds a TensorShapeProto with the given dimension sizes.  NOTE: this
// file-local helper shadows tensorflow::TensorShape inside this namespace.
TensorShapeProto TensorShape(const std::vector<int>& dims) {
  TensorShapeProto shape;
  for (int i = 0; i < dims.size(); ++i) {
    shape.add_dim();
    shape.mutable_dim(i)->set_size(dims[i]);
  }
  return shape;
}
// Builds a graph computing sum = x + y over two int32 Placeholder nodes.
GraphDef SumGraph() {
  GraphDef graph_def;
  NodeDef* x = graph_def.add_node();
  x->set_name("x");
  x->set_op("Placeholder");
  (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
  NodeDef* y = graph_def.add_node();
  y->set_name("y");
  y->set_op("Placeholder");
  (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
  NodeDef* sum = graph_def.add_node();
  sum->set_name("sum");
  sum->set_op("Add");
  sum->add_input("x");
  sum->add_input("y");
  (*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32);
  return graph_def;
}
// Config feeding "x" and "y" and fetching "sum" (no explicit shapes: the
// feeds are scalars by default).
tf2xla::Config SumConfig() {
  tf2xla::Config config;
  config.add_feed()->mutable_id()->set_node_name("x");
  config.add_feed()->mutable_id()->set_node_name("y");
  config.add_fetch()->mutable_id()->set_node_name("sum");
  return config;
}
// End-to-end check: compile the sum graph, execute it on the local XLA
// client, and verify 10 + 32 == 42.  Then verify that a feed referring to a
// nonexistent output index is rejected with InvalidArgument.
TEST(ConvertGraphDefToXla, Sum) {
  GraphDef graph_def = SumGraph();
  tf2xla::Config config = SumConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
  auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
  auto x_global_or = client->TransferToServer(x_literal);
  auto y_global_or = client->TransferToServer(y_literal);
  TF_EXPECT_OK(x_global_or.status());
  TF_EXPECT_OK(y_global_or.status());
  std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value());
  std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value());
  auto result_or =
      client->ExecuteAndTransfer(computation, {x_global.get(), y_global.get()});
  TF_EXPECT_OK(result_or.status());
  xla::Literal result = std::move(result_or.value());
  EXPECT_EQ("(\ns32[] 42\n)", result.ToString());
  // An output index that does not exist on the feed node must fail.
  config.mutable_feed(0)->mutable_id()->set_output_index(
      123);
  EXPECT_TRUE(errors::IsInvalidArgument(
      ConvertGraphDefToXla(graph_def, config, client, &computation)));
}
// Builds a graph computing Einsum("ij,jk->ik", x, y) over two float
// Placeholder nodes (i.e. a plain matrix multiply).
GraphDef EinsumGraph() {
  GraphDef graph_def;
  NodeDef* x = graph_def.add_node();
  x->set_name("x");
  x->set_op("Placeholder");
  (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* y = graph_def.add_node();
  y->set_name("y");
  y->set_op("Placeholder");
  (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* einsum = graph_def.add_node();
  einsum->set_name("einsum");
  einsum->set_op("Einsum");
  einsum->add_input("x");
  einsum->add_input("y");
  (*einsum->mutable_attr())["equation"] = StringAttrValue("ij,jk->ik");
  (*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT);
  (*einsum->mutable_attr())["N"] = IntAttrValue(2);  // two einsum operands
  return graph_def;
}
// Feeds 2x2 inputs "x" and "y", fetches "einsum".
tf2xla::Config EinsumConfig() {
  tf2xla::Config config;
  tf2xla::Feed* x_feed = config.add_feed();
  x_feed->mutable_id()->set_node_name("x");
  *x_feed->mutable_shape() = TensorShape({2, 2});
  tf2xla::Feed* y_feed = config.add_feed();
  y_feed->mutable_id()->set_node_name("y");
  *y_feed->mutable_shape() = TensorShape({2, 2});
  config.add_fetch()->mutable_id()->set_node_name("einsum");
  return config;
}
// With TF32 enabled (the default), the einsum lowers to exactly one HLO
// "dot" whose two operand precisions are DEFAULT.
TEST(ConvertGraphDefToXla, EinsumIsConvertedToDotWithDefaultPrecision) {
  GraphDef graph_def = EinsumGraph();
  tf2xla::Config config = EinsumConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_dots = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "dot") {
        num_dots++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::DEFAULT);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::DEFAULT);
      }
    }
  }
  EXPECT_EQ(num_dots, 1);
}
// With TF32 disabled (via the fixture), the same einsum lowers to one "dot"
// with HIGHEST operand precision instead.
TEST_F(ConvertGraphDefToXlaWithTF32Disabled,
       EinsumIsConvertedToDotWithHighestPrecision) {
  GraphDef graph_def = EinsumGraph();
  tf2xla::Config config = EinsumConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_dots = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "dot") {
        num_dots++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::HIGHEST);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::HIGHEST);
      }
    }
  }
  EXPECT_EQ(num_dots, 1);
}
// Builds a graph with a single stride-1, VALID-padding Conv2D over two float
// Placeholder nodes.  (The local variable is named `einsum` only because the
// builder was adapted from EinsumGraph above.)
GraphDef Conv2DGraph() {
  GraphDef graph_def;
  NodeDef* x = graph_def.add_node();
  x->set_name("x");
  x->set_op("Placeholder");
  (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* y = graph_def.add_node();
  y->set_name("y");
  y->set_op("Placeholder");
  (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT);
  NodeDef* einsum = graph_def.add_node();
  einsum->set_name("conv2d");
  einsum->set_op("Conv2D");
  einsum->add_input("x");
  einsum->add_input("y");
  (*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT);
  (*einsum->mutable_attr())["padding"] = StringAttrValue("VALID");
  (*einsum->mutable_attr())["strides"] = IntVectorAttrValue({1, 1, 1, 1});
  return graph_def;
}
// Feeds 1x1x2x2 input and filter tensors, fetches "conv2d".
tf2xla::Config Conv2DConfig() {
  tf2xla::Config config;
  tf2xla::Feed* x_feed = config.add_feed();
  x_feed->mutable_id()->set_node_name("x");
  *x_feed->mutable_shape() = TensorShape({1, 1, 2, 2});
  tf2xla::Feed* y_feed = config.add_feed();
  y_feed->mutable_id()->set_node_name("y");
  *y_feed->mutable_shape() = TensorShape({1, 1, 2, 2});
  config.add_fetch()->mutable_id()->set_node_name("conv2d");
  return config;
}
// With TF32 enabled (the default), the Conv2D lowers to exactly one HLO
// "convolution" whose two operand precisions are DEFAULT.
TEST(ConvertGraphDefToXla, Conv2DIsConvertedToConvolutionWithDefaultPrecision) {
  GraphDef graph_def = Conv2DGraph();
  tf2xla::Config config = Conv2DConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_convolutions = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "convolution") {
        num_convolutions++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::DEFAULT);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::DEFAULT);
      }
    }
  }
  EXPECT_EQ(num_convolutions, 1);
}
// With TF32 disabled (via the fixture), the convolution gets HIGHEST
// operand precision instead.
TEST_F(ConvertGraphDefToXlaWithTF32Disabled,
       Conv2DIsConvertedToConvolutionWithHighestPrecision) {
  GraphDef graph_def = Conv2DGraph();
  tf2xla::Config config = Conv2DConfig();
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  int num_convolutions = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == "convolution") {
        num_convolutions++;
        ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(),
                  2);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(0),
                  xla::PrecisionConfig::HIGHEST);
        EXPECT_EQ(instruction_proto.precision_config().operand_precision(1),
                  xla::PrecisionConfig::HIGHEST);
      }
    }
  }
  EXPECT_EQ(num_convolutions, 1);
}
// A feed that the fetch does not depend on must still become a computation
// parameter; executing with the extra (unused) argument still yields 42.
TEST(ConvertGraphDefToXla, SumWithUnusedArgument) {
  GraphDef graph_def = SumGraph();
  tf2xla::Config config = SumConfig();
  NodeDef* unused = graph_def.add_node();
  unused->set_name("unused");
  unused->set_op("Placeholder");
  (*unused->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32);
  config.add_feed()->mutable_id()->set_node_name("unused");
  xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
  xla::XlaComputation computation;
  TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation));
  auto x_literal = xla::LiteralUtil::CreateR0<int32>(10);
  auto y_literal = xla::LiteralUtil::CreateR0<int32>(32);
  auto x_global_or = client->TransferToServer(x_literal);
  auto y_global_or = client->TransferToServer(y_literal);
  auto unused_global_or = client->TransferToServer(y_literal);
  TF_EXPECT_OK(x_global_or.status());
  TF_EXPECT_OK(y_global_or.status());
  TF_EXPECT_OK(unused_global_or.status());
  std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value());
  std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value());
  std::unique_ptr<xla::GlobalData> unused_global =
      std::move(unused_global_or.value());
  auto result_or = client->ExecuteAndTransfer(
      computation, {x_global.get(), y_global.get(), unused_global.get()});
  TF_EXPECT_OK(result_or.status());
  xla::Literal result = std::move(result_or.value());
  EXPECT_EQ("(\ns32[] 42\n)", result.ToString());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/tf2xla_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab750f69-b902-438c-9461-e3c414849229 | cpp | tensorflow/tensorflow | analytical_cost_estimator | tensorflow/core/grappler/costs/analytical_cost_estimator.cc | tensorflow/core/grappler/costs/analytical_cost_estimator_test.cc | #include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include <limits>
#include <unordered_map>
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/graph/types.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/costs/op_performance_data.pb.h"
#include "tensorflow/core/grappler/costs/utils.h"
#include "tensorflow/core/grappler/costs/virtual_placer.h"
#include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/util/overflow.h"
namespace tensorflow {
namespace grappler {
namespace {
// Adds (or updates) a node in `cost_graph` describing the op in `op_context`
// with predicted `node_costs`.  `name_to_cost_node` maps op names to
// existing cost-graph nodes so repeated predictions update in place;
// `name_to_id` maps op names to ids for wiring up input edges.  Returns
// InvalidArgument if an output's byte size overflows int64.
Status AddCostNode(ReadyNodeManager* node_manager, const OpContext& op_context,
                   int node_id, const Costs& node_costs,
                   gtl::FlatMap<string, CostGraphDef::Node*>* name_to_cost_node,
                   gtl::FlatMap<string, int>* name_to_id,
                   CostGraphDef* cost_graph) {
  const string& op_name = op_context.name;
  auto it = name_to_cost_node->find(op_name);
  CostGraphDef::Node* node;
  if (it != name_to_cost_node->end()) {
    // Reuse the existing node (keeping its id) and rebuild its edges below.
    // NOTE(review): reused nodes are not re-registered in `name_to_id`, so
    // later nodes naming them as inputs take the "input not found" path --
    // confirm this is intended.
    node = it->second;
    node->clear_input_info();
    node->clear_output_info();
  } else {
    node = cost_graph->add_node();
    (*name_to_cost_node)[op_name] = node;
    node->set_name(op_name);
    node->set_id(node_id);
    (*name_to_id)[node->name()] = node->id();
  }
  node->set_device(op_context.device_name);
  node->set_compute_cost(node_costs.execution_time.asMicroSeconds().count());
  node->set_compute_time(node_costs.compute_time.asMicroSeconds().count());
  node->set_memory_time(node_costs.memory_time.asMicroSeconds().count());
  node->set_temporary_memory_size(node_costs.temporary_memory);
  node->set_persistent_memory_size(node_costs.persistent_memory);
  node->set_inaccurate(node_costs.inaccurate);
  for (const string& input : node_manager->GetCurrNode()->input()) {
    int input_port;
    string input_name = ParseNodeName(input, &input_port);
    // Inputs with no recorded id are skipped; Merge nodes legitimately
    // reference not-yet-seen inputs, so only log for non-Merge nodes.
    if (name_to_id->find(input_name) == name_to_id->end()) {
      if (!IsMerge(*node_manager->GetCurrNode()))
        VLOG(1) << "input: " << input
                << " not found for non-Merge node: " << op_name;
      continue;
    }
    if (IsControlInput(input)) {
      node->add_control_input(name_to_id->at(input_name));
    } else {
      auto* input_info = node->add_input_info();
      input_info->set_preceding_node(name_to_id->at(input_name));
      input_info->set_preceding_port(input_port);
    }
  }
  for (const auto& output : op_context.op_info.outputs()) {
    auto output_info = node->add_output_info();
    output_info->set_alias_input_port(-1);
    output_info->set_dtype(output.dtype());
    *output_info->mutable_shape() = output.shape();
    // Non-positive (unknown) dimension sizes count as 1 when computing the
    // output byte size; overflow is surfaced as an error.
    int64_t size = DataTypeSize(output.dtype());
    for (const auto& dim : output.shape().dim()) {
      size = MultiplyWithoutOverflow(size, std::max<int64_t>(1, dim.size()));
      if (size < 0) {
        return errors::InvalidArgument(
            "Integer overflow encountered in dimension size.");
      }
    }
    output_info->set_size(size);
  }
  return absl::OkStatus();
}
}
// Convenience constructor: uses the default OpLevelCostEstimator and the
// "FirstReady" node manager.
AnalyticalCostEstimator::AnalyticalCostEstimator(
    Cluster* cluster, bool use_static_shapes,
    bool use_aggressive_shape_inference)
    : AnalyticalCostEstimator(
          cluster, std::make_unique<OpLevelCostEstimator>(),
          ReadyNodeManagerFactory("FirstReady"), use_static_shapes,
          use_aggressive_shape_inference) {}
// Constructor taking a custom per-op estimator and ready-node manager; the
// VirtualPlacer is derived from the cluster's device set.  Delegates to the
// fully parameterized constructor below so the member/scheduler
// initialization logic is not duplicated.  `cluster` must be non-null (its
// device set is read here).
AnalyticalCostEstimator::AnalyticalCostEstimator(
    Cluster* cluster, std::unique_ptr<OpLevelCostEstimator> node_estimator,
    std::unique_ptr<ReadyNodeManager> node_manager, bool use_static_shapes,
    bool use_aggressive_shape_inference)
    : AnalyticalCostEstimator(
          cluster, std::move(node_estimator), std::move(node_manager),
          std::make_unique<VirtualPlacer>(cluster->GetDevices()),
          use_static_shapes, use_aggressive_shape_inference) {}
// Fully parameterized constructor: the caller supplies the per-op cost
// estimator, the ready-node manager, and the virtual placer handed to the
// VirtualScheduler.
AnalyticalCostEstimator::AnalyticalCostEstimator(
    Cluster* cluster, std::unique_ptr<OpLevelCostEstimator> node_estimator,
    std::unique_ptr<ReadyNodeManager> node_manager,
    std::unique_ptr<VirtualPlacer> placer, bool use_static_shapes,
    bool use_aggressive_shape_inference)
    : node_estimator_(std::move(node_estimator)),
      node_manager_(std::move(node_manager)),
      use_static_shapes_(use_static_shapes),
      use_aggressive_shape_inference_(use_aggressive_shape_inference) {
  scheduler_ = std::make_unique<VirtualScheduler>(
      use_static_shapes_, use_aggressive_shape_inference_, cluster,
      node_manager_.get(), std::move(placer));
}
// Records a pointer to `item` for later use by PredictCosts(); the item must
// outlive this estimator.  No other work is done here.
Status AnalyticalCostEstimator::Initialize(const GrapplerItem& item) {
  item_ = &item;
  return absl::OkStatus();
}
// Simulates execution of `optimized_graph` on the virtual cluster and
// returns aggregate costs in `*costs` (if non-null).  When `run_metadata` is
// non-null it is populated with a cost graph and step stats; cost-graph
// nodes already present in it are reused/updated rather than duplicated.
Status AnalyticalCostEstimator::PredictCosts(const GraphDef& optimized_graph,
                                             RunMetadata* run_metadata,
                                             Costs* costs) const {
  std::unique_ptr<GrapplerItem> item_storage;
  const GrapplerItem* item;
  // Many callers pass the unchanged `item_->graph`; avoid copying the whole
  // graph in that common case.
  if (&optimized_graph == &item_->graph) {
    item = item_;
  } else {
    GraphDef graph_copy = optimized_graph;
    item_storage = std::make_unique<GrapplerItem>(
        item_->WithGraph(std::move(graph_copy)));
    item = item_storage.get();
  }
  auto status = scheduler_->Init(item);
  if (!status.ok()) {
    // Signal failure to callers that only inspect the cost summary.
    if (costs) {
      costs->execution_time = Costs::Duration::max();
    }
    return status;
  }
  gtl::FlatMap<string, CostGraphDef::Node*> name_to_cost_node;
  CostGraphDef* cost_graph = nullptr;
  if (run_metadata) {
    cost_graph = run_metadata->mutable_cost_graph();
    // Index any pre-existing cost nodes so repeated calls update in place.
    for (auto& node : *cost_graph->mutable_node()) {
      name_to_cost_node[node.name()] = &node;
    }
  }
  std::vector<string> inaccurate_nodes;
  int nodes_executed = 0;
  int node_id = 0;
  gtl::FlatMap<string, int> name_to_id;
  Costs node_costs;
  // Drain the virtual scheduler: predict per-node costs in simulated
  // execution order until MarkCurrNodeExecuted reports no nodes remain.
  do {
    ++nodes_executed;
    OpContext op_context = scheduler_->GetCurrNode();
    node_costs = node_estimator_->PredictCosts(op_context);
    if (node_costs.inaccurate) {
      inaccurate_nodes.push_back(op_context.name);
      if (node_costs.num_ops_with_unknown_shapes > 0)
        VLOG(4) << op_context.name << " has "
                << node_costs.num_ops_with_unknown_shapes << " unknown shapes";
    }
    if (cost_graph) {
      Status s =
          AddCostNode(node_manager_.get(), op_context, node_id++, node_costs,
                      &name_to_cost_node, &name_to_id, cost_graph);
      if (!s.ok()) {
        return s;
      }
    }
  } while (scheduler_->MarkCurrNodeExecuted(node_costs));
  VLOG(1) << inaccurate_nodes.size() << " out of " << nodes_executed
          << " nodes have inaccurate time estimation";
  if (VLOG_IS_ON(3)) {
    for (const auto& node : inaccurate_nodes) {
      VLOG(4) << "Node with inaccurate time estimation: " << node;
    }
  }
  // Summary() also fills `run_metadata` when provided; otherwise generate
  // the metadata explicitly.
  if (costs) {
    *costs = scheduler_->Summary(run_metadata);
  } else if (run_metadata) {
    scheduler_->GenerateRunMetadata(run_metadata);
  }
  if (VLOG_IS_ON(1)) {
    bool verbose = VLOG_IS_ON(2);
    if (run_metadata) {
      VLOG(1) << GetStatsStringFromRunMetadata(*run_metadata, verbose);
    } else {
      RunMetadata run_metadata;
      scheduler_->GenerateRunMetadata(&run_metadata);
      VLOG(1) << GetStatsStringFromRunMetadata(run_metadata, verbose);
    }
  }
  return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/costs/virtual_scheduler.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/grappler/clusters/virtual_cluster.h"
#include "tensorflow/core/grappler/costs/analytical_cost_estimator.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
// Fixture providing a virtual cluster with one CPU and one GPU device, plus
// a small conv + matmul + softmax graph to estimate costs for.
class AnalyticalCostEstimatorTest : public ::testing::Test {
 protected:
  void SetUp() override {
    std::unordered_map<string, DeviceProperties> devices;
    DeviceProperties cpu_device;
    cpu_device.set_type("CPU");
    cpu_device.set_num_cores(4);
    cpu_device.set_frequency(2600);
    cpu_device.set_bandwidth(24 * 1024 * 1024);
    devices["/job:localhost/replica:0/task:0/cpu:0"] = cpu_device;
    DeviceProperties gpu_device;
    gpu_device.set_type("GPU");
    gpu_device.set_num_cores(12);
    gpu_device.set_frequency(1100);
    gpu_device.set_bandwidth(180 * 1024 * 1024);
    (*gpu_device.mutable_environment())["architecture"] = "6";
    devices["/job:localhost/replica:0/task:0/device:GPU:0"] = gpu_device;
    cluster_.reset(new VirtualCluster(devices));
  }
  // Builds a tiny conv net: image -> conv -> bias -> relu -> flatten ->
  // matmul -> bias -> softmax -> log, fetching "lsm".
  GrapplerItem CreateMiniGraph() {
    const int batch = 1;
    const int width = 28;
    const int height = 28;
    const int num_channels = 1;
    const int num_labels = 10;
    const int kernel_size = 3;
    const int conv_filters = 32;
    Scope s = Scope::NewRootScope();
    auto images = ops::RandomUniform(
        s.WithOpName("image"), {batch, width, height, num_channels}, DT_FLOAT);
    auto labels = ops::RandomUniform(s.WithOpName("label"), {batch, num_labels},
                                     DT_FLOAT);
    auto w = ops::Variable(
        s.WithOpName("W"),
        {kernel_size, kernel_size, num_channels, conv_filters}, DT_FLOAT);
    auto b = ops::Variable(s.WithOpName("B"), {conv_filters}, DT_FLOAT);
    auto conv =
        ops::Conv2D(s.WithOpName("conv"), images, w, {1, 1, 1, 1}, "SAME");
    auto bias = ops::Add(s.WithOpName("bias"), conv, b);
    auto relu = ops::Relu(s.WithOpName("relu"), bias);
    auto flat_shape = ops::Const(s.WithOpName("flat_shape"),
                                 {batch, width * height * conv_filters});
    auto flat = ops::Reshape(s.WithOpName("flat"), relu, flat_shape);
    auto w2 =
        ops::Variable(s.WithOpName("W2"),
                      {width * height * conv_filters, num_labels}, DT_FLOAT);
    auto b2 = ops::Variable(s.WithOpName("B2"), {num_labels}, DT_FLOAT);
    auto matmul = ops::MatMul(s.WithOpName("matmul"), flat, w2);
    auto logits = ops::Add(s.WithOpName("logits"), matmul, b2);
    auto softmax = ops::Softmax(s.WithOpName("softmax"), logits);
    auto lsm = ops::Log(s.WithOpName("lsm"), softmax);
    GrapplerItem item;
    item.fetch.push_back("lsm");
    TF_CHECK_OK(s.ToGraphDef(&item.graph));
    return item;
  }
  std::unique_ptr<VirtualCluster> cluster_;
};
// Golden-value regression test: the predicted execution time for the mini
// graph is pinned to 9158ns across 15 ops, the summary is flagged
// inaccurate, and no op has unknown shapes.
TEST_F(AnalyticalCostEstimatorTest, SimpleTest) {
  GrapplerItem item = CreateMiniGraph();
  AnalyticalCostEstimator estimator(cluster_.get(), true,
                                    true);
  TF_ASSERT_OK(estimator.Initialize(item));
  RunMetadata run_metadata;
  Costs summary;
  TF_ASSERT_OK(estimator.PredictCosts(item.graph, &run_metadata, &summary));
  EXPECT_EQ(Costs::NanoSeconds(9158), summary.execution_time);
  EXPECT_EQ(15, summary.num_ops_total);
  EXPECT_TRUE(summary.inaccurate);
  EXPECT_EQ(0, summary.num_ops_with_unknown_shapes);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/analytical_cost_estimator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/costs/analytical_cost_estimator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4d9dae04-f06a-4c96-841d-131f992ed614 | cpp | google/tensorstore | ref_counted_string | tensorstore/internal/ref_counted_string.cc | tensorstore/internal/ref_counted_string_test.cc | #include "tensorstore/internal/ref_counted_string.h"
#include <cstring>
#include <new>
namespace tensorstore {
namespace internal {
// Copy assignment.  The reference count of `other`'s buffer is incremented
// *before* this string's buffer is released, which makes self-assignment
// (and assignment between strings sharing a buffer) safe.  A null `data_`
// represents the empty string and carries no header.
RefCountedString& RefCountedString::operator=(
    const RefCountedString& other) noexcept {
  if (other.data_) other.header().IncrementReferenceCount();
  if (data_) header().DecrementReferenceCount();
  data_ = other.data_;
  return *this;
}
// Assignment from a string view.  The copy is allocated *before* the current
// buffer is released, so `s` may alias this string's own contents (e.g.
// `x = std::string_view(x)`).
RefCountedString& RefCountedString::operator=(std::string_view s) {
  auto* data = AllocateCopy(s);
  if (data_) header().DecrementReferenceCount();
  data_ = data;
  return *this;
}
// Replaces the contents with a copy of the NUL-terminated string `s`, by
// delegating to the `std::string_view` assignment overload.
RefCountedString& RefCountedString::operator=(const char* s) {
  *this = std::string_view(s);
  return *this;
}
// Allocates storage for a string of `size` characters preceded in memory by
// a `Header` (length + reference count), constructed via placement new.
// Returns a pointer to the character storage just past the header, or
// nullptr for size 0 (the empty-string representation).
char* RefCountedString::Allocate(size_t size) {
  if (size == 0) return nullptr;
  void* ptr = ::operator new(size + sizeof(Header));
  new (ptr) Header{size};
  return static_cast<char*>(ptr) + sizeof(Header);
}
// Returns newly allocated, header-prefixed character storage holding a copy
// of `s`, or nullptr when `s` is empty (the empty-string representation).
const char* RefCountedString::AllocateCopy(std::string_view s) {
  if (s.empty()) return nullptr;
  const size_t n = s.size();
  char* const dest = Allocate(n);
  std::memcpy(dest, s.data(), n);
  return dest;
}
// Frees the allocation made by Allocate().  `this` points at the header,
// which is the start of the allocation; the original total size
// (length + sizeof(Header)) is passed to sized operator delete.
void RefCountedString::Header::Deallocate() const {
  ::operator delete(const_cast<Header*>(this), length + sizeof(Header));
}
}
} | #include "tensorstore/internal/ref_counted_string.h"
#include <string>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
using ::tensorstore::internal::RefCountedString;
using ::tensorstore::internal::RefCountedStringWriter;
// A default-constructed RefCountedString is empty and backed by a null data
// pointer (no allocation), and its copies are too.
TEST(RefCountedStringTest, DefaultConstruct) {
  RefCountedString s;
  EXPECT_EQ("", std::string_view(s));
  EXPECT_EQ("", std::string(s));
  EXPECT_TRUE(s.empty());
  EXPECT_EQ(nullptr, s.data());
  EXPECT_EQ(0, s.size());
  EXPECT_EQ(nullptr, s.begin());
  EXPECT_EQ(nullptr, s.end());
  EXPECT_EQ(s, s);
  auto other = s;
  EXPECT_EQ(nullptr, other.data());
}
// Constructing from "" must behave exactly like default construction.
TEST(RefCountedStringTest, EmptyStringConstruct) {
  RefCountedString s("");
  EXPECT_EQ("", std::string_view(s));
  EXPECT_EQ("", std::string(s));
  EXPECT_TRUE(s.empty());
  EXPECT_EQ(nullptr, s.data());
  EXPECT_EQ(0, s.size());
  EXPECT_EQ(nullptr, s.begin());
  EXPECT_EQ(nullptr, s.end());
  EXPECT_EQ(s, s);
}
// Basic accessors plus relational comparisons for a non-empty string.
TEST(RefCountedStringTest, NonEmptyStringConstruct) {
  RefCountedString s("abc");
  EXPECT_EQ("abc", std::string_view(s));
  EXPECT_EQ("abc", std::string(s));
  EXPECT_FALSE(s.empty());
  EXPECT_EQ(3, s.size());
  EXPECT_EQ("abc", s);
  EXPECT_NE("abd", s);
  EXPECT_EQ(s, "abc");
  EXPECT_LT("ab", s);
  EXPECT_LE("abc", s);
  EXPECT_GT("abd", s);
}
// Copies share the underlying buffer: same data pointer, no reallocation.
TEST(RefCountedStringTest, Copy) {
  RefCountedString x("abc");
  RefCountedString y = x;
  EXPECT_EQ(x.data(), y.data());
}
// Move transfers the buffer (pointer is preserved) and empties the source.
TEST(RefCountedStringTest, Move) {
  RefCountedString x("abc");
  const char* ptr = x.data();
  RefCountedString y = std::move(x);
  EXPECT_EQ(y, "abc");
  EXPECT_EQ(ptr, y.data());
  EXPECT_TRUE(x.empty());
}
// The tests below cover the {empty, non-empty} x {move, copy} assignment
// matrix; moves must transfer the exact buffer, copies must preserve value.
TEST(RefCountedStringTest, EmptyMoveAssignNonEmpty) {
  RefCountedString x("abc");
  const char* ptr = x.data();
  RefCountedString y;
  y = std::move(x);
  EXPECT_EQ(y, "abc");
  EXPECT_EQ(ptr, y.data());
  EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, EmptyMoveAssignEmpty) {
  RefCountedString x;
  RefCountedString y;
  y = std::move(x);
  EXPECT_TRUE(y.empty());
  EXPECT_TRUE(x.empty());
}
TEST(RefCountedStringTest, NonEmptyMoveAssignNonEmpty) {
  RefCountedString x("abc");
  const char* ptr = x.data();
  RefCountedString y("def");
  y = std::move(x);
  EXPECT_EQ(y, "abc");
  EXPECT_EQ(ptr, y.data());
}
TEST(RefCountedStringTest, NonEmptyMoveAssignEmpty) {
  RefCountedString x;
  RefCountedString y("def");
  y = std::move(x);
  EXPECT_TRUE(y.empty());
}
TEST(RefCountedStringTest, NonEmptyCopyAssignNonEmpty) {
  RefCountedString x("abc");
  RefCountedString y("def");
  y = x;
  EXPECT_EQ("abc", y);
}
TEST(RefCountedStringTest, EmptyCopyAssignNonEmpty) {
  RefCountedString x("abc");
  RefCountedString y;
  y = x;
  EXPECT_EQ("abc", y);
}
TEST(RefCountedStringTest, NonEmptyCopyAssignEmpty) {
  RefCountedString x;
  RefCountedString y("def");
  y = x;
  EXPECT_EQ("", y);
}
TEST(RefCountedStringTest, EmptyCopyAssignEmpty) {
  RefCountedString x;
  RefCountedString y;
  y = x;
  EXPECT_EQ("", y);
}
// Assignment from std::string_view and const char* replaces the contents,
// starting from either an empty or non-empty target.
TEST(RefCountedStringTest, NonEmptyAssignFromStringView) {
  RefCountedString x("def");
  x = std::string_view("abc");
  EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, EmptyAssignFromStringView) {
  RefCountedString x;
  x = std::string_view("abc");
  EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, NonEmptyAssignFromCStr) {
  RefCountedString x("def");
  x = "abc";
  EXPECT_EQ("abc", x);
}
TEST(RefCountedStringTest, EmptyAssignFromCStr) {
  RefCountedString x;
  x = "abc";
  EXPECT_EQ("abc", x);
}
// Self-assignment must be safe (the implementation increments the refcount
// of the source before releasing the destination).
TEST(RefCountedStringTest, SelfAssign) {
  RefCountedString x("abc");
  x = x;
  EXPECT_EQ("abc", x);
}
// Assigning from a view of the string's own buffer must also be safe (the
// implementation copies before releasing).
TEST(RefCountedStringTest, SelfAssignStringView) {
  RefCountedString x("abc");
  x = std::string_view(x);
  EXPECT_EQ("abc", x);
}
// Exhaustively exercises the comparison operators against RefCountedString,
// std::string_view, and const char* operands, on both sides.
TEST(RefCountedStringTest, Comparison) {
  RefCountedString a("abc");
  RefCountedString a1("abc");
  std::string_view a_sv = "abc";
  const char* a_cstr = "abc";
  RefCountedString b("def");
  std::string_view b_sv = "def";
  const char* b_cstr = "def";
  EXPECT_TRUE(a == a);
  EXPECT_TRUE(a == a1);
  EXPECT_TRUE(a == a_sv);
  EXPECT_TRUE(a == a_cstr);
  EXPECT_TRUE(a_sv == a);
  EXPECT_TRUE(a_cstr == a);
  EXPECT_FALSE(a != a);
  EXPECT_FALSE(a != a1);
  EXPECT_FALSE(a != a_sv);
  EXPECT_FALSE(a != a_cstr);
  EXPECT_FALSE(a_sv != a);
  EXPECT_FALSE(a_cstr != a);
  EXPECT_TRUE(a <= a);
  EXPECT_TRUE(a <= a_sv);
  EXPECT_TRUE(a <= a_cstr);
  EXPECT_TRUE(a_sv <= a);
  EXPECT_TRUE(a_cstr <= a);
  EXPECT_TRUE(a <= a1);
  EXPECT_TRUE(a >= a);
  EXPECT_TRUE(a >= a_sv);
  EXPECT_TRUE(a >= a_cstr);
  EXPECT_TRUE(a_sv >= a);
  EXPECT_TRUE(a_cstr >= a);
  EXPECT_TRUE(a >= a1);
  EXPECT_TRUE(a <= b);
  EXPECT_TRUE(a <= b_sv);
  EXPECT_TRUE(a <= b_cstr);
  EXPECT_TRUE(a_sv <= b);
  EXPECT_TRUE(a_cstr <= b);
  EXPECT_TRUE(a <= b_sv);
  EXPECT_TRUE(a <= b_cstr);
  EXPECT_TRUE(a < b);
  EXPECT_TRUE(a < b_sv);
  EXPECT_TRUE(a < b_cstr);
  EXPECT_TRUE(a_sv < b);
  EXPECT_TRUE(a_cstr < b);
  EXPECT_FALSE(a > b);
  EXPECT_FALSE(a_sv > b);
  EXPECT_FALSE(a_cstr > b);
  EXPECT_FALSE(a > b_sv);
  EXPECT_FALSE(a > b_cstr);
  EXPECT_FALSE(a >= b);
  EXPECT_FALSE(a >= b_sv);
  EXPECT_FALSE(a >= b_cstr);
  EXPECT_FALSE(a_sv >= b);
  EXPECT_FALSE(a_cstr >= b);
}
// Explicit conversion to std::string copies the contents.
TEST(RefCountedStringTest, StdStringConversion) {
  std::string s = static_cast<std::string>(RefCountedString("abc"));
  EXPECT_EQ("abc", s);
}
// operator[] provides character access.
TEST(RefCountedStringTest, Indexing) {
  RefCountedString x = "abc";
  EXPECT_EQ('a', x[0]);
  EXPECT_EQ('c', x[2]);
}
// RefCountedStringWriter allows filling the buffer in place before
// converting (by move) into an immutable RefCountedString.
TEST(RefCountedStringTest, Writer) {
  RefCountedStringWriter writer(3);
  memcpy(writer.data(), "abc", 3);
  RefCountedString s = std::move(writer);
  EXPECT_EQ("abc", s);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/ref_counted_string.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/ref_counted_string_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7a0a8077-f919-4e65-aa04-39bfe4a9ae57 | cpp | google/tensorstore | nditerable_copy | tensorstore/internal/nditerable_copy.cc | tensorstore/internal/nditerable_copy_test.cc | #include "tensorstore/internal/nditerable_copy.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/element_copy_function.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_buffer_management.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal {
// Bundles the source and destination iterables of a copy operation.  Both
// must have the same data type (checked only via assert in debug builds).
NDIterableCopyManager::NDIterableCopyManager(const NDIterable* input,
                                             const NDIterable* output)
    : Base{{{input, output}}} {
  assert(input->dtype() == output->dtype());
}
// Determines how block buffers are supplied for the given iteration layout:
// which buffer kind each iterable uses and which side owns the buffer.
NDIterableCopyManager::BufferParameters
NDIterableCopyManager::GetBufferParameters(
    NDIterable::IterationLayoutView layout) const {
  BufferParameters result;
  auto input_constraint = input()->GetIterationBufferConstraint(layout);
  auto output_constraint = output()->GetIterationBufferConstraint(layout);
  // If at least one iterable supplies its own buffer (non-external), both
  // sides must agree on a single buffer kind strong enough for each.
  if (!input_constraint.external || !output_constraint.external) {
    result.input_buffer_kind = result.output_buffer_kind = std::max(
        input_constraint.min_buffer_kind, output_constraint.min_buffer_kind);
  } else {
    result.input_buffer_kind = input_constraint.min_buffer_kind;
    result.output_buffer_kind = output_constraint.min_buffer_kind;
  }
  // kExternal: both sides require a caller-provided buffer.  kOutput/kInput:
  // the non-external side's buffer is shared with the external side.  kBoth:
  // each side provides its own buffer and elements are copied between them.
  result.buffer_source =
      input_constraint.external
          ? (output_constraint.external ? BufferSource::kExternal
                                        : BufferSource::kOutput)
          : (output_constraint.external ? BufferSource::kInput
                                        : BufferSource::kBoth);
  return result;
}
// Returns the working memory needed per element to copy with this layout:
// the sum of both iterables' own requirements, plus -- when neither side
// provides a buffer (kExternal) -- one element of scratch space and, if an
// indexed buffer kind is required, one Index for the offset array.
std::ptrdiff_t NDIterableCopyManager::GetWorkingMemoryBytesPerElement(
    NDIterable::IterationLayoutView layout) const {
  auto buffer_parameters = GetBufferParameters(layout);
  std::ptrdiff_t num_bytes = 0;
  num_bytes += input()->GetWorkingMemoryBytesPerElement(
      layout, buffer_parameters.input_buffer_kind);
  num_bytes += output()->GetWorkingMemoryBytesPerElement(
      layout, buffer_parameters.output_buffer_kind);
  if (buffer_parameters.buffer_source == BufferSource::kExternal) {
    num_bytes += input()->dtype()->size;
    if (std::max(buffer_parameters.input_buffer_kind,
                 buffer_parameters.output_buffer_kind) ==
        IterationBufferKind::kIndexed) {
      num_bytes += sizeof(Index);
    }
  }
  return num_bytes;
}
// Copy step for BufferSource::kBoth: each iterator provides its own buffer;
// a block is fetched from each, elements are copied between them, and the
// output block is committed.  The &&-chain short-circuits on the first
// failure, with the failing callee setting `*status`.
bool NDIteratorCopyManager::CopyImplBoth(NDIteratorCopyManager* self,
                                         tensorstore::span<const Index> indices,
                                         IterationBufferShape block_shape,
                                         absl::Status* status) {
  IterationBufferPointer input_pointer, output_pointer;
  return self->input_->GetBlock(indices, block_shape, &input_pointer, status) &&
         self->output_->GetBlock(indices, block_shape, &output_pointer,
                                 status) &&
         self->copy_elements_function_(nullptr, block_shape, input_pointer,
                                       output_pointer, status) &&
         self->output_->UpdateBlock(indices, block_shape, output_pointer,
                                    status);
}
bool NDIteratorCopyManager::CopyImplInput(
    NDIteratorCopyManager* self, tensorstore::span<const Index> indices,
    IterationBufferShape block_shape, absl::Status* status) {
  // A single buffer pointer is threaded through all three calls: input block
  // fetch, output block fetch, and output block commit.  Each step
  // short-circuits on failure.
  IterationBufferPointer block_ptr;
  if (!self->input_->GetBlock(indices, block_shape, &block_ptr, status)) {
    return false;
  }
  if (!self->output_->GetBlock(indices, block_shape, &block_ptr, status)) {
    return false;
  }
  return self->output_->UpdateBlock(indices, block_shape, block_ptr, status);
}
bool NDIteratorCopyManager::CopyImplOutput(
    NDIteratorCopyManager* self, tensorstore::span<const Index> indices,
    IterationBufferShape block_shape, absl::Status* status) {
  // Mirror of CopyImplInput with the output block fetched first: one buffer
  // pointer is passed through output fetch, input fetch, and output commit,
  // short-circuiting on the first failure.
  IterationBufferPointer block_ptr;
  if (!self->output_->GetBlock(indices, block_shape, &block_ptr, status)) {
    return false;
  }
  if (!self->input_->GetBlock(indices, block_shape, &block_ptr, status)) {
    return false;
  }
  return self->output_->UpdateBlock(indices, block_shape, block_ptr, status);
}
// Copy implementation used when an externally-managed intermediate buffer is
// required: the input iterator fills buffer_pointers()[0][0], the output
// iterator uses buffer_pointers()[1][0], and the output block is committed
// from the latter.
bool NDIteratorCopyManager::CopyImplExternal(
    NDIteratorCopyManager* self, tensorstore::span<const Index> indices,
    IterationBufferShape block_shape, absl::Status* status) {
  return self->input_->GetBlock(indices, block_shape,
                                &self->buffer_manager_.buffer_pointers()[0][0],
                                status) &&
         self->output_->GetBlock(indices, block_shape,
                                 &self->buffer_manager_.buffer_pointers()[1][0],
                                 status) &&
         self->output_->UpdateBlock(
             indices, block_shape,
             self->buffer_manager_.buffer_pointers()[1][0], status);
}
// Constructs iterators for the input and output iterables and selects the
// copy strategy implied by `iterable.GetBufferParameters(layout)`.
NDIteratorCopyManager::NDIteratorCopyManager(
    const NDIterableCopyManager& iterable,
    NDIterable::IterationBufferLayoutView layout, ArenaAllocator<> allocator)
    : buffer_manager_(allocator) {
  auto buffer_parameters = iterable.GetBufferParameters(layout);
  input_ = iterable.input()->GetIterator(
      {layout, buffer_parameters.input_buffer_kind});
  output_ = iterable.output()->GetIterator(
      {layout, buffer_parameters.output_buffer_kind});
  // Dispatch on where the intermediate buffer lives.  Only `kBoth` needs an
  // element-wise copy function, and only `kExternal` needs the buffer
  // manager to provide storage.
  switch (buffer_parameters.buffer_source) {
    case NDIterableCopyManager::BufferSource::kInput:
      copy_impl_ = NDIteratorCopyManager::CopyImplInput;
      break;
    case NDIterableCopyManager::BufferSource::kOutput:
      copy_impl_ = NDIteratorCopyManager::CopyImplOutput;
      break;
    case NDIterableCopyManager::BufferSource::kBoth:
      copy_impl_ = NDIteratorCopyManager::CopyImplBoth;
      // Element-wise copy routine for the input dtype, specialized for the
      // chosen input buffer kind.
      copy_elements_function_ =
          iterable.input()
              ->dtype()
              ->copy_assign[buffer_parameters.input_buffer_kind];
      break;
    case NDIterableCopyManager::BufferSource::kExternal:
      copy_impl_ = NDIteratorCopyManager::CopyImplExternal;
      // One buffer of the input dtype, with separate buffer kinds for the
      // input-facing and output-facing views.
      buffer_manager_.Initialize(layout.block_shape,
                                 {{iterable.input()->dtype()}},
                                 {{{{buffer_parameters.input_buffer_kind,
                                     buffer_parameters.output_buffer_kind}}}});
      break;
  }
}
// Convenience constructor: wraps the raw iterables in an
// NDIterableCopyManager and delegates to the manager-based constructor.
NDIterableCopier::NDIterableCopier(const NDIterable& input,
                                   const NDIterable& output,
                                   tensorstore::span<const Index> shape,
                                   IterationConstraints constraints,
                                   Arena* arena)
    : NDIterableCopier(NDIterableCopyManager(&input, &output), shape,
                       constraints, arena) {}
// Computes the iteration layout and block shape for the copy, then builds
// the iterator-level copy manager.  Member initialization order matters:
// block_shape_ depends on layout_info_, and iterator_copy_manager_ depends
// on both.
NDIterableCopier::NDIterableCopier(
    const NDIterableCopyManager& iterable_copy_manager,
    tensorstore::span<const Index> shape, IterationConstraints constraints,
    Arena* arena)
    : layout_info_(iterable_copy_manager, shape, constraints),
      block_shape_(GetNDIterationBlockShape(
          iterable_copy_manager.GetWorkingMemoryBytesPerElement(
              layout_info_.layout_view()),
          layout_info_.iteration_shape)),
      iterator_copy_manager_(iterable_copy_manager,
                             {layout_info_.layout_view(), block_shape_},
                             arena) {}
// Copies all elements from the input iterable to the output iterable,
// advancing block by block over `layout_info_.iteration_shape`.  Returns the
// first element-copy error encountered, if any.
absl::Status NDIterableCopier::Copy() {
  tensorstore::span<const Index> iteration_shape = layout_info_.iteration_shape;
  std::fill_n(position_, iteration_shape.size(), static_cast<Index>(0));
  if (layout_info_.empty) {
    return absl::OkStatus();
  }
  absl::Status copy_status;
  if (Index inner_block_size = block_shape_[1];
      inner_block_size != iteration_shape.back()) {
    // The innermost dimension is split across multiple blocks; iterate with
    // single-row blocks of up to `inner_block_size` elements.
    assert(block_shape_[0] == 1);
    for (Index block_size = inner_block_size; block_size;) {
      if (!iterator_copy_manager_.Copy(
              tensorstore::span<const Index>(position_, iteration_shape.size()),
              {1, block_size}, &copy_status)) {
        return GetElementCopyErrorStatus(std::move(copy_status));
      }
      block_size = StepBufferPositionForward(iteration_shape, block_size,
                                             inner_block_size, position_);
    }
  } else {
    // The whole innermost dimension fits in one block; step over the outer
    // dimensions instead.
    const Index outer_block_size = block_shape_[0];
    for (Index block_size = outer_block_size; block_size;) {
      if (!iterator_copy_manager_.Copy(
              tensorstore::span<const Index>(position_, iteration_shape.size()),
              {block_size, inner_block_size}, &copy_status)) {
        return GetElementCopyErrorStatus(std::move(copy_status));
      }
      block_size = StepBufferPositionForward(
          iteration_shape.first(iteration_shape.size() - 1), block_size,
          outer_block_size, position_);
    }
  }
  return absl::OkStatus();
}
}
} | #include "tensorstore/internal/nditerable_copy.h"
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::dtype_v;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::Shared;
using ::tensorstore::internal::GetElementwiseInputTransformNDIterable;
using ::tensorstore::internal::GetElementwiseOutputTransformNDIterable;
using ::tensorstore::internal::GetTransformedArrayNDIterable;
// Demonstrates error propagation: the output-side elementwise transform
// fails on the element with value 5, and the copier must surface that status
// while leaving already-copied elements in place.
TEST(NDIterableCopyTest, Example) {
  auto source_array = MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
  auto dest_array = tensorstore::AllocateArray<int>(
      {2, 3}, tensorstore::c_order, tensorstore::value_init);
  // Copies each element, but aborts with UnknownError("5") on value 5.
  auto dest_element_transform = [](const int* source, int* dest, void* arg) {
    auto* status = static_cast<absl::Status*>(arg);
    if (*source == 5) {
      *status = absl::UnknownError("5");
      return false;
    }
    *dest = *source;
    return true;
  };
  tensorstore::internal::ElementwiseClosure<2, void*> dest_closure =
      tensorstore::internal::SimpleElementwiseFunction<
          decltype(dest_element_transform)(const int, int),
          void*>::Closure(&dest_element_transform);
  tensorstore::internal::Arena arena;
  auto source_iterable =
      GetTransformedArrayNDIterable(source_array, &arena).value();
  auto dest_iterable = GetElementwiseOutputTransformNDIterable(
      GetTransformedArrayNDIterable(dest_array, &arena).value(), dtype_v<int>,
      dest_closure, &arena);
  tensorstore::internal::NDIterableCopier copier(
      *source_iterable, *dest_iterable, dest_array.shape(),
      tensorstore::c_order, &arena);
  // The copy stops at 5; the elements before it were written (C order).
  EXPECT_EQ(absl::UnknownError("5"), copier.Copy());
  EXPECT_EQ(MakeArray<int>({{1, 2, 3}, {4, 0, 0}}), dest_array);
}
// Helper: copies `source_array` to `dest_array` through an intermediate
// element type, applying `source_element_transform` on the input side and
// `dest_element_transform` on the output side.  Returns the copy status.
template <typename IntermediateElement, typename SourceArray,
          typename SourceElementTransform, typename DestElementTransform,
          typename DestArray>
absl::Status TestCopy(tensorstore::IterationConstraints constraints,
                      SourceArray source_array,
                      SourceElementTransform source_element_transform,
                      DestElementTransform dest_element_transform,
                      DestArray dest_array) {
  tensorstore::internal::Arena arena;
  tensorstore::internal::ElementwiseClosure<2, void*> source_closure =
      tensorstore::internal::SimpleElementwiseFunction<
          SourceElementTransform(typename SourceArray::Element,
                                 IntermediateElement),
          void*>::Closure(&source_element_transform);
  tensorstore::internal::ElementwiseClosure<2, void*> dest_closure =
      tensorstore::internal::SimpleElementwiseFunction<
          DestElementTransform(IntermediateElement,
                               typename DestArray::Element),
          void*>::Closure(&dest_element_transform);
  auto source_iterable = GetElementwiseInputTransformNDIterable(
      {{GetTransformedArrayNDIterable(source_array, &arena).value()}},
      dtype_v<IntermediateElement>, source_closure, &arena);
  auto dest_iterable = GetElementwiseOutputTransformNDIterable(
      GetTransformedArrayNDIterable(dest_array, &arena).value(),
      dtype_v<IntermediateElement>, dest_closure, &arena);
  return tensorstore::internal::NDIterableCopier(
             *source_iterable, *dest_iterable, dest_array.shape(), constraints,
             &arena)
      .Copy();
}
// Exercises the external-buffer copy path: both sides have elementwise
// transforms (int -> unsigned int -> double), and each side is optionally
// wrapped in an index-array transform to vary the buffer kinds.
TEST(NDIterableCopyTest, ExternalBuffer) {
  for (const bool indexed_source : {false, true}) {
    for (const bool indexed_dest : {false, true}) {
      SCOPED_TRACE(absl::StrCat("indexed_source=", indexed_source,
                                ", indexed_dest=", indexed_dest)
                       .c_str());
      auto source = tensorstore::MakeArray<int>({{1, 2, 3}, {4, 5, 6}});
      tensorstore::TransformedArray<Shared<const int>> tsource = source;
      if (indexed_source) {
        // Identity index-array transform: same elements, indexed iteration.
        tsource = (source |
                   tensorstore::Dims(0, 1).OuterIndexArraySlice(
                       MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2})))
                      .value();
      }
      auto dest = tensorstore::AllocateArray<double>(source.shape());
      tensorstore::TransformedArray<Shared<double>> tdest = dest;
      if (indexed_dest) {
        tdest =
            (dest | tensorstore::Dims(0, 1).OuterIndexArraySlice(
                        MakeArray<Index>({0, 1}), MakeArray<Index>({0, 1, 2})))
                .value();
      }
      // Expected result: dest = (source * 2) + 100.
      EXPECT_EQ(absl::OkStatus(),
                (TestCopy<unsigned int>(
                    {}, tsource,
                    [](const int* source, unsigned int* dest, void* status) {
                      *dest = *source * 2;
                    },
                    [](const unsigned int* source, double* dest, void* status) {
                      *dest = *source + 100.0;
                    },
                    tdest)));
      EXPECT_EQ(tensorstore::MakeArray<double>(
                    {{102.0, 104.0, 106.0}, {108.0, 110.0, 112.0}}),
                dest);
    }
  }
}
// Fixture parameterized on the debug-only "unit block size" test override
// (presumably forces minimal block sizes — only settable in !NDEBUG builds).
class MaybeUnitBlockSizeTest : public ::testing::TestWithParam<bool> {
 public:
  MaybeUnitBlockSizeTest() {
#ifndef NDEBUG
    tensorstore::internal::SetNDIterableTestUnitBlockSize(GetParam());
#endif
  }
  // Restore the default behavior so later tests are unaffected.
  ~MaybeUnitBlockSizeTest() {
#ifndef NDEBUG
    tensorstore::internal::SetNDIterableTestUnitBlockSize(false);
#endif
  }
};
INSTANTIATE_TEST_SUITE_P(NormalBlockSize, MaybeUnitBlockSizeTest,
::testing::Values(false));
#ifndef NDEBUG
INSTANTIATE_TEST_SUITE_P(UnitBlockSize, MaybeUnitBlockSizeTest,
::testing::Values(true));
#endif
// Copies through a source transformed by an index array that reverses
// element order; `length` is large enough that the copy spans multiple
// iteration blocks.
TEST_P(MaybeUnitBlockSizeTest, InnerIndexArray) {
  constexpr Index length = 5000;
  auto source = tensorstore::AllocateArray<int>({length});
  auto dest = tensorstore::AllocateArray<int>({length});
  auto expected = tensorstore::AllocateArray<int>({length});
  auto indices = tensorstore::AllocateArray<int64_t>({length});
  // Use signed (Index) arithmetic throughout: the previous formulation mixed
  // `size_t` with `int` and negated an unsigned expression, relying on
  // implementation-defined narrowing when storing into `expected`.
  for (Index i = 0; i < length; ++i) {
    source(i) = static_cast<int>(-i);
    dest(i) = 42;
    indices(i) = length - 1 - i;
    expected(i) = static_cast<int>(-(length - 1 - i));
  }
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      tensorstore::TransformedArray<Shared<const int>> tsource,
      source | tensorstore::Dims(0).IndexArraySlice(indices));
  tensorstore::TransformedArray<Shared<int>> tdest = dest;
  tensorstore::internal::Arena arena;
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto source_iterable, GetTransformedArrayNDIterable(tsource, &arena));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto dest_iterable, GetTransformedArrayNDIterable(tdest, &arena));
  TENSORSTORE_ASSERT_OK(tensorstore::internal::NDIterableCopier(
                            *source_iterable, *dest_iterable, dest.shape(),
                            {}, &arena)
                            .Copy());
  EXPECT_EQ(expected, dest);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_copy.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_copy_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
9234e0ca-6ac8-40ec-8b17-74532a22dca1 | cpp | tensorflow/tensorflow | hlo_creation_utils | third_party/xla/xla/service/hlo_creation_utils.cc | third_party/xla/xla/service/hlo_creation_utils_test.cc | #include "xla/service/hlo_creation_utils.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/lib/comparators.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/builder/xla_computation.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
using absl::StrCat;
// Adds a unary instruction of `opcode` on `operand` to the operand's
// computation, with the result shape inferred by ShapeInference.
absl::StatusOr<HloInstruction*> MakeUnaryHlo(HloOpcode opcode,
                                             HloInstruction* operand,
                                             const OpMetadata* metadata) {
  HloComputation* computation = operand->parent();
  TF_ASSIGN_OR_RETURN(Shape unary_op_shape,
                      ShapeInference::InferUnaryOpShape(opcode, operand));
  return computation->AddInstruction(
      HloInstruction::CreateUnary(unary_op_shape, opcode, operand), metadata);
}
// Adds a kCopy of `from` with result shape `to`.  Note: uses the
// instruction-level AddInstruction (on `from`), not the computation-level
// overload used by most other helpers in this file.
HloInstruction* MakeCopyHlo(HloInstruction* from, const Shape& to) {
  return from->AddInstruction(
      HloInstruction::CreateUnary(to, HloOpcode::kCopy, from));
}
// Adds a binary instruction of `opcode` on `lhs`/`rhs`; both operands must
// live in the same computation.
absl::StatusOr<HloInstruction*> MakeBinaryHlo(
    HloOpcode opcode, HloInstruction* lhs, HloInstruction* rhs,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = lhs->parent();
  CHECK_EQ(computation, rhs->parent());
  TF_ASSIGN_OR_RETURN(Shape binary_op_shape,
                      ShapeInference::InferBinaryOpShape(opcode, lhs, rhs));
  return computation->AddInstruction(
      HloInstruction::CreateBinary(binary_op_shape, opcode, lhs, rhs), metadata,
      frontend_attributes);
}
// Adds a kCompare instruction with the given comparison `direction`; both
// operands must live in the same computation.
absl::StatusOr<HloInstruction*> MakeCompareHlo(
    ComparisonDirection direction, HloInstruction* lhs, HloInstruction* rhs,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = lhs->parent();
  CHECK_EQ(computation, rhs->parent());
  TF_ASSIGN_OR_RETURN(
      Shape binary_op_shape,
      ShapeInference::InferBinaryOpShape(HloOpcode::kCompare, lhs, rhs));
  return computation->AddInstruction(
      HloInstruction::CreateCompare(binary_op_shape, lhs, rhs, direction),
      metadata, frontend_attributes);
}
// Adds a kPad instruction padding `operand` with `padding_value` according
// to `padding_config`; both must live in the same computation.
absl::StatusOr<HloInstruction*> MakePadHlo(
    HloInstruction* operand, HloInstruction* padding_value,
    const PaddingConfig& padding_config, const OpMetadata* metadata,
    const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = operand->parent();
  CHECK_EQ(computation, padding_value->parent());
  TF_ASSIGN_OR_RETURN(
      Shape pad_shape,
      ShapeInference::InferPadShape(operand->shape(), padding_value->shape(),
                                    padding_config));
  return computation->AddInstruction(
      HloInstruction::CreatePad(pad_shape, operand, padding_value,
                                padding_config),
      metadata, frontend_attributes);
}
// Adds a kSlice instruction over `operand` with per-dimension start/limit
// indices and strides.
absl::StatusOr<HloInstruction*> MakeSliceHlo(
    HloInstruction* operand, absl::Span<const int64_t> start_indices,
    absl::Span<const int64_t> limit_indices, absl::Span<const int64_t> strides,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = operand->parent();
  TF_ASSIGN_OR_RETURN(Shape slice_shape, ShapeInference::InferSliceShape(
                                             operand->shape(), start_indices,
                                             limit_indices, strides));
  return computation->AddInstruction(
      HloInstruction::CreateSlice(slice_shape, operand, start_indices,
                                  limit_indices, strides),
      metadata, frontend_attributes);
}
// Adds a kConvolution of `lhs` and `rhs`; the result element type can be
// overridden via `preferred_element_type`.
absl::StatusOr<HloInstruction*> MakeConvolveHlo(
    HloInstruction* lhs, HloInstruction* rhs, int64_t feature_group_count,
    int64_t batch_group_count, const Window& window,
    const ConvolutionDimensionNumbers& dimension_numbers,
    const PrecisionConfig& precision_config,
    std::optional<PrimitiveType> preferred_element_type,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = lhs->parent();
  CHECK_EQ(computation, rhs->parent());
  TF_ASSIGN_OR_RETURN(
      Shape convolve_shape,
      ShapeInference::InferConvolveShape(
          lhs->shape(), rhs->shape(), feature_group_count, batch_group_count,
          window, dimension_numbers, preferred_element_type));
  return computation->AddInstruction(
      HloInstruction::CreateConvolve(
          convolve_shape, lhs, rhs, feature_group_count, batch_group_count,
          window, dimension_numbers, precision_config),
      metadata, frontend_attributes);
}
// Adds a kTranspose of `operand` permuting its dimensions by `dimensions`.
absl::StatusOr<HloInstruction*> MakeTransposeHlo(
    HloInstruction* operand, absl::Span<const int64_t> dimensions) {
  TF_ASSIGN_OR_RETURN(
      Shape transpose_shape,
      ShapeInference::InferTransposeShape(operand->shape(), dimensions));
  return operand->AddInstruction(
      HloInstruction::CreateTranspose(transpose_shape, operand, dimensions));
}
// Adds a kReshape of `operand` to the explicit `result_shape`.
absl::StatusOr<HloInstruction*> MakeReshapeHlo(const Shape& result_shape,
                                               HloInstruction* operand) {
  return operand->AddInstruction(
      HloInstruction::CreateReshape(result_shape, operand));
}
// Convenience overload: builds the result shape from the operand's element
// type and the given dimension bounds.
absl::StatusOr<HloInstruction*> MakeReshapeHlo(
    absl::Span<const int64_t> result_shape_dim_bounds,
    HloInstruction* operand) {
  Shape new_shape = ShapeUtil::MakeShape(operand->shape().element_type(),
                                         result_shape_dim_bounds);
  return MakeReshapeHlo(new_shape, operand);
}
// Adds a kDynamicSlice of `operand` using one scalar start index per
// dimension.  Returns `operand` unchanged when there is nothing to slice.
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
    HloInstruction* operand, absl::Span<HloInstruction* const> start_indices,
    absl::Span<const int64_t> slice_sizes, const OpMetadata* metadata) {
  if (start_indices.empty() || slice_sizes.empty()) {
    return operand;
  }
  HloComputation* computation = operand->parent();
  // All start indices share the element type of the first one.
  std::vector<Shape> scalar_start_indices_shapes(
      start_indices.size(),
      ShapeUtil::MakeShape(start_indices[0]->shape().element_type(), {}));
  TF_ASSIGN_OR_RETURN(
      Shape dynamic_slice_shape,
      ShapeInference::InferDynamicSliceShape(
          operand->shape(), scalar_start_indices_shapes, slice_sizes));
  return computation->AddInstruction(
      HloInstruction::CreateDynamicSlice(dynamic_slice_shape, operand,
                                         start_indices, slice_sizes),
      metadata);
}
// Overload taking the start indices as a single rank-1 vector: each element
// is sliced out and reshaped to a scalar before building the kDynamicSlice.
absl::StatusOr<HloInstruction*> MakeDynamicSliceHlo(
    HloInstruction* operand, HloInstruction* start_indices,
    absl::Span<const int64_t> slice_sizes, const OpMetadata* metadata) {
  HloComputation* computation = operand->parent();
  CHECK_EQ(computation, start_indices->parent());
  int64_t rank = start_indices->shape().dimensions(0);
  std::vector<HloInstruction*> scalar_start_indices;
  for (int i = 0; i < rank; ++i) {
    // Slice out element i, then reshape the 1-element vector to a scalar.
    auto slice = computation->AddInstruction(HloInstruction::CreateSlice(
        ShapeUtil::MakeShape(start_indices->shape().element_type(), {1}),
        start_indices, {i}, {i + 1}, {1}));
    scalar_start_indices.push_back(
        computation->AddInstruction(HloInstruction::CreateReshape(
            ShapeUtil::MakeShape(start_indices->shape().element_type(), {}),
            slice)));
  }
  std::vector<Shape> scalar_start_indices_shapes(
      rank, ShapeUtil::MakeShape(start_indices->shape().element_type(), {}));
  TF_ASSIGN_OR_RETURN(
      Shape dynamic_slice_shape,
      ShapeInference::InferDynamicSliceShape(
          operand->shape(), scalar_start_indices_shapes, slice_sizes));
  return computation->AddInstruction(
      HloInstruction::CreateDynamicSlice(dynamic_slice_shape, operand,
                                         scalar_start_indices, slice_sizes),
      metadata);
}
// Adds a kDynamicUpdateSlice writing `update` into `operand` at positions
// given by the rank-1 `start_indices` vector (scalarized per dimension, as
// in the vector overload of MakeDynamicSliceHlo).
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
    HloInstruction* operand, HloInstruction* update,
    HloInstruction* start_indices, const OpMetadata* metadata) {
  HloComputation* computation = operand->parent();
  CHECK_EQ(computation, update->parent());
  CHECK_EQ(computation, start_indices->parent());
  int64_t rank = start_indices->shape().dimensions(0);
  std::vector<HloInstruction*> scalar_start_indices;
  for (int i = 0; i < rank; ++i) {
    // Slice out element i, then reshape the 1-element vector to a scalar.
    auto slice = computation->AddInstruction(HloInstruction::CreateSlice(
        ShapeUtil::MakeShape(start_indices->shape().element_type(), {1}),
        start_indices, {i}, {i + 1}, {1}));
    scalar_start_indices.push_back(
        computation->AddInstruction(HloInstruction::CreateReshape(
            ShapeUtil::MakeShape(start_indices->shape().element_type(), {}),
            slice)));
  }
  std::vector<Shape> scalar_start_indices_shapes(
      rank, ShapeUtil::MakeShape(start_indices->shape().element_type(), {}));
  TF_ASSIGN_OR_RETURN(
      Shape dynamic_update_slice_shape,
      ShapeInference::InferDynamicUpdateSliceShape(
          operand->shape(), update->shape(), scalar_start_indices_shapes));
  return computation->AddInstruction(
      HloInstruction::CreateDynamicUpdateSlice(
          dynamic_update_slice_shape, operand, update, scalar_start_indices),
      metadata);
}
// Overload taking one scalar start index per dimension directly.
absl::StatusOr<HloInstruction*> MakeDynamicUpdateSliceHlo(
    HloInstruction* operand, HloInstruction* update,
    absl::Span<HloInstruction* const> start_indices,
    const OpMetadata* metadata) {
  HloComputation* computation = operand->parent();
  CHECK_EQ(computation, update->parent());
  std::vector<Shape> scalar_start_indices_shapes;
  scalar_start_indices_shapes.reserve(start_indices.size());
  for (auto start_index : start_indices) {
    scalar_start_indices_shapes.push_back(start_index->shape());
  }
  TF_ASSIGN_OR_RETURN(
      Shape dynamic_update_slice_shape,
      ShapeInference::InferDynamicUpdateSliceShape(
          operand->shape(), update->shape(), scalar_start_indices_shapes));
  return computation->AddInstruction(
      HloInstruction::CreateDynamicUpdateSlice(dynamic_update_slice_shape,
                                               operand, update, start_indices),
      metadata);
}
// Convenience overload: derives the result shape from the operand's element
// type and `result_shape_bounds`, then defers to the Shape-based overload.
HloInstruction* MakeBroadcastHlo(
    HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
    absl::Span<const int64_t> result_shape_bounds, const OpMetadata* metadata,
    const FrontendAttributes* frontend_attributes) {
  const PrimitiveType element_type = operand->shape().element_type();
  return MakeBroadcastHlo(
      operand, broadcast_dimensions,
      ShapeUtil::MakeShape(element_type, result_shape_bounds), metadata,
      frontend_attributes);
}
// Adds a kBroadcast of `operand` to `shape`, mapping the operand's
// dimensions to result dimensions via `broadcast_dimensions`.
HloInstruction* MakeBroadcastHlo(
    HloInstruction* operand, absl::Span<const int64_t> broadcast_dimensions,
    const Shape& shape, const OpMetadata* metadata,
    const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = operand->parent();
  return computation->AddInstruction(
      HloInstruction::CreateBroadcast(shape, operand, broadcast_dimensions),
      metadata, frontend_attributes);
}
// Adds a kGetTupleElement extracting element `index` from tuple `operand`.
absl::StatusOr<HloInstruction*> MakeGetTupleElementHlo(
    HloInstruction* operand, int64_t index, const OpMetadata* metadata) {
  HloComputation* computation = operand->parent();
  TF_ASSIGN_OR_RETURN(
      Shape gte_shape,
      ShapeInference::InferGetTupleElementShape(operand->shape(), index));
  return computation->AddInstruction(
      HloInstruction::CreateGetTupleElement(gte_shape, operand, index),
      metadata);
}
// Adds a kConcatenate of `operands` along `dimension`; all operands must
// live in the same computation.
absl::StatusOr<HloInstruction*> MakeConcatHlo(
    absl::Span<HloInstruction* const> operands, int64_t dimension,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  CHECK_GT(operands.size(), 0);
  HloComputation* computation = operands[0]->parent();
  CHECK(absl::c_all_of(operands, [&](HloInstruction* instr) {
    return instr->parent() == computation;
  }));
  std::vector<const Shape*> operand_shapes;
  absl::c_transform(operands, std::back_inserter(operand_shapes),
                    [](HloInstruction* instr) { return &instr->shape(); });
  TF_ASSIGN_OR_RETURN(Shape concat_shape, ShapeInference::InferConcatOpShape(
                                              operand_shapes, dimension));
  return computation->AddInstruction(
      HloInstruction::CreateConcatenate(concat_shape, operands, dimension),
      metadata, frontend_attributes);
}
// Adds a kConvert of `hlo` to element type `type` (no-op if already that
// type), adjusting the layout's element_size_in_bits for sub-byte types.
HloInstruction* MakeConvertToHlo(HloInstruction* hlo, PrimitiveType type,
                                 const OpMetadata* metadata) {
  if (hlo->shape().element_type() == type) {
    return hlo;
  }
  Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
  if (primitive_util::IsSubByteNonPredType(shape.element_type())) {
    // Sub-byte (non-PRED) types record their bit width in the layout.
    shape.mutable_layout()->set_element_size_in_bits(
        primitive_util::BitWidth(shape.element_type()));
  } else {
    shape.mutable_layout()->set_element_size_in_bits(0);
  }
  hlo = hlo->parent()->AddInstruction(HloInstruction::CreateConvert(shape, hlo),
                                      metadata);
  CHECK_EQ(hlo->shape().element_type(), type);
  return hlo;
}
// Adds a kBitcast of `hlo` to `shape` in the same computation.
HloInstruction* MakeBitcastHlo(HloInstruction* hlo, const Shape& shape,
                               const OpMetadata* metadata) {
  HloComputation* computation = hlo->parent();
  return computation->AddInstruction(HloInstruction::CreateBitcast(shape, hlo),
                                     metadata);
}
// Adds a kBitcastConvert of `hlo` to element type `type` (no-op if already
// that type).  PRED is not bit-convertible, so conversions to or from PRED
// fall back to a value-converting kConvert.
HloInstruction* MakeBitcastConvertToHlo(HloInstruction* hlo, PrimitiveType type,
                                        const OpMetadata* metadata) {
  if (hlo->shape().element_type() == type) {
    return hlo;
  }
  // Check the PRED fallback before computing the result shape, so the
  // fallback path does no wasted shape construction.
  if (type == PRED || hlo->shape().element_type() == PRED) {
    return MakeConvertToHlo(hlo, type);
  }
  Shape shape = ShapeUtil::ChangeElementType(hlo->shape(), type);
  hlo = hlo->parent()->AddInstruction(
      HloInstruction::CreateBitcastConvert(shape, hlo), metadata);
  CHECK_EQ(hlo->shape().element_type(), type);
  return hlo;
}
// Adds a kIota of `shape` counting along `iota_dimension`.
HloInstruction* MakeIotaHlo(HloComputation* computation, const Shape& shape,
                            int64_t iota_dimension) {
  return computation->AddInstruction(
      HloInstruction::CreateIota(shape, iota_dimension));
}
// Adds a kDot of `lhs` and `rhs`, optionally sparse (per `sparsity` /
// `sparse_meta`) and with an optional preferred result element type.
absl::StatusOr<HloInstruction*> MakeDotHlo(
    HloInstruction* lhs, HloInstruction* rhs,
    const DotDimensionNumbers& dim_numbers,
    const PrecisionConfig& precision_config,
    std::optional<PrimitiveType> preferred_element_type,
    std::vector<SparsityDescriptor> sparsity,
    absl::Span<HloInstruction* const> sparse_meta, const OpMetadata* metadata) {
  HloComputation* computation = lhs->parent();
  CHECK_EQ(computation, rhs->parent());
  TF_ASSIGN_OR_RETURN(Shape dot_shape,
                      ShapeInference::InferDotOpShape(
                          lhs->shape(), rhs->shape(), dim_numbers,
                          preferred_element_type, absl::MakeSpan(sparsity)));
  return computation->AddInstruction(
      HloInstruction::CreateDot(dot_shape, lhs, rhs, dim_numbers,
                                precision_config, sparsity, sparse_meta),
      metadata);
}
// Adds a kMap applying `map_computation` element-wise over `operands`,
// mapping over all dimensions of the highest-rank operand.
absl::StatusOr<HloInstruction*> MakeMapHlo(
    absl::Span<HloInstruction* const> operands, HloComputation* map_computation,
    const OpMetadata* metadata) {
  CHECK(!operands.empty()) << "Map Hlo requires at least one operand.";
  HloComputation* computation = operands.front()->parent();
  std::vector<const Shape*> operand_shapes;
  int64_t max_operand_rank = 0;
  for (const HloInstruction* operand : operands) {
    CHECK_EQ(computation, operand->parent());
    operand_shapes.push_back(&operand->shape());
    max_operand_rank = std::max(max_operand_rank, operand->shape().rank());
  }
  // Map over every dimension: [0, max_operand_rank).
  std::vector<int64_t> map_dims(max_operand_rank);
  std::iota(map_dims.begin(), map_dims.end(), 0);
  TF_ASSIGN_OR_RETURN(
      Shape map_shape,
      ShapeInference::InferMapShape(
          operand_shapes, map_computation->ComputeProgramShape(), map_dims));
  return computation->AddInstruction(
      HloInstruction::CreateMap(map_shape, operands, map_computation),
      metadata);
}
// Adds a kReducePrecision of `operand` with the given exponent/mantissa bit
// counts; the result shape equals the operand shape.
HloInstruction* MakeReducePrecisionHlo(HloInstruction* operand,
                                       int exponent_bits, int mantissa_bits,
                                       const OpMetadata* metadata) {
  return operand->parent()->AddInstruction(
      HloInstruction::CreateReducePrecision(operand->shape(), operand,
                                            exponent_bits, mantissa_bits),
      metadata);
}
namespace {
// Builds and registers a scalar computation `(lhs, rhs) -> binary_opcode(lhs,
// rhs)` of element type `dtype`, named after `ctx`, for use as a reducer.
static HloComputation* MakeBinaryScalarComputation(HloOpcode binary_opcode,
                                                   PrimitiveType dtype,
                                                   HloInstruction* ctx,
                                                   HloModule* module) {
  CHECK_NE(ctx, nullptr);
  HloComputation::Builder b(
      absl::StrCat(ctx->name(), ".reduce_sub_computation"));
  const Shape scalar_shape = ShapeUtil::MakeShape(dtype, {});
  HloInstruction* lhs =
      b.AddInstruction(HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
  HloInstruction* rhs =
      b.AddInstruction(HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
  b.AddInstruction(
      HloInstruction::CreateBinary(scalar_shape, binary_opcode, lhs, rhs));
  CHECK_NE(module, nullptr);
  return module->AddEmbeddedComputation(b.Build());
}
}
// Adds a kReduce of `operand` over `dimensions` with the given init value
// and reducer computation; the result shape drops the reduced dimensions.
absl::StatusOr<HloInstruction*> MakeReduceHlo(
    HloInstruction* operand, HloInstruction* init_value,
    absl::Span<const int64_t> dimensions, HloComputation* reduce_computation,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  // Removed an unused `scalar_shape` local that was computed but never read.
  auto result_shape = ShapeUtil::DeleteDimensions(dimensions, operand->shape());
  return operand->parent()->AddInstruction(
      HloInstruction::CreateReduce(result_shape, operand, init_value,
                                   dimensions, reduce_computation),
      metadata, frontend_attributes);
}
// Adds a kReduceWindow of `operand` using an existing reducer computation.
absl::StatusOr<HloInstruction*> MakeReduceWindowHlo(
    HloInstruction* operand, HloInstruction* init_value, const Window& window,
    HloComputation* reduce_computation, const OpMetadata* metadata) {
  TF_ASSIGN_OR_RETURN(Shape inferred_shape,
                      ShapeInference::InferReduceWindowShape(
                          operand->shape(), init_value->shape(), window,
                          reduce_computation->ComputeProgramShape()));
  return operand->parent()->AddInstruction(
      HloInstruction::CreateReduceWindow(inferred_shape, operand, init_value,
                                         window, reduce_computation),
      metadata);
}
// Overload that synthesizes the reducer from a binary opcode over the
// operand's element type.
absl::StatusOr<HloInstruction*> MakeReduceWindowHlo(
    HloInstruction* operand, HloInstruction* init_value, const Window& window,
    HloOpcode binary_opcode, const OpMetadata* metadata) {
  HloComputation* reduce_computation = MakeBinaryScalarComputation(
      binary_opcode, operand->shape().element_type(), operand,
      operand->GetModule());
  TF_ASSIGN_OR_RETURN(Shape inferred_shape,
                      ShapeInference::InferReduceWindowShape(
                          operand->shape(), init_value->shape(), window,
                          reduce_computation->ComputeProgramShape()));
  return operand->parent()->AddInstruction(
      HloInstruction::CreateReduceWindow(inferred_shape, operand, init_value,
                                         window, reduce_computation),
      metadata);
}
// Overload that synthesizes the reducer from a binary opcode over the
// operand's element type, then reduces over `dimensions`.
absl::StatusOr<HloInstruction*> MakeReduceHlo(
    HloInstruction* operand, HloInstruction* init_value,
    absl::Span<const int64_t> dimensions, HloOpcode binary_opcode,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  HloComputation* reduce_computation = MakeBinaryScalarComputation(
      binary_opcode, operand->shape().element_type(), operand,
      operand->GetModule());
  return MakeReduceHlo(operand, init_value, dimensions, reduce_computation,
                       metadata, frontend_attributes);
}
// Overload that reduces over all dimensions of `operand` (to a scalar),
// synthesizing the reducer from a binary opcode.
absl::StatusOr<HloInstruction*> MakeReduceHlo(
    HloInstruction* operand, HloInstruction* init_value,
    HloOpcode binary_opcode, HloModule* module, const OpMetadata* metadata,
    const FrontendAttributes* frontend_attributes) {
  DCHECK_NE(nullptr, module);
  std::vector<int64_t> all_dims(operand->shape().rank());
  std::iota(all_dims.begin(), all_dims.end(), 0);
  HloComputation* reduce_computation = MakeBinaryScalarComputation(
      binary_opcode, operand->shape().element_type(), operand, module);
  return MakeReduceHlo(operand, init_value, all_dims, reduce_computation,
                       metadata, frontend_attributes);
}
// Variadic reduce: reduces several operands simultaneously with a reducer
// whose root is a matching tuple (or a single value for one operand).
absl::StatusOr<HloInstruction*> MakeReduceHlo(
    absl::Span<HloInstruction* const> operands,
    absl::Span<HloInstruction* const> init_values,
    absl::Span<const int64_t> dimensions, HloComputation* reduce_computation,
    const OpMetadata* metadata, const FrontendAttributes* frontend_attributes) {
  CHECK(!operands.empty());
  CHECK_EQ(operands.size(), init_values.size());
  auto root = reduce_computation->root_instruction();
  // The reducer's arity must match the number of operands.
  if (root->shape().IsTuple()) {
    CHECK_EQ(root->shape().tuple_shapes_size(), operands.size());
  } else {
    CHECK_EQ(operands.size(), 1);
  }
  // Each output keeps only the non-reduced dimensions of its operand.
  std::vector<Shape> expected_shapes;
  for (auto operand : operands) {
    expected_shapes.push_back(ShapeUtil::FilterDimensions(
        [&](const int64_t dim) {
          return !absl::c_linear_search(dimensions, dim);
        },
        operand->shape()));
  }
  auto output_shape = ShapeUtil::MakeMaybeTupleShape(expected_shapes);
  return operands[0]->parent()->AddInstruction(
      HloInstruction::CreateReduce(output_shape, operands, init_values,
                                   dimensions, reduce_computation),
      metadata, frontend_attributes);
}
// Adds a kReverse of `operand` along `dimensions`.
absl::StatusOr<HloInstruction*> MakeReverseHlo(
    HloInstruction* operand, absl::Span<const int64_t> dimensions,
    const OpMetadata* metadata) {
  HloComputation* computation = operand->parent();
  TF_ASSIGN_OR_RETURN(Shape reverse_shape, ShapeInference::InferReverseShape(
                                               operand->shape(), dimensions));
  return computation->AddInstruction(
      HloInstruction::CreateReverse(reverse_shape, operand, dimensions),
      metadata);
}
// Adds a kSelect choosing between `on_true` and `on_false` per `pred`.  A
// scalar predicate is broadcast to the operand shape when the operands are
// non-scalar arrays.  If `derived_from` is given, derived-instruction info
// is propagated onto the new instruction(s).  Tuple operands are rejected.
absl::StatusOr<HloInstruction*> MakeSelectHlo(
    HloInstruction* pred, HloInstruction* on_true, HloInstruction* on_false,
    HloInstruction* derived_from, const OpMetadata* metadata,
    const FrontendAttributes* frontend_attributes) {
  HloComputation* computation = pred->parent();
  DCHECK_EQ(computation, on_true->parent());
  DCHECK_EQ(computation, on_false->parent());
  Shape op_shape = on_true->shape();
  if (ShapeUtil::IsScalar(pred->shape())) {
    if (!ShapeUtil::IsScalar(op_shape) && !op_shape.IsTuple()) {
      // Broadcast the scalar predicate to a PRED array of the operand shape.
      pred = computation->AddInstruction(
          HloInstruction::CreateBroadcast(
              ShapeUtil::ChangeElementType(op_shape, PrimitiveType::PRED), pred,
              {}),
          metadata, frontend_attributes);
      if (derived_from) {
        derived_from->SetupDerivedInstruction(pred);
      }
    }
  }
  TF_RET_CHECK(!op_shape.IsTuple());
  HloOpcode select_op_code = HloOpcode::kSelect;
  TF_ASSIGN_OR_RETURN(Shape select_shape,
                      ShapeInference::InferTernaryOpShape(select_op_code, pred,
                                                          on_true, on_false));
  HloInstruction* select = computation->AddInstruction(
      HloInstruction::CreateTernary(select_shape, select_op_code, pred, on_true,
                                    on_false),
      metadata, frontend_attributes);
  if (derived_from) {
    derived_from->SetupDerivedInstruction(select);
  }
  return select;
}
// Returns the single operand unchanged, or wraps multiple operands in a new
// kTuple added to their parent computation. Crashes on an empty span.
HloInstruction* MaybeMakeTuple(absl::Span<HloInstruction* const> operands) {
  CHECK(!operands.empty());
  if (operands.size() > 1) {
    return operands.front()->parent()->AddInstruction(
        HloInstruction::CreateTuple(operands));
  }
  return operands.front();
}
// Builds a kSort of `operands` along `dimension_to_sort` using a standard
// less-than comparator. The comparator is constructed with XlaBuilder,
// round-tripped through a proto into a temporary HloModule, then deep-cloned
// into `module` so the sort can reference it.
absl::StatusOr<HloInstruction*> MakeSortHlo(
    const Shape& sort_shape, absl::Span<HloInstruction* const> operands,
    int64_t dimension_to_sort, bool is_stable, HloComputation::Builder* builder,
    HloModule* module, const OpMetadata* metadata) {
  CHECK(!operands.empty()) << "Sort Hlo requires at least one operand.";
  HloComputation* compare_computation;
  XlaBuilder b("Sort.Compare");
  if (metadata != nullptr) {
    b.SetOpMetadata(*metadata);
  }
  // The comparator takes one scalar pair per operand, typed to match.
  std::vector<PrimitiveType> operand_types(operands.size());
  for (int64_t i = 0; i < operands.size(); ++i) {
    operand_types[i] = operands[i]->shape().element_type();
  }
  XlaComputation comparator = CreateScalarLtComputation(operand_types, &b);
  TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comparator.GetProgramShape());
  HloModuleConfig config(program_shape);
  // Materialize the XlaComputation as HLO, then clone it into `module`.
  TF_ASSIGN_OR_RETURN(auto new_module,
                      HloModule::CreateFromProto(comparator.proto(), config));
  HloCloneContext context(module);
  compare_computation =
      module->DeepCloneComputation(new_module->entry_computation(), &context);
  return builder->AddInstruction(HloInstruction::CreateSort(
      sort_shape, dimension_to_sort, operands, compare_computation, is_stable));
}
// Reshapes `operand` so its first `n` dimensions collapse into a single
// leading dimension whose bound is their product. The collapsed dimension is
// dynamic if any of the collapsed inputs was dynamic; trailing dimensions
// (and their dynamism) are preserved unchanged.
absl::StatusOr<HloInstruction*> CollapseFirstNDims(HloInstruction* operand,
                                                   int64_t n) {
  CHECK_GT(n, 0);
  const Shape& shape = operand->shape();
  CHECK_GE(shape.dimensions_size(), n);

  int64_t collapsed_bound = 1;
  bool collapsed_is_dynamic = false;
  for (int64_t i = 0; i < n; ++i) {
    collapsed_bound *= shape.dimensions(i);
    collapsed_is_dynamic = collapsed_is_dynamic || shape.is_dynamic_dimension(i);
  }

  std::vector<int64_t> result_dims{collapsed_bound};
  result_dims.insert(result_dims.end(), shape.dimensions().begin() + n,
                     shape.dimensions().end());
  std::vector<bool> result_dynamic{collapsed_is_dynamic};
  result_dynamic.insert(result_dynamic.end(),
                        shape.dynamic_dimensions().begin() + n,
                        shape.dynamic_dimensions().end());

  return MakeReshapeHlo(
      ShapeUtil::MakeShape(shape.element_type(), result_dims, result_dynamic),
      operand);
}
// Reshapes `operand` with `n` size-1 dimensions prepended to its shape.
absl::StatusOr<HloInstruction*> PrependDegenerateDims(HloInstruction* operand,
                                                      int64_t n) {
  CHECK_GT(n, 0);
  const Shape& shape = operand->shape();
  // Start with the n leading 1s, then append the original bounds.
  std::vector<int64_t> result_dims(n, 1);
  result_dims.reserve(n + shape.dimensions_size());
  absl::c_copy(shape.dimensions(), std::back_inserter(result_dims));
  return MakeReshapeHlo(result_dims, operand);
}
// Reshapes `operand` so its leading dimension is split into `expanded_dims`;
// the product of `expanded_dims` must equal the leading dimension's bound.
absl::StatusOr<HloInstruction*> ExpandFirstDimIntoNDims(
    HloInstruction* operand, absl::Span<const int64_t> expanded_dims) {
  const Shape& shape = operand->shape();
  CHECK_GT(shape.dimensions_size(), 0);
  CHECK_EQ(shape.dimensions(0), Product(expanded_dims));
  // New bounds: the expansion, followed by the untouched trailing dimensions.
  std::vector<int64_t> result_dims(expanded_dims.begin(), expanded_dims.end());
  result_dims.reserve(expanded_dims.size() + shape.dimensions_size() - 1);
  result_dims.insert(result_dims.end(), shape.dimensions().begin() + 1,
                     shape.dimensions().end());
  return MakeReshapeHlo(
      ShapeUtil::MakeShape(shape.element_type(), result_dims), operand);
}
// Reshapes `operand` with the dimensions listed in `dims_to_elide` removed.
absl::StatusOr<HloInstruction*> ElideDegenerateDims(
    HloInstruction* operand, absl::Span<const int64_t> dims_to_elide) {
  auto keep_dim = [&](int64_t dim) {
    return !absl::c_linear_search(dims_to_elide, dim);
  };
  Shape result_shape = ShapeUtil::FilterDimensions(keep_dim, operand->shape());
  return MakeReshapeHlo(result_shape, operand);
}
// Reshapes `operand` with a size-1 dimension inserted at each (sorted) output
// index in `dims_to_insert`; the original dimensions fill the remaining
// output positions in order.
absl::StatusOr<HloInstruction*> InsertDegenerateDims(
    HloInstruction* operand, absl::Span<const int64_t> dims_to_insert) {
  CHECK(absl::c_is_sorted(dims_to_insert));
  const Shape& shape = operand->shape();
  const int64_t output_rank =
      shape.dimensions_size() + static_cast<int64_t>(dims_to_insert.size());
  for (int64_t dim : dims_to_insert) {
    CHECK_LT(dim, output_rank);
  }
  std::vector<int64_t> output_dims;
  output_dims.reserve(output_rank);
  // Walk the output positions, consuming either the next insertion point or
  // the next original dimension.
  int64_t next_operand_dim = 0;
  size_t next_insert = 0;
  for (int64_t i = 0; i < output_rank; ++i) {
    if (next_insert < dims_to_insert.size() &&
        dims_to_insert[next_insert] == i) {
      output_dims.push_back(1);
      ++next_insert;
    } else {
      output_dims.push_back(shape.dimensions(next_operand_dim));
      ++next_operand_dim;
    }
  }
  return MakeReshapeHlo(
      ShapeUtil::MakeShape(shape.element_type(), output_dims), operand);
}
// Pads a rank-1 `operand` with `zeros_to_prepend` zeros in front and
// `zeros_to_append` zeros behind, via a kPad with a scalar zero.
absl::StatusOr<HloInstruction*> PadVectorWithZeros(HloInstruction* operand,
                                                   int64_t zeros_to_prepend,
                                                   int64_t zeros_to_append) {
  CHECK_EQ(operand->shape().dimensions_size(), 1);
  PaddingConfig padding_config;
  PaddingConfig::PaddingConfigDimension* dim = padding_config.add_dimensions();
  dim->set_edge_padding_low(zeros_to_prepend);
  dim->set_edge_padding_high(zeros_to_append);
  HloInstruction* zero =
      operand->parent()->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(operand->shape().element_type())));
  return MakePadHlo(operand, zero, padding_config);
}
// Convenience overload: builds the target shape from `element_type` and
// `broadcast_dimensions`, then delegates to the Shape-based overload.
HloInstruction* BroadcastZeros(HloComputation* computation,
                               PrimitiveType element_type,
                               absl::Span<const int64_t> broadcast_dimensions) {
  const Shape target_shape =
      ShapeUtil::MakeShape(element_type, broadcast_dimensions);
  return BroadcastZeros(computation, target_shape);
}
// Builds a constant of `broadcast_shape` filled with zeros: a scalar zero of
// the shape's element type, broadcast to the full shape.
HloInstruction* BroadcastZeros(HloComputation* computation,
                               const Shape& broadcast_shape) {
  HloInstruction* scalar_zero =
      computation->AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::Zero(broadcast_shape.element_type())));
  return MakeBroadcastHlo(scalar_zero, {}, broadcast_shape);
}
// Builds a constant of shape (element_type, broadcast_dimensions) filled with
// ones: a scalar one broadcast to the requested dimensions.
HloInstruction* BroadcastOnes(HloComputation* computation,
                              PrimitiveType element_type,
                              absl::Span<const int64_t> broadcast_dimensions) {
  HloInstruction* scalar_one = computation->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::One(element_type)));
  return MakeBroadcastHlo(scalar_one, {}, broadcast_dimensions);
}
// Wraps `fused` in a new fusion instruction of the given kind and replaces
// `fused` with the fusion in its parent computation.
absl::StatusOr<HloInstruction*> MakeFusionInstruction(
    HloInstruction* fused, HloInstruction::FusionKind kind) {
  HloComputation* computation = fused->parent();
  HloInstruction* fusion = computation->AddInstruction(
      HloInstruction::CreateFusion(fused->shape(), kind, fused));
  TF_RETURN_IF_ERROR(computation->ReplaceInstruction(fused, fusion));
  return fusion;
}
// Recursively builds a placeholder value of `shape`: arrays become a zero
// broadcast to the array shape; tuples are built elementwise from dummies.
HloInstruction* CreateDummyOp(HloComputation::Builder* b, const Shape& shape) {
  if (shape.IsArray()) {
    HloInstruction* scalar_zero = b->AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::Zero(shape.element_type())));
    return b->AddInstruction(
        HloInstruction::CreateBroadcast(shape, scalar_zero, {}));
  }
  CHECK(shape.IsTuple());
  std::vector<HloInstruction*> elements;
  elements.reserve(shape.tuple_shapes_size());
  for (const Shape& element_shape : shape.tuple_shapes()) {
    elements.push_back(CreateDummyOp(b, element_shape));
  }
  return b->AddInstruction(HloInstruction::CreateTuple(elements));
}
// Builds a computation named `name` with one parameter per shape in `domain`
// (named "param.<i>") whose root is a dummy value of shape `range`.
absl::StatusOr<std::unique_ptr<HloComputation>> CreateComputationWithSignature(
    absl::Span<const Shape* const> domain, const Shape& range,
    absl::string_view name) {
  HloComputation::Builder builder{name};
  for (int64_t i = 0; i < static_cast<int64_t>(domain.size()); ++i) {
    builder.AddInstruction(
        HloInstruction::CreateParameter(i, *domain[i], StrCat("param.", i)));
  }
  // The dummy root gives the computation the requested result shape.
  CreateDummyOp(&builder, range);
  return builder.Build();
}
// Reshapes away the (size-1) dimension at `index_to_remove`, keeping every
// other dimension bound in order.
HloInstruction* CreateDegenerateRemovingReshape(HloInstruction* hlo,
                                                const int64_t index_to_remove) {
  const Shape& input_shape = hlo->shape();
  std::vector<int64_t> kept_dims;
  kept_dims.reserve(input_shape.rank() - 1);
  for (int64_t i = 0; i < input_shape.rank(); ++i) {
    if (i != index_to_remove) {
      kept_dims.push_back(input_shape.dimensions(i));
    }
  }
  Shape result_shape =
      ShapeUtil::MakeShape(input_shape.element_type(), kept_dims);
  return hlo->AddInstruction(HloInstruction::CreateReshape(result_shape, hlo));
}
// Reshapes `hlo` with a size-1 dimension inserted at `index_to_add`;
// `index_to_add == rank` appends the degenerate dimension at the end.
HloInstruction* CreateDegenerateAddingReshape(HloInstruction* hlo,
                                              const int index_to_add) {
  Shape input_shape = hlo->shape();
  std::vector<int64_t> dims;
  // The output has one MORE dimension than the input. The previous
  // `rank() - 1` reservation was copy-pasted from the dimension-removing
  // variant: it forced a reallocation and, for a rank-0 input, underflowed
  // to a huge size_t.
  dims.reserve(input_shape.rank() + 1);
  for (int64_t index = 0; index < input_shape.rank(); index++) {
    if (index == index_to_add) {
      dims.push_back(1);
    }
    int64_t dim_size = input_shape.dimensions(index);
    dims.push_back(dim_size);
  }
  // Insertion at the very end is never reached by the loop above.
  if (index_to_add == input_shape.rank()) {
    dims.push_back(1);
  }
  Shape new_shape = ShapeUtil::MakeShape(input_shape.element_type(), dims);
  return hlo->AddInstruction(HloInstruction::CreateReshape(new_shape, hlo));
}
// Rewrites a reshape that only inserts/deletes size-1 dimensions into a chain
// of single-dimension reshapes (one per inserted/deleted dimension). Returns
// the last instruction of the chain, or nullptr when `inst` is not such a
// reshape or the chain would be a single reshape (nothing to expand).
HloInstruction* ExpandDegenerateReshape(HloInstruction* inst) {
  std::optional<ShapeUtil::ShapeEqualityDescriptor> reshape_degenerate =
      inst->ReshapeMerelyInsertsOrDeletes1SizedDimensions();
  if (reshape_degenerate.has_value()) {
    // A single insertion or a single deletion is already one reshape;
    // expanding would change nothing, so bail out.
    if (reshape_degenerate->deleted_dimensions.empty() &&
        reshape_degenerate->inserted_dimensions.size() == 1) {
      return nullptr;
    }
    if (reshape_degenerate->inserted_dimensions.empty() &&
        reshape_degenerate->deleted_dimensions.size() == 1) {
      return nullptr;
    }
    // Delete from highest index to lowest so earlier removals do not shift
    // the indices of later ones.
    absl::c_reverse(reshape_degenerate->deleted_dimensions);
    HloInstruction* degenerate_removing_hlo = nullptr;
    if (!reshape_degenerate->deleted_dimensions.empty()) {
      degenerate_removing_hlo = CreateDegenerateRemovingReshape(
          inst->mutable_operand(0), reshape_degenerate->deleted_dimensions[0]);
      for (int64_t r = 1; r < reshape_degenerate->deleted_dimensions.size();
           r++) {
        degenerate_removing_hlo = CreateDegenerateRemovingReshape(
            degenerate_removing_hlo, reshape_degenerate->deleted_dimensions[r]);
      }
    }
    // Insertions chain off the deletion chain (or the original operand if
    // nothing was deleted), applied in ascending index order.
    HloInstruction* degenerate_adding_hlo = degenerate_removing_hlo != nullptr
                                                ? degenerate_removing_hlo
                                                : inst->mutable_operand(0);
    if (!reshape_degenerate->inserted_dimensions.empty()) {
      for (int64_t a = 0; a < reshape_degenerate->inserted_dimensions.size();
           a++) {
        degenerate_adding_hlo = CreateDegenerateAddingReshape(
            degenerate_adding_hlo, reshape_degenerate->inserted_dimensions[a]);
      }
    }
    return degenerate_adding_hlo;
  }
  return nullptr;
}
} | #include "xla/service/hlo_creation_utils.h"
#include <memory>
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
namespace m = match;
class HloCreationUtilsTest : public HloTestBase {
protected:
std::unique_ptr<VerifiedHloModule> CreateModuleWithProgramShape(
PrimitiveType primitive_type, absl::Span<const int64_t> input_shape_dims,
absl::Span<const int64_t> output_shape_dims, HloInstruction** param,
HloComputation** entry_computation) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type, output_shape_dims);
auto module = CreateNewVerifiedModule("test");
*entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape}, output_shape, "entry")
.value());
*param = (*entry_computation)->parameter_instruction(0);
return module;
}
std::unique_ptr<VerifiedHloModule> CreateModuleWithProgramShape(
PrimitiveType primitive_type, absl::Span<const int64_t> input_shape_dims,
absl::Span<const int64_t> output_shape_dims, HloInstruction** param,
HloComputation** entry_computation, PrimitiveType primitive_type_output) {
Shape input_shape = ShapeUtil::MakeShape(primitive_type, input_shape_dims);
Shape output_shape =
ShapeUtil::MakeShape(primitive_type_output, output_shape_dims);
auto module = CreateNewVerifiedModule("test");
*entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape}, output_shape, "entry")
.value());
*param = (*entry_computation)->parameter_instruction(0);
return module;
}
};
TEST_F(HloCreationUtilsTest, CollapseFirst1Dim) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{2}, ¶m,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_1_dims_collapsed,
CollapseFirstNDims(param, 1));
entry_computation->set_root_instruction(first_1_dims_collapsed);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({3, 4})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR1<int32_t>({3, 4}));
}
TEST_F(HloCreationUtilsTest, CollapseFirst2Dims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(
S32, {2, 3, 2}, {6, 2}, ¶m,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_2_dims_collapsed,
CollapseFirstNDims(param, 2));
entry_computation->set_root_instruction(first_2_dims_collapsed);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}},
{{-1, -2}, {-3, -4}, {-5, -6}}})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<int32_t>(
{{1, 2}, {3, 4}, {5, 6}, {-1, -2}, {-3, -4}, {-5, -6}}));
}
TEST_F(HloCreationUtilsTest, Prepend1DegenerateDim) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{1, 2},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_1_degenerate_dim_prepended,
PrependDegenerateDims(param, 1));
entry_computation->set_root_instruction(with_1_degenerate_dim_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({9, 10})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{9, 10}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{1, 1, 2},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_2_degenerate_dims_prepended,
PrependDegenerateDims(param, 2));
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({9, 10})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR3<int32_t>({{{9, 10}}}));
}
TEST_F(HloCreationUtilsTest, Prepend2DegenerateDimsToScalar) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{1, 1},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * with_2_degenerate_dims_prepended,
PrependDegenerateDims(param, 2));
entry_computation->set_root_instruction(with_2_degenerate_dims_prepended);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(9)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{9}}));
}
TEST_F(HloCreationUtilsTest, ExpandFirstDimInto3Dims) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {6},
{3, 1, 2},
¶m, &entry_computation);
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * first_dim_expanded,
ExpandFirstDimIntoNDims(param, {3, 1, 2}));
entry_computation->set_root_instruction(first_dim_expanded);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR3<int32_t>({{{1, 2}}, {{3, 4}}, {{5, 6}}}));
}
TEST_F(HloCreationUtilsTest, PadVectorWithZeros) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2},
{6}, ¶m,
&entry_computation);
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * zero_padded_param,
PadVectorWithZeros(param, 3, 1));
entry_computation->set_root_instruction(zero_padded_param);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR1<int32_t>({3, 4})}));
CHECK_EQ(result_literal, LiteralUtil::CreateR1<int32_t>({0, 0, 0, 3, 4, 0}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_S32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
¶m, &entry_computation);
HloInstruction* zeros =
BroadcastZeros(module->entry_computation(), S32, {2, 2});
entry_computation->set_root_instruction(zeros);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, BroadcastZeros_F32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {},
{2, 2},
¶m, &entry_computation);
HloInstruction* zeros =
BroadcastZeros(module->entry_computation(), F32, {2, 2});
entry_computation->set_root_instruction(zeros);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBitcastConvertToHlo_S32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2, 2},
{2, 2},
¶m, &entry_computation, F32);
auto* input = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})));
HloInstruction* output = MakeBitcastConvertToHlo(input, F32);
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeIotaHlo_I32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
¶m, &entry_computation, F32);
HloInstruction* output = MakeIotaHlo(module->entry_computation(),
ShapeUtil::MakeShape(F32, {2, 2}), 0);
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0.0)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {1.0f, 1.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBroadcast_F32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {},
{2, 2},
¶m, &entry_computation);
auto* input = MakeR0ConstantHlo<float>(module->entry_computation(), 0);
HloInstruction* output = MakeBroadcastHlo(input, {}, {2, 2});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<float>(0.0f)}));
CHECK_EQ(result_literal,
LiteralUtil::CreateR2<float>({{0.0f, 0.0f}, {0.0f, 0.0f}}));
}
TEST_F(HloCreationUtilsTest, MakeBroadcast_Shape_I32) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {},
{2, 2},
¶m, &entry_computation);
auto* input = MakeR0ConstantHlo<int32_t>(module->entry_computation(), 0);
HloInstruction* output =
MakeBroadcastHlo(input, {}, ShapeUtil::MakeShape(S32, {2, 2}));
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {LiteralUtil::CreateR0<int32_t>(0.0)}));
CHECK_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleCrashesWithEmptyOperands) {
EXPECT_DEATH(MaybeMakeTuple({}), "");
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleForwardsSingleElement) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(S32, {2, 2},
{2, 2},
¶m, &entry_computation);
HloInstruction* output = MaybeMakeTuple({param});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module,
{LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}})}));
EXPECT_EQ(result_literal, LiteralUtil::CreateR2<int32_t>({{0, 0}, {0, 0}}));
}
TEST_F(HloCreationUtilsTest, MaybeMakeTupleTuplizesMultipleOperands) {
Shape input_shape0 = ShapeUtil::MakeShape(S32, {2});
Shape input_shape1 = ShapeUtil::MakeShape(F32, {3, 3});
Shape output_shape =
ShapeUtil::MakeTupleShapeWithPtrs({&input_shape1, &input_shape0});
auto module = CreateNewVerifiedModule("test");
HloComputation* entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape0, &input_shape1},
output_shape, "entry")
.value());
HloInstruction* output =
MaybeMakeTuple({entry_computation->parameter_instruction(1),
entry_computation->parameter_instruction(0)});
entry_computation->set_root_instruction(output);
HloEvaluator evaluator;
Literal input0 = LiteralUtil::CreateR1<int32_t>({{2, 4}});
Literal input1 =
LiteralUtil::CreateR2<float>({{3, 2, 1}, {4, 5, 6}, {9, 8, 7}});
TF_ASSERT_OK_AND_ASSIGN(
Literal result_literal,
evaluator.Evaluate(*module, {input0.Clone(), input1.Clone()}));
Literal expected_result = LiteralUtil::MakeTuple({&input1, &input0});
EXPECT_EQ(result_literal, expected_result);
}
TEST_F(HloCreationUtilsTest, DynamicUpdateSliceVectorStartIndices) {
auto module = CreateNewVerifiedModule("dus-creation-test");
auto operand_array = std::make_unique<Array2D<double>>(2, 3);
operand_array->FillUnique(1.0);
auto operand_literal =
LiteralUtil::CreateR2FromArray2D<double>(*operand_array);
Shape input_shape = ShapeUtil::MakeShape(F64, {2, 3});
Shape update_shape = ShapeUtil::MakeShape(F64, {2, 2});
HloComputation* entry_computation = module->AddEntryComputation(
CreateComputationWithSignature({&input_shape, &update_shape}, input_shape,
"entry")
.value());
auto zero = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
auto one = module->entry_computation()->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
auto update = LiteralUtil::CreateR2<double>({{-2.0, -3.0}, {-6.0, -7.0}});
HloInstruction* dus =
MakeDynamicUpdateSliceHlo(entry_computation->parameter_instruction(0),
entry_computation->parameter_instruction(1),
{zero, one})
.value();
entry_computation->set_root_instruction(dus);
HloEvaluator evaluator;
TF_ASSERT_OK_AND_ASSIGN(
Literal result, evaluator.Evaluate(*module, {&operand_literal, &update}));
auto expected = LiteralUtil::CreateR2<double>({
{1, -2, -3},
{5, -6, -7},
});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloCreationUtilsTest, ExpandDegenerateReshape) {
const char* hlo_string = R"(
HloModule module
ENTRY test {
param = f32[12,1,10,32,8] parameter(0)
ROOT reshape = f32[1,12,10,1,32,1,8] reshape(param)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto expanded =
ExpandDegenerateReshape(module->entry_computation()->root_instruction());
EXPECT_THAT(expanded, GmockMatch(m::Reshape(m::Reshape(
m::Reshape(m::Reshape(m::Parameter(0)))))));
}
TEST_F(HloCreationUtilsTest, ReduceWindow) {
const Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
HloComputation* addition = [&] {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module->AddEmbeddedComputation(embedded_builder.Build());
}();
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
Shape expected_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(HloInstruction * reduce_window,
MakeReduceWindowHlo(a_param, init, window, addition));
module->entry_computation()->set_root_instruction(
reduce_window,
true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
EXPECT_EQ(module->entry_computation()->root_instruction()->shape(),
expected_output_shape);
}
TEST_F(HloCreationUtilsTest, ReduceWindowBinaryOpcode) {
const Shape scalar_shape = ShapeUtil::MakeShape(S32, {});
std::unique_ptr<HloModule> module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 4, 4});
Shape expected_output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2});
Window window;
WindowDimension* batch_dim = window.add_dimensions();
batch_dim->set_size(1);
batch_dim->set_stride(1);
batch_dim->set_padding_low(0);
batch_dim->set_padding_high(0);
batch_dim->set_window_dilation(1);
batch_dim->set_base_dilation(1);
for (int64_t i = 0; i < 2; ++i) {
WindowDimension* dim = window.add_dimensions();
dim->set_size(2);
dim->set_stride(2);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, input_shape, "A"));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * reduce_window,
MakeReduceWindowHlo(a_param, init, window, HloOpcode::kAdd));
module->entry_computation()->set_root_instruction(
reduce_window,
true);
*module->mutable_entry_computation_layout() =
module->compute_computation_layout();
EXPECT_EQ(module->entry_computation()->root_instruction()->shape(),
expected_output_shape);
}
TEST_F(HloCreationUtilsTest, DynamicBroadcastShape) {
HloInstruction* param;
HloComputation* entry_computation;
auto module = CreateModuleWithProgramShape(F32, {10},
{10}, ¶m,
&entry_computation);
param->mutable_shape()->set_dynamic_dimension(0, true);
HloInstruction* one_constant = MakeScalarLike(param, 1.0f);
EXPECT_TRUE(one_constant->shape().is_static());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_creation_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_creation_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d211a25-07e9-497a-8042-7901507b166f | cpp | tensorflow/tensorflow | elementwise_binary | tensorflow/lite/experimental/shlo/legacy/src/elementwise_binary.cc | tensorflow/lite/experimental/shlo/legacy/test/elementwise_binary_test.cc | #include <cmath>
#include <cstddef>
#include <type_traits>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/util.h"
namespace stablehlo {
namespace {
template <typename Value>
absl::Status CheckParameters(const Value& lhs, const Value& rhs,
Value& result) {
if (!(lhs.baseline_type() == rhs.baseline_type() and
lhs.baseline_type() == result.baseline_type())) {
return absl::InvalidArgumentError(
"Constraint violation: baseline_type(on_true) = "
"baseline_type(on_false) = baseline_type(result)");
}
if constexpr (std::is_same_v<Value, QuantizedTensor>) {
if (!(lhs.is_per_tensor_quantized() and rhs.is_per_tensor_quantized() and
result.is_per_tensor_quantized())) {
return absl::InvalidArgumentError("Expected per=tensor quantization");
}
}
if (lhs.layout().has_strides() || rhs.layout().has_strides() ||
result.layout().has_strides()) {
return absl::InvalidArgumentError("Stides not supported yet");
}
return absl::OkStatus();
}
// Applies `op` elementwise over `lhs` and `rhs`, writing into `result`.
//
// `Value` is either `Tensor` (the op runs directly on the stored values) or
// `QuantizedTensor` (each element pair is dequantized to `expressed_type`,
// combined with `op`, then requantized into `storage_type`). All three
// tensors must first pass `CheckParameters` (matching baseline types,
// per-tensor quantization only, no strides).
template <ElementType storage_type, ElementType expressed_type, typename Value,
          typename Op>
absl::Status ElementwiseBinaryOp(const Value& lhs, const Value& rhs,
                                 Value& result, Op&& op) {
  if (auto check = CheckParameters(lhs, rhs, result); !check.ok()) {
    return check;
  }
  using S = Storage<storage_type>;
  auto lhs_buffer = lhs.buffer();
  auto rhs_buffer = rhs.buffer();
  auto result_buffer = result.buffer();
  size_t n = lhs.num_elements();
  if constexpr (std::is_same_v<Value, Tensor>) {
    if (storage_type != result.element_type()) {
      return absl::InvalidArgumentError("Unexpected tensor element type");
    }
    // Plain tensors: apply the op directly on the stored representation.
    for (size_t i = 0; i < n; ++i) {
      auto x = S::Get(lhs_buffer, i);
      auto y = S::Get(rhs_buffer, i);
      auto z = op(x, y);
      S::Set(result_buffer, i, z);
    }
  } else {
    static_assert(std::is_same_v<Value, QuantizedTensor>);
    if (storage_type != result.storage_type()) {
      return absl::InvalidArgumentError("Unexpected storage type");
    } else if (expressed_type != result.expressed_type()) {
      return absl::InvalidArgumentError("Unexpected expressed type");
    }
    // Per-tensor quantization parameters (CheckParameters has already
    // rejected anything that is not per-tensor quantized).
    const QuantizedParameter& lhs_quant_param =
        lhs.type().element_type().parameters(0);
    const QuantizedParameter& rhs_quant_param =
        rhs.type().element_type().parameters(0);
    const QuantizedParameter& result_quant_param =
        result.type().element_type().parameters(0);
    using ET = typename Storage<expressed_type>::Type;
    // Hoist the reciprocal of the result scale so the loop multiplies
    // instead of dividing per element.
    ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale);
    for (size_t i = 0; i < n; ++i) {
      auto lhs_storage = S::Get(lhs_buffer, i);
      auto rhs_storage = S::Get(rhs_buffer, i);
      auto result_storage =
          DequantizeOpQuantizePartial<storage_type, expressed_type>(
              lhs_storage, rhs_storage, lhs_quant_param, rhs_quant_param,
              result_scale_inv, result_quant_param.zero_point, op);
      S::Set(result_buffer, i, result_storage);
    }
    // Final fix-up pass over the partially-quantized values; exact
    // semantics live in CompleteQuantization.
    if (auto status = CompleteQuantization<storage_type>(result);
        !status.ok()) {
      return status;
    }
  }
  return absl::OkStatus();
}
// Defines `name(lhs, rhs, result)` over plain tensors of `element_type`,
// evaluating `expression` for each element pair bound to `x` and `y`.
#define DEFINE_ELEMENTWISE_BINARY_OP(name, element_type, expression)        \
  absl::Status name(const Tensor& lhs, const Tensor& rhs, Tensor& result) { \
    return ElementwiseBinaryOp<element_type, element_type>(                 \
        lhs, rhs, result, [](auto x, auto y) { return expression; });       \
  }
// Defines `name(lhs, rhs, result)` over quantized tensors: elements are
// dequantized to `expressed_type`, combined with `expression`, and
// requantized into `storage_type`.
#define DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name, storage_type,          \
                                               expressed_type, expression)  \
  absl::Status name(const QuantizedTensor& lhs, const QuantizedTensor& rhs, \
                    QuantizedTensor& result) {                              \
    return ElementwiseBinaryOp<storage_type, expressed_type>(               \
        lhs, rhs, result, [](auto x, auto y) { return expression; });       \
  }
// Stamps out the per-element-type implementations (`name_i1`, `name_si8`,
// ..., and the quantized `name_q_*` variants) for one operation. The
// CALL_BINARY_OP_* dispatchers below route to these by element type.
#define DEFINE_ELEMENTWISE_BINARY_OP_BOOL(name, expression) \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_i1, ElementType::kI1, expression);
#define DEFINE_ELEMENTWISE_BINARY_OP_INT(name, expression)                   \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_si8, ElementType::kSI8, expression);   \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_si16, ElementType::kSI16, expression); \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_si32, ElementType::kSI32, expression);
// The float family also defines every (storage, expressed) quantized combo.
#define DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(name, expression)                  \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_bf16, ElementType::kBF16, expression);  \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_f16, ElementType::kF16, expression);    \
  DEFINE_ELEMENTWISE_BINARY_OP(name##_f32, ElementType::kF32, expression);    \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name##_q_si8_bf16, ElementType::kSI8, \
                                         ElementType::kBF16, expression);     \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name##_q_si8_f16, ElementType::kSI8, \
                                         ElementType::kF16, expression);      \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(name##_q_si8_f32, ElementType::kSI8, \
                                         ElementType::kF32, expression);      \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(                                     \
      name##_q_si16_bf16, ElementType::kSI16, ElementType::kBF16, expression); \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(                                     \
      name##_q_si16_f16, ElementType::kSI16, ElementType::kF16, expression);  \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(                                     \
      name##_q_si16_f32, ElementType::kSI16, ElementType::kF32, expression);  \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(                                     \
      name##_q_si32_bf16, ElementType::kSI32, ElementType::kBF16, expression); \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(                                     \
      name##_q_si32_f16, ElementType::kSI32, ElementType::kF16, expression);  \
  DEFINE_ELEMENTWISE_BINARY_QUANTIZED_OP(                                     \
      name##_q_si32_f32, ElementType::kSI32, ElementType::kF32, expression);
// Switch-case arms (one per supported element type) expanded inside the
// CALL_BINARY_OP_* dispatchers to forward to the per-type implementation.
#define CALL_BINARY_OP_BOOL_HELPER(name, lhs, rhs, result) \
  case ElementType::kI1:                                   \
    return name##_i1(lhs, rhs, result);
#define CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result) \
  case ElementType::kSI8:                                 \
    return name##_si8(lhs, rhs, result);                  \
  case ElementType::kSI16:                                \
    return name##_si16(lhs, rhs, result);                 \
  case ElementType::kSI32:                                \
    return name##_si32(lhs, rhs, result);
#define CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result) \
  case ElementType::kBF16:                                  \
    return name##_bf16(lhs, rhs, result);                   \
  case ElementType::kF16:                                   \
    return name##_f16(lhs, rhs, result);                    \
  case ElementType::kF32:                                   \
    return name##_f32(lhs, rhs, result);
// Dispatchers: switch on lhs.element_type() (CheckParameters guarantees rhs
// and result agree) and call the per-type function stamped out by the
// DEFINE_* macros; unsupported types yield InvalidArgumentError.
#define CALL_BINARY_OP_BOOL_INT(name, lhs, rhs, result)                      \
  {                                                                          \
    auto element_type = lhs.element_type();                                  \
    switch (element_type) {                                                  \
      CALL_BINARY_OP_BOOL_HELPER(name, lhs, rhs, result);                    \
      CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result);                     \
      default:                                                               \
        return absl::InvalidArgumentError("Unexpected tensor element type"); \
    }                                                                        \
  }
#define CALL_BINARY_OP_INT(name, lhs, rhs, result)                           \
  {                                                                          \
    auto element_type = lhs.element_type();                                  \
    switch (element_type) {                                                  \
      CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result);                     \
      default:                                                               \
        return absl::InvalidArgumentError("Unexpected tensor element type"); \
    }                                                                        \
  }
#define CALL_BINARY_OP_INT_FLOAT(name, lhs, rhs, result)                     \
  {                                                                          \
    auto element_type = lhs.element_type();                                  \
    switch (element_type) {                                                  \
      CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result);                     \
      CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result);                   \
      default:                                                               \
        return absl::InvalidArgumentError("Unexpected tensor element type"); \
    }                                                                        \
  }
#define CALL_BINARY_OP_FLOAT(name, lhs, rhs, result)                         \
  {                                                                          \
    auto element_type = lhs.element_type();                                  \
    switch (element_type) {                                                  \
      CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result);                   \
      default:                                                               \
        return absl::InvalidArgumentError("Unexpected tensor element type"); \
    }                                                                        \
  }
#define CALL_BINARY_OP_BOOL_INT_FLOAT(name, lhs, rhs, result)                \
  {                                                                          \
    auto element_type = lhs.element_type();                                  \
    switch (element_type) {                                                  \
      CALL_BINARY_OP_BOOL_HELPER(name, lhs, rhs, result);                    \
      CALL_BINARY_OP_INT_HELPER(name, lhs, rhs, result);                     \
      CALL_BINARY_OP_FLOAT_HELPER(name, lhs, rhs, result);                   \
      default:                                                               \
        return absl::InvalidArgumentError("Unexpected tensor element type"); \
    }                                                                        \
  }
// Quantized dispatcher: two-level switch over (storage_type, expressed_type)
// of lhs, forwarding to the matching `name_q_<storage>_<expressed>` variant.
#define CALL_BINARY_QUANTIZED_OP(name, lhs, rhs, result)                    \
  {                                                                         \
    auto storage_type = lhs.storage_type();                                 \
    auto expressed_type = lhs.expressed_type();                             \
    switch (storage_type) {                                                 \
      case ElementType::kSI8:                                               \
        switch (expressed_type) {                                           \
          case ElementType::kBF16:                                          \
            return name##_q_si8_bf16(lhs, rhs, result);                     \
          case ElementType::kF16:                                           \
            return name##_q_si8_f16(lhs, rhs, result);                      \
          case ElementType::kF32:                                           \
            return name##_q_si8_f32(lhs, rhs, result);                      \
          default:                                                          \
            return absl::InvalidArgumentError("Unexpected expressed type"); \
        }                                                                   \
      case ElementType::kSI16:                                              \
        switch (expressed_type) {                                           \
          case ElementType::kBF16:                                          \
            return name##_q_si16_bf16(lhs, rhs, result);                    \
          case ElementType::kF16:                                           \
            return name##_q_si16_f16(lhs, rhs, result);                     \
          case ElementType::kF32:                                           \
            return name##_q_si16_f32(lhs, rhs, result);                     \
          default:                                                          \
            return absl::InvalidArgumentError("Unexpected expressed type"); \
        }                                                                   \
      case ElementType::kSI32:                                              \
        switch (expressed_type) {                                           \
          case ElementType::kBF16:                                          \
            return name##_q_si32_bf16(lhs, rhs, result);                    \
          case ElementType::kF16:                                           \
            return name##_q_si32_f16(lhs, rhs, result);                     \
          case ElementType::kF32:                                           \
            return name##_q_si32_f32(lhs, rhs, result);                     \
          default:                                                          \
            return absl::InvalidArgumentError("Unexpected expressed type"); \
        }                                                                   \
      default:                                                              \
        return absl::InvalidArgumentError("Unexpected storage type");       \
    }                                                                       \
  }
}
namespace {
// Boolean "addition" is logical OR; numeric types use arithmetic +.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Add, x or y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Add, x + y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Add, x + y);
}  // namespace
// Elementwise addition; dispatches on the element type of `lhs`.
absl::Status Add(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT_FLOAT(Add, lhs, rhs, result);
}
absl::Status Add(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                 QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Add, lhs, rhs, result);
}
namespace {
// Logical AND for booleans, bitwise AND for signed integers.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(And, x&& y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(And, x& y);
}  // namespace
// Elementwise AND; only boolean and integer element types are supported.
absl::Status And(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT(And, lhs, rhs, result);
}
namespace {
// All float flavors (bf16/f16/f32) are widened to float before std::atan2.
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Atan2, std::atan2(static_cast<float>(x),
                                                     static_cast<float>(y)));
}  // namespace
// Elementwise atan2(lhs, rhs); float-family element types only.
absl::Status Atan2(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_FLOAT(Atan2, lhs, rhs, result);
}
absl::Status Atan2(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                   QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Atan2, lhs, rhs, result);
}
namespace {
// Integer division truncates toward zero.
// NOTE(review): integer x / 0 is undefined behavior here; callers are
// assumed to guarantee nonzero divisors — confirm upstream validation.
DEFINE_ELEMENTWISE_BINARY_OP_INT(Divide, x / y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Divide, x / y);
}  // namespace
// Elementwise division; integer and float-family element types.
absl::Status Divide(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_INT_FLOAT(Divide, lhs, rhs, result);
}
absl::Status Divide(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                    QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Divide, lhs, rhs, result);
}
namespace {
// For booleans max(x, y) is logical OR.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Maximum, x or y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Maximum, (x > y) ? x : y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Maximum, (x > y) ? x : y);
}  // namespace
// Elementwise maximum over all supported element types.
absl::Status Maximum(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT_FLOAT(Maximum, lhs, rhs, result);
}
absl::Status Maximum(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                     QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Maximum, lhs, rhs, result);
}
namespace {
// For booleans min(x, y) is logical AND.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Minimum, x and y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Minimum, (x > y) ? y : x);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Minimum, (x > y) ? y : x);
}  // namespace
// Elementwise minimum over all supported element types.
absl::Status Minimum(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT_FLOAT(Minimum, lhs, rhs, result);
}
absl::Status Minimum(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                     QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Minimum, lhs, rhs, result);
}
namespace {
// Boolean multiplication is logical AND; numeric types use arithmetic *.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Multiply, x and y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Multiply, x* y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Multiply, x* y);
}  // namespace
// Elementwise multiplication over all supported element types.
absl::Status Multiply(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT_FLOAT(Multiply, lhs, rhs, result);
}
absl::Status Multiply(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                      QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Multiply, lhs, rhs, result);
}
namespace {
// Logical OR for booleans, bitwise OR for signed integers.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Or, x or y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Or, x | y);
}  // namespace
// Elementwise OR; only boolean and integer element types are supported.
absl::Status Or(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT(Or, lhs, rhs, result);
}
namespace {
// Exact integer exponentiation by squaring.
//
// The previous implementation routed integer Power through float std::pow,
// which silently loses precision once intermediate results exceed the
// 24-bit float mantissa (well inside the kSI32 range). Negative exponents
// reproduce the old truncate-toward-zero behavior of casting the float
// result back to an integer.
template <typename Int>
inline Int IntPower(Int base, Int exponent) {
  if (exponent < 0) {
    // |base| > 1 yields magnitude < 1, which truncates to 0; +/-1 cycle.
    if (base == 1) return 1;
    if (base == -1) return (exponent & 1) ? -1 : 1;
    return 0;  // NOTE: base == 0 with a negative exponent is undefined.
  }
  Int result = 1;
  while (exponent > 0) {
    if (exponent & 1) result *= base;
    exponent >>= 1;
    // Avoid squaring past the last needed bit of the exponent.
    if (exponent > 0) base *= base;
  }
  return result;
}
DEFINE_ELEMENTWISE_BINARY_OP_INT(Power, IntPower(x, y));
// Use the std::pow float overload instead of std::powf: the latter is not
// reliably declared by all standard-library implementations.
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Power, std::pow(static_cast<float>(x),
                                                   static_cast<float>(y)));
}  // namespace
// Elementwise power: exact integer exponentiation for integer tensors,
// float pow for the float-family tensors.
absl::Status Power(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_INT_FLOAT(Power, lhs, rhs, result);
}
absl::Status Power(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                   QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Power, lhs, rhs, result);
}
namespace {
// Integer % truncates toward zero (result takes the sign of the dividend),
// matching std::fmod used for the float family.
// NOTE(review): integer x % 0 is undefined behavior; assumes callers
// guarantee nonzero divisors.
DEFINE_ELEMENTWISE_BINARY_OP_INT(Remainder, x % y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Remainder, std::fmod(static_cast<float>(x),
                                                        static_cast<float>(y)));
}  // namespace
// Elementwise remainder; integer and float-family element types.
absl::Status Remainder(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_INT_FLOAT(Remainder, lhs, rhs, result);
}
absl::Status Remainder(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                       QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Remainder, lhs, rhs, result);
}
namespace {
// NOTE(review): shifting by >= the operand's bit width (or by a negative
// amount) is undefined behavior; assumes callers keep amounts in range.
DEFINE_ELEMENTWISE_BINARY_OP_INT(ShiftLeft, x << y);
}  // namespace
// Elementwise left shift for signed integer tensors.
absl::Status ShiftLeft(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_INT(ShiftLeft, lhs, rhs, result);
}
namespace {
// Signed >> sign-extends (arithmetic shift) on the supported platforms;
// this is implementation-defined before C++20.
DEFINE_ELEMENTWISE_BINARY_OP_INT(ShiftRightArithmetic, x >> y);
}  // namespace
// Elementwise arithmetic right shift for signed integer tensors.
absl::Status ShiftRightArithmetic(const Tensor& lhs, const Tensor& rhs,
                                  Tensor& result) {
  CALL_BINARY_OP_INT(ShiftRightArithmetic, lhs, rhs, result);
}
namespace {
// Zero-filling right shift of a signed value: reinterpret as unsigned so
// the vacated high bits are filled with zeros instead of the sign bit.
template <typename Int>
inline Int ShiftRightLogical(Int value, Int amount) {
  using UInt = typename std::make_unsigned<Int>::type;
  const auto as_unsigned = static_cast<UInt>(value);
  return static_cast<Int>(as_unsigned >> amount);
}
// Bind the zero-filling helper above as the per-element expression.
DEFINE_ELEMENTWISE_BINARY_OP_INT(ShiftRightLogical, ShiftRightLogical(x, y));
}  // namespace
// Elementwise logical (zero-filling) right shift for signed integer tensors.
absl::Status ShiftRightLogical(const Tensor& lhs, const Tensor& rhs,
                               Tensor& result) {
  CALL_BINARY_OP_INT(ShiftRightLogical, lhs, rhs, result);
}
namespace {
// No boolean variant: subtraction is only defined for numeric types here.
DEFINE_ELEMENTWISE_BINARY_OP_INT(Subtract, x - y);
DEFINE_ELEMENTWISE_BINARY_OP_FLOAT(Subtract, x - y);
}  // namespace
// Elementwise subtraction; integer and float-family element types.
absl::Status Subtract(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_INT_FLOAT(Subtract, lhs, rhs, result);
}
absl::Status Subtract(const QuantizedTensor& lhs, const QuantizedTensor& rhs,
                      QuantizedTensor& result) {
  CALL_BINARY_QUANTIZED_OP(Subtract, lhs, rhs, result);
}
namespace {
// Logical XOR for booleans (`xor` keyword), bitwise XOR for integers.
DEFINE_ELEMENTWISE_BINARY_OP_BOOL(Xor, x xor y);
DEFINE_ELEMENTWISE_BINARY_OP_INT(Xor, x ^ y);
}  // namespace
// Elementwise XOR; only boolean and integer element types are supported.
absl::Status Xor(const Tensor& lhs, const Tensor& rhs, Tensor& result) {
  CALL_BINARY_OP_BOOL_INT(Xor, lhs, rhs, result);
}
} | #include <cmath>
#include <initializer_list>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/debug.h"
#include "tensorflow/lite/experimental/shlo/legacy/src/storage.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/matchers.h"
#include "tensorflow/lite/experimental/shlo/legacy/test/util.h"
namespace stablehlo {
namespace testing {
// Runs `op` on two tensors of `element_type` built from
// `input1_values`/`input2_values` and checks the result approximately
// matches `expected_values` (IsAlmostSame tolerates rounding differences).
template <ElementType element_type>
void test(absl::Status (*op)(const Tensor&, const Tensor&, Tensor&),
          std::initializer_list<DimensionSize>&& shape,
          std::vector<typename Storage<element_type>::Type>&& input1_values,
          std::vector<typename Storage<element_type>::Type>&& input2_values,
          std::vector<typename Storage<element_type>::Type>&& expected_values) {
  // Tensors view the vectors' storage; the vectors own the data.
  Tensor input1(TensorType(Shape(shape), element_type),
                std::data(input1_values));
  Tensor input2(TensorType(Shape(shape), element_type),
                std::data(input2_values));
  Tensor expected(TensorType(Shape(shape), element_type),
                  std::data(expected_values));
  std::vector<typename Storage<element_type>::Type> result_values(
      expected_values.size());
  Tensor result(TensorType(Shape(shape), element_type), result_values.data());
  ASSERT_OK(op(input1, input2, result));
  EXPECT_THAT(result, IsAlmostSame(expected))
      << "input1: " << input1 << "\ninput2: " << input2;
}
// Quantized counterpart of the helper above: quantizes the expressed-type
// inputs/expectations with `quantized_parameter`, runs `op` on the
// quantized tensors, and compares against the quantized expectation.
template <ElementType storage_type, ElementType expressed_type>
void test(
    absl::Status (*op)(const QuantizedTensor&, const QuantizedTensor&,
                       QuantizedTensor&),
    std::initializer_list<DimensionSize>&& shape,
    QuantizedParameter&& quantized_parameter,
    std::vector<typename Storage<expressed_type>::Type>&& input1_values,
    std::vector<typename Storage<expressed_type>::Type>&& input2_values,
    std::vector<typename Storage<expressed_type>::Type>&& expected_values) {
  auto input1_quant_values = QuantizeVector<storage_type, expressed_type>(
      input1_values, quantized_parameter);
  auto input2_quant_values = QuantizeVector<storage_type, expressed_type>(
      input2_values, quantized_parameter);
  auto expected_quant_values = QuantizeVector<storage_type, expressed_type>(
      expected_values, quantized_parameter);
  std::vector<typename Storage<storage_type>::Type> result_quant_values(
      expected_quant_values.size());
  // `quantized_parameter` is only moved after the three quantization calls
  // above have consumed it by reference.
  QuantizedTensorElementType element_type(storage_type, expressed_type,
                                          std::move(quantized_parameter));
  QuantizedTensor input1(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      input1_quant_values.data());
  QuantizedTensor input2(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      input2_quant_values.data());
  QuantizedTensor expected(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      expected_quant_values.data());
  QuantizedTensor result(
      QuantizedTensorType(Shape(shape),
                          QuantizedTensorElementType(element_type)),
      result_quant_values.data());
  ASSERT_OK(op(input1, input2, result));
  EXPECT_THAT(result, IsAlmostSame(expected))
      << "input1: " << input1 << "\ninput2: " << input2;
}
// Covers every supported plain element type for Add; the kI1 row checks
// that boolean "addition" behaves as logical OR (1 + 1 == 1).
TEST(ElementwiseBinary, Add) {
  test<ElementType::kI1>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 1, 1});
  test<ElementType::kSI8>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
  test<ElementType::kSI16>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
  test<ElementType::kSI32>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
  test<ElementType::kBF16>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
  test<ElementType::kF16>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
  test<ElementType::kF32>(Add, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 2, 1});
}
TEST(ElementwiseBinary, AddQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Add, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI16, ElementType::kBF16>(
Add, {4}, {.scale = 2, .zero_point = 2}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI32, ElementType::kBF16>(
Add, {4}, {.scale = 0.5, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 30, -10});
test<ElementType::kSI8, ElementType::kF16>(
Add, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI16, ElementType::kF16>(
Add, {4}, {.scale = 2, .zero_point = 2}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI32, ElementType::kF16>(
Add, {4}, {.scale = 0.5, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 30, -10});
test<ElementType::kSI8, ElementType::kF32>(
Add, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI16, ElementType::kF32>(
Add, {4}, {.scale = 2, .zero_point = 2}, {10, 0, 20, 0}, {0, 0, 10, -10},
{10, 0, 30, -10});
test<ElementType::kSI32, ElementType::kF32>(
Add, {4}, {.scale = 0.5, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 30, -10});
}
TEST(ElementwiseBinary, And) {
test<ElementType::kI1>(And, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {0, 0, 1, 0});
test<ElementType::kSI8>(And, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {1, 0, 4, 0});
test<ElementType::kSI16>(And, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {1, 0, 4, 0});
test<ElementType::kSI32>(And, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {1, 0, 4, 0});
}
TEST(ElementwiseBinary, Atan2) {
test<ElementType::kBF16>(Atan2, {4}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kF16>(Atan2, {4}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kF32>(Atan2, {4}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
}
TEST(ElementwiseBinary, Atan2Quantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Atan2, {4}, {.scale = 1e-1, .zero_point = 0}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI8, ElementType::kF16>(
Atan2, {4}, {.scale = 1e-1, .zero_point = 2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI8, ElementType::kF32>(
Atan2, {4}, {.scale = 1e-1, .zero_point = -2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI16, ElementType::kBF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 0}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI16, ElementType::kF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI16, ElementType::kF32>(
Atan2, {4}, {.scale = 1e-3, .zero_point = -2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI32, ElementType::kBF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 0}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI32, ElementType::kF16>(
Atan2, {4}, {.scale = 1e-2, .zero_point = 2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
test<ElementType::kSI32, ElementType::kF32>(
Atan2, {4}, {.scale = 1e-3, .zero_point = -2}, {3, 0, 5, 3}, {1, 1, 4, 1},
{1.24904577239825442582f, 0, 0.89605538457134395617f,
1.24904577239825442582f});
}
TEST(ElementwiseBinary, Divide) {
test<ElementType::kSI8>(Divide, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{1, 2, -1, -2});
test<ElementType::kSI16>(Divide, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{11, 18, -8, 15});
test<ElementType::kSI32>(Divide, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{11, 18, -8, 15});
test<ElementType::kBF16>(Divide, {4}, {22, 53, -33, -77}, {2, 4, 4, -5},
{11, 13.25, -8.25, 15.4});
test<ElementType::kF16>(Divide, {4}, {22, 53, -33, -77}, {2, 4, 4, -5},
{11, 13.25, -8.25, 15.4});
test<ElementType::kF32>(Divide, {4}, {22, 53, -33, -77}, {2, 4, 4, -5},
{11, 13.25, -8.25, 15.4});
}
TEST(ElementwiseBinary, DivideQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Divide, {4}, {.scale = 1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI8, ElementType::kF16>(
Divide, {4}, {.scale = 1, .zero_point = 5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI8, ElementType::kF32>(
Divide, {4}, {.scale = 1, .zero_point = -5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI16, ElementType::kBF16>(
Divide, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI16, ElementType::kF16>(
Divide, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI16, ElementType::kF32>(
Divide, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {111, 133.25, -83.25, 155.4});
test<ElementType::kSI32, ElementType::kBF16>(
Divide, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI32, ElementType::kF16>(
Divide, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {11, 13.25, -8.25, 15.4});
test<ElementType::kSI32, ElementType::kF32>(
Divide, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {111, 133.25, -83.25, 155.4});
}
TEST(ElementwiseBinary, Maximum) {
test<ElementType::kI1>(Maximum, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 1, 1});
test<ElementType::kSI8>(Maximum, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{2, 5, 3, 3});
test<ElementType::kSI16>(Maximum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{22, 55, 4, -5});
test<ElementType::kSI32>(Maximum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{22, 55, 4, -5});
test<ElementType::kBF16>(Maximum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 5.3, 4.4, -5.5});
test<ElementType::kF16>(Maximum, {4}, {22, 55, -33, -77},
{2.5, 3.5, 4.5, -5.5}, {22, 55, 4.5, -5.5});
test<ElementType::kF32>(Maximum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 5.3, 4.4, -5.5});
}
TEST(ElementwiseBinary, MaximumQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Maximum, {4}, {.scale = 1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI8, ElementType::kF16>(
Maximum, {4}, {.scale = 1, .zero_point = 5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI8, ElementType::kF32>(
Maximum, {4}, {.scale = 1, .zero_point = -5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI16, ElementType::kBF16>(
Maximum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI16, ElementType::kF16>(
Maximum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI16, ElementType::kF32>(
Maximum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {222, 533, 4, -5});
test<ElementType::kSI32, ElementType::kBF16>(
Maximum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI32, ElementType::kF16>(
Maximum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {22, 53, 4, -5});
test<ElementType::kSI32, ElementType::kF32>(
Maximum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {222, 533, 4, -5});
}
TEST(ElementwiseBinary, Minimum) {
test<ElementType::kI1>(Minimum, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{0, 0, 1, 0});
test<ElementType::kSI8>(Minimum, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{2, 2, -3, -7});
test<ElementType::kSI16>(Minimum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{2, 3, -33, -77});
test<ElementType::kSI32>(Minimum, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{2, 3, -33, -77});
test<ElementType::kBF16>(Minimum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 4.4, -3.3, -7.7});
test<ElementType::kF16>(Minimum, {4}, {22, 55, -33, -77},
{2.5, 3.5, 4.5, -5.5}, {2.5, 3.5, -33, -77});
test<ElementType::kF32>(Minimum, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {2.2, 4.4, -3.3, -7.7});
}
TEST(ElementwiseBinary, MinimumQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Minimum, {4}, {.scale = 1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI8, ElementType::kF16>(
Minimum, {4}, {.scale = 1, .zero_point = 5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI8, ElementType::kF32>(
Minimum, {4}, {.scale = 1, .zero_point = -5}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI16, ElementType::kBF16>(
Minimum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI16, ElementType::kF16>(
Minimum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI16, ElementType::kF32>(
Minimum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {2, 4, -333, -777});
test<ElementType::kSI32, ElementType::kBF16>(
Minimum, {4}, {.scale = 5e-1, .zero_point = 0}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI32, ElementType::kF16>(
Minimum, {4}, {.scale = 1e-1, .zero_point = 10}, {22, 53, -33, -77},
{2, 4, 4, -5}, {2, 4, -33, -77});
test<ElementType::kSI32, ElementType::kF32>(
Minimum, {4}, {.scale = 5e-2, .zero_point = -10}, {222, 533, -333, -777},
{2, 4, 4, -5}, {2, 4, -333, -777});
}
TEST(ElementwiseBinary, Multiply) {
test<ElementType::kI1>(Multiply, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{0, 0, 1, 0});
test<ElementType::kSI8>(Multiply, {4}, {2, 5, -3, -7}, {2, 2, 3, 3},
{4, 10, -9, -21});
test<ElementType::kSI16>(Multiply, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{44, 165, -132, 385});
test<ElementType::kSI32>(Multiply, {4}, {22, 55, -33, -77}, {2, 3, 4, -5},
{44, 165, -132, 385});
test<ElementType::kBF16>(Multiply, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {4.84, 23.32, -14.52, 42.35});
test<ElementType::kF16>(Multiply, {4}, {22, 55, -33, -77},
{2.5, 3.5, 4.5, -5.5}, {55, 192.5, -148.5, 423.5});
test<ElementType::kF32>(Multiply, {4}, {2.2, 5.3, -3.3, -7.7},
{2.2, 4.4, 4.4, -5.5}, {4.84, 23.32, -14.52, 42.35});
}
TEST(ElementwiseBinary, MultiplyQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI8, ElementType::kF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI8, ElementType::kF32>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI16, ElementType::kBF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI16, ElementType::kF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI16, ElementType::kF32>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI32, ElementType::kBF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI32, ElementType::kF16>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
test<ElementType::kSI32, ElementType::kF32>(
Multiply, {4}, {.scale = 1e-1, .zero_point = 0}, {1.1, 2.2, -3.3, -4.4},
{0.1, 1, 0.5, 2.5}, {0.11, 2.2, -1.7, -11});
}
TEST(ElementwiseBinary, Or) {
test<ElementType::kI1>(Or, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 1, 1});
test<ElementType::kSI8>(Or, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {3, 0, 5, 1});
test<ElementType::kSI16>(Or, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {3, 0, 5, 1});
test<ElementType::kSI32>(Or, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {3, 0, 5, 1});
}
TEST(ElementwiseBinary, Power) {
test<ElementType::kSI8>(Power, {6}, {-2, 1, -3, 5, -3, 4}, {0, 1, 2, 3, 3, 2},
{1, 1, 9, 125, -27, 16});
test<ElementType::kSI16>(Power, {6}, {-2, 1, -36, 5, 3, 5},
{0, 1, 2, 3, 4, 5}, {1, 1, 1296, 125, 81, 3125});
test<ElementType::kSI32>(Power, {6}, {-2, 1, -36, 5, 3, 10},
{0, 1, 2, 3, 4, 5}, {1, 1, 1296, 125, 81, 100000});
test<ElementType::kBF16>(Power, {6}, {-2, -0, -36, 5, 3, 1000},
{2, 2, 1.1, 2, -1, 10},
{4, 0, -NAN, 25, 0.3333333333333333f, 1e+30});
test<ElementType::kF16>(Power, {6}, {-2, -0, -36, 5, 3, 10000},
{2, 2, 1.1, 2, -1, 10},
{4, 0, -NAN, 25, 0.3333333333333333f, INFINITY});
test<ElementType::kF32>(Power, {6}, {-2, -0, -36, 5, 3, 10000},
{2, 2, 1.1, 2, -1, 10},
{4, 0, -NAN, 25, 0.3333333333333333f, INFINITY});
}
TEST(ElementwiseBinary, Remainder) {
test<ElementType::kSI8>(Remainder, {4}, {17, 18, 19, 20}, {3, 4, 5, 7},
{2, 2, 4, 6});
test<ElementType::kSI16>(Remainder, {4}, {17, 18, 19, 20}, {3, 4, 5, 7},
{2, 2, 4, 6});
test<ElementType::kSI32>(Remainder, {4}, {17, -17, 17, -17}, {3, 3, -3, -3},
{2, -2, 2, -2});
test<ElementType::kBF16>(Remainder, {4}, {17, 18, 19, 20}, {3, 4, 5, 7},
{2, 2, 4, 6});
test<ElementType::kF16>(Remainder, {4}, {17, -17, 17, -17}, {3, 3, -3, -3},
{2, -2, 2, -2});
test<ElementType::kF32>(Remainder, {4}, {17.1, -17.1, 17.1, -17.1},
{3, 3, -3, -3}, {2.1, -2.1, 2.1, -2.1});
}
TEST(ElementwiseBinary, RemainderQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {7.1, -7.1, 7.1, -7.1},
{3, 3, -3, -3}, {1.1, -1.1, 1.1, -1.1});
test<ElementType::kSI8, ElementType::kF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {7.1, -7.1, 7.1, -7.1},
{3, 3, -3, -3}, {1.1, -1.1, 1.1, -1.1});
test<ElementType::kSI8, ElementType::kF32>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {7.1, -7.1, 7.1, -7.1},
{3, 3, -3, -3}, {1.1, -1.1, 1.1, -1.1});
test<ElementType::kSI16, ElementType::kBF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 4}, {17, 18, 19, 20},
{3, 4, 5, 7}, {2, 2, 4, 6});
test<ElementType::kSI16, ElementType::kF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {17, -17, 17, -17},
{3, 3, -3, -3}, {2, -2, 2, -2});
test<ElementType::kSI16, ElementType::kF32>(
Remainder, {4}, {.scale = 1e-2, .zero_point = -10},
{17.1, -17.1, 17.1, -17.1}, {3, 3, -3, -3}, {2.1, -2.1, 2.1, -2.1});
test<ElementType::kSI32, ElementType::kBF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 4}, {17, 18, 19, 20},
{3, 4, 5, 7}, {2, 2, 4, 6});
test<ElementType::kSI32, ElementType::kF16>(
Remainder, {4}, {.scale = 1e-1, .zero_point = 0}, {17, -17, 17, -17},
{3, 3, -3, -3}, {2, -2, 2, -2});
test<ElementType::kSI32, ElementType::kF32>(
Remainder, {4}, {.scale = 1e-2, .zero_point = -10},
{17.1, -17.1, 17.1, -17.1}, {3, 3, -3, -3}, {2.1, -2.1, 2.1, -2.1});
}
TEST(ElementwiseBinary, ShiftLeft) {
test<ElementType::kSI8>(ShiftLeft, {3}, {-1, 0, 1}, {1, 2, 3}, {-2, 0, 8});
test<ElementType::kSI16>(ShiftLeft, {3}, {-1, 0, 1}, {1, 2, 3}, {-2, 0, 8});
test<ElementType::kSI32>(ShiftLeft, {3}, {-1, 0, 1}, {1, 2, 3}, {-2, 0, 8});
}
TEST(ElementwiseBinary, ShiftRightArithmetic) {
test<ElementType::kSI8>(ShiftRightArithmetic, {3}, {-1, 0, 8}, {1, 2, 3},
{-1, 0, 1});
test<ElementType::kSI16>(ShiftRightArithmetic, {3}, {-1, 0, 8}, {1, 2, 3},
{-1, 0, 1});
test<ElementType::kSI32>(ShiftRightArithmetic, {3}, {-1, 0, 8}, {1, 2, 3},
{-1, 0, 1});
}
// -1 >> 1 (logical) fills the sign bit with zero, giving the max positive
// value for each width (0x7F / 0x7FFF / 0x7FFFFFFF).
TEST(ElementwiseBinary, ShiftRightLogical) {
  test<ElementType::kSI8>(ShiftRightLogical, {3}, {-1, 0, 8}, {1, 2, 3},
                          {0x7F, 0, 1});
  test<ElementType::kSI16>(ShiftRightLogical, {3}, {-1, 0, 8}, {1, 2, 3},
                          {0x7FFF, 0, 1});
  test<ElementType::kSI32>(ShiftRightLogical, {3}, {-1, 0, 8}, {1, 2, 3},
                          {0x7FFFFFFF, 0, 1});
}
TEST(ElementwiseBinary, Subtract) {
test<ElementType::kSI8>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kSI16>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kSI32>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kBF16>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kF16>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
test<ElementType::kF32>(Subtract, {4}, {1, 0, 1, 0}, {0, 0, 1, 1},
{1, 0, 0, -1});
}
TEST(ElementwiseBinary, SubtractQuantized) {
test<ElementType::kSI8, ElementType::kBF16>(
Subtract, {4}, {.scale = 1, .zero_point = 0}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI8, ElementType::kF16>(
Subtract, {4}, {.scale = 1, .zero_point = 2}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI8, ElementType::kF32>(
Subtract, {4}, {.scale = 1, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI16, ElementType::kBF16>(
Subtract, {4}, {.scale = 1e-1, .zero_point = 0}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI16, ElementType::kF16>(
Subtract, {4}, {.scale = 1e-1, .zero_point = 2}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI16, ElementType::kF32>(
Subtract, {4}, {.scale = 1e-1, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI32, ElementType::kBF16>(
Subtract, {4}, {.scale = 1e-3, .zero_point = 0}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI32, ElementType::kF16>(
Subtract, {4}, {.scale = 1e-3, .zero_point = 2}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
test<ElementType::kSI32, ElementType::kF32>(
Subtract, {4}, {.scale = 1e-3, .zero_point = -10}, {10, 0, 20, 0},
{0, 0, 10, -10}, {10, 0, 10, 10});
}
TEST(ElementwiseBinary, Xor) {
test<ElementType::kI1>(Xor, {4}, {1, 0, 1, 0}, {0, 0, 1, 1}, {1, 0, 0, 1});
test<ElementType::kSI8>(Xor, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {2, 0, 1, 1});
test<ElementType::kSI16>(Xor, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {2, 0, 1, 1});
test<ElementType::kSI32>(Xor, {4}, {3, 0, 5, 0}, {1, 0, 4, 1}, {2, 0, 1, 1});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/elementwise_binary.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/elementwise_binary_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b26549bb-b46f-41f7-a477-29175952794c | cpp | google/cel-cpp | ast_impl | base/ast_internal/ast_impl.cc | base/ast_internal/ast_impl_test.cc | #include "base/ast_internal/ast_impl.h"
#include <cstdint>
#include "absl/container/flat_hash_map.h"
namespace cel::ast_internal {
namespace {
// Returns a process-lifetime Type instance representing `dyn`.
// Intentionally heap-allocated and never freed so it survives static
// destruction ordering at shutdown.
const Type& DynSingleton() {
  static Type* const kDynType = new Type(TypeKind(DynamicType()));
  return *kDynType;
}
}
// Looks up the deduced type of the expression node `expr_id`. Nodes with no
// entry in the type map (e.g. unchecked ASTs) fall back to the shared `dyn`
// singleton.
const Type& AstImpl::GetType(int64_t expr_id) const {
  if (auto it = type_map_.find(expr_id); it != type_map_.end()) {
    return it->second;
  }
  return DynSingleton();
}
// The overall type of the expression is the deduced type of its root node.
const Type& AstImpl::GetReturnType() const {
  return GetType(root_expr().id());
}
// Returns the resolved reference recorded for `expr_id`, or nullptr when the
// node has no reference entry.
const Reference* AstImpl::GetReference(int64_t expr_id) const {
  if (auto it = reference_map_.find(expr_id); it != reference_map_.end()) {
    return &it->second;
  }
  return nullptr;
}
} | #include "base/ast_internal/ast_impl.h"
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "base/ast.h"
#include "base/ast_internal/expr.h"
#include "internal/testing.h"
namespace cel::ast_internal {
namespace {
using ::testing::Pointee;
using ::testing::Truly;
// Builds the expression `2 + 1 == 3` by hand via the parse-only (unchecked)
// constructor and verifies that type/reference lookups degrade gracefully:
// every type is `dyn` and there are no references.
TEST(AstImpl, RawExprCtor) {
  Expr expr;
  auto& call = expr.mutable_call_expr();
  expr.set_id(5);
  call.set_function("_==_");
  auto& eq_lhs = call.mutable_args().emplace_back();
  eq_lhs.mutable_call_expr().set_function("_+_");
  eq_lhs.set_id(3);
  auto& sum_lhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
  sum_lhs.mutable_const_expr().set_int_value(2);
  sum_lhs.set_id(1);
  auto& sum_rhs = eq_lhs.mutable_call_expr().mutable_args().emplace_back();
  sum_rhs.mutable_const_expr().set_int_value(1);
  sum_rhs.set_id(2);
  auto& eq_rhs = call.mutable_args().emplace_back();
  eq_rhs.mutable_const_expr().set_int_value(3);
  eq_rhs.set_id(4);
  SourceInfo source_info;
  source_info.mutable_positions()[5] = 6;
  AstImpl ast_impl(std::move(expr), std::move(source_info));
  Ast& ast = ast_impl;
  ASSERT_FALSE(ast.IsChecked());
  // Without a type map, all lookups return dyn.
  EXPECT_EQ(ast_impl.GetType(1), Type(DynamicType()));
  EXPECT_EQ(ast_impl.GetReturnType(), Type(DynamicType()));
  EXPECT_EQ(ast_impl.GetReference(1), nullptr);
  EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
  EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
  EXPECT_EQ(ast_impl.root_expr().id(), 5);
  EXPECT_EQ(ast_impl.source_info().positions().at(5), 6);
}
// Builds a checked AST (type map + reference map populated) and verifies that
// GetType / GetReference / GetReturnType surface the checker's results.
TEST(AstImpl, CheckedExprCtor) {
  Expr expr;
  expr.mutable_ident_expr().set_name("int_value");
  expr.set_id(1);
  Reference ref;
  ref.set_name("com.int_value");
  AstImpl::ReferenceMap reference_map;
  reference_map[1] = Reference(ref);
  AstImpl::TypeMap type_map;
  type_map[1] = Type(PrimitiveType::kInt64);
  SourceInfo source_info;
  source_info.set_syntax_version("1.0");
  AstImpl ast_impl(std::move(expr), std::move(source_info),
                   std::move(reference_map), std::move(type_map), "1.0");
  Ast& ast = ast_impl;
  ASSERT_TRUE(ast.IsChecked());
  EXPECT_EQ(ast_impl.GetType(1), Type(PrimitiveType::kInt64));
  EXPECT_THAT(ast_impl.GetReference(1),
              Pointee(Truly([&ref](const Reference& arg) {
                return arg.name() == ref.name();
              })));
  EXPECT_EQ(ast_impl.GetReturnType(), Type(PrimitiveType::kInt64));
  EXPECT_TRUE(ast_impl.root_expr().has_ident_expr());
  EXPECT_EQ(ast_impl.root_expr().ident_expr().name(), "int_value");
  EXPECT_EQ(ast_impl.root_expr().id(), 1);
  EXPECT_EQ(ast_impl.source_info().syntax_version(), "1.0");
  EXPECT_EQ(ast_impl.expr_version(), "1.0");
}
// Builds the checked expression `int_value == 2` with per-node types and a
// reference, moving all maps into the AstImpl, and verifies the copied state
// is fully owned and queryable afterwards.
TEST(AstImpl, CheckedExprDeepCopy) {
  Expr root;
  root.set_id(3);
  root.mutable_call_expr().set_function("_==_");
  root.mutable_call_expr().mutable_args().resize(2);
  auto& lhs = root.mutable_call_expr().mutable_args()[0];
  auto& rhs = root.mutable_call_expr().mutable_args()[1];
  AstImpl::TypeMap type_map;
  AstImpl::ReferenceMap reference_map;
  SourceInfo source_info;
  type_map[3] = Type(PrimitiveType::kBool);
  lhs.mutable_ident_expr().set_name("int_value");
  lhs.set_id(1);
  Reference ref;
  ref.set_name("com.int_value");
  reference_map[1] = std::move(ref);
  type_map[1] = Type(PrimitiveType::kInt64);
  rhs.mutable_const_expr().set_int_value(2);
  rhs.set_id(2);
  type_map[2] = Type(PrimitiveType::kInt64);
  source_info.set_syntax_version("1.0");
  AstImpl ast_impl(std::move(root), std::move(source_info),
                   std::move(reference_map), std::move(type_map), "1.0");
  Ast& ast = ast_impl;
  ASSERT_TRUE(ast.IsChecked());
  EXPECT_EQ(ast_impl.GetType(1), Type(PrimitiveType::kInt64));
  // `ref` was moved from above, so compare against the literal name.
  EXPECT_THAT(ast_impl.GetReference(1), Pointee(Truly([](const Reference& arg) {
                return arg.name() == "com.int_value";
              })));
  EXPECT_EQ(ast_impl.GetReturnType(), Type(PrimitiveType::kBool));
  EXPECT_TRUE(ast_impl.root_expr().has_call_expr());
  EXPECT_EQ(ast_impl.root_expr().call_expr().function(), "_==_");
  EXPECT_EQ(ast_impl.root_expr().id(), 3);
  EXPECT_EQ(ast_impl.source_info().syntax_version(), "1.0");
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/base/ast_internal/ast_impl.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/base/ast_internal/ast_impl_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
5049f30e-5fab-4599-b26d-ef55273675f2 | cpp | tensorflow/tensorflow | calibrator | tensorflow/lite/tools/optimize/calibration/calibrator.cc | tensorflow/lite/tools/optimize/calibration/calibrator_test.cc | #include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "flatbuffers/buffer.h"
#include "flatbuffers/vector.h"
#include "tensorflow/compiler/mlir/lite/allocation.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/optimize/calibration/builtin_logging_ops/lstm.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_common.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_logger.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
#include "tensorflow/lite/tools/optimize/calibration/custom_logging_ops/lstm.h"
#include "tensorflow/lite/tools/optimize/calibration/logging_op.h"
#include "tensorflow/lite/tools/optimize/calibration/logging_op_resolver.h"
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
class Calibrator {
public:
Calibrator(const std::unordered_map<const TfLiteNode*, OperatorInfo>&
node_ptr_opinfo_map,
std::unique_ptr<LoggingOpResolver> logging_op_resolver,
ErrorReporter* error_reporter)
: node_ptr_opinfo_map_(node_ptr_opinfo_map),
logging_op_resolver_(std::move(logging_op_resolver)),
error_reporter_(error_reporter) {
logger_ = std::make_unique<Logger>();
}
KernelEvalFuncPtr GetKernelInvoke(const TfLiteNode* node) const;
Logger* GetLogger() const { return logger_.get(); }
ErrorReporter* GetErrorReporter() const { return error_reporter_; }
const OperatorInfo& GetOpInfo(const TfLiteNode* node) const {
return node_ptr_opinfo_map_.at(node);
}
std::vector<const TfLiteNode*> GetNodesUnderCalibration() {
std::vector<const TfLiteNode*> nodes;
nodes.reserve(node_ptr_opinfo_map_.size());
for (const auto& entry : node_ptr_opinfo_map_) {
nodes.push_back(entry.first);
}
return nodes;
}
private:
std::unordered_map<const TfLiteNode*, OperatorInfo> node_ptr_opinfo_map_;
std::unique_ptr<LoggingOpResolver> logging_op_resolver_;
const std::unordered_map<int, OperatorInfo> index_opinfo_;
std::unique_ptr<Logger> logger_;
ErrorReporter* error_reporter_;
};
// Resolves the wrapped (original) kernel eval function for `node`, keyed by
// custom-op name or builtin code, plus version.
KernelEvalFuncPtr Calibrator::GetKernelInvoke(const TfLiteNode* node) const {
  // Bind by const reference: the original `auto op_info = ...` copied the
  // whole OperatorInfo (name string, index vectors) on every kernel
  // invocation for no benefit.
  const auto& op_info = node_ptr_opinfo_map_.at(node);
  if (op_info.is_custom_op) {
    return logging_op_resolver_->GetWrappedKernelInvoke(op_info.name.c_str(),
                                                        op_info.version);
  }
  return logging_op_resolver_->GetWrappedKernelInvoke(op_info.builtin_op_code,
                                                      op_info.version);
}
// Process-wide registry tying each instrumented TfLiteContext and its nodes
// to the Calibrator that owns their statistics, so that LoggingEval (a free
// function with no user data) can find the right calibrator from a node.
// NOTE(review): no locking — appears to assume single-threaded calibration;
// confirm callers never calibrate concurrently.
class GlobalCalibratorRegistry {
 public:
  // Returns the calibrator responsible for `node`, or nullptr when the node
  // is not under calibration.
  Calibrator* GetCalibrator(const TfLiteNode* node) const {
    if (node_to_calibrator_.find(node) == node_to_calibrator_.cend()) {
      return nullptr;
    }
    return node_to_calibrator_.at(node);
  }
  // Drops the calibrator registered for `context` along with every
  // node-to-calibrator entry it owned. Precondition: `context` is registered.
  void RemoveCalibrator(const TfLiteContext* context) {
    Calibrator* calibrator = calibrator_registry_.at(context).get();
    auto nodes = calibrator->GetNodesUnderCalibration();
    for (auto node : nodes) {
      node_to_calibrator_.erase(node);
    }
    calibrator_registry_.erase(context);
  }
  // Creates and registers a calibrator for `context`; fails if one is already
  // registered. On success, *calibrator_ptr points at the registry-owned
  // instance and every node in `node_to_opinfo` maps to it.
  TfLiteStatus CreateCalibrator(
      const TfLiteContext* context,
      const std::unordered_map<const TfLiteNode*, OperatorInfo>& node_to_opinfo,
      std::unique_ptr<LoggingOpResolver> logging_op_resolver,
      Calibrator** calibrator_ptr, ErrorReporter* reporter) {
    if (calibrator_registry_.find(context) != calibrator_registry_.cend()) {
      reporter->Report(
          "Failed to create calibrator, context already registered.");
      return kTfLiteError;
    }
    auto calibrator = std::make_unique<Calibrator>(
        node_to_opinfo, std::move(logging_op_resolver), reporter);
    calibrator_registry_[context] = std::move(calibrator);
    *calibrator_ptr = calibrator_registry_.at(context).get();
    for (const auto& entry : node_to_opinfo) {
      node_to_calibrator_[entry.first] = *calibrator_ptr;
    }
    return kTfLiteOk;
  }

 private:
  absl::flat_hash_map<const TfLiteContext*, std::unique_ptr<Calibrator>>
      calibrator_registry_;
  // Non-owning back-pointers into calibrator_registry_ values.
  absl::flat_hash_map<const TfLiteNode*, Calibrator*> node_to_calibrator_;
};
// Returns the process-wide registry. Intentionally heap-allocated and never
// freed so it outlives all static destructors.
GlobalCalibratorRegistry* GetCalibratorRegistry() {
  static GlobalCalibratorRegistry* registry = new GlobalCalibratorRegistry();
  return registry;
}
// Returns a special logging kernel for LSTM-family ops — these also need
// their intermediate tensors logged — or nullptr, meaning the regular
// wrapped kernel should be invoked instead.
logging_kernel_func_ptr GetLoggingEvalFunc(TfLiteContext* context,
                                           TfLiteNode* node,
                                           int builtin_op_code) {
  switch (builtin_op_code) {
    case BuiltinOperator_LSTM: {
      // 12 intermediate tensors selects the custom logging kernel;
      // presumably this count distinguishes a non-standard LSTM variant —
      // TODO confirm against custom_logging_ops/lstm.h.
      if (node->intermediates->size == 12) {
        return tflite::optimize::calibration::custom::lstm_logging_kernel;
      }
      return tflite::optimize::calibration::builtin::lstm_logging_kernel;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM:
      return tflite::optimize::calibration::builtin::
          unidirectional_sequence_lstm_logging_kernel;
    default:
      return nullptr;
  }
}
// Eval function installed for every instrumented op. Logs min/max of the
// op's float inputs, runs the real kernel (or an LSTM-specific logging
// kernel that also records intermediates), then logs the inputs again and
// the outputs.
TfLiteStatus LoggingEval(TfLiteContext* context, TfLiteNode* node) {
  Calibrator* calibrator = GetCalibratorRegistry()->GetCalibrator(node);
  if (!calibrator) {
    TF_LITE_KERNEL_LOG(context, "No calibrator found for context.");
    return kTfLiteError;
  }
  auto kernel_invoke = calibrator->GetKernelInvoke(node);
  auto logger = calibrator->GetLogger();
  auto op_info = calibrator->GetOpInfo(node);
  auto error_reporter = calibrator->GetErrorReporter();
  // Record input ranges before the kernel runs.
  for (int i : op_info.loggable_inputs) {
    auto tensor = context->tensors[i];
    TF_LITE_ENSURE_STATUS(
        logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
                               tensor.bytes / sizeof(float), error_reporter));
  }
  auto builtin_op_code = calibrator->GetOpInfo(node).builtin_op_code;
  auto kernel_invoke_intermediate =
      GetLoggingEvalFunc(context, node, builtin_op_code);
  if (kernel_invoke_intermediate == nullptr) {
    TF_LITE_ENSURE_STATUS(kernel_invoke(context, node));
  } else {
    TF_LITE_ENSURE_STATUS(
        kernel_invoke_intermediate(context, op_info.subgraph_index, node,
                                   calibrator->GetLogger(), error_reporter));
  }
  // Inputs are logged a second time after the kernel runs — presumably to
  // capture ops that overwrite their inputs in place; confirm before
  // removing this pass.
  for (int i : op_info.loggable_inputs) {
    auto tensor = context->tensors[i];
    TF_LITE_ENSURE_STATUS(
        logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
                               tensor.bytes / sizeof(float), error_reporter));
  }
  // Record the freshly-produced output ranges.
  for (int i : op_info.loggable_outputs) {
    auto tensor = context->tensors[i];
    TF_LITE_ENSURE_STATUS(
        logger->LogTensorValue(op_info.subgraph_index, i, tensor.data.f,
                               tensor.bytes / sizeof(float), error_reporter));
  }
  return kTfLiteOk;
}
// Filters `tensor_indices` down to tensors whose runtime values are worth
// logging: float32 tensors that are not backed by a constant flatbuffer
// buffer (weights/constants have fixed contents and need no calibration).
std::vector<int> GetLoggableTensorIndices(
    const std::vector<int>& tensor_indices,
    const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors,
    const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* tensor_buffers) {
  std::vector<int> loggable_indices;
  for (const int index : tensor_indices) {
    // Optional (absent) tensors cannot be logged.
    if (index == kTfLiteOptionalTensor) {
      continue;
    }
    const auto* tensor = tensors->Get(index);
    const auto* buffer = tensor_buffers->Get(tensor->buffer());
    const bool is_constant = buffer != nullptr && buffer->data() != nullptr &&
                             buffer->data()->size() != 0;
    if (!is_constant && tensor->type() == tflite::TensorType_FLOAT32) {
      loggable_indices.push_back(index);
    }
  }
  return loggable_indices;
}
// Resolves each (subgraph_index, op_index) key in `node_to_opinfo` to the
// interpreter's concrete TfLiteNode pointer, filling `node_ptr_opinfo_map`,
// and surfaces the primary subgraph's TfLiteContext via `context`.
TfLiteStatus GetNodeOpInfoMapAndContext(
    const absl::flat_hash_map<std::tuple<int, int>, OperatorInfo>&
        node_to_opinfo,
    tflite::Interpreter* const interpreter,
    std::unordered_map<const TfLiteNode*, OperatorInfo>* node_ptr_opinfo_map,
    TfLiteContext** context) {
  *context = interpreter->primary_subgraph().context();
  // Sanity check: the instrumented op set must cover at least the primary
  // execution plan.
  TF_LITE_ENSURE(*context,
                 interpreter->execution_plan().size() <= node_to_opinfo.size());
  for (const auto& entry : node_to_opinfo) {
    auto op_info = entry.second;  // deliberate copy: registration is patched
    int subgraph_index, op_index;
    std::tie(subgraph_index, op_index) = entry.first;
    const auto* node_and_reg =
        interpreter->node_and_registration(subgraph_index, op_index);
    op_info.registration = &node_and_reg->second;
    node_ptr_opinfo_map->insert({&node_and_reg->first, op_info});
  }
  return kTfLiteOk;
}
// Returns a human-readable operator name: the custom-op string when present,
// otherwise the builtin operator's enum name.
string GetOpName(const tflite::OperatorCode& opcode) {
  const auto* custom_name = opcode.custom_code();
  if (custom_name == nullptr) {
    return tflite::EnumNamesBuiltinOperator()[GetBuiltinCode(&opcode)];
  }
  return custom_name->str();
}
// CalibrationReader that, on destruction, unregisters its context's
// calibrator from the global registry, ending the calibration session.
class Reader : public CalibrationReader {
 public:
  Reader(const TfLiteContext* context, const Logger* logger)
      : CalibrationReader(logger), context_(context) {}
  ~Reader() override { GetCalibratorRegistry()->RemoveCalibrator(context_); }

 private:
  const TfLiteContext* context_;  // not owned
};
// True when ops with builtin code `code` are expected to declare inputs;
// used only to decide whether a missing-inputs warning should be logged.
bool HasInputs(BuiltinOperator code) {
  const bool inputless = code == BuiltinOperator_CALL_ONCE ||
                         code == BuiltinOperator_VAR_HANDLE ||
                         code == BuiltinOperator_CUSTOM;
  return !inputless;
}
// True when ops with builtin code `code` are expected to declare outputs;
// used only to decide whether a missing-outputs warning should be logged.
bool HasOutputs(BuiltinOperator code) {
  const bool outputless = code == BuiltinOperator_ASSIGN_VARIABLE ||
                          code == BuiltinOperator_CALL_ONCE ||
                          code == BuiltinOperator_CUSTOM;
  return !outputless;
}
}
// Convenience overload: unpacks a FlatBufferModel into its raw Model pointer,
// error reporter, and allocation, then delegates to the Model*-based
// overload.
TfLiteStatus BuildLoggingInterpreter(
    const FlatBufferModel& model, const OpResolver& op_resolver,
    std::unique_ptr<Interpreter>* interpreter,
    std::unique_ptr<CalibrationReader>* calibration_reader) {
  return BuildLoggingInterpreter(model.GetModel(), model.error_reporter(),
                                 op_resolver, interpreter, calibration_reader,
                                 model.allocation());
}
// Builds an interpreter whose every op is wrapped with LoggingEval, and a
// CalibrationReader exposing the statistics collected during Invoke().
// Steps: (1) walk all subgraphs collecting per-op OperatorInfo and the set
// of op/version pairs to wrap, (2) build the interpreter with a
// LoggingOpResolver, (3) register a Calibrator keyed by the interpreter's
// primary context and hand back a Reader that unregisters it on destruction.
TfLiteStatus BuildLoggingInterpreter(
    const tflite::Model* tflite_model, ErrorReporter* error_reporter,
    const OpResolver& op_resolver, std::unique_ptr<Interpreter>* interpreter,
    std::unique_ptr<CalibrationReader>* calibration_reader,
    const Allocation* allocation) {
  if (error_reporter == nullptr) {
    // Make sure error_reporter is valid.
    error_reporter = DefaultErrorReporter();
  }
  auto subgraphs = tflite_model->subgraphs();
  auto tensor_buffers = tflite_model->buffers();
  // Keyed by (subgraph index, op index).
  absl::flat_hash_map<std::tuple<int, int>, OperatorInfo> node_to_opinfo;
  BuiltinOpsSet builtin_op_and_versions;
  CustomOpsSet custom_op_and_versions;
  for (size_t subgraph_index = 0; subgraph_index < subgraphs->size();
       subgraph_index++) {
    auto subgraph = subgraphs->Get(subgraph_index);
    auto operator_codes = tflite_model->operator_codes();
    auto operators = subgraph->operators();
    auto tensors = subgraph->tensors();
    if (!operators) {
      continue;
    }
    for (size_t i = 0; i < operators->size(); i++) {
      OperatorInfo op_info;
      op_info.subgraph_index = subgraph_index;
      op_info.node_index = i;
      auto op = operators->Get(i);
      auto operator_code = operator_codes->Get(op->opcode_index());
      op_info.builtin_op_code = GetBuiltinCode(operator_code);
      op_info.name = GetOpName(*operator_code);
      op_info.is_custom_op = operator_code->custom_code() != nullptr;
      op_info.version = operator_code->version();
      auto op_inputs = op->inputs();
      auto op_outputs = op->outputs();
      if (op_inputs) {
        op_info.inputs = std::vector<int>(op_inputs->begin(), op_inputs->end());
      } else if (HasInputs(op_info.builtin_op_code)) {
        // Only warn for ops that normally have inputs.
        TFLITE_LOG(TFLITE_LOG_WARNING, "Op %s missing inputs",
                   op_info.name.c_str());
      }
      if (op_outputs) {
        op_info.outputs =
            std::vector<int>(op_outputs->begin(), op_outputs->end());
      } else if (HasOutputs(op_info.builtin_op_code)) {
        TFLITE_LOG(TFLITE_LOG_WARNING, "Op %s missing outputs",
                   op_info.name.c_str());
      }
      // Only mutable float32 tensors are logged.
      op_info.loggable_inputs =
          GetLoggableTensorIndices(op_info.inputs, tensors, tensor_buffers);
      op_info.loggable_outputs =
          GetLoggableTensorIndices(op_info.outputs, tensors, tensor_buffers);
      if (op_info.is_custom_op) {
        op_info.registration =
            op_resolver.FindOp(op_info.name.c_str(), operator_code->version());
        custom_op_and_versions.insert(
            {op_info.name.c_str(), operator_code->version()});
      } else {
        op_info.registration = op_resolver.FindOp(GetBuiltinCode(operator_code),
                                                  operator_code->version());
        builtin_op_and_versions.insert(
            {op_info.builtin_op_code, operator_code->version()});
      }
      std::tuple<int, int> key{subgraph_index, i};
      node_to_opinfo[key] = op_info;
    }
  }
  // Resolver that substitutes LoggingEval while keeping the original kernels
  // reachable for the wrapped invoke.
  auto logging_op_resolver = std::make_unique<LoggingOpResolver>(
      builtin_op_and_versions, custom_op_and_versions, op_resolver, LoggingEval,
      error_reporter);
  tflite::InterpreterBuilder(tflite_model, *logging_op_resolver, error_reporter,
                             /*options_experimental=*/nullptr,
                             allocation)(interpreter);
  if (!(*interpreter)) {
    error_reporter->Report("Failed to construct interpreter");
    return kTfLiteError;
  }
  // Map the collected op info onto the interpreter's concrete nodes and
  // register a calibrator for this interpreter's context.
  std::unordered_map<const TfLiteNode*, OperatorInfo> node_ptr_opinfo_map;
  TfLiteContext* context = nullptr;
  TF_LITE_ENSURE_STATUS(GetNodeOpInfoMapAndContext(
      node_to_opinfo, interpreter->get(), &node_ptr_opinfo_map, &context));
  Calibrator* calibrator = nullptr;
  TF_LITE_ENSURE_STATUS(GetCalibratorRegistry()->CreateCalibrator(
      context, node_ptr_opinfo_map, std::move(logging_op_resolver), &calibrator,
      error_reporter));
  *calibration_reader = std::unique_ptr<CalibrationReader>(
      new Reader(context, calibrator->GetLogger()));
  return kTfLiteOk;
}
}
}
} | #include "tensorflow/lite/tools/optimize/calibration/calibrator.h"
#include <cstring>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/command_line_flags.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/tools/optimize/calibration/calibration_reader.h"
namespace {
tensorflow::string* g_test_model_dir = nullptr;
}
namespace tflite {
namespace optimize {
namespace calibration {
namespace {
// Loads `model_name` from the global test-model directory as a
// FlatBufferModel (null on failure).
std::unique_ptr<FlatBufferModel> ReadModel(const string& model_name) {
  const auto full_path =
      tensorflow::io::JoinPath(*g_test_model_dir, model_name);
  return FlatBufferModel::BuildFromFile(full_path.c_str());
}
// End-to-end check on multi_add.bin: stats are empty before Invoke; after
// filling input i entirely with i+1 and invoking once, the two outputs are
// constant 6 and 9 and each of the 7 float tensors has min == max equal to
// its constant value. (Removed an unused local `std::vector<float> ones`.)
TEST(CalibratorTest, CalibrationStatsAreCollected) {
  auto model = ReadModel("multi_add.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(
      *model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_TRUE(stats.empty());  // nothing is logged until the graph runs
  status = interpreter->AllocateTensors();
  ASSERT_EQ(kTfLiteOk, status);
  const size_t tensor_size = 1 * 8 * 8 * 3;
  // Fill input tensor i with the constant value i+1.
  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
    int input_tensor_idx = interpreter->inputs()[i];
    TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
    ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
    for (size_t j = 0; j < tensor_size; j++) {
      tensor->data.f[j] = i + 1;
    }
  }
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  const float eps = 1e-6f;
  TfLiteTensor* tensor = interpreter->tensor(interpreter->outputs()[0]);
  for (size_t i = 0; i < tensor_size; i++) {
    EXPECT_NEAR(tensor->data.f[i], 6.0f, eps);
  }
  tensor = interpreter->tensor(interpreter->outputs()[1]);
  for (size_t i = 0; i < tensor_size; i++) {
    EXPECT_NEAR(tensor->data.f[i], 9.0f, eps);
  }
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_EQ(7, stats.size());
  // Inputs 0..3 were filled with 1..4, so min == max == value.
  for (int tensor_idx = 0; tensor_idx < 4; tensor_idx++) {
    EXPECT_NEAR(stats.find({0, tensor_idx})->second.min, tensor_idx + 1, eps);
    EXPECT_NEAR(stats.find({0, tensor_idx})->second.max, tensor_idx + 1, eps);
  }
  // Intermediate and output tensors.
  EXPECT_NEAR(stats.find({0, 4})->second.min, 5, eps);
  EXPECT_NEAR(stats.find({0, 4})->second.max, 5, eps);
  EXPECT_NEAR(stats.find({0, 5})->second.min, 6, eps);
  EXPECT_NEAR(stats.find({0, 5})->second.max, 6, eps);
  EXPECT_NEAR(stats.find({0, 6})->second.min, 9, eps);
  EXPECT_NEAR(stats.find({0, 6})->second.max, 9, eps);
}
// Invokes the same instrumented interpreter twice and checks that stats
// accumulate: the second run widens the ranges only of the tensors whose
// values actually changed (input 0 and everything downstream of it).
TEST(CalibratorTest, MultipleInvokes) {
  auto model = ReadModel("multi_add.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(
      *model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  status = interpreter->AllocateTensors();
  EXPECT_EQ(kTfLiteOk, status);
  const size_t tensor_size = 1 * 8 * 8 * 3;
  // First run: input i filled with constant i+1.
  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
    int input_tensor_idx = interpreter->inputs()[i];
    TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
    ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
    for (size_t j = 0; j < tensor_size; j++) {
      tensor->data.f[j] = i + 1;
    }
  }
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  const float eps = 1e-6f;
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_EQ(7, stats.size());
  const float expected_values[7] = {
      1.0f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
    EXPECT_NEAR(stats.find({0, tensor_idx})->second.min,
                expected_values[tensor_idx], eps);
    EXPECT_NEAR(stats.find({0, tensor_idx})->second.max,
                expected_values[tensor_idx], eps);
  }
  // Second run: perturb only two elements of input 0 (1.5 above and 0.5
  // below the previous constant 1.0).
  TfLiteTensor* input0 = interpreter->tensor(0);
  input0->data.f[0] = 1.5f;
  input0->data.f[1] = 0.5f;
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_EQ(7, stats.size());
  // Input 0's range widens to [0.5, 1.5]; tensors 1-4 are unaffected.
  EXPECT_NEAR(stats.find({0, 0})->second.min, 0.5f, eps);
  EXPECT_NEAR(stats.find({0, 0})->second.max, 1.5f, eps);
  for (int tensor_idx = 1; tensor_idx < 5; tensor_idx++) {
    EXPECT_NEAR(stats.find({0, tensor_idx})->second.min,
                expected_values[tensor_idx], eps);
    EXPECT_NEAR(stats.find({0, tensor_idx})->second.max,
                expected_values[tensor_idx], eps);
  }
  // Downstream tensors of input 0 widen accordingly.
  EXPECT_NEAR(stats.find({0, 5})->second.min, 5.5f, eps);
  EXPECT_NEAR(stats.find({0, 5})->second.max, 6.5f, eps);
  EXPECT_NEAR(stats.find({0, 6})->second.min, 9.0f, eps);
  EXPECT_NEAR(stats.find({0, 6})->second.max, 9.0f, eps);
}
// Verifies CalibrationReader::AddCalibrationToModel: with update=true the
// pre-existing quantization min/max on tensor 0 ([0.5, 1.5]) is merged with
// the logged stats; with update=false the logged stats overwrite them.
// NOTE(review): the TfLiteStatus returned by AddCalibrationToModel is
// assigned but never asserted on — consider adding EXPECT_EQ checks.
TEST(CalibratorTest, UpdateMinMax) {
  auto flatbuffer_model = ReadModel("multi_add.bin");
  ASSERT_TRUE(flatbuffer_model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(*flatbuffer_model,
                                        ops::builtin::BuiltinOpResolver{},
                                        &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  auto readonly_model = flatbuffer_model->GetModel();
  tflite::ModelT model;
  readonly_model->UnPackTo(&model);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  status = interpreter->AllocateTensors();
  EXPECT_EQ(kTfLiteOk, status);
  const size_t tensor_size = 1 * 8 * 8 * 3;
  // Fill input tensor i with the constant value i+1.
  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
    int input_tensor_idx = interpreter->inputs()[i];
    TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
    ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
    for (size_t j = 0; j < tensor_size; j++) {
      tensor->data.f[j] = i + 1;
    }
  }
  // Seed tensor 0 with pre-existing quantization bounds [0.5, 1.5].
  auto input_0_quant_params =
      std::make_unique<tflite::QuantizationParametersT>();
  input_0_quant_params->min.push_back(0.5);
  input_0_quant_params->max.push_back(1.5);
  model.subgraphs[0]->tensors[0]->quantization =
      std::move(input_0_quant_params);
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  const float eps = 1e-6f;
  // update=true: tensor 0 keeps the wider seeded range.
  const float expected_min[7] = {
      0.5f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  const float expected_max[7] = {
      1.5f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  status = reader->AddCalibrationToModel(&model, true);
  for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
                expected_min[tensor_idx], eps);
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
                expected_max[tensor_idx], eps);
  }
  // update=false: logged stats replace the seeded range entirely.
  const float expected_value[7] = {
      1.0f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  status = reader->AddCalibrationToModel(&model, false);
  for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
                expected_value[tensor_idx], eps);
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
                expected_value[tensor_idx], eps);
  }
}
// Same scenario as UpdateMinMax but with NaN injected at every even index of
// each input; the expected ranges are identical to the NaN-free run, i.e.
// NaN values do not contribute to the logged min/max.
// NOTE(review): as in UpdateMinMax, the status of AddCalibrationToModel is
// never asserted on.
TEST(CalibratorTest, HandleNanValues) {
  auto flatbuffer_model = ReadModel("multi_add.bin");
  ASSERT_TRUE(flatbuffer_model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(*flatbuffer_model,
                                        ops::builtin::BuiltinOpResolver{},
                                        &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  auto readonly_model = flatbuffer_model->GetModel();
  tflite::ModelT model;
  readonly_model->UnPackTo(&model);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  status = interpreter->AllocateTensors();
  EXPECT_EQ(kTfLiteOk, status);
  const size_t tensor_size = 1 * 8 * 8 * 3;
  // Even indices get NaN, odd indices get the constant i+1.
  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
    int input_tensor_idx = interpreter->inputs()[i];
    TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
    ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
    for (size_t j = 0; j < tensor_size; j++) {
      if (j % 2 == 0) {
        tensor->data.f[j] = NAN;
      } else {
        tensor->data.f[j] = i + 1;
      }
    }
  }
  // Seed tensor 0 with pre-existing quantization bounds [0.5, 1.5].
  auto input_0_quant_params =
      std::make_unique<tflite::QuantizationParametersT>();
  input_0_quant_params->min.push_back(0.5);
  input_0_quant_params->max.push_back(1.5);
  model.subgraphs[0]->tensors[0]->quantization =
      std::move(input_0_quant_params);
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  const float eps = 1e-6f;
  // update=true: identical expectations to the NaN-free UpdateMinMax run.
  const float expected_min[7] = {
      0.5f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  const float expected_max[7] = {
      1.5f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  status = reader->AddCalibrationToModel(&model, true);
  for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
                expected_min[tensor_idx], eps);
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
                expected_max[tensor_idx], eps);
  }
  const float expected_value[7] = {
      1.0f,
      2.0f,
      3.0f,
      4.0f,
      5.0f,
      6.0f,
      9.0f,
  };
  status = reader->AddCalibrationToModel(&model, false);
  for (int tensor_idx = 0; tensor_idx < 7; tensor_idx++) {
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->min[0],
                expected_value[tensor_idx], eps);
    EXPECT_NEAR(model.subgraphs[0]->tensors[tensor_idx]->quantization->max[0],
                expected_value[tensor_idx], eps);
  }
}
// Runs the builtin LSTM model once and compares the logged per-tensor
// min/max against a golden table that includes the LSTM's intermediate
// tensors (indices 18-29), which the LSTM-specific logging kernel records.
TEST(CalibratorTest, LSTM) {
  auto flatbuffer_model = ReadModel("lstm.bin");
  ASSERT_TRUE(flatbuffer_model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(*flatbuffer_model,
                                        ops::builtin::BuiltinOpResolver{},
                                        &interpreter, &reader);
  EXPECT_EQ(status, kTfLiteOk);
  auto readonly_model = flatbuffer_model->GetModel();
  tflite::ModelT model;
  readonly_model->UnPackTo(&model);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  status = interpreter->AllocateTensors();
  EXPECT_EQ(kTfLiteOk, status);
  const std::vector<float> lstm_input = {0.3, 0.2};
  int input_tensor_idx = interpreter->inputs()[0];
  TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
  for (size_t j = 0; j < lstm_input.size(); j++) {
    tensor->data.f[j] = lstm_input[j];
  }
  ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
  const float eps = 1e-6f;
  // Golden (subgraph, tensor) -> {min, max} table.
  const absl::flat_hash_map<std::tuple<int, int>,
                            CalibrationReader::CalibrationStats>
      expected_calibration_result = {
          {{0, 0}, {0.200000, 0.300000}},
          {{0, 18}, {0.000000, 0.468415}},
          {{0, 19}, {0.000000, 0.424350}},
          {{0, 24}, {0.265968, 0.468415}},
          {{0, 25}, {0.080045, 0.170588}},
          {{0, 26}, {0.080045, 0.170588}},
          {{0, 27}, {0.080045, 0.170588}},
          {{0, 28}, {0.080045, 0.170588}},
          {{0, 29}, {0.000000, 0.270944}},
      };
  EXPECT_EQ(expected_calibration_result.size(), stats.size());
  for (const auto& e : stats) {
    auto expected_result = expected_calibration_result.find(e.first)->second;
    EXPECT_NEAR(e.second.min, expected_result.min, eps);
    EXPECT_NEAR(e.second.max, expected_result.max, eps);
  }
}
// Same structure as the LSTM test but for the unidirectional-sequence LSTM
// variant (its dedicated logging kernel), with a 4-element input sequence
// and its own golden min/max table.
TEST(CalibratorTest, UnidirectionalSequenceLSTM) {
  auto flatbuffer_model = ReadModel("unidirectional_sequence_lstm.bin");
  ASSERT_TRUE(flatbuffer_model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(*flatbuffer_model,
                                        ops::builtin::BuiltinOpResolver{},
                                        &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  auto readonly_model = flatbuffer_model->GetModel();
  tflite::ModelT model;
  readonly_model->UnPackTo(&model);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
  const std::vector<float> lstm_input = {0.3, 0.2, 0.9, 0.8};
  int input_tensor_idx = interpreter->inputs()[0];
  TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
  for (size_t j = 0; j < lstm_input.size(); j++) {
    tensor->data.f[j] = lstm_input[j];
  }
  ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
  const float eps = 1e-6f;
  // Golden (subgraph, tensor) -> {min, max} table.
  const absl::flat_hash_map<std::tuple<int, int>,
                            CalibrationReader::CalibrationStats>
      expected_calibration_result = {
          {{0, 0}, {0.200000, 0.900000}},
          {{0, 18}, {0.000000, 0.520999}},
          {{0, 19}, {0.000000, 0.711364}},
          {{0, 24}, {0.247992, 0.520999}},
          {{0, 25}, {0.080045, 0.824241}},
          {{0, 26}, {0.080045, 0.824241}},
          {{0, 27}, {0.080045, 0.824241}},
          {{0, 28}, {0.080045, 0.824241}},
          {{0, 29}, {0.000000, 0.413618}},
      };
  EXPECT_EQ(expected_calibration_result.size(), stats.size());
  for (const auto& e : stats) {
    auto expected_result = expected_calibration_result.find(e.first)->second;
    EXPECT_NEAR(e.second.min, expected_result.min, eps);
    EXPECT_NEAR(e.second.max, expected_result.max, eps);
  }
}
// Exercises the custom LSTM logging path (the variant selected by its
// intermediate-tensor count); its golden table covers more tensors (18-36)
// than the builtin LSTM test, including several with an empty [0, 0] range.
TEST(CalibratorTest, CustomLSTM) {
  auto flatbuffer_model = ReadModel("custom_lstm.bin");
  ASSERT_TRUE(flatbuffer_model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(*flatbuffer_model,
                                        ops::builtin::BuiltinOpResolver{},
                                        &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  auto readonly_model = flatbuffer_model->GetModel();
  tflite::ModelT model;
  readonly_model->UnPackTo(&model);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  EXPECT_EQ(interpreter->AllocateTensors(), kTfLiteOk);
  const std::vector<float> lstm_input = {0.3, 0.2, 0.9, 0.8};
  int input_tensor_idx = interpreter->inputs()[0];
  TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
  for (size_t j = 0; j < lstm_input.size(); j++) {
    tensor->data.f[j] = lstm_input[j];
  }
  ASSERT_EQ(interpreter->Invoke(), kTfLiteOk);
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  EXPECT_EQ(reader->GetTensorStatsAsMap(&stats), kTfLiteOk);
  const float eps = 1e-6f;
  // Golden (subgraph, tensor) -> {min, max} table.
  const absl::flat_hash_map<std::tuple<int, int>,
                            CalibrationReader::CalibrationStats>
      expected_calibration_result = {
          {{0, 0}, {0.200000, 0.300000}},
          {{0, 18}, {0.000000, 0.468415}},
          {{0, 19}, {0.000000, 0.424349}},
          {{0, 24}, {0.265968, 0.468415}},
          {{0, 25}, {0.080045, 0.170588}},
          {{0, 26}, {0.080045, 0.170588}},
          {{0, 27}, {0.000000, 0.000000}},
          {{0, 28}, {0.080045, 0.170588}},
          {{0, 29}, {0.080045, 0.170588}},
          {{0, 30}, {0.000000, 0.000000}},
          {{0, 31}, {0.080045, 0.170588}},
          {{0, 32}, {0.080045, 0.170588}},
          {{0, 33}, {0.000000, 0.000000}},
          {{0, 34}, {0.080045, 0.170588}},
          {{0, 35}, {0.080045, 0.170588}},
          {{0, 36}, {0.000000, 0.000000}},
      };
  EXPECT_EQ(expected_calibration_result.size(), stats.size());
  for (const auto& e : stats) {
    auto expected_result = expected_calibration_result.find(e.first)->second;
    EXPECT_NEAR(e.second.min, expected_result.min, eps);
    EXPECT_NEAR(e.second.max, expected_result.max, eps);
  }
}
// Runs a model containing multiple subgraphs (a while loop) and checks that
// statistics are recorded for tensors in both subgraph 0 and subgraph 2.
// Fix: the byte-size check used sizeof(int) even though the loop below
// writes floats (tensor->data.f); use sizeof(float) to match the writes.
TEST(CalibratorTest, CalibrationWithMultipleSubgraphs) {
  auto model = ReadModel("multi_subgraphs_while.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(
      *model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  // No inference has run yet, so there must be no recorded statistics.
  EXPECT_TRUE(stats.empty());
  status = interpreter->AllocateTensors();
  ASSERT_EQ(kTfLiteOk, status);
  const size_t tensor_size = 1;
  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
    int input_tensor_idx = interpreter->inputs()[i];
    TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
    ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
    for (size_t j = 0; j < tensor_size; j++) {
      tensor->data.f[j] = i + 1;
    }
  }
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_EQ(4, stats.size());
  // Expected (min, max) per (subgraph_index, tensor_index) key.
  const float eps = 1e-6f;
  const absl::flat_hash_map<std::tuple<int, int>,
                            CalibrationReader::CalibrationStats>
      expected_calibration_result = {
          {{0, 0}, {1.0, 1.0}},
          {{0, 4}, {4.0, 4.0}},
          {{2, 2}, {1.0, 2.0}},
          {{2, 6}, {2.0, 4.0}},
      };
  EXPECT_EQ(expected_calibration_result.size(), stats.size());
  for (const auto& e : stats) {
    auto expected_result = expected_calibration_result.find(e.first)->second;
    EXPECT_NEAR(e.second.min, expected_result.min, eps);
    EXPECT_NEAR(e.second.max, expected_result.max, eps);
  }
}
// Runs a model containing a CallOnce op and checks the recorded statistics.
// Fix: the byte-size check used sizeof(int) even though the loop below
// writes floats (tensor->data.f); use sizeof(float) to match the writes.
TEST(CalibratorTest, CalibrationWithCallOnce) {
  auto model = ReadModel("call_once_mul.bin");
  ASSERT_TRUE(model);
  std::unique_ptr<Interpreter> interpreter;
  std::unique_ptr<CalibrationReader> reader;
  auto status = BuildLoggingInterpreter(
      *model, ops::builtin::BuiltinOpResolver{}, &interpreter, &reader);
  EXPECT_EQ(kTfLiteOk, status);
  ASSERT_TRUE(interpreter);
  ASSERT_TRUE(reader);
  absl::flat_hash_map<std::tuple<int, int>, CalibrationReader::CalibrationStats>
      stats;
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  // No inference has run yet, so there must be no recorded statistics.
  EXPECT_TRUE(stats.empty());
  status = interpreter->AllocateTensors();
  ASSERT_EQ(kTfLiteOk, status);
  const size_t tensor_size = 1;
  for (size_t i = 0; i < interpreter->inputs().size(); i++) {
    int input_tensor_idx = interpreter->inputs()[i];
    TfLiteTensor* tensor = interpreter->tensor(input_tensor_idx);
    ASSERT_EQ(tensor->bytes, tensor_size * sizeof(float));
    for (size_t j = 0; j < tensor_size; j++) {
      tensor->data.f[j] = i + 1;
    }
  }
  status = interpreter->Invoke();
  ASSERT_EQ(kTfLiteOk, status);
  status = reader->GetTensorStatsAsMap(&stats);
  EXPECT_EQ(kTfLiteOk, status);
  EXPECT_EQ(3, stats.size());
  // Expected (min, max) per (subgraph_index, tensor_index) key.
  const float eps = 1e-6f;
  const absl::flat_hash_map<std::tuple<int, int>,
                            CalibrationReader::CalibrationStats>
      expected_calibration_result = {
          {{0, 0}, {1.0, 1.0}},
          {{0, 2}, {2.0, 2.0}},
          {{0, 3}, {2.0, 2.0}}};
  EXPECT_EQ(expected_calibration_result.size(), stats.size());
  for (const auto& e : stats) {
    auto expected_result = expected_calibration_result.find(e.first)->second;
    EXPECT_NEAR(e.second.min, expected_result.min, eps);
    EXPECT_NEAR(e.second.max, expected_result.max, eps);
  }
}
}
}
}
}
// Parses the --test_model_file flag, records the directory containing the
// test models in g_test_model_dir, and runs all gtest cases.
// NOTE: the closing brace of main lives on the following file line and is
// intentionally not part of this span.
int main(int argc, char** argv) {
  tensorflow::string model_file;
  const std::vector<tensorflow::Flag> flag_list = {
      tensorflow::Flag("test_model_file", &model_file,
                       "Path to test tflite model file."),
  };
  const bool parse_result = tensorflow::Flags::Parse(&argc, argv, flag_list);
  // Fail fast when the flag is malformed *or* simply missing; an empty
  // model_file would otherwise make every test fail with an obscure error.
  if (!parse_result || model_file.empty()) {
    std::cerr << "Required test_model_file\n";
    std::abort();
  }
  g_test_model_dir =
      new tensorflow::string(tensorflow::io::Dirname(model_file));
  ::tensorflow::port::InitMain(argv[0], &argc, &argv);
  return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/calibrator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/optimize/calibration/calibrator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ba8d96ad-15f5-4e3a-b66c-9871fe9caa51 | cpp | tensorflow/tensorflow | mfcc | tensorflow/lite/kernels/internal/mfcc.cc | tensorflow/lite/kernels/mfcc_test.cc | #include <math.h>
#include "tensorflow/lite/kernels/internal/mfcc.h"
namespace tflite {
namespace internal {
// Default MFCC configuration applied by the Mfcc constructor below when the
// caller does not override the limits/counts.
const double kDefaultUpperFrequencyLimit = 4000;
const double kDefaultLowerFrequencyLimit = 20;
// Filterbank outputs are clamped to this floor before the log is taken in
// Compute(), so log() is never applied to zero or negative values.
const double kFilterbankFloor = 1e-12;
const int kDefaultFilterbankChannelCount = 40;
const int kDefaultDCTCoefficientCount = 13;
// Constructs an Mfcc with the default frequency limits, filterbank channel
// count and DCT coefficient count. Initialize() must succeed before
// Compute() produces any output (Compute() returns early otherwise).
Mfcc::Mfcc()
    : initialized_(false),
      lower_frequency_limit_(kDefaultLowerFrequencyLimit),
      upper_frequency_limit_(kDefaultUpperFrequencyLimit),
      filterbank_channel_count_(kDefaultFilterbankChannelCount),
      dct_coefficient_count_(kDefaultDCTCoefficientCount) {}
bool Mfcc::Initialize(int input_length, double input_sample_rate) {
bool initialized = mel_filterbank_.Initialize(
input_length, input_sample_rate, filterbank_channel_count_,
lower_frequency_limit_, upper_frequency_limit_);
initialized &=
dct_.Initialize(filterbank_channel_count_, dct_coefficient_count_);
initialized_ = initialized;
return initialized;
}
// Computes MFCCs for one spectrogram frame: mel filterbank, floored log,
// then DCT. Produces nothing if Initialize() has not succeeded.
// Fix: replaced the signed-index loop over working.size() (signed/unsigned
// comparison) with a range-for over the vector.
void Mfcc::Compute(const std::vector<double>& spectrogram_frame,
                   std::vector<double>* output) const {
  if (!initialized_) {
    return;
  }
  std::vector<double> working;
  mel_filterbank_.Compute(spectrogram_frame, &working);
  for (double& val : working) {
    // Floor the filterbank output so log() never sees zero or a negative.
    if (val < kFilterbankFloor) {
      val = kFilterbankFloor;
    }
    val = log(val);
  }
  dct_.Compute(working, output);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/core/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_MFCC();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
// Test harness wiring up a single custom "Mfcc" op with fixed attributes
// (frequency limits 20..4000, 40 filterbank channels, 13 DCT coefficients)
// and exposing its tensor indices for the tests below.
class BaseMfccOpModel : public SingleOpModel {
 public:
  BaseMfccOpModel(const TensorData& input1, const TensorData& input2,
                  const TensorData& output) {
    input1_ = AddInput(input1);
    input2_ = AddInput(input2);
    output_ = AddOutput(output);
    // The Mfcc custom op reads its attributes from a flexbuffer map.
    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      fbb.Int("upper_frequency_limit", 4000);
      fbb.Int("lower_frequency_limit", 20);
      fbb.Int("filterbank_channel_count", 40);
      fbb.Int("dct_coefficient_count", 13);
    });
    fbb.Finish();
    SetCustomOp("Mfcc", fbb.GetBuffer(), Register_MFCC);
    BuildInterpreter({GetShape(input1_), GetShape(input2_)});
  }

  // Tensor index of the spectrogram input.
  int input1() { return input1_; }
  // Tensor index of the sample-rate input.
  int input2() { return input2_; }
  std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
  std::vector<int> GetOutputShape() { return GetTensorShape(output_); }

 protected:
  int input1_;
  int input2_;
  int output_;
};
// Computes MFCCs for one 513-bin spectrogram frame with the sample rate
// supplied as a 1-element tensor, and checks the 13 output coefficients.
// Fix: the fill loop compared a signed int index against data.size()
// (signed/unsigned mismatch); the index is now size_t.
TEST(MfccOpTest, SimpleTest) {
  BaseMfccOpModel m({TensorType_FLOAT32, {1, 1, 513}}, {TensorType_INT32, {1}},
                    {TensorType_FLOAT32, {}});
  // Spectrogram values 1, 2, ..., 513.
  std::vector<float> data(513);
  for (size_t i = 0; i < data.size(); ++i) {
    data[i] = i + 1;
  }
  m.PopulateTensor<float>(m.input1(), 0, data.data(),
                          data.data() + data.size());
  m.PopulateTensor<int>(m.input2(), {22050});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  std::vector<int> output_shape = m.GetOutputShape();
  EXPECT_THAT(output_shape, ElementsAre(1, 1, 13));
  EXPECT_THAT(
      m.GetOutput(),
      ElementsAreArray(ArrayFloatNear(
          {29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878,
           -0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029,
           -0.0769791, -0.10806114, -0.06047613},
          1e-3)));
}
// Same as SimpleTest but passes the sample rate as a scalar (rank-0)
// tensor; the op must accept both shapes and produce identical output.
// Fix: the fill loop compared a signed int index against data.size()
// (signed/unsigned mismatch); the index is now size_t.
TEST(MfccOpTest, ScalarInputRateTest) {
  BaseMfccOpModel m({TensorType_FLOAT32, {1, 1, 513}}, {TensorType_INT32, {}},
                    {TensorType_FLOAT32, {}});
  // Spectrogram values 1, 2, ..., 513.
  std::vector<float> data(513);
  for (size_t i = 0; i < data.size(); ++i) {
    data[i] = i + 1;
  }
  m.PopulateTensor<float>(m.input1(), 0, data.data(),
                          data.data() + data.size());
  m.PopulateTensor<int>(m.input2(), {22050});
  ASSERT_EQ(m.Invoke(), kTfLiteOk);
  std::vector<int> output_shape = m.GetOutputShape();
  EXPECT_THAT(output_shape, ElementsAre(1, 1, 13));
  EXPECT_THAT(
      m.GetOutput(),
      ElementsAreArray(ArrayFloatNear(
          {29.13970072, -6.41568601, -0.61903012, -0.96778652, -0.26819878,
           -0.40907028, -0.15614748, -0.23203119, -0.10481487, -0.1543029,
           -0.0769791, -0.10806114, -0.06047613},
          1e-3)));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/internal/mfcc.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/mfcc_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b1e8462-a99b-4326-a185-6585971938a1 | cpp | tensorflow/tensorflow | constant_fold | tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc | #include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.h"
#include <algorithm>
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.h"
namespace mlir {
namespace TF {
// Size-based folding policy: an op is worth folding only if its operands are
// not huge and its results are either small in absolute terms or not much
// larger than its operands.
static bool IsFoldedByDefaultPolicy(Operation* inst) {
  // Total bit size of all statically-shaped int/float tensor types in
  // `types`; other element types and dynamic shapes contribute nothing.
  auto accumulate_bits = [](TypeRange types) {
    int64_t total_bits = 0;
    for (Type type : types) {
      auto tensor_type = mlir::cast<TensorType>(type);
      if (!tensor_type.getElementType().isIntOrFloat()) continue;
      if (!tensor_type.hasStaticShape()) continue;
      total_bits += tensor_type.getNumElements() *
                    tensor_type.getElementType().getIntOrFloatBitWidth();
    }
    return total_bits;
  };
  const int64_t results_size = accumulate_bits(inst->getResultTypes());
  const int64_t operands_size = accumulate_bits(inst->getOperandTypes());
  constexpr int kSizeFactor = 2;
  constexpr int64_t kResultsSizeThreshold = (1 << 16);
  constexpr int64_t kOperandsSizeThreshold = (1 << 30);
  if (operands_size > kOperandsSizeThreshold) return false;
  return results_size <= kResultsSizeThreshold ||
         results_size <= kSizeFactor * operands_size;
}
// Fallback constant-folding hook for the TF dialect. Attempts to evaluate
// `inst` given constant `operands`; on success fills `results` with the
// folded attributes. Returns failure to leave the op unfolded.
LogicalResult ConstantFoldFallbackHook(
    Operation* inst, ArrayRef<Attribute> operands,
    SmallVectorImpl<OpFoldResult>& results) {
  if (!CanBeFolded(inst)) return failure();
  // Size-based policy: skip folds whose results would be disproportionately
  // large relative to their operands (see IsFoldedByDefaultPolicy).
  if (!IsFoldedByDefaultPolicy(inst)) return failure();
  // Special case: when every result is a statically-shaped int/float tensor
  // with zero elements, no evaluation is needed — emit empty attributes.
  bool has_empty_numerical_results =
      llvm::all_of(inst->getResultTypes(), [](Type ty) {
        ShapedType shaped_ty = mlir::cast<ShapedType>(ty);
        Type element_ty = shaped_ty.getElementType();
        return shaped_ty.hasStaticShape() && shaped_ty.getNumElements() == 0 &&
               element_ty.isIntOrFloat();
      });
  if (has_empty_numerical_results &&
      inst->isRegistered()) {
    for (Type ty : inst->getResultTypes()) {
      auto shaped_ty = mlir::cast<ShapedType>(ty);
      results.push_back(
          DenseElementsAttr::get(shaped_ty, llvm::ArrayRef<Attribute>()));
    }
    return success();
  }
  // Every operand must be a known constant (ElementsAttr) to evaluate.
  if (std::any_of(operands.begin(), operands.end(), [](Attribute attr) {
        return !attr || !mlir::isa<ElementsAttr>(attr);
      }))
    return failure();
  SmallVector<ElementsAttr, 4> inputs;
  inputs.reserve(operands.size());
  for (auto input : operands) {
    inputs.push_back(mlir::cast<ElementsAttr>(input));
  }
  // Delegate evaluation to EvaluateOperation (constant_fold_utils) and hand
  // the computed attributes back as the fold results.
  SmallVector<Attribute> constants;
  LogicalResult status = EvaluateOperation(inst, inputs, constants);
  results.assign(constants.begin(), constants.end());
  return status;
}
// Registers the fallback constant-folding hook with the TF dialect during
// static initialization: the immediately-invoked lambda runs once when this
// translation unit is loaded, and the comma expression yields `true`.
static bool init_hooks = ([] () {
  TensorFlowDialect::RegisterConstantFoldHook(ConstantFoldFallbackHook);
}(), true);
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.h"
#include <utility>
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::NotNull;
using ::testing::SizeIs;
using ConstantFoldingTest = ::mlir::quant::QuantizationTestBase;
// Verifies that ConstantFoldOpIfPossible collapses a chain of ops over
// constant inputs (cast/sub/cast/mul of a large weight) into one tf.Const.
TEST_F(ConstantFoldingTest, FoldLargeConstant) {
  constexpr absl::string_view kModuleCode = R"mlir(
    module {
      func.func @test_fold_constant() -> (tensor<1024x24x24x3xf32>) {
        %zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
        %scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
        %weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
        %input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
        %output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
        %cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
        %mul = "tf.Mul"(%cast, %scale) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
        func.return %mul : tensor<1024x24x24x3xf32>
      }
    }
  )mlir";
  OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
  const auto test_func =
      module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
  ASSERT_THAT(test_func, NotNull());
  Operation* mul_op = FindOperationOfType<TF::MulOp>(test_func);
  SmallVector<Value> results = ConstantFoldOpIfPossible(mul_op);
  EXPECT_THAT(results, SizeIs(1));
  // The whole chain folds down to a single constant.
  EXPECT_TRUE(isa<TF::ConstOp>(results[0].getDefiningOp()));
}
// Verifies that ConstantFoldOpIfPossible does not fold a tf.Mul whose
// operand flows through a tf.Identity: the op must remain a tf.Mul.
TEST_F(ConstantFoldingTest, NotFoldingIdentity) {
  constexpr absl::string_view kModuleCode = R"mlir(
    module {
      func.func @test_fold_constant() -> (tensor<1024x24x24x3xf32>) {
        %zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
        %scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
        %weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
        %input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
        %output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
        %cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
        %identity = "tf.Identity"(%scale) : (tensor<f32>) -> tensor<f32>
        %mul = "tf.Mul"(%cast, %identity) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
        func.return %mul : tensor<1024x24x24x3xf32>
      }
    }
  )mlir";
  OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
  const auto test_func =
      module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
  ASSERT_THAT(test_func, NotNull());
  Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
  SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
  EXPECT_THAT(results, SizeIs(1));
  // The mul must survive, with its lhs still produced by the cast.
  auto mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
  EXPECT_THAT(mul_op, NotNull());
  EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
}
// Verifies that ConstantFoldOpIfPossible does not fold a tf.Mul that
// depends on a function argument (a non-constant operand).
TEST_F(ConstantFoldingTest, NotFoldingArgument) {
  constexpr absl::string_view kModuleCode = R"mlir(
    module {
      func.func @test_fold_constant(%arg0: tensor<f32>) -> (tensor<1024x24x24x3xf32>) {
        %zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
        %weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
        %input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
        %output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
        %cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
        %mul = "tf.Mul"(%cast, %arg0) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
        func.return %mul : tensor<1024x24x24x3xf32>
      }
    }
  )mlir";
  OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
  const auto test_func =
      module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
  ASSERT_THAT(test_func, NotNull());
  Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
  SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
  EXPECT_THAT(results, SizeIs(1));
  // The mul must survive, with its lhs still produced by the cast.
  TF::MulOp mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
  EXPECT_THAT(mul_op, NotNull());
  EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
}
// Verifies that the ConstantFoldQuantizableOperands pattern folds a
// depthwise conv's weight (constant tensor times constant scalar) so the
// conv's filter operand becomes a tf.Const.
TEST_F(ConstantFoldingTest, FoldDepthwiseConvWeight) {
  constexpr absl::string_view kModuleCode = R"mlir(
    module {
      func.func @test_fold_constant(%arg0: tensor<*xf32>) -> (tensor<?x?x?x3xf32>) {
        %cst = "tf.Const"() {value = dense<2.000000e+00> : tensor<2x3x3x1xf32>} : () -> tensor<2x3x3x1xf32>
        %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
        %cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
        %cst_2 = "tf.Const"() {value = dense<3.0> : tensor<f32>} : () -> tensor<f32>
        %w = "tf.Mul"(%cst, %cst_2) : (tensor<2x3x3x1xf32>, tensor<f32>) -> tensor<2x3x3x1xf32>
        %0 = "tf.DepthwiseConv2dNative"(%arg0, %w) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
        %1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
        %2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
        func.return %2 : tensor<?x?x?x3xf32>
      }
    }
  )mlir";
  OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
  const auto test_func =
      module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
  ASSERT_THAT(test_func, NotNull());
  RewritePatternSet patterns(ctx_.get());
  patterns.add<ConstantFoldQuantizableOperands>(ctx_.get());
  EXPECT_TRUE(
      succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns))));
  // After the pattern runs, the conv's filter operand is a constant.
  auto depthwise_conv_op =
      FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func);
  EXPECT_THAT(depthwise_conv_op, NotNull());
  EXPECT_TRUE(isa<TF::ConstOp>(depthwise_conv_op.getFilter().getDefiningOp()));
}
// Verifies that a depthwise conv weight depending on a function argument is
// NOT folded: the filter operand stays a tf.Mul after the pattern runs.
TEST_F(ConstantFoldingTest, DepthwiseConvWeightNotFoldable) {
  constexpr absl::string_view kModuleCode = R"mlir(
    module {
      func.func @test_fold_constant(%arg0: tensor<*xf32>, %arg1: tensor<f32>) -> (tensor<?x?x?x3xf32>) {
        %cst = "tf.Const"() {value = dense<2.000000e+00> : tensor<2x3x3x1xf32>} : () -> tensor<2x3x3x1xf32>
        %cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
        %cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
        %w = "tf.Mul"(%cst, %arg1) : (tensor<2x3x3x1xf32>, tensor<f32>) -> tensor<2x3x3x1xf32>
        %0 = "tf.DepthwiseConv2dNative"(%arg0, %w) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
        %1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
        %2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
        func.return %2 : tensor<?x?x?x3xf32>
      }
    }
  )mlir";
  OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
  const auto test_func =
      module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
  ASSERT_THAT(test_func, NotNull());
  RewritePatternSet patterns(ctx_.get());
  patterns.add<ConstantFoldQuantizableOperands>(ctx_.get());
  EXPECT_TRUE(
      succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns))));
  // The weight depends on %arg1, so the filter must remain the tf.Mul.
  auto depthwise_conv_op =
      FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func);
  EXPECT_THAT(depthwise_conv_op, NotNull());
  EXPECT_TRUE(isa<TF::MulOp>(depthwise_conv_op.getFilter().getDefiningOp()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5bce155-aab7-45a5-b882-7ba31775153e | cpp | tensorflow/tensorflow | shardy_xla_pass | third_party/xla/xla/service/spmd/shardy/shardy_xla_pass.cc | third_party/xla/xla/service/spmd/shardy/shardy_xla_pass_test.cc | #include "xla/service/spmd/shardy/shardy_xla_pass.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "mhlo/transforms/passes.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "shardy/common/file_utils.h"
#include "shardy/dialect/sdy/transforms/propagation/passes.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/layout.h"
#include "xla/map_util.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/computation_layout.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/spmd/shardy/constants.h"
#include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_export.h"
#include "xla/service/spmd/shardy/mhlo_round_trip/mhlo_import.h"
#include "xla/service/spmd/shardy/sdy_round_trip/pipelines.h"
#include "xla/service/spmd/shardy/utils.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_layout.h"
#include "xla/shape_util.h"
#include "xla/tsl/framework/mlir/status_scoped_diagnostic_handler.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace sdy {
namespace {
// Rebuilds `module`'s computations from `proto`: deserializes every
// computation, adds them to `module` (unifying names and ids), swaps in the
// new entry computation, then DCEs the now-unreachable old computations.
absl::Status createFromProtoAndReplaceComputations(
    HloModule* module, const HloModuleProto& proto) {
  absl::flat_hash_map<int64_t, HloComputation*> idToComputation;
  std::vector<std::unique_ptr<HloComputation>> computations;
  HloComputation* entryComputation = nullptr;
  // Deserialize all computations, remembering which one is the entry.
  for (const HloComputationProto& computationProto : proto.computations()) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<HloComputation> computation,
        HloComputation::CreateFromProto(computationProto, idToComputation));
    CHECK_NE(computation.get(), nullptr);
    const int64_t computationId = computationProto.id();
    CHECK_NE(computationId, -1);
    CHECK(!ContainsKey(idToComputation, computationId));
    idToComputation[computationId] = computation.get();
    if (computationId == proto.entry_computation_id()) {
      CHECK_EQ(entryComputation, nullptr);
      entryComputation = computation.get();
    }
    computations.push_back(std::move(computation));
  }
  CHECK_NE(entryComputation, nullptr);
  // Add computations in increasing unique-id order (presumably for
  // deterministic module contents — proto order is not relied upon).
  absl::c_sort(computations, [](const std::unique_ptr<HloComputation>& a,
                                const std::unique_ptr<HloComputation>& b) {
    return a->unique_id() < b->unique_id();
  });
  for (std::unique_ptr<HloComputation>& computation : computations) {
    HloComputation* newComputation =
        module->AddComputationAndUnifyNamesAndIds(std::move(computation),
                                                  false);
    if (newComputation == entryComputation) {
      module->ReplaceEntryComputation(newComputation);
    }
  }
  // Remove the original (now dead) computations.
  CHECK_OK(HloDCE().Run(module));
  return absl::OkStatus();
}
// For each original entry-computation parameter (outer vector index), maps
// each leaf ShapeIndex inside that parameter to its position in the
// flattened parameter list.
using OriginalParamIndexToFlattenedNum =
    std::vector<absl::flat_hash_map<ShapeIndex, int64_t>>;
// Returns the flattened parameter number for leaf `paramIndex` of original
// parameter `paramNumber` (.at() fails if the index is absent).
int64_t getFlattenedParamNumber(
    const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
    int64_t paramNumber, const ShapeIndex& paramIndex) {
  return originalParamIndexToFlattenedNum[paramNumber].at(paramIndex);
}
// Builds the (original parameter number, leaf shape index) -> flattened
// parameter number mapping, numbering leaves consecutively across all
// entry-computation parameters in order.
OriginalParamIndexToFlattenedNum getOriginalParamIndexToFlattenedNum(
    HloModule* hloModule) {
  OriginalParamIndexToFlattenedNum originalParamIndexToFlattened;
  HloComputation* entryComputation = hloModule->entry_computation();
  originalParamIndexToFlattened.reserve(entryComputation->num_parameters());
  // Running counter across ALL parameters, not reset per parameter.
  int64_t paramNumber = 0;
  for (HloInstruction* paramInstruction :
       entryComputation->parameter_instructions()) {
    auto& paramMap = originalParamIndexToFlattened.emplace_back();
    ShapeUtil::ForEachLeafShape(paramInstruction->shape(),
                                [&](const Shape&, const ShapeIndex& index) {
                                  paramMap[index] = paramNumber++;
                                });
  }
  return originalParamIndexToFlattened;
}
// Flattens an arbitrarily nested (tuple) shape into a flat list of its leaf
// shapes: an empty Shape when there are no leaves, a single shape for one
// leaf, or a flat tuple otherwise.
Shape getFlattenedShape(const Shape& shape) {
  std::vector<Shape> leaves;
  ShapeUtil::ForEachLeafShape(
      shape, [&](const Shape& subShape, const ShapeIndex& index) {
        leaves.push_back(subShape);
      });
  return leaves.empty() ? Shape() : ShapeUtil::MakeMaybeTupleShape(leaves);
}
// Flattens every tuple parameter/result of `computationLayout` into leaf
// shapes. When `useTupleArgs` is set, all flattened parameters are gathered
// into a single tuple-shaped parameter instead of individual parameters.
ComputationLayout getFlattenedComputationLayout(
    const ComputationLayout& computationLayout, bool useTupleArgs) {
  // Nothing to flatten when no layout was set.
  if (!computationLayout.AnyLayoutSet()) {
    return computationLayout;
  }
  ComputationLayout flattenedComputationLayout = ComputationLayout(
      ShapeLayout(getFlattenedShape(computationLayout.result_shape())));
  // Only used when useTupleArgs is set: accumulates every leaf parameter
  // shape into one tuple shape.
  Shape tupleShape;
  tupleShape.set_element_type(PrimitiveType::TUPLE);
  for (int64_t i = 0; i != computationLayout.parameter_count(); ++i) {
    ShapeUtil::ForEachLeafShape(
        computationLayout.parameter_shape(i),
        [&](const Shape& subShape, const ShapeIndex& index) {
          if (useTupleArgs) {
            *tupleShape.add_tuple_shapes() = subShape;
          } else {
            flattenedComputationLayout.add_parameter_layout(
                ShapeLayout(subShape));
          }
        });
  }
  if (useTupleArgs) {
    flattenedComputationLayout.add_parameter_layout(ShapeLayout(tupleShape));
  }
  return flattenedComputationLayout;
}
std::pair<int64_t, ShapeIndex> getFlattenedParamNumberAndIndex(
const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
int64_t parameterNumber, const ShapeIndex& parameterIndex,
bool useTupleArgs) {
int64_t flattenedIndex = getFlattenedParamNumber(
originalParamIndexToFlattenedNum, parameterNumber, parameterIndex);
if (useTupleArgs) {
return {0, ShapeIndex{flattenedIndex}};
}
return {flattenedIndex, ShapeIndex()};
}
// Rewrites `inputOutputAliasConfig` in terms of flattened parameters and a
// flattened result shape: each aliased leaf of the original result maps to
// the corresponding element of the flattened (maybe-tuple) result.
HloInputOutputAliasConfig getFlattenedInputOutputAliasConfig(
    const HloInputOutputAliasConfig& inputOutputAliasConfig,
    const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
    bool useTupleArgs) {
  HloInputOutputAliasConfig flattenedInputOutputAliasConfig(
      getFlattenedShape(inputOutputAliasConfig.shape()));
  // Position of the current leaf within the flattened result; incremented
  // for every leaf, aliased or not.
  int64_t resultIndex = 0;
  ShapeUtil::ForEachLeafShape(
      inputOutputAliasConfig.shape(),
      [&](const Shape&, const ShapeIndex& index) {
        if (const std::optional<HloInputOutputAliasConfig::Alias>& alias =
                inputOutputAliasConfig.GetAliasedParameter(index)) {
          auto [paramNumber, paramIndex] = getFlattenedParamNumberAndIndex(
              originalParamIndexToFlattenedNum, alias->parameter_number,
              alias->parameter_index, useTupleArgs);
          // A non-tuple flattened result is addressed with an empty index.
          CHECK_OK(flattenedInputOutputAliasConfig.SetUpAlias(
              flattenedInputOutputAliasConfig.shape().IsTuple()
                  ? ShapeIndex{resultIndex}
                  : ShapeIndex(),
              paramNumber, paramIndex, alias->kind));
        }
        ++resultIndex;
      });
  return flattenedInputOutputAliasConfig;
}
// Re-registers every buffer donor from `bufferDonorsConfig` against its
// flattened parameter coordinates.
HloBufferDonorConfig getFlattenedBufferDonorsConfig(
    const HloBufferDonorConfig& bufferDonorsConfig,
    const OriginalParamIndexToFlattenedNum& originalParamIndexToFlattenedNum,
    bool useTupleArgs) {
  HloBufferDonorConfig flattened;
  for (const HloBufferDonorConfig::BufferDonor& donor :
       bufferDonorsConfig.buffer_donor()) {
    const auto [paramNumber, paramIndex] = getFlattenedParamNumberAndIndex(
        originalParamIndexToFlattenedNum, donor.param_number,
        donor.param_index, useTupleArgs);
    CHECK_OK(flattened.AddBufferDonor(paramNumber, paramIndex));
  }
  return flattened;
}
// Erases the given keys from the module's frontend-attributes map.
// FrontendAttributes is a proto value: mutate a copy, then write it back.
void removeFrontendAttributes(HloModule* hloModule,
                              mlir::ArrayRef<mlir::StringRef> attributeNames) {
  FrontendAttributes attrs = hloModule->frontend_attributes();
  for (const auto& name : attributeNames) {
    attrs.mutable_map()->erase(name);
  }
  hloModule->set_frontend_attributes(attrs);
}
}
// Runs Shardy (SDY) SPMD sharding propagation over `hloModule` by
// round-tripping through MLIR:
//   HLO -> MLIR (MHLO) -> [import shardings] -> SDY propagation ->
//   [export shardings] -> HLO proto -> rebuilt HLO module.
// The round trip flattens tuple parameters/results, so the entry computation
// layout, input/output aliasing and buffer-donor configs are captured in
// flattened form up front and re-applied after the module is rebuilt.
// `executionThreads` is unused. Always returns true (module is treated as
// changed).
absl::StatusOr<bool> ShardyXLA::Run(
    HloModule* hloModule,
    const absl::flat_hash_set<absl::string_view>& executionThreads) {
  LOG(INFO) << "Using Shardy for XLA SPMD propagation.";
  // Build an MLIR module carrying the HLO computation; all dialects the
  // pipelines below need are loaded into the fresh context.
  auto mlirContext = std::make_unique<mlir::MLIRContext>();
  loadAllRequiredDialects(mlirContext.get());
  mlir::OwningOpRef<mlir::ModuleOp> mlirModule =
      xla::llvm_ir::CreateMlirModuleOp(
          mlir::UnknownLoc::get(mlirContext.get()));
  TF_RETURN_IF_ERROR(
      ConvertHloToMlirHlo(*mlirModule, hloModule,
                          false,
                          true));
  // Resolve the directory used for dumping intermediate SDY modules.
  // "sponge" is the convention for "redirect into the test's undeclared
  // outputs directory".
  std::string shardyDir = hloModule->config().debug_options().xla_dump_to();
  if (shardyDir == "sponge") {
    // BUGFIX: getenv returns nullptr when the variable is unset; assigning a
    // null char* to std::string is undefined behavior. Map null to "" so the
    // empty() check below works as intended.
    const char* undeclaredOutputsDir = getenv("TEST_UNDECLARED_OUTPUTS_DIR");
    shardyDir = undeclaredOutputsDir == nullptr ? "" : undeclaredOutputsDir;
    if (shardyDir.empty()) {
      LOG(WARNING) << "\"sponge\" specified as dump directory but "
                      "TEST_UNDECLARED_OUTPUTS_DIR is not set!";
    }
  }
  if (!shardyDir.empty()) {
    shardyDir =
        tsl::io::JoinPath(shardyDir, "shardy",
                          std::string_view(mlirModule->getName().value_or("")));
    LOG(INFO) << "Using Shardy output directory: " << shardyDir;
  }
  // Only verify between passes in debug builds; verification is expensive.
  bool enableVerifier = false;
#ifndef NDEBUG
  enableVerifier = true;
#endif
  mlir::PassManager pm(mlirContext.get());
  pm.enableVerifier(enableVerifier);
  pm.addPass(mlir::sdy::createSaveModuleOpPass(shardyDir,
                                               "sdy_module_before_xla_import"))
      ;
  // Consume one-shot frontend attributes set by the Python integration.
  bool useTupleArgs = false;
  mlir::DictionaryAttr moduleFrontendAttrs = getFrontendAttrs(*mlirModule);
  if (moduleFrontendAttrs && moduleFrontendAttrs.get(kUseTupleArgs)) {
    useTupleArgs = true;
    removeFrontendAttribute(*mlirModule, kUseTupleArgs);
  }
  if (moduleFrontendAttrs &&
      moduleFrontendAttrs.get(kPythonIntegrationComplete)) {
    // SDY attributes were already embedded upstream: use the round-trip
    // import pipeline instead of importing from MHLO shardings.
    removeFrontendAttribute(*mlirModule, kPythonIntegrationComplete);
    addSdyRoundTripImportPipeline(pm);
  } else {
    auto spanToArrayRef = [](absl::Span<const bool> span) {
      return mlir::ArrayRef<bool>(span.data(), span.size());
    };
    addMhloImportPipeline(
        pm,
        spanToArrayRef(hloModule->config()
                           .allow_spmd_sharding_propagation_to_parameters()),
        spanToArrayRef(
            hloModule->config().allow_spmd_sharding_propagation_to_output()));
  }
  // Capture the flattened layout/alias/donor configs now; they are re-applied
  // after the module is rebuilt from the round-tripped proto below.
  ComputationLayout flattenedEntryComputationLayout =
      getFlattenedComputationLayout(hloModule->entry_computation_layout(),
                                    useTupleArgs);
  OriginalParamIndexToFlattenedNum originalParamIndexToFlattenedNum =
      getOriginalParamIndexToFlattenedNum(hloModule);
  HloInputOutputAliasConfig flattenedInputOutputAliasConfig =
      getFlattenedInputOutputAliasConfig(hloModule->input_output_alias_config(),
                                         originalParamIndexToFlattenedNum,
                                         useTupleArgs);
  HloBufferDonorConfig flattenedBufferDonorsConfig =
      getFlattenedBufferDonorsConfig(hloModule->buffer_donor_config(),
                                     originalParamIndexToFlattenedNum,
                                     useTupleArgs);
  // Propagation proper runs on StableHLO; when disabled only the import and
  // export pipelines run.
  if (runSdyShardingPropagation) {
    pm.addPass(mlir::mhlo::createHloLegalizeToStablehloPass());
    mlir::sdy::addPropagationPipeline(
        pm, shardyDir,
        hloModule->use_auto_spmd_partitioning());
    pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
  }
  addMhloExportPipeline(pm);
  pm.addPass(mlir::sdy::createSaveModuleOpPass(shardyDir,
                                               "sdy_module_after_xla_export"));
  tsl::StatusScopedDiagnosticHandler diagnosticHandler(mlirContext.get());
  TF_RETURN_IF_ERROR(diagnosticHandler.consumeStatus(pm.run(*mlirModule)));
  // Convert back to HLO and swap the computations into the original module.
  HloProto hloProto;
  TF_RETURN_IF_ERROR(ConvertMlirHloToHlo(*mlirModule, &hloProto, useTupleArgs,
                                         false));
  TF_RETURN_IF_ERROR(
      createFromProtoAndReplaceComputations(hloModule, hloProto.hlo_module()));
  // Clean up trivial tuple/get-tuple-element patterns left by the round trip.
  CHECK_OK(TupleSimplifier().Run(hloModule));
  *hloModule->mutable_entry_computation_layout() =
      flattenedEntryComputationLayout;
  hloModule->set_input_output_alias_config(
      std::move(flattenedInputOutputAliasConfig));
  hloModule->set_buffer_donor_config(std::move(flattenedBufferDonorsConfig));
  TF_RETURN_IF_ERROR(
      hlo_sharding_util::CanonicalizeLayoutAfterShardingPropagation(
          hloModule, true,
          true));
  // Drop the pass-internal frontend attributes so they don't leak downstream.
  removeFrontendAttributes(
      hloModule,
      {kUseTupleArgs, kPythonIntegrationComplete, kMeshesRoundTripAttr});
  return true;
}
}
} | #include "xla/service/spmd/shardy/shardy_xla_pass.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace sdy {
using ShardyXLATest = xla::HloTestBase;
// Verifies that allow_spmd_sharding_propagation_to_parameters/_output are
// respected: parameter 0 (propagation disallowed) keeps {replicated}, while
// parameter 1 and the root receive propagated shardings.
TEST_F(ShardyXLATest, AllowSpmdShardingPropagationParametersOutputRespected) {
  const char* const hloString = R"(
HloModule module, allow_spmd_sharding_propagation_to_parameters={false,true}, allow_spmd_sharding_propagation_to_output={true}
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0), sharding={replicated}
%p1 = f32[8,128,512] parameter(1), sharding={devices=[2,1,1,4]<=[8] last_tile_dim_replicate}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}, sharding={devices=[2,2,2]<=[8]}
ROOT %copy = f32[8,256,128] copy(%dot), sharding={replicated}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{replicated}"));
  EXPECT_THAT(
      module->entry_computation()->parameter_instruction(1),
      op::Sharding(
          "{devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,2,2]<=[8]}"));
}
// Verifies that the sharding on an element-wise add propagates to both
// parameters and the root, that op metadata survives the round trip, and
// that the intermediate copies are removed.
TEST_F(ShardyXLATest, ElementWise) {
  const char* const hloString = R"(
HloModule module
ENTRY %entry {
p0 = f32[6,3] parameter(0)
p1 = f32[6,3] parameter(1)
copy.p0 = f32[6,3] copy(p0)
copy.p1 = f32[6,3] copy(p1)
add = f32[6,3] add(copy.p0, copy.p1), sharding={devices=[2,1]<=[2]}, metadata={op_name="simple_example/add" source_file="source.txt" source_line=42}
ROOT result = f32[6,3] copy(add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  HloInstruction* add = FindInstruction(module.get(), xla::HloOpcode::kAdd);
  EXPECT_NE(add, nullptr);
  EXPECT_THAT(add, op::Sharding("{devices=[2,1]<=[2]}"));
  // Metadata must be preserved across the MLIR round trip.
  EXPECT_EQ(add->metadata().op_name(), "simple_example/add");
  EXPECT_EQ(add->metadata().source_file(), "source.txt");
  EXPECT_EQ(add->metadata().source_line(), 42);
  for (HloInstruction* param :
       module->entry_computation()->parameter_instructions()) {
    EXPECT_THAT(param, op::Sharding("{devices=[2,1]<=[2]}"));
  }
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,1]<=[2]}"));
  // The intermediate copies should have been optimized away.
  auto* copy = FindInstruction(module.get(), xla::HloOpcode::kCopy);
  EXPECT_EQ(copy, nullptr);
}
// Verifies that a constant feeding a dot through a broadcast is split so each
// use can be sharded independently: after the pass the dot's operands are two
// distinct broadcasts of two distinct constants with different shardings.
// (Test name typo fixed: Costant -> Constant.)
TEST_F(ShardyXLATest, ConstantSplitter) {
  const char* const hloString = R"(
HloModule module
ENTRY %constant_splitter {
%p0 = f32[8,8] parameter(0), sharding={devices=[2,2]<=[4]}
%constant = f32[] constant(3.14)
%broadcast = f32[8,16] broadcast(%constant), dimensions={}
%dot = f32[8,8] dot(%broadcast, %broadcast),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %add = f32[8,8] add(%p0, %dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  HloInstruction* dot = FindInstruction(module.get(), xla::HloOpcode::kDot);
  // Both dot operands must be separate broadcast instances.
  EXPECT_EQ(dot->operand_count(), 2);
  EXPECT_EQ(dot->operand(0)->opcode(), HloOpcode::kBroadcast);
  EXPECT_EQ(dot->operand(1)->opcode(), HloOpcode::kBroadcast);
  EXPECT_NE(dot->operand(0), dot->operand(1));
  EXPECT_THAT(dot->operand(0),
              op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}"));
  EXPECT_THAT(
      dot->operand(1),
      op::Sharding("{devices=[2,1,2]<=[2,2]T(1,0) last_tile_dim_replicate}"));
  // ... each fed by its own (split) constant.
  EXPECT_EQ(dot->operand(0)->operand(0)->opcode(), HloOpcode::kConstant);
  EXPECT_EQ(dot->operand(1)->operand(0)->opcode(), HloOpcode::kConstant);
  EXPECT_NE(dot->operand(0)->operand(0), dot->operand(1)->operand(0));
}
// Verifies propagation through several dot variants (batch + contracting
// dims, vector rhs); only %dot3 carries an explicit sharding seed, and the
// parameters/root receive the propagated shardings.
TEST_F(ShardyXLATest, Dot) {
  const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,128] parameter(0)
%p1 = f32[8,128,512] parameter(1)
%p2 = f32[8,128] parameter(2)
%dot0 = f32[8,512,256] dot(%p1, %p0),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={2}
%dot1 = f32[8,256,512] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%dot2 = f32[8,256] dot(%p0, %p2),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%dot3 = f32[8,256,512] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1},
sharding={devices=[2,2,2]<=[8]}
ROOT %tuple = (f32[8,512,256], f32[8,256,512], f32[8,256], f32[8,256,512])
tuple(%dot0, %dot1, %dot2, %dot3)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,2,1,2]<=[8] last_tile_dim_replicate}"));
  EXPECT_THAT(
      module->entry_computation()->parameter_instruction(1),
      op::Sharding(
          "{devices=[2,1,2,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(2),
              op::Sharding("{devices=[2,1,4]<=[8] last_tile_dim_replicate}"));
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Sharding("{{devices=[2,2,2]<=[2,2,2]T(0,2,1)}, "
                   "{devices=[2,2,2]<=[8]}, {devices=[2,2,2]<=[8] "
                   "last_tile_dim_replicate}, {devices=[2,2,2]<=[8]}}"));
}
// Verifies backward propagation: the sharding seeded on the reshape result
// flows back through the dot and add to both parameters.
TEST_F(ShardyXLATest, DotTiledBatchDim) {
  const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0)
%p1 = f32[8,512,128] parameter(1)
%add = f32[8,256,512] add(%p0, %p0)
%dot = f32[8,256,128] dot(%add, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
%res = f32[8,32768] reshape(%dot), sharding={devices=[2,2]<=[4]}
ROOT %tuple = (f32[8,32768]) tuple(%res)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,2,1]<=[4]}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[2,1,1,2]<=[4] last_tile_dim_replicate}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,2]<=[4]}"));
}
// Verifies that partially-replicated shardings on both dot operands merge
// into a fully tiled sharding on the dot result; parameters are unchanged.
TEST_F(ShardyXLATest, DotMergeOperands1) {
  const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0),
sharding={devices=[2,2,1,2]<=[8] last_tile_dim_replicate}
%p1 = f32[8,128,512] parameter(1),
sharding={devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,2,1,2]<=[8] last_tile_dim_replicate}"));
  EXPECT_THAT(
      module->entry_computation()->parameter_instruction(1),
      op::Sharding(
          "{devices=[2,2,1,2]<=[2,2,2]T(0,2,1) last_tile_dim_replicate}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,2,2]<=[8]}"));
}
// Verifies merging when both operands are fully tiled identically: the dot
// result keeps only the batch-dim tiling and replicates the rest.
TEST_F(ShardyXLATest, DotMergeOperands2) {
  const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]<=[8]}
%p1 = f32[8,128,512] parameter(1), sharding={devices=[2,2,2]<=[8]}
%dot = f32[8,256,128] dot(%p0, %p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,2,2]<=[8]}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[2,2,2]<=[8]}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,1,1,4]<=[8] last_tile_dim_replicate}"));
}
// Verifies merging for a 2D dot with conflicting contracting-dim shardings:
// the result picks up a transposed device assignment.
TEST_F(ShardyXLATest, DotMergeOperands3) {
  const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[256,512] parameter(0), sharding={devices=[2,4]<=[8]}
%p1 = f32[128,512] parameter(1), sharding={devices=[4,2]<=[2,2,2]T(2,1,0)}
%dot = f32[256,128] dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
ROOT %copy = f32[256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,4]<=[8]}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[4,2]<=[2,2,2]T(2,1,0)}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,4]<=[2,2,2]T(0,2,1)}"));
}
// Verifies backward propagation from a sharded dot result through the
// contracting dimension onto the second (initially unsharded) parameter.
TEST_F(ShardyXLATest, BackwardDotFromContracting) {
  const char* const hloString = R"(
HloModule module
ENTRY %conv {
%p0 = f32[8,256,512] parameter(0), sharding={devices=[2,2,2]<=[8]}
%p1 = f32[8,128,512] parameter(1)
%copy1 = f32[8,128,512] copy(%p1)
%dot = f32[8,256,128] dot(%p0, %copy1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2},
sharding={devices=[2,1,2,2]<=[8] last_tile_dim_replicate}
ROOT %copy = f32[8,256,128] copy(%dot)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,2,2]<=[8]}"));
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[2,2,2]<=[8]}"));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{devices=[2,1,2,2]<=[8] last_tile_dim_replicate}"));
}
// Verifies the (tiled) entry computation layout survives the MLIR round trip
// unchanged for a single non-tuple result.
TEST_F(ShardyXLATest, EntryComputationLayoutSingleResult) {
  const char* const hloString = R"(
HloModule module, entry_computation_layout={(f32[3,8,32,4]{2,1,3,0:T(8,128)},f32[3,8,32,4]{2,1,3,0:T(8,128)})->f32[3,8,32,4]{2,1,3,0:T(8,128)}}
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%copy.p0 = f32[3,8,32,4] copy(%p0)
%copy.p1 = f32[3,8,32,4] copy(%p1)
%add = f32[3,8,32,4] add(%copy.p0, %copy.p1), sharding={devices=[2,1,1,1]<=[2]}, metadata={op_name="simple_example/add" source_file="source.txt" source_line=42}
ROOT %result = f32[3,8,32,4] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(
      module->entry_computation_layout().ToString(),
      "(f32[3,8,32,4]{2,1,3,0:T(8,128)}, "
      "f32[3,8,32,4]{2,1,3,0:T(8,128)})->f32[3,8,32,4]{2,1,3,0:T(8,128)}");
}
// Verifies that nested tuple parameters/results in the entry layout are
// flattened into individual leaves while preserving each leaf's layout.
TEST_F(ShardyXLATest, EntryComputationLayoutNestedTuple) {
  const char* const hloString = R"(
HloModule module, entry_computation_layout={((f32[4,2]{0,1:T(2,128)},(f32[4,2]{0,1:T(2,128)},f32[4,2]{0,1:T(2,128)})),f32[4,2]{0,1:T(2,128)})->((f32[4,2]{0,1:T(2,128)},(f32[4,2]{0,1:T(2,128)},f32[4,2]{0,1:T(2,128)})),f32[4,2]{0,1:T(2,128)})}
ENTRY %main {
%p0 = (f32[4,2], (f32[4,2], f32[4,2])) parameter(0)
%p1 = f32[4,2] parameter(1)
ROOT %result = ((f32[4,2], (f32[4,2], f32[4,2])), f32[4,2]) tuple(%p0, %p1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->entry_computation_layout().ToString(),
            "(f32[4,2]{0,1:T(2,128)}, f32[4,2]{0,1:T(2,128)}, "
            "f32[4,2]{0,1:T(2,128)}, "
            "f32[4,2]{0,1:T(2,128)})->(f32[4,2]{0,1:T(2,128)}, "
            "f32[4,2]{0,1:T(2,128)}, f32[4,2]{0,1:T(2,128)}, "
            "f32[4,2]{0,1:T(2,128)})");
}
// Verifies that entries without an explicit layout get the default
// (descending major-to-minor) layout, while explicit layouts are kept.
TEST_F(ShardyXLATest, EntryComputationLayoutMissingLayout) {
  const char* const hloString = R"(
HloModule module, entry_computation_layout={(f32[3,8,32,4]{2,1,3,0:T(8,128)},f32[3,8,32,4])->f32[3,8,32,4]}
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%copy.p0 = f32[3,8,32,4] copy(%p0)
%copy.p1 = f32[3,8,32,4] copy(%p1)
%add = f32[3,8,32,4] add(%copy.p0, %copy.p1), sharding={devices=[2,1,1,1]<=[2]}, metadata={op_name="simple_example/add" source_file="source.txt" source_line=42}
ROOT %result = f32[3,8,32,4] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->entry_computation_layout().ToString(),
            "(f32[3,8,32,4]{2,1,3,0:T(8,128)}, "
            "f32[3,8,32,4]{3,2,1,0})->f32[3,8,32,4]{3,2,1,0}");
}
// Verifies a simple (non-tuple) input/output alias is preserved verbatim.
TEST_F(ShardyXLATest, InputOutputAliasConfigSingleResult) {
  const char* const hloString = R"(
HloModule module, input_output_alias={ {}: (1, {}, may-alias) }
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%add = f32[3,8,32,4] add(%p0, %p1)
ROOT %result = f32[3,8,32,4] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->input_output_alias_config().ToShortString(),
            "{}: (1, {}, may-alias)");
}
// Verifies that an alias pointing into a tuple parameter is remapped to the
// flattened parameter number ((0, {1}) becomes parameter 1).
TEST_F(ShardyXLATest, InputOutputAliasConfigSingleResultNestedParams) {
  const char* const hloString = R"(
HloModule module, input_output_alias={ {}: (0, {1}, may-alias) }
ENTRY %entry {
%p0 = (f32[4,2], f32[4,2]) parameter(0)
%get-tuple-element.0 = f32[4,2] get-tuple-element((f32[4,2], f32[4,2]) %p0), index=0
%get-tuple-element.1 = f32[4,2] get-tuple-element((f32[4,2], f32[4,2]) %p0), index=1
%add = f32[4,2] add(%get-tuple-element.0, %get-tuple-element.1)
ROOT %result = f32[4,2] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->input_output_alias_config().ToShortString(),
            "{}: (1, {}, may-alias)");
}
// Verifies remapping when both sides are nested tuples: aliases into nested
// params/results are rewritten in terms of flattened leaf indices.
TEST_F(ShardyXLATest, InputOutputAliasConfigNestedResultAndParams) {
  const char* const hloString = R"(
HloModule module, input_output_alias={ {0, 1, 0}: (0, {1, 0}, may-alias), {1}: (1, {}, may-alias) }
ENTRY %main {
%p0 = (f32[4,2], (f32[4,2], f32[4,2])) parameter(0)
%p1 = f32[4,2] parameter(1)
ROOT %result = ((f32[4,2], (f32[4,2], f32[4,2])), f32[4,2]) tuple(%p0, %p1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->input_output_alias_config().ToShortString(),
            "{1}: (1, {}, may-alias), {3}: (3, {}, may-alias)");
}
// Verifies a flat buffer-donor entry is preserved verbatim.
TEST_F(ShardyXLATest, BufferDonorConfigSingleResult) {
  const char* const hloString = R"(
HloModule module, buffer_donor={ (1, {}) }
ENTRY %entry {
%p0 = f32[3,8,32,4] parameter(0)
%p1 = f32[3,8,32,4] parameter(1)
%add = f32[3,8,32,4] add(%p0, %p1)
ROOT %result = f32[3,8,32,4] copy(%add)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->buffer_donor_config().ToShortString(), "(1, {})");
}
// Verifies buffer-donor entries pointing into a nested tuple parameter are
// remapped to the flattened parameter numbers.
TEST_F(ShardyXLATest, BufferDonorConfigNestedTuple) {
  const char* const hloString = R"(
HloModule module, buffer_donor={ (0, {0}), (0, {1, 1}) }
ENTRY %main {
%p0 = (f32[4,2], (f32[4,2], f32[4,2])) parameter(0)
%p1 = f32[4,2] parameter(1)
ROOT %result = ((f32[4,2], (f32[4,2], f32[4,2])), f32[4,2]) tuple(%p0, %p1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->buffer_donor_config().ToShortString(), "(0, {}), (2, {})");
}
// Verifies that a "Sharding" custom-call is lowered to a copy while the
// parameter keeps its explicit sharding.
TEST_F(ShardyXLATest, ShardingCustomCall) {
  const char* const hloString = R"(
HloModule module
ENTRY %main {
%p0 = f32[8,8] parameter(0), sharding={devices=[2,1]<=[2]}
%annotate = f32[8,8] custom-call(%p0), custom_call_target="Sharding",
sharding={devices=[1,2]<=[2]}
ROOT %add = f32[8,8] add(%p0, %annotate)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(0),
              op::Sharding("{devices=[2,1]<=[2]}"));
  // The Sharding custom-call should have become a copy feeding the add.
  EXPECT_THAT(module->entry_computation()->root_instruction()->operand(1),
              op::Copy());
}
// Verifies that the explicit shardings on rng-bit-generator results (single
// and tuple-shaped) survive propagation through to the root tuple.
TEST_F(ShardyXLATest, RngBitGenerator) {
  const char* const hloString = R"(
HloModule module
ENTRY main {
state.1 = u64[8]{0} parameter(0), sharding={devices=[8,4]<=[32] last_tile_dim_replicate}
state.2 = u64[8]{0} add(state.1, state.1), sharding={devices=[2,16]<=[32] last_tile_dim_replicate}
rng.1 = u32[512,256] rng-bit-generator(state.1), algorithm=rng_default, sharding={devices=[16,2]<=[32]}
rng.2 = (u64[8]{0}, u32[512,256]) rng-bit-generator(state.2), algorithm=rng_default, sharding={{devices=[4,8]<=[32] last_tile_dim_replicate}, {devices=[8,4]<=[32]}}
gte = u32[512,256] get-tuple-element(rng.2), index=1
ROOT tuple = (u32[512,256], u32[512,256]) tuple(rng.1, gte)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Sharding("{{devices=[16,2]<=[32]}, {devices=[8,4]<=[32]}}"));
}
// Verifies propagation through a while loop whose body uses loop-carried
// values and free variables: parameter shardings are kept, and the while's
// tuple sharding carries the propagated per-element shardings.
TEST_F(ShardyXLATest, WhileWithFreeVariables) {
  const char* const hloString = R"(
HloModule main, entry_computation_layout={(f32[32,96]{1,0}, f32[32,96]{1,0})->f32[32,96]{1,0}}
%region_0.7 (arg_tuple.8: (f32[32,96], s32[], s32[], s32[], f32[32,96])) -> (f32[32,96], s32[], s32[], s32[], f32[32,96]) {
%arg_tuple.8 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) parameter(0)
%get-tuple-element.9 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=0
%get-tuple-element.13 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=4
%add.15 = f32[32,96]{1,0} add(f32[32,96]{1,0} %get-tuple-element.9, f32[32,96]{1,0} %get-tuple-element.13), metadata={source_file="-" source_line=25}
%get-tuple-element.10 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=1
%get-tuple-element.12 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=3
%add.14 = s32[] add(s32[] %get-tuple-element.10, s32[] %get-tuple-element.12), metadata={source_file="-" source_line=24}
%get-tuple-element.11 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.8), index=2
ROOT %tuple.16 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) tuple(f32[32,96]{1,0} %add.15, s32[] %add.14, s32[] %get-tuple-element.11, s32[] %get-tuple-element.12, f32[32,96]{1,0} %get-tuple-element.13)
}
%region_1.17 (arg_tuple.18: (f32[32,96], s32[], s32[], s32[], f32[32,96])) -> pred[] {
%arg_tuple.18 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) parameter(0)
%get-tuple-element.19 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=0
%get-tuple-element.22 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=3
%get-tuple-element.23 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=4
%get-tuple-element.20 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=1
%get-tuple-element.21 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %arg_tuple.18), index=2
ROOT %compare.24 = pred[] compare(s32[] %get-tuple-element.20, s32[] %get-tuple-element.21), direction=LT, metadata={source_file="-" source_line=21}
}
ENTRY %main.30 (Arg_0.1: f32[32,96], Arg_1.2: f32[32,96]) -> f32[32,96] {
%Arg_0.1 = f32[32,96]{1,0} parameter(0), sharding={devices=[2,2]<=[4]}
%constant.3 = s32[] constant(0)
%constant.5 = s32[] constant(32)
%constant.4 = s32[] constant(1)
%Arg_1.2 = f32[32,96]{1,0} parameter(1), sharding={devices=[2,1,2]<=[4] last_tile_dim_replicate}
%tuple.6 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) tuple(f32[32,96]{1,0} %Arg_0.1, s32[] %constant.3, s32[] %constant.5, s32[] %constant.4, f32[32,96]{1,0} %Arg_1.2), metadata={source_file="-" source_line=19}
%while.25 = (f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) while((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %tuple.6), condition=%region_1.17, body=%region_0.7, metadata={source_file="-" source_line=19}
%get-tuple-element.27 = s32[] get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %while.25), index=1, metadata={source_file="-" source_line=19}
%get-tuple-element.26 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}, s32[], s32[], s32[], f32[32,96]{1,0}) %while.25), index=0, metadata={source_file="-" source_line=19}
%tuple.28 = (f32[32,96]{1,0}) tuple(f32[32,96]{1,0} %get-tuple-element.26)
ROOT %get-tuple-element.29 = f32[32,96]{1,0} get-tuple-element((f32[32,96]{1,0}) %tuple.28), index=0
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  HloInstruction* whileInst =
      FindInstruction(module.get(), xla::HloOpcode::kWhile);
  EXPECT_NE(whileInst, nullptr);
  EXPECT_THAT(module->entry_computation()->parameter_instruction(1),
              op::Sharding("{devices=[2,1,2]<=[4] last_tile_dim_replicate}"));
  EXPECT_THAT(whileInst,
              op::Sharding("{{devices=[2,2]<=[4]}, {replicated}, {replicated}, "
                           "{devices=[2,2]<=[4]}, {replicated}}"));
}
// Verifies a JAX-style shard_map pattern (Sharding / SPMDFullToShardShape /
// manual call / SPMDShardToFullShape): the body call is inlined (no kCall
// remains, two computations total), the inner dot stays manually sharded,
// and the root keeps the SPMDShardToFullShape sharding.
TEST_F(ShardyXLATest, ShardMap) {
  const char* const hloString = R"(
HloModule shard_map
region_add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
shmap_body.11 {
Arg_0.12 = f32[2,8] parameter(0)
add.14 = f32[2,8] add(Arg_0.12, Arg_0.12)
Arg_1.13 = f32[8,32] parameter(1)
dot.15 = f32[2,32] dot(add.14, Arg_1.13), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT all-reduce.16 = f32[2,32] all-reduce(dot.15), channel_id=1, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_add
}
ENTRY main {
p0 = f32[8,16] parameter(0)
custom-call.3 = f32[8,16] custom-call(p0), custom_call_target="Sharding", sharding={devices=[4,2]<=[8]}
custom-call.4 = f32[2,8] custom-call(custom-call.3), custom_call_target="SPMDFullToShardShape", sharding={manual}
p1 = f32[16,32] parameter(1)
custom-call.5 = f32[16,32] custom-call(p1), custom_call_target="Sharding", sharding={devices=[2,1,4]<=[4,2]T(1,0) last_tile_dim_replicate}
custom-call.6 = f32[8,32] custom-call(custom-call.5), custom_call_target="SPMDFullToShardShape", sharding={manual}
call.17 = f32[2,32] call(custom-call.4, custom-call.6), to_apply=shmap_body.11
custom-call.18 = f32[2,32] custom-call(call.17), custom_call_target="Sharding", sharding={manual}
ROOT custom-call.19 = f32[8,32] custom-call(custom-call.18), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,2]<=[8] last_tile_dim_replicate}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->computation_count(), 2);
  EXPECT_EQ(FindInstruction(module.get(), xla::HloOpcode::kCall), nullptr);
  auto* dot = FindInstruction(module.get(), xla::HloOpcode::kDot);
  EXPECT_NE(dot, nullptr);
  EXPECT_TRUE(dot->has_sharding());
  EXPECT_TRUE(dot->sharding().IsManual());
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), xla::HloOpcode::kCustomCall);
  EXPECT_EQ(root->custom_call_target(), "SPMDShardToFullShape");
  EXPECT_THAT(root,
              op::Sharding("{devices=[4,1,2]<=[8] last_tile_dim_replicate}"));
}
// Verifies the pass handles an empty (no params, empty tuple result) module:
// it still reports changed, with empty layout and alias config.
TEST_F(ShardyXLATest, EmptyModule) {
  const char* const hloString = R"(
HloModule pjit_f, entry_computation_layout={()->()}, num_partitions=2
ENTRY %main.2 () -> () {
ROOT %tuple.1 = () tuple()
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->entry_computation_layout().ToString(), "()->()");
  EXPECT_EQ(module->input_output_alias_config().ToShortString(), "");
}
// Verifies the xla.sdy.use_tuple_args frontend attribute: the three
// parameters are packed into a single tuple parameter, and the buffer-donor,
// alias, and entry-layout configs are remapped into the tuple indices.
TEST_F(ShardyXLATest, TestUseTuplesTrue) {
  const char* const hloString = R"(
HloModule pjit_f, buffer_donor={ (1, {}) }, input_output_alias={ {}: (2, {}, must-alias) }, entry_computation_layout={(f32[8,16]{1,0:T(8,128)}, f32[16,32]{1,0:T(8,128)}, f32[8,32]{1,0:T(8,128)})->f32[8,32]{1,0:T(8,128)}}, allow_spmd_sharding_propagation_to_parameters={false,false,false}, num_partitions=8, frontend_attributes={xla.sdy.use_tuple_args="t"}
ENTRY %main.7 (Arg_0.1: f32[8,16], Arg_1.2: f32[16,32], Arg_2.3: f32[8,32]) -> f32[8,32] {
%Arg_0.1 = f32[8,16]{1,0} parameter(0)
%Arg_1.2 = f32[16,32]{1,0} parameter(1)
%dot.4 = f32[8,32]{1,0} dot(f32[8,16]{1,0} %Arg_0.1, f32[16,32]{1,0} %Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
%Arg_2.3 = f32[8,32]{1,0} parameter(2)
ROOT %add.5 = f32[8,32]{1,0} add(f32[8,32]{1,0} %dot.4, f32[8,32]{1,0} %Arg_2.3)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
                          ParseAndReturnVerifiedModule(hloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, ShardyXLA().Run(module.get()));
  VLOG(1) << module->ToString();
  EXPECT_TRUE(changed);
  EXPECT_EQ(module->entry_computation()->parameter_instructions().size(), 1);
  EXPECT_EQ(module->buffer_donor_config().ToShortString(), "(0, {1})");
  EXPECT_EQ(module->input_output_alias_config().ToShortString(),
            "{}: (0, {2}, must-alias)");
  EXPECT_EQ(module->entry_computation_layout().ToString(),
            "((f32[8,16]{1,0:T(8,128)}, f32[16,32]{1,0:T(8,128)}, "
            "f32[8,32]{1,0:T(8,128)}))->f32[8,32]{1,0:T(8,128)}");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/shardy_xla_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/spmd/shardy/shardy_xla_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4247af5c-3ab6-4144-ac73-c851293eeb69 | cpp | tensorflow/tensorflow | arg_ret_placement | tensorflow/core/common_runtime/arg_ret_placement.cc | tensorflow/core/common_runtime/arg_ret_placement_test.cc | #include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include <algorithm>
#include <cstddef>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/full_type_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow::full_type {
// Maps a full type id to the memory space it implies: TFT_SHAPE_TENSOR means
// host memory; anything else defaults to device memory.
MemoryType MemoryTypeFromFullTypeId(FullTypeId id) {
  return id == TFT_SHAPE_TENSOR ? HOST_MEMORY : DEVICE_MEMORY;
}
// Returns true iff `use_host_memory` agrees with the memory type implied by
// the full type information `ft`. On disagreement (or if `ft` is an
// unexpected TFT_PRODUCT wrapper) logs the inconsistency and returns false
// instead of raising an error.
bool LogMemoryTypeMismatch(bool use_host_memory, const FullTypeDef& ft) {
  FullTypeId id = ft.type_id();
  if (id == TFT_PRODUCT) {
    // Callers are expected to pass the per-tensor type, not the enclosing
    // TFT_PRODUCT wrapper.
    LOG(ERROR) << "Unexpected full type information for tensor, which should "
                  "not start with TFT_PRODUCT\n"
               << ft.DebugString();
    return false;
  }
  MemoryType mt_from_ft = MemoryTypeFromFullTypeId(id);
  if (use_host_memory != (mt_from_ft == HOST_MEMORY)) {
    // Fixed log message: previously missing the space before "but", which
    // produced e.g. "use_host_memory=0but full type information is".
    VLOG(1) << "use_host_memory=" << use_host_memory
            << " but full type information is\n"
            << ft.DebugString();
    return false;
  }
  return true;
}
// Returns OK iff `use_host_memory` agrees with the memory type implied by the
// full type information `ft`; otherwise returns an internal error. `ft` must
// be the per-tensor type, not a TFT_PRODUCT wrapper.
Status CheckMemoryType(bool use_host_memory, const FullTypeDef& ft) {
  const FullTypeId id = ft.type_id();
  if (id == TFT_PRODUCT) {
    return errors::Internal(
        "Unexpected full type information for tensor, which should not start "
        "with TFT_PRODUCT\n",
        ft.DebugString());
  }
  const bool ft_expects_host = (MemoryTypeFromFullTypeId(id) == HOST_MEMORY);
  if (use_host_memory != ft_expects_host) {
    return errors::Internal("use_host_memory=", use_host_memory,
                            " but full type information is\n",
                            ft.DebugString());
  }
  return absl::OkStatus();
}
// Derives the memory type for one _Arg/_Retval node from its `dtype` (and,
// for DT_INT32, its full type annotation), appending the result to
// `memory_types` and/or `alloc_attrs` when those are non-null.
//
// For an arg, the annotation is read from the node itself (output 0); for a
// ret, from the producer node feeding the retval's input edge. Inconsistent
// or missing full type information for an int32 tensor yields an internal
// error, or only a VLOG when `weak_flag` is set.
static Status SetMemoryTypeForNode(
    const Node* node, const DataType dtype, bool is_arg, bool weak_flag,
    bool ints_on_device, MemoryTypeVector* memory_types,
    std::vector<AllocatorAttributes>* alloc_attrs) {
  const Node* n;
  int output_idx;
  if (is_arg) {
    DCHECK(node->op_def().name() == "_Arg" ||
           node->op_def().name() == "_DeviceArg");
    output_idx = 0;
    n = node;
  } else {
    // Look through the retval to the node producing the returned value; its
    // full type information describes the tensor being returned.
    DCHECK(node->op_def().name() == "_Retval" ||
           node->op_def().name() == "_DeviceRetval");
    const Edge* edge;
    TF_RETURN_IF_ERROR(node->input_edge(0, &edge));
    n = edge->src();
    output_idx = edge->src_output();
  }
  MemoryType mt_from_dtype = ints_on_device ? MTypeFromDTypeIntsOnDevice(dtype)
                                            : MTypeFromDType(dtype);
  if (dtype == DT_INT32) {
    if (n->def().has_experimental_type()) {
      bool valid_full_type_information = false;
      // Bind a reference instead of copying the FullTypeDef proto.
      const auto& ft = n->def().experimental_type();
      if (ft.type_id() == TFT_PRODUCT) {
        FullTypeId id = GetArgDefaultUnset(ft, output_idx).type_id();
        MemoryType mt_from_ft = MemoryTypeFromFullTypeId(id);
        if ((id == TFT_TENSOR) || (id == TFT_SHAPE_TENSOR)) {
          valid_full_type_information = mt_from_dtype == mt_from_ft;
        } else if (id == TFT_UNSET) {
          // An unset per-output type is tolerated only for device-resident
          // int32s.
          valid_full_type_information = mt_from_dtype != HOST_MEMORY;
        }
      }
      if (!valid_full_type_information) {
        if (weak_flag) {
          VLOG(1) << "node=" << n->name() << " (op=" << n->def().op()
                  << ") has an int32 output with unexpected full type "
                  << "information with ints_on_device=" << ints_on_device
                  << "\n"
                  << n->def().DebugString();
        } else {
          return errors::Internal(
              "node=", n->name(), " (op=", n->def().op(),
              ") has an int32 output with unexpected full type information ",
              "with ints_on_device=", ints_on_device, "\n",
              n->def().DebugString());
        }
      }
    } else if (mt_from_dtype == HOST_MEMORY) {
      // Host-memory int32s are required to carry TFT_SHAPE_TENSOR full type
      // information.
      if (weak_flag) {
        VLOG(1) << "node=" << n->name() << " (op=" << n->def().op()
                << ") has a HOST_MEMORY int32 output but does not have "
                << "(TFT_SHAPE_TENSOR) full type information.";
      } else {
        return errors::Internal(
            "node=", n->name(), " (op=", n->def().op(),
            ") has a HOST_MEMORY int32 output but does not have "
            "(TFT_SHAPE_TENSOR) full type information.");
      }
    }
  }
  if (memory_types != nullptr) {
    memory_types->push_back(mt_from_dtype);
  }
  if (alloc_attrs != nullptr) {
    AllocatorAttributes aa;
    aa.set_on_host(mt_from_dtype == HOST_MEMORY);
    alloc_attrs->push_back(aa);
  }
  return absl::OkStatus();
}
// Applies SetMemoryTypeForNode to each (node, dtype) pair, appending to
// `memory_types` and/or `alloc_attrs` when non-null. `nodes` and `dtypes`
// must be the same length. Used for the multi-device path, so
// ints_on_device is always false here.
static Status SetMemoryTypeHelper(
    const absl::InlinedVector<Node*, 4UL>& nodes, const DataTypeVector& dtypes,
    bool is_arg, bool weak_flag, MemoryTypeVector* memory_types,
    std::vector<AllocatorAttributes>* alloc_attrs) {
  DCHECK_EQ(nodes.size(), dtypes.size());
  // Reserve both output vectors (the original reserved only alloc_attrs).
  if (memory_types != nullptr) {
    memory_types->reserve(nodes.size());
  }
  if (alloc_attrs != nullptr) {
    alloc_attrs->reserve(nodes.size());
  }
  // size_t index avoids the signed/unsigned comparison of the original.
  for (size_t i = 0; i < nodes.size(); ++i) {
    TF_RETURN_IF_ERROR(SetMemoryTypeForNode(nodes[i], dtypes[i], is_arg,
                                            weak_flag,
                                            /*ints_on_device=*/false,
                                            memory_types, alloc_attrs));
  }
  return absl::OkStatus();
}
// Single-device variant for args: derives each arg's dtype from its "T"
// attribute and computes allocator attributes. `alloc_attrs` must be
// non-null.
//
// Takes `arg_nodes` by const reference; the original took a const
// std::vector by value, copying it on every call
// (clang-tidy performance-unnecessary-value-param).
static Status SetMemoryTypeHelper(
    const std::vector<std::pair<Node*, FunctionArgIndex>>& arg_nodes,
    bool weak_flag, bool ints_on_device,
    std::vector<AllocatorAttributes>* alloc_attrs) {
  DCHECK(alloc_attrs != nullptr);
  alloc_attrs->reserve(arg_nodes.size());
  for (const auto& arg : arg_nodes) {
    const AttrValue* attr_value = arg.first->attrs().Find("T");
    if (attr_value == nullptr) {
      return errors::Internal("Arg node missing T attribute");
    }
    DataType dtype = attr_value->type();
    TF_RETURN_IF_ERROR(SetMemoryTypeForNode(
        arg.first, dtype, /*is_arg=*/true, weak_flag, ints_on_device,
        /*memory_types=*/nullptr, alloc_attrs));
  }
  return absl::OkStatus();
}
// Single-device variant for rets: derives each retval's dtype from its "T"
// attribute and computes allocator attributes. `alloc_attrs` must be
// non-null.
//
// Takes `ret_nodes` by const reference; the original took a const
// std::vector by value, copying it on every call
// (clang-tidy performance-unnecessary-value-param).
static Status SetMemoryTypeHelper(
    const std::vector<std::pair<Node*, int>>& ret_nodes, bool weak_flag,
    bool ints_on_device, std::vector<AllocatorAttributes>* alloc_attrs) {
  DCHECK(alloc_attrs != nullptr);
  alloc_attrs->reserve(ret_nodes.size());
  for (const auto& ret : ret_nodes) {
    const AttrValue* attr_value = ret.first->attrs().Find("T");
    if (attr_value == nullptr) {
      return errors::Internal("Ret node missing T attribute");
    }
    DataType dtype = attr_value->type();
    TF_RETURN_IF_ERROR(SetMemoryTypeForNode(
        ret.first, dtype, /*is_arg=*/false, weak_flag, ints_on_device,
        /*memory_types=*/nullptr, alloc_attrs));
  }
  return absl::OkStatus();
}
// Computes the memory type of each function argument from its dtype and, for
// DT_INT32, its full type annotation; returns an error on inconsistency.
Status SetMemoryTypeForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
                            const DataTypeVector& dtypes,
                            MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/false, &memory_types,
                             /*alloc_attrs=*/nullptr);
}
// Like SetMemoryTypeForArgs, but logs instead of returning an error when the
// full type information is unexpected.
Status WeakSetMemoryTypeForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
                                const DataTypeVector& dtypes,
                                MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/true, &memory_types,
                             /*alloc_attrs=*/nullptr);
}
// Computes the memory type of each function return value, reading full type
// information from the node that produces each retval's input.
Status SetMemoryTypeForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
                            const DataTypeVector& dtypes,
                            MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/false, &memory_types,
                             /*alloc_attrs=*/nullptr);
}
// Like SetMemoryTypeForRets, but logs instead of returning an error when the
// full type information is unexpected.
Status WeakSetMemoryTypeForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
                                const DataTypeVector& dtypes,
                                MemoryTypeVector& memory_types) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/true, &memory_types,
                             /*alloc_attrs=*/nullptr);
}
// Computes per-argument allocator attributes (host vs. device placement)
// from dtype and full type information; returns an error on inconsistency.
Status SetAllocAttrsForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
                            const DataTypeVector& dtypes,
                            std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/false, /*memory_types=*/nullptr,
                             &alloc_attrs);
}
// Like SetAllocAttrsForArgs, but logs instead of returning an error when the
// full type information is unexpected.
Status WeakSetAllocAttrsForArgs(const absl::InlinedVector<Node*, 4UL>& nodes,
                                const DataTypeVector& dtypes,
                                std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/true,
                             /*weak_flag=*/true, /*memory_types=*/nullptr,
                             &alloc_attrs);
}
// Computes per-retval allocator attributes, reading full type information
// from the node producing each retval's input.
Status SetAllocAttrsForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
                            const DataTypeVector& dtypes,
                            std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/false, /*memory_types=*/nullptr,
                             &alloc_attrs);
}
// Like SetAllocAttrsForRets, but logs instead of returning an error when the
// full type information is unexpected.
Status WeakSetAllocAttrsForRets(const absl::InlinedVector<Node*, 4UL>& nodes,
                                const DataTypeVector& dtypes,
                                std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(nodes, dtypes, /*is_arg=*/false,
                             /*weak_flag=*/true, /*memory_types=*/nullptr,
                             &alloc_attrs);
}
// Computes allocator attributes for the args of a single-device function.
// `ints_on_device` selects the int32-on-device convention when deriving the
// memory type from dtype.
Status SingleDeviceSetAllocAttrsForArgs(
    std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes,
    bool ints_on_device, std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(arg_nodes, /*weak_flag=*/false, ints_on_device,
                             &alloc_attrs);
}
// Like SingleDeviceSetAllocAttrsForArgs, but logs instead of returning an
// error when the full type information is unexpected.
Status WeakSingleDeviceSetAllocAttrsForArgs(
    std::vector<std::pair<Node*, FunctionArgIndex>> arg_nodes,
    bool ints_on_device, std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(arg_nodes, /*weak_flag=*/true, ints_on_device,
                             &alloc_attrs);
}
// Computes allocator attributes for the rets of a single-device function.
Status SingleDeviceSetAllocAttrsForRets(
    const std::vector<std::pair<Node*, int>> ret_nodes, bool ints_on_device,
    std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(ret_nodes, /*weak_flag=*/false, ints_on_device,
                             &alloc_attrs);
}
// Like SingleDeviceSetAllocAttrsForRets, but logs instead of returning an
// error when the full type information is unexpected.
Status WeakSingleDeviceSetAllocAttrsForRets(
    const std::vector<std::pair<Node*, int>> ret_nodes, bool ints_on_device,
    std::vector<AllocatorAttributes>& alloc_attrs) {
  return SetMemoryTypeHelper(ret_nodes, /*weak_flag=*/true, ints_on_device,
                             &alloc_attrs);
}
} | #include "tensorflow/core/common_runtime/arg_ret_placement.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/full_type.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace tensorflow {
// Test fixture that builds tiny graphs consisting of an _Arg node optionally
// feeding a _Retval node, and attaches full type annotations to the _Arg.
class FullTypeGraphUtilsTest : public ::testing::Test {
 protected:
  FullTypeGraphUtilsTest()
      : graph_(OpRegistry::Global()),
        root_(Scope::NewRootScope().ExitOnError()) {}
  // Adds an _Arg node of type `dtype` (index 0) to the scope's graph.
  Status MakeArg(Node **arg, DataType dtype) {
    return NodeBuilder("arg", "_Arg", &root_.graph()->flib_def())
        .Attr("T", dtype)
        .Attr("index", 0)
        .Finalize(root_.graph(), arg);
  }
  // Adds a _Retval node of type `dtype` consuming output 0 of `src`.
  Status MakeRet(Node *src, Node **ret, DataType dtype) {
    return NodeBuilder("ret", "_Retval", &root_.graph()->flib_def())
        .Input(src, 0)
        .Attr("T", dtype)
        .Attr("index", 0)
        .Finalize(root_.graph(), ret);
  }
 public:
  // Builds an _Arg -> _Retval pair, both of type `dtype`.
  Status MakeArgRet(Node **arg, Node **ret, DataType dtype) {
    TF_RETURN_IF_ERROR(MakeArg(arg, dtype));
    return MakeRet(*arg, ret, dtype);
  }
  // Attaches TFT_PRODUCT[out_id[data_id]] full type information to `arg`;
  // data_id == TFT_UNSET yields an out_id with no type arguments.
  void AddArgFullType(Node *arg, FullTypeId out_id, FullTypeId data_id) {
    FullTypeDef *t = arg->mutable_def()->mutable_experimental_type();
    t->set_type_id(TFT_PRODUCT);
    FullTypeDef out_t;
    out_t.set_type_id(out_id);
    if (data_id != TFT_UNSET) {
      FullTypeDef data_t;
      data_t.set_type_id(data_id);
      (*out_t.add_args()) = data_t;
    }
    (*t->add_args()) = out_t;
  }
 private:
  // NOTE(review): graph_ appears unused (nodes are added to root_'s graph);
  // confirm before removing.
  Graph graph_;
  Scope root_;
};
// An int64 arg without full type information maps to DEVICE_MEMORY.
TEST_F(FullTypeGraphUtilsTest, MemoryTypesArgNoFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  MemoryTypeVector memory_types;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT64));
  nodes.push_back(arg);
  dtypes.push_back(DT_INT64);
  TF_ASSERT_OK(
      full_type::WeakSetMemoryTypeForArgs(nodes, dtypes, memory_types));
  ASSERT_EQ(memory_types.size(), 1);
  ASSERT_EQ(memory_types[0], MemoryType::DEVICE_MEMORY);
}
// An int64 arg without full type information gets device allocator attrs.
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgNoFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT64));
  nodes.push_back(arg);
  dtypes.push_back(DT_INT64);
  TF_ASSERT_OK(full_type::WeakSetAllocAttrsForArgs(nodes, dtypes, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_FALSE(alloc_attrs[0].on_host());
}
// An int32 arg annotated TFT_SHAPE_TENSOR maps to HOST_MEMORY.
TEST_F(FullTypeGraphUtilsTest, MemoryTypesArgWithFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  MemoryTypeVector memory_types;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
  nodes.push_back(arg);
  dtypes.push_back(DT_INT32);
  TF_ASSERT_OK(full_type::SetMemoryTypeForArgs(nodes, dtypes, memory_types));
  ASSERT_EQ(memory_types.size(), 1);
  ASSERT_EQ(memory_types[0], MemoryType::HOST_MEMORY);
}
// An int32 arg annotated TFT_SHAPE_TENSOR gets host allocator attributes.
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgWithFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
  nodes.push_back(arg);
  dtypes.push_back(DT_INT32);
  TF_ASSERT_OK(full_type::SetAllocAttrsForArgs(nodes, dtypes, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_TRUE(alloc_attrs[0].on_host());
}
// TFT_TENSOR (device) conflicts with the host placement implied by the int32
// dtype; the strict variant errors.
TEST_F(FullTypeGraphUtilsTest, ArgError) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  MemoryTypeVector memory_types;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_TENSOR, TFT_INT32);
  nodes.push_back(arg);
  dtypes.push_back(DT_INT32);
  Status status = full_type::SetMemoryTypeForArgs(nodes, dtypes, memory_types);
  EXPECT_FALSE(status.ok());
}
// The weak variant logs the same conflict and still returns host attrs.
TEST_F(FullTypeGraphUtilsTest, WeakAllocAttrsArgIgnore) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_TENSOR, TFT_INT32);
  nodes.push_back(arg);
  dtypes.push_back(DT_INT32);
  TF_ASSERT_OK(full_type::WeakSetAllocAttrsForArgs(nodes, dtypes, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_TRUE(alloc_attrs[0].on_host());
}
// An int64 retval without full type information maps to DEVICE_MEMORY.
TEST_F(FullTypeGraphUtilsTest, RetNoFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  MemoryTypeVector memory_types;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT64));
  nodes.push_back(ret);
  dtypes.push_back(DT_INT64);
  TF_ASSERT_OK(
      full_type::WeakSetMemoryTypeForRets(nodes, dtypes, memory_types));
  ASSERT_EQ(memory_types.size(), 1);
  ASSERT_EQ(memory_types[0], MemoryType::DEVICE_MEMORY);
}
// Full type info on the producing _Arg is read through the retval's edge.
TEST_F(FullTypeGraphUtilsTest, MemoryTypeRetWithFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  MemoryTypeVector memory_types;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
  nodes.push_back(ret);
  dtypes.push_back(DT_INT32);
  TF_ASSERT_OK(full_type::SetMemoryTypeForRets(nodes, dtypes, memory_types));
  ASSERT_EQ(memory_types.size(), 1);
  ASSERT_EQ(memory_types[0], MemoryType::HOST_MEMORY);
}
// Same as above but for allocator attributes.
TEST_F(FullTypeGraphUtilsTest, AllowAttrRetWithFT) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
  nodes.push_back(ret);
  dtypes.push_back(DT_INT32);
  TF_ASSERT_OK(full_type::SetAllocAttrsForRets(nodes, dtypes, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_TRUE(alloc_attrs[0].on_host());
}
// A host-memory int32 retval with no full type info errors in strict mode.
TEST_F(FullTypeGraphUtilsTest, RetError) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  MemoryTypeVector memory_types;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  nodes.push_back(ret);
  dtypes.push_back(DT_INT32);
  Status status = full_type::SetMemoryTypeForRets(nodes, dtypes, memory_types);
  EXPECT_FALSE(status.ok());
}
// The weak variant logs the missing annotation and still returns host attrs.
TEST_F(FullTypeGraphUtilsTest, WeakAllocAttrsRetIgnore) {
  absl::InlinedVector<Node *, 4UL> nodes;
  DataTypeVector dtypes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  nodes.push_back(ret);
  dtypes.push_back(DT_INT32);
  TF_ASSERT_OK(full_type::WeakSetAllocAttrsForRets(nodes, dtypes, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_TRUE(alloc_attrs[0].on_host());
}
// With ints_on_device=true, TFT_TENSOR int32 args stay on device.
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgWithFTSingleDevice) {
  std::vector<std::pair<Node *, FunctionArgIndex>> arg_nodes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_TENSOR, TFT_INT32);
  arg_nodes.push_back(std::make_pair(arg, FunctionArgIndex(0, 0)));
  TF_ASSERT_OK(full_type::SingleDeviceSetAllocAttrsForArgs(
      arg_nodes, true, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_FALSE(alloc_attrs[0].on_host());
}
// A TFT_UNSET annotation is accepted for a device-resident int32 arg.
TEST_F(FullTypeGraphUtilsTest, AllocatorAttrsArgWithUnsetFTSingleDevice) {
  std::vector<std::pair<Node *, FunctionArgIndex>> arg_nodes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_UNSET, TFT_UNSET);
  arg_nodes.push_back(std::make_pair(arg, FunctionArgIndex(0, 0)));
  TF_ASSERT_OK(full_type::SingleDeviceSetAllocAttrsForArgs(
      arg_nodes, true, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_FALSE(alloc_attrs[0].on_host());
}
// Weak single-device path with a host annotation yields host attributes.
TEST_F(FullTypeGraphUtilsTest, WeakAllocatorAttrsArgWithFTSingleDevice) {
  std::vector<std::pair<Node *, FunctionArgIndex>> arg_nodes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
  arg_nodes.push_back(std::make_pair(arg, FunctionArgIndex(0, 0)));
  TF_ASSERT_OK(full_type::WeakSingleDeviceSetAllocAttrsForArgs(
      arg_nodes, false, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_TRUE(alloc_attrs[0].on_host());
}
// TFT_SHAPE_TENSOR (host) conflicts with ints_on_device=true -> error.
TEST_F(FullTypeGraphUtilsTest, SingleDeviceAllocAttrsRetError) {
  std::vector<std::pair<Node *, int>> ret_nodes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  AddArgFullType(arg, TFT_SHAPE_TENSOR, TFT_INT32);
  ret_nodes.push_back(std::make_pair(ret, 0));
  Status status = full_type::SingleDeviceSetAllocAttrsForRets(
      ret_nodes, true, alloc_attrs);
  EXPECT_FALSE(status.ok());
}
// Non-int32 dtypes (here DT_STRING) need no full type info; strings are
// host-resident.
TEST_F(FullTypeGraphUtilsTest, SingleDeviceAllocAttrsNotInt32) {
  std::vector<std::pair<Node *, int>> ret_nodes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_STRING));
  ret_nodes.push_back(std::make_pair(ret, 0));
  TF_ASSERT_OK(full_type::SingleDeviceSetAllocAttrsForRets(
      ret_nodes, false, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_TRUE(alloc_attrs[0].on_host());
}
// Weak path tolerates a missing annotation for an int32-on-device retval.
TEST_F(FullTypeGraphUtilsTest, SingleDeviceWeakAllocAttrsRetIgnore) {
  std::vector<std::pair<Node *, int>> ret_nodes;
  std::vector<AllocatorAttributes> alloc_attrs;
  Node *arg, *ret;
  TF_ASSERT_OK(MakeArgRet(&arg, &ret, DT_INT32));
  ret_nodes.push_back(std::make_pair(ret, 0));
  TF_ASSERT_OK(full_type::WeakSingleDeviceSetAllocAttrsForRets(
      ret_nodes, true, alloc_attrs));
  ASSERT_EQ(alloc_attrs.size(), 1);
  ASSERT_FALSE(alloc_attrs[0].on_host());
}
// CheckMemoryType accepts host placement for a TFT_SHAPE_TENSOR annotation.
TEST_F(FullTypeGraphUtilsTest, CheckMemoryTypeOK) {
  Node *node;
  TF_ASSERT_OK(MakeArg(&node, DT_INT32));
  AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
  const FullTypeDef &ft = node->def().experimental_type().args()[0];
  TF_ASSERT_OK(full_type::CheckMemoryType(true, ft));
}
// Passing the TFT_PRODUCT wrapper (not the per-tensor type) is rejected.
TEST_F(FullTypeGraphUtilsTest, CheckMemoryTypeBadFT) {
  Node *node;
  TF_ASSERT_OK(MakeArg(&node, DT_INT32));
  AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
  const FullTypeDef &ft = node->def().experimental_type();
  Status status = full_type::CheckMemoryType(true, ft);
  EXPECT_FALSE(status.ok());
}
// Device placement for a TFT_SHAPE_TENSOR annotation is rejected.
TEST_F(FullTypeGraphUtilsTest, CheckMemoryTypeWrongFT) {
  Node *node;
  TF_ASSERT_OK(MakeArg(&node, DT_INT32));
  AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
  const FullTypeDef &ft = node->def().experimental_type().args()[0];
  Status status = full_type::CheckMemoryType(false, ft);
  EXPECT_FALSE(status.ok());
}
// LogMemoryTypeMismatch mirrors CheckMemoryType but returns a bool.
TEST_F(FullTypeGraphUtilsTest, LogMemoryTypeMismatchOK) {
  Node *node;
  TF_ASSERT_OK(MakeArg(&node, DT_INT32));
  AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
  const FullTypeDef &ft = node->def().experimental_type().args()[0];
  EXPECT_TRUE(full_type::LogMemoryTypeMismatch(true, ft));
}
// TFT_PRODUCT wrapper passed by mistake -> false (mismatch logged).
TEST_F(FullTypeGraphUtilsTest, LogMemoryTypeMismatchBadFT) {
  Node *node;
  TF_ASSERT_OK(MakeArg(&node, DT_INT32));
  AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
  const FullTypeDef &ft = node->def().experimental_type();
  EXPECT_FALSE(full_type::LogMemoryTypeMismatch(true, ft));
}
// Device placement against a host annotation -> false (mismatch logged).
TEST_F(FullTypeGraphUtilsTest, LogMemoryTypeMismatchWrongFT) {
  Node *node;
  TF_ASSERT_OK(MakeArg(&node, DT_INT32));
  AddArgFullType(node, TFT_SHAPE_TENSOR, TFT_INT32);
  const FullTypeDef &ft = node->def().experimental_type().args()[0];
  EXPECT_FALSE(full_type::LogMemoryTypeMismatch(false, ft));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/arg_ret_placement.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/arg_ret_placement_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c3cd288c-cd28-4217-a6cd-3e198a8c404d | cpp | google/quiche | http_header_storage | quiche/common/http/http_header_storage.cc | quiche/common/http/http_header_storage_test.cc | #include "quiche/common/http/http_header_storage.h"
#include <cstring>
#include "quiche/common/platform/api/quiche_logging.h"
namespace quiche {
namespace {
// Size in bytes of each block the backing arena allocates.
const size_t kDefaultStorageBlockSize = 2048;
}  // namespace
HttpHeaderStorage::HttpHeaderStorage() : arena_(kDefaultStorageBlockSize) {}
// Copies `s` into the arena and returns a view of the stored copy.
absl::string_view HttpHeaderStorage::Write(const absl::string_view s) {
  return absl::string_view(arena_.Memdup(s.data(), s.size()), s.size());
}
// Hands the bytes backing `s` back to the arena.
// NOTE(review): presumably only reclaims space when `s` was the most recent
// arena allocation — confirm against the arena's Free() contract.
void HttpHeaderStorage::Rewind(const absl::string_view s) {
  arena_.Free(const_cast<char*>(s.data()), s.size());
}
// Concatenates `fragments` with `separator` between adjacent pairs into a
// single arena-backed buffer and returns a view of the joined string.
// Returns an empty view for an empty fragment list.
absl::string_view HttpHeaderStorage::WriteFragments(
    const Fragments& fragments, absl::string_view separator) {
  if (fragments.empty()) {
    return absl::string_view();
  }
  // Size the buffer exactly: every fragment plus one separator per gap.
  size_t total_size = 0;
  for (const absl::string_view& fragment : fragments) {
    total_size += fragment.size();
  }
  total_size += separator.size() * (fragments.size() - 1);
  char* dst = arena_.Alloc(total_size);
  const size_t written = Join(dst, fragments, separator);
  QUICHE_DCHECK_EQ(written, total_size);
  return absl::string_view(dst, total_size);
}
size_t Join(char* dst, const Fragments& fragments,
absl::string_view separator) {
if (fragments.empty()) {
return 0;
}
auto* original_dst = dst;
auto it = fragments.begin();
memcpy(dst, it->data(), it->size());
dst += it->size();
for (++it; it != fragments.end(); ++it) {
memcpy(dst, separator.data(), separator.size());
dst += separator.size();
memcpy(dst, it->data(), it->size());
dst += it->size();
}
return dst - original_dst;
}
} | #include "quiche/common/http/http_header_storage.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
// Joining an empty fragment list writes nothing and returns 0.
TEST(JoinTest, JoinEmpty) {
  Fragments empty;
  absl::string_view separator = ", ";
  char buf[10] = "";
  size_t written = Join(buf, empty, separator);
  EXPECT_EQ(0u, written);
}
// A single fragment is copied verbatim with no separator.
TEST(JoinTest, JoinOne) {
  Fragments v = {"one"};
  absl::string_view separator = ", ";
  char buf[15];
  size_t written = Join(buf, v, separator);
  EXPECT_EQ(3u, written);
  EXPECT_EQ("one", absl::string_view(buf, written));
}
// Multiple fragments are joined with the separator between adjacent pairs.
TEST(JoinTest, JoinMultiple) {
  Fragments v = {"one", "two", "three"};
  absl::string_view separator = ", ";
  char buf[15];
  size_t written = Join(buf, v, separator);
  EXPECT_EQ(15u, written);
  EXPECT_EQ("one, two, three", absl::string_view(buf, written));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/http/http_header_storage.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/http/http_header_storage_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
be4ba84d-b0c8-4011-bcdf-ed33ef61fd5b | cpp | tensorflow/tensorflow | reduce_window_rewriter | third_party/xla/xla/service/reduce_window_rewriter.cc | third_party/xla/xla/service/reduce_window_rewriter_test.cc | #include "xla/service/reduce_window_rewriter.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
// Collapses a ShapeIndex of depth at most one into a flat tuple position;
// the empty index (a non-tuple shape) maps to position 0.
static size_t FlattenShapeIndex(const ShapeIndex& shape_index) {
  if (!shape_index.empty()) {
    CHECK_EQ(shape_index.size(), 1);
    return shape_index.back();
  }
  return 0;
}
// Returns the subshape of `shape` selected by `shape_index`; the empty index
// selects `shape` itself. Only depth-0 or depth-1 indices are supported.
static Shape ShapeAtIndex(const Shape& shape, const ShapeIndex& shape_index) {
  if (!shape_index.empty()) {
    CHECK_EQ(shape_index.size(), 1);
    return ShapeUtil::GetTupleElementShape(shape, shape_index.back());
  }
  return shape;
}
// Returns `hlo` itself for the empty index, or emits a get-tuple-element
// instruction selecting the element named by a depth-one index.
static HloInstruction* GetAtIndex(HloInstruction* hlo,
                                  const ShapeIndex& shape_index) {
  if (!shape_index.empty()) {
    CHECK_EQ(shape_index.size(), 1);
    const int64_t element = shape_index.back();
    return hlo->parent()->AddInstruction(
        HloInstruction::CreateGetTupleElement(
            ShapeAtIndex(hlo->shape(), shape_index), hlo, element));
  }
  return hlo;
}
// Rewrites a rank-1 reduce-window as a rank-2 one: appends a unit major
// dimension to every operand and output, adds a trivial window dimension,
// runs the R2 reduce-window, then reshapes each result back to rank 1.
// Handles both single-output and variadic (tuple-shaped) reduce-windows.
// The original instruction is replaced and removed from its computation.
absl::Status ReduceWindowRewriter::ReplaceReduceWindowWithReshape(
    HloReduceWindowInstruction* reduce_window) {
  VLOG(2) << "Converting R1 reduce window: " << reduce_window->ToString();
  // Compute the R2 shape for every leaf output of the (possibly tuple)
  // result shape.
  std::vector<Shape> r2_output_shapes;
  ShapeUtil::ForEachSubshape(
      reduce_window->shape(),
      [&](const Shape& subshape, const ShapeIndex& shape_index) {
        if (!ShapeUtil::IsLeafIndex(reduce_window->shape(), shape_index)) {
          return;
        }
        Shape r2_output_shape = subshape;
        ShapeUtil::AppendMajorDimension(1, &r2_output_shape);
        UpdateLayout(&r2_output_shape);
        r2_output_shapes.push_back(r2_output_shape);
        VLOG(2) << "ReduceWindowRewriter: Converting R2 result to R1: "
                << ShapeUtil::HumanStringWithLayout(r2_output_shape);
      });
  // The appended dimension gets a trivial window (size/stride/dilation 1).
  Window r2_window = reduce_window->window();
  WindowDimension* dim = r2_window.add_dimensions();
  dim->set_size(1);
  dim->set_stride(1);
  dim->set_base_dilation(1);
  dim->set_window_dilation(1);
  // Reshape each R1 operand to R2 by appending a unit major dimension.
  std::vector<HloInstruction*> r2_operands;
  for (HloInstruction* operand : reduce_window->inputs()) {
    Shape r2_input_shape = operand->shape();
    ShapeUtil::AppendMajorDimension(1, &r2_input_shape);
    UpdateLayout(&r2_input_shape);
    VLOG(2) << "ReduceWindowRewriter: Converting R1 operand to R2: "
            << ShapeUtil::HumanStringWithLayout(r2_input_shape);
    HloInstruction* r2_operand = operand->parent()->AddInstruction(
        HloInstruction::CreateReshape(r2_input_shape, operand));
    VLOG(2) << "R2 new operand: " << r2_operand->ToString();
    r2_operands.push_back(r2_operand);
  }
  // Build the equivalent R2 reduce-window over the reshaped operands,
  // reusing the original init values and reduction computation.
  HloInstruction* new_reduce_window = reduce_window->parent()->AddInstruction(
      HloInstruction::CreateReduceWindow(
          reduce_window->shape().IsTuple()
              ? ShapeUtil::MakeTupleShape(r2_output_shapes)
              : r2_output_shapes[0],
          r2_operands, reduce_window->init_values(), r2_window,
          reduce_window->to_apply()));
  VLOG(2) << "R2 resulting reduce window: " << new_reduce_window->ToString();
  // Reshape each leaf of the R2 result back to the original R1 shape.
  std::vector<HloInstruction*> final_reshapes;
  ShapeUtil::ForEachSubshape(
      reduce_window->shape(),
      [&](const Shape& subshape, const ShapeIndex& shape_index) {
        if (!ShapeUtil::IsLeafIndex(reduce_window->shape(), shape_index)) {
          return;
        }
        HloInstruction* final_reshape =
            new_reduce_window->parent()->AddInstruction(
                HloInstruction::CreateReshape(
                    subshape, GetAtIndex(new_reduce_window, shape_index)));
        final_reshapes.push_back(final_reshape);
      });
  // Re-tuple the reshaped leaves when the original produced a tuple.
  HloInstruction* result;
  if (reduce_window->shape().IsTuple()) {
    result = new_reduce_window->parent()->AddInstruction(
        HloInstruction::CreateTuple(final_reshapes));
  } else {
    CHECK_EQ(final_reshapes.size(), 1);
    result = final_reshapes[0];
  }
  TF_RETURN_IF_ERROR(reduce_window->ReplaceAllUsesWith(result));
  TF_RETURN_IF_ERROR(
      new_reduce_window->parent()->RemoveInstruction(reduce_window));
  return absl::OkStatus();
}
absl::StatusOr<bool> ReduceWindowRewriter::TryOptimizeCumSumOrProd(
HloReduceWindowInstruction* reduce_window) {
const Shape& operand_shape = reduce_window->inputs().front()->shape();
int64_t rank = operand_shape.rank();
const Window& window = reduce_window->window();
int64_t scan_dim_num = -1;
for (int i = 0; i < rank; ++i) {
const WindowDimension& window_dim = window.dimensions(i);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
if (scan_dim_num != -1) {
return false;
}
scan_dim_num = i;
}
if (scan_dim_num == -1) {
return false;
}
const int64_t scan_length = operand_shape.dimensions(scan_dim_num);
absl::Span<HloInstruction* const> init_values = reduce_window->init_values();
const WindowDimension& scan_window_dim = window.dimensions(scan_dim_num);
bool forward_scan = (scan_window_dim.padding_low() == scan_length - 1 ||
scan_window_dim.padding_low() == scan_length) &&
scan_window_dim.padding_high() == 0;
bool reverse_scan = (scan_window_dim.padding_high() == scan_length - 1 ||
scan_window_dim.padding_high() == scan_length) &&
scan_window_dim.padding_low() == 0;
if (scan_window_dim.stride() != 1 || scan_window_dim.size() != scan_length ||
(!forward_scan && !reverse_scan) || scan_window_dim.window_reversal() ||
scan_window_dim.base_dilation() != 1 ||
scan_window_dim.window_dilation() != 1) {
return false;
}
bool is_exclusive = forward_scan
? (scan_window_dim.padding_low() == scan_length)
: (scan_window_dim.padding_high() == scan_length);
if (scan_length <= base_length_) {
return false;
}
if (reduce_window->to_apply()->root_instruction()->shape().IsTuple() &&
reduce_window->to_apply()->root_instruction()->opcode() !=
HloOpcode::kTuple) {
return false;
}
VLOG(2) << "Rewriting Scan: " << reduce_window->ToString();
HloComputation* parent = reduce_window->parent();
std::vector<HloInstruction*> sources(reduce_window->inputs().begin(),
reduce_window->inputs().end());
std::vector<int64_t> permutation(rank);
absl::c_iota(permutation, 0);
permutation[scan_dim_num] = rank - 1;
permutation[rank - 1] = scan_dim_num;
if (scan_dim_num != rank - 1) {
for (size_t i = 0; i < sources.size(); ++i) {
sources[i] = parent->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(permutation, sources[i]->shape()),
sources[i], permutation));
}
}
const int64_t padded_length = RoundUpTo(scan_length, base_length_);
if (scan_length != padded_length) {
for (size_t i = 0; i < sources.size(); ++i) {
auto* source = sources[i];
Shape padded_shape = source->shape();
padded_shape.set_dimensions(rank - 1, padded_length);
UpdateLayout(&padded_shape);
auto padding_config = MakeNoPaddingConfig(rank);
padding_config.mutable_dimensions(rank - 1)->set_edge_padding_high(
padded_length - scan_length);
sources[i] = parent->AddInstruction(HloInstruction::CreatePad(
padded_shape, source, init_values[i], padding_config));
}
}
const int64_t num_columns = padded_length / base_length_;
std::vector<HloInstruction*> tiled_sources;
std::vector<Shape> tiled_shapes;
for (size_t i = 0; i < sources.size(); ++i) {
auto* source = sources[i];
Shape tiled_shape = source->shape();
tiled_shape.set_dimensions(rank - 1, num_columns);
UpdateLayout(&tiled_shape);
ShapeUtil::AppendMajorDimension(base_length_, &tiled_shape);
tiled_shapes.push_back(tiled_shape);
tiled_sources.push_back(parent->AddInstruction(
HloInstruction::CreateReshape(tiled_shape, source)));
}
Window outer_window =
window_util::MakeWindow(std::vector<int64_t>(rank + 1, 1));
outer_window.mutable_dimensions(rank)->set_size(base_length_);
if (forward_scan) {
outer_window.mutable_dimensions(rank)->set_padding_low(base_length_ - 1);
} else {
outer_window.mutable_dimensions(rank)->set_padding_high(base_length_ - 1);
}
auto outer_reduce_window =
parent->AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(tiled_shapes)
: tiled_shapes[0],
tiled_sources, init_values, outer_window, reduce_window->to_apply()));
std::vector<Shape> column_shapes;
std::vector<HloInstruction*> last_cols;
ShapeUtil::ForEachSubshape(
outer_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return;
}
Shape column_shape = subshape;
column_shape.set_dimensions(rank, 1);
UpdateLayout(&column_shape);
std::vector<int64_t> col_slice_starts(rank + 1, 0);
std::vector<int64_t> col_slice_limits(
SpanToVector(subshape.dimensions()));
if (forward_scan) {
col_slice_starts[rank] = base_length_ - 1;
} else {
col_slice_limits[rank] = 1;
}
auto last_col = parent->AddInstruction(HloInstruction::CreateSlice(
column_shape, GetAtIndex(outer_reduce_window, shape_index),
col_slice_starts, col_slice_limits,
std::vector<int64_t>(rank + 1, 1)));
column_shape.DeleteDimension(rank);
last_col = parent->AddInstruction(
HloInstruction::CreateReshape(column_shape, last_col));
last_cols.push_back(last_col);
column_shape.set_dimensions(rank - 1, num_columns + 1);
UpdateLayout(&column_shape);
column_shapes.push_back(column_shape);
});
Window inner_window = window_util::MakeWindow(std::vector<int64_t>(rank, 1));
inner_window.mutable_dimensions(rank - 1)->set_size(num_columns);
if (forward_scan) {
inner_window.mutable_dimensions(rank - 1)->set_padding_low(num_columns);
} else {
inner_window.mutable_dimensions(rank - 1)->set_padding_high(num_columns);
}
auto inner_reduce_window =
parent->AddInstruction(HloInstruction::CreateReduceWindow(
reduce_window->shape().IsTuple()
? ShapeUtil::MakeTupleShape(column_shapes)
: column_shapes[0],
last_cols, init_values, inner_window, reduce_window->to_apply()));
std::vector<int64_t> exclusive_slice_starts(rank, 0);
std::vector<int64_t> exclusive_slice_limits =
SpanToVector(column_shapes[0].dimensions());
if (forward_scan) {
exclusive_slice_limits[rank - 1] = num_columns;
} else {
exclusive_slice_starts[rank - 1] = 1;
exclusive_slice_limits[rank - 1] = num_columns + 1;
}
std::vector<HloInstruction*> inner_scan_components;
ShapeUtil::ForEachSubshape(
inner_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(inner_reduce_window->shape(),
shape_index)) {
return;
}
size_t idx = FlattenShapeIndex(shape_index);
auto last_col = last_cols[idx];
auto* inner_slice = parent->AddInstruction(HloInstruction::CreateSlice(
last_col->shape(), GetAtIndex(inner_reduce_window, shape_index),
exclusive_slice_starts, exclusive_slice_limits,
std::vector<int64_t>(rank, 1)));
std::vector<int64_t> rank_iota(rank);
absl::c_iota(rank_iota, 0);
auto* inner_scan_component =
parent->AddInstruction(HloInstruction::CreateBroadcast(
tiled_shapes[idx], inner_slice, rank_iota));
inner_scan_components.push_back(inner_scan_component);
});
std::vector<HloInstruction*> map_operands;
ShapeUtil::ForEachSubshape(
outer_reduce_window->shape(),
[&](const Shape& subshape, const ShapeIndex& shape_index) {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return;
}
map_operands.push_back(GetAtIndex(outer_reduce_window, shape_index));
});
map_operands.insert(map_operands.end(), inner_scan_components.begin(),
inner_scan_components.end());
std::vector<HloInstruction*> scans;
auto status = ShapeUtil::ForEachSubshapeWithStatus(
outer_reduce_window->shape(),
[&](const Shape& subshape,
const ShapeIndex& shape_index) -> absl::Status {
if (!ShapeUtil::IsLeafIndex(outer_reduce_window->shape(),
shape_index)) {
return absl::OkStatus();
}
size_t idx = FlattenShapeIndex(shape_index);
auto source = sources[idx];
HloComputation* map_computation;
auto reduce_function_root =
reduce_window->to_apply()->root_instruction();
if (reduce_function_root->shape().IsTuple()) {
TF_RET_CHECK(reduce_function_root->opcode() == HloOpcode::kTuple);
auto* map_computation_root = reduce_function_root->operand(idx);
absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>
replacements;
replacements[reduce_function_root] = nullptr;
map_computation = parent->parent()->AddEmbeddedComputation(
reduce_window->to_apply()->CloneWithReplacements(
&replacements,
{}, nullptr, "clone",
map_computation_root));
} else {
map_computation = reduce_window->to_apply();
}
auto scan = parent->AddInstruction(HloInstruction::CreateMap(
ShapeAtIndex(outer_reduce_window->shape(), shape_index),
map_operands, map_computation));
scan = parent->AddInstruction(
HloInstruction::CreateReshape(source->shape(), scan));
if (scan_dim_num != rank - 1) {
scan = parent->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::PermuteDimensions(permutation, source->shape()), scan,
permutation));
}
if (padded_length != scan_length) {
scan = parent->AddInstruction(HloInstruction::CreateSlice(
operand_shape, scan, std::vector<int64_t>(rank, 0),
operand_shape.dimensions(), std::vector<int64_t>(rank, 1)));
}
if (is_exclusive) {
auto padding_config = MakeNoPaddingConfig(rank);
if (forward_scan) {
padding_config.mutable_dimensions(scan_dim_num)
->set_edge_padding_low(1);
} else {
padding_config.mutable_dimensions(scan_dim_num)
->set_edge_padding_high(1);
}
scan = parent->AddInstruction(HloInstruction::CreatePad(
ShapeAtIndex(reduce_window->shape(), shape_index), scan,
init_values[idx], padding_config));
}
scans.push_back(scan);
return absl::OkStatus();
});
TF_RETURN_IF_ERROR(status);
HloInstruction* scan;
if (reduce_window->shape().IsTuple()) {
scan = parent->AddInstruction(HloInstruction::CreateTuple(scans));
} else {
CHECK_EQ(scans.size(), 1);
scan = scans[0];
}
TF_RETURN_IF_ERROR(reduce_window->ReplaceAllUsesWith(scan));
TF_RETURN_IF_ERROR(parent->RemoveInstruction(reduce_window));
return true;
}
// Runs the pass over every computation in `module` restricted to
// `execution_threads`. Each reduce-window instruction is first offered to
// the cumulative sum/product optimization; if that does not apply, rank-1
// reduce-windows are rewritten via a reshape to a higher rank. Returns
// whether any instruction was rewritten.
absl::StatusOr<bool> ReduceWindowRewriter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  bool any_rewritten = false;
  for (const auto& comp : module->computations(execution_threads)) {
    for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
      auto* rw = DynCast<HloReduceWindowInstruction>(instr);
      if (rw == nullptr) continue;
      // Preferred rewrite: recognize scan (cumsum/cumprod) patterns.
      TF_ASSIGN_OR_RETURN(const bool optimized, TryOptimizeCumSumOrProd(rw));
      if (optimized) {
        any_rewritten = true;
        continue;
      }
      // Fallback rewrite applies only to rank-1 operands.
      if (rw->inputs().front()->shape().rank() == 1) {
        TF_RETURN_IF_ERROR(ReplaceReduceWindowWithReshape(rw));
        any_rewritten = true;
      }
    }
  }
  return any_rewritten;
}
} | #include "xla/service/reduce_window_rewriter.h"
#include <optional>
#include <string>
#include "absl/strings/string_view.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
// Test fixture that runs ReduceWindowRewriter (with a base length of 128)
// over an HLO snippet and checks the result against a FileCheck pattern.
class ReduceWindowRewriterTest : public HloTestBase {
 public:
  // `expected` is the FileCheck pattern; std::nullopt means "no rewrite
  // expected".
  void CheckReduceWindowRewrite(absl::string_view hlo,
                                std::optional<absl::string_view> expected) {
    RunAndFilecheckHloRewrite(hlo, ReduceWindowRewriter{128}, expected);
  }
};
// Rank-1 reduce-window should be rewritten (via reshape to rank 2).
TEST_F(ReduceWindowRewriterTest, EliminateR1) {
  const char* hlo = R"(
%binary_add {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] %a, f32[] %b)
}
ENTRY %EliminateR1 (input: f32[10]) -> f32[10] {
%input = f32[10]{0} parameter(0)
%constant = f32[] constant(0)
ROOT %reduce-window = f32[10]{0} reduce-window(f32[10]{0} %input, f32[] %constant), window={size=5 pad=2_2}, to_apply=%binary_add
}
)";
  CheckReduceWindowRewrite(hlo, R"(
)");
}
// Same as EliminateR1 but for a variadic (multi-operand) reduce-window.
TEST_F(ReduceWindowRewriterTest, EliminateR1Variadic) {
  const char* hlo = R"(
HloModule reduce-window
add_float {
lhs.0 = f32[] parameter(0)
lhs.1 = f32[] parameter(1)
rhs.0 = f32[] parameter(2)
rhs.1 = f32[] parameter(3)
sum.0 = f32[] add(lhs.0, rhs.0)
sum.1 = f32[] add(lhs.1, rhs.1)
ROOT root = (f32[], f32[]) tuple(sum.0, sum.1)
}
ENTRY entry (arg: f32[10]) -> (f32[10], f32[10]) {
arg = f32[10]{0} parameter(0)
constant = f32[] constant(0)
ROOT reduce-window = (f32[10]{0}, f32[10]{0}) reduce-window(f32[10]{0} %arg, f32[10]{0} %arg, f32[] %constant, f32[] %constant), window={size=5 pad=2_2}, to_apply=%add_float
})";
  CheckReduceWindowRewrite(hlo, R"(
)");
}
// A full-width window with pad=N-1 on the low side is an inclusive scan
// and should be recognized by TryOptimizeCumSumOrProd.
TEST_F(ReduceWindowRewriterTest, OptimizeR1InclusiveScan) {
  const char* hlo = R"(
HloModule reduce-window
add_float {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY entry (arg: f32[46592]) -> f32[46592] {
arg = f32[46592]{0} parameter(0)
constant = f32[] constant(0)
ROOT reduce-window = f32[46592]{0} reduce-window(f32[46592]{0} %arg, f32[] %constant), window={size=46592 pad=46591_0}, to_apply=%add_float
})";
  CheckReduceWindowRewrite(hlo, R"(
)");
}
// Variadic inclusive scan (simultaneous running max and min).
TEST_F(ReduceWindowRewriterTest, OptimizeR1InclusiveScanVariadic) {
  const std::string hlo_string = R"(
HloModule reduce-window
MaxMin {
l.max = f32[] parameter(0)
l.min = f32[] parameter(1)
r.max = f32[] parameter(2)
r.min = f32[] parameter(3)
max = f32[] maximum(l.max, r.max)
min = f32[] minimum(l.min, r.min)
ROOT root = (f32[], f32[]) tuple(max, min)
}
ENTRY entry (arg_0: f32[46592], arg_1: f32[46592]) -> (f32[46592], f32[46592]) {
arg.0 = f32[46592]{0} parameter(0)
arg.1 = f32[46592]{0} parameter(1)
init_ninf = f32[] constant(-inf)
init_inf = f32[] constant(inf)
ROOT reduce-window = (f32[46592]{0}, f32[46592]{0}) reduce-window(f32[46592]{0} %arg.0, f32[46592]{0} %arg.1, f32[] %init_ninf, f32[] %init_inf), window={size=46592 pad=46591_0}, to_apply=%MaxMin
}
)";
  CheckReduceWindowRewrite(hlo_string, R"(
)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_window_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/reduce_window_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a9341881-a6be-430e-8b9b-db2749301f2e | cpp | tensorflow/tensorflow | profile_buffer | tensorflow/lite/profiling/profile_buffer.cc | tensorflow/lite/profiling/profile_buffer_test.cc | #include "tensorflow/lite/profiling/profile_buffer.h"
#include <utility>
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/profiling/memory_info.h"
#include "tensorflow/lite/profiling/time.h"
namespace tflite {
namespace profiling {
// Records the start of a profiling event and returns a handle for the
// matching EndEvent() call. Returns kInvalidEventHandle when profiling is
// disabled; when the ring buffer is full and cannot expand, the event is
// dropped and the returned handle refers to no stored event.
uint32_t ProfileBuffer::BeginEvent(const char* tag,
                                   ProfileEvent::EventType event_type,
                                   int64_t event_metadata1,
                                   int64_t event_metadata2) {
  if (!enabled_) {
    return kInvalidEventHandle;
  }
  // Capture the timestamp first so it reflects the true begin time as
  // closely as possible.
  uint64_t timestamp = time::NowMicros();
  // GetNextEntryIndex() returns {slot, dropped}.
  const auto next_index = GetNextEntryIndex();
  if (next_index.second) {
    return next_index.first;
  }
  const int index = next_index.first;
  event_buffer_[index].tag = tag;
  event_buffer_[index].event_type = event_type;
  event_buffer_[index].event_metadata = event_metadata1;
  event_buffer_[index].extra_event_metadata = event_metadata2;
  event_buffer_[index].begin_timestamp_us = timestamp;
  event_buffer_[index].elapsed_time = 0;
  // Memory usage is sampled only for events other than operator
  // invocations.
  if (event_type != Profiler::EventType::OPERATOR_INVOKE_EVENT) {
    event_buffer_[index].begin_mem_usage = memory::GetMemoryUsage();
  }
  current_index_++;
  return index;
}
// Finalizes the event identified by `event_handle`: records its elapsed
// time, samples end memory usage for non-operator events, and (when the
// pointers are non-null) overrides the event's metadata fields.
void ProfileBuffer::EndEvent(uint32_t event_handle,
                             const int64_t* event_metadata1,
                             const int64_t* event_metadata2) {
  // Ignore invalid handles and handles newer than anything issued.
  if (!enabled_ || event_handle == kInvalidEventHandle ||
      event_handle > current_index_) {
    return;
  }
  const uint32_t max_size = event_buffer_.size();
  // The ring buffer has lapped this handle's slot, so the entry was
  // already overwritten by a newer event; nothing to finalize.
  if (current_index_ > (max_size + event_handle)) {
    return;
  }
  int event_index = event_handle % max_size;
  event_buffer_[event_index].elapsed_time =
      time::NowMicros() - event_buffer_[event_index].begin_timestamp_us;
  if (event_buffer_[event_index].event_type !=
      Profiler::EventType::OPERATOR_INVOKE_EVENT) {
    event_buffer_[event_index].end_mem_usage = memory::GetMemoryUsage();
  }
  if (event_metadata1) {
    event_buffer_[event_index].event_metadata = *event_metadata1;
  }
  if (event_metadata2) {
    event_buffer_[event_index].extra_event_metadata = *event_metadata2;
  }
}
const struct ProfileEvent* ProfileBuffer::At(size_t index) const {
size_t size = Size();
if (index >= size) {
return nullptr;
}
const uint32_t max_size = event_buffer_.size();
uint32_t start =
(current_index_ > max_size) ? current_index_ % max_size : max_size;
index = (index + start) % max_size;
return &event_buffer_[index];
}
void ProfileBuffer::AddEvent(const char* tag,
ProfileEvent::EventType event_type,
uint64_t elapsed_time, int64_t event_metadata1,
int64_t event_metadata2) {
if (!enabled_) {
return;
}
const auto next_index = GetNextEntryIndex();
if (next_index.second) {
return;
}
const int index = next_index.first;
event_buffer_[index].tag = tag;
event_buffer_[index].event_type = event_type;
event_buffer_[index].event_metadata = event_metadata1;
event_buffer_[index].extra_event_metadata = event_metadata2;
event_buffer_[index].begin_timestamp_us = 0;
event_buffer_[index].elapsed_time = elapsed_time;
current_index_++;
}
// Returns {index, dropped}: the slot into which the next event should be
// written, and whether the event must instead be dropped. On an exact
// buffer wrap, either doubles the buffer (when dynamic expansion is
// allowed) or drops the event.
std::pair<int, bool> ProfileBuffer::GetNextEntryIndex() {
  int index = current_index_ % event_buffer_.size();
  // First event ever, or mid-lap: the slot is usable as-is.
  if (current_index_ == 0 || index != 0) {
    return std::make_pair(index, false);
  }
  // Here current_index_ is a non-zero multiple of the buffer size, i.e.
  // the buffer is exactly full.
  if (!allow_dynamic_expansion_) {
    TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
                         "Warning: Dropping ProfileBuffer event.");
    return std::make_pair(current_index_, true);
  } else {
    TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
                         "Warning: Doubling internal profiling buffer.");
    event_buffer_.resize(current_index_ * 2);
    return std::make_pair(current_index_, false);
  }
}
}
} | #include "tensorflow/lite/profiling/profile_buffer.h"
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
namespace tflite {
namespace profiling {
namespace {
std::vector<const ProfileEvent*> GetProfileEvents(const ProfileBuffer& buffer) {
std::vector<const ProfileEvent*> events;
for (size_t i = 0; i < buffer.Size(); i++) {
events.push_back(buffer.At(i));
}
return events;
}
// A zero-capacity buffer holds no events.
TEST(ProfileBufferTest, Empty) {
  ProfileBuffer buffer( 0, true);
  EXPECT_EQ(0, buffer.Size());
}
// Begin/End round-trip stores tag, type, metadata, and a timestamp.
TEST(ProfileBufferTest, AddEvent) {
  ProfileBuffer buffer( 10, true);
  EXPECT_EQ(0, buffer.Size());
  auto event_handle =
      buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
                        42, 0);
  EXPECT_GE(event_handle, 0);
  EXPECT_EQ(1, buffer.Size());
  auto event = GetProfileEvents(buffer)[0];
  EXPECT_EQ(event->tag, "hello");
  EXPECT_GT(event->begin_timestamp_us, 0);
  EXPECT_EQ(event->event_type, ProfileEvent::EventType::DEFAULT);
  EXPECT_EQ(event->event_metadata, 42);
  buffer.EndEvent(event_handle);
  EXPECT_EQ(1, buffer.Size());
  EXPECT_GE(event->elapsed_time, 0);
}
// EndEvent's optional metadata pointers override the values recorded at
// BeginEvent time.
TEST(ProfileBufferTest, EndEventWithMetadata) {
  ProfileBuffer buffer( 10, true);
  EXPECT_EQ(0, buffer.Size());
  auto event_handle =
      buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
                        42, 0);
  const int64_t kEventMetadata1 = 18;
  const int64_t kEventMetadata2 = 36;
  buffer.EndEvent(event_handle, &kEventMetadata1, &kEventMetadata2);
  EXPECT_GE(event_handle, 0);
  EXPECT_EQ(1, buffer.Size());
  auto event = GetProfileEvents(buffer)[0];
  EXPECT_EQ(event->tag, "hello");
  EXPECT_GT(event->begin_timestamp_us, 0);
  EXPECT_EQ(event->event_type, ProfileEvent::EventType::DEFAULT);
  EXPECT_EQ(event->event_metadata, kEventMetadata1);
  EXPECT_EQ(event->extra_event_metadata, kEventMetadata2);
  EXPECT_EQ(1, buffer.Size());
  EXPECT_GE(event->elapsed_time, 0);
}
// Without dynamic expansion the ring buffer caps at max_size and the
// oldest entries are overwritten in FIFO order.
TEST(ProfileBufferTest, OverFlow) {
  const int max_size = 4;
  ProfileBuffer buffer{max_size, true};
  std::vector<std::string> eventNames = {"first", "second", "third", "fourth"};
  for (int i = 0; i < 2 * max_size; i++) {
    buffer.BeginEvent(eventNames[i % 4].c_str(),
                      ProfileEvent::EventType::DEFAULT, i, 0);
    size_t expected_size = std::min(i + 1, max_size);
    EXPECT_EQ(expected_size, buffer.Size());
  }
  EXPECT_EQ(max_size, buffer.Size());
  for (size_t j = 0; j < buffer.Size(); ++j) {
    auto event = buffer.At(j);
    EXPECT_EQ(eventNames[j % 4], event->tag);
    EXPECT_EQ(ProfileEvent::EventType::DEFAULT, event->event_type);
    EXPECT_EQ(j, event->event_metadata);
  }
}
// With dynamic expansion enabled, the buffer doubles instead of dropping
// or overwriting events.
TEST(ProfileBufferTest, DynamicIncrease) {
  const int max_initial_size = 4;
  ProfileBuffer buffer{max_initial_size, true,
                       true };
  std::vector<std::string> eventNames = {"first", "second", "third", "fourth"};
  for (int i = 0; i < 2 * max_initial_size; i++) {
    buffer.BeginEvent(eventNames[i % 4].c_str(),
                      ProfileEvent::EventType::DEFAULT, i, 0);
    const size_t expected_size = i + 1;
    EXPECT_EQ(expected_size, buffer.Size());
  }
  EXPECT_EQ(2 * max_initial_size, buffer.Size());
  for (size_t j = 0; j < buffer.Size(); ++j) {
    auto event = buffer.At(j);
    EXPECT_EQ(eventNames[j % 4], event->tag);
    EXPECT_EQ(ProfileEvent::EventType::DEFAULT, event->event_type);
    EXPECT_EQ(j, event->event_metadata);
  }
}
// A disabled buffer rejects events until SetEnabled(true) is called.
TEST(ProfileBufferTest, Enable) {
  ProfileBuffer buffer( 10, false);
  EXPECT_EQ(0, buffer.Size());
  auto event_handle =
      buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
                        42, 0);
  EXPECT_EQ(kInvalidEventHandle, event_handle);
  EXPECT_EQ(0, buffer.Size());
  buffer.SetEnabled(true);
  event_handle =
      buffer.BeginEvent("hello", ProfileEvent::EventType::DEFAULT,
                        42, 0);
  EXPECT_GE(event_handle, 0);
  EXPECT_EQ(1, buffer.Size());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_buffer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/profiling/profile_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b8ea089a-3733-4567-b660-481e6bcb3155 | cpp | google/tensorstore | intrusive_red_black_tree | tensorstore/internal/container/intrusive_red_black_tree.cc | tensorstore/internal/container/intrusive_red_black_tree_test.cc | #include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include <stddef.h>
#include <array>
#include <cassert>
#include <utility>
namespace tensorstore {
namespace internal {
namespace intrusive_red_black_tree {
namespace ops {
// Sets `parent` as the parent of `node`, preserving the node's color
// (stored in the parent pointer's tag bit).
inline void SetParent(NodeData* node, NodeData* parent) {
  node->rbtree_parent_ = {parent, node->rbtree_parent_.tag()};
}
// Sets the color of `node` without disturbing its parent pointer.
inline void SetColor(NodeData* node, Color color) {
  node->rbtree_parent_.set_tag(color);
}
// Returns which child (kLeft or kRight) `node` is of its parent.
inline Direction ChildDir(NodeData* node) {
  return static_cast<Direction>(node != ops::Child(ops::Parent(node), kLeft));
}
// Returns the parent of the parent of `node`.
inline NodeData* Grandparent(NodeData* node) {
  return ops::Parent(ops::Parent(node));
}
// Rotates the subtree rooted at `x` in direction `dir`: x's `!dir` child
// `y` takes x's place and x becomes y's `dir` child, preserving in-order
// traversal order. Updates `root` if x was the root.
void Rotate(NodeData*& root, NodeData* x, Direction dir) {
  auto* y = ops::Child(x, !dir);
  // Transfer y's `dir` subtree to x.
  ops::Child(x, !dir) = ops::Child(y, dir);
  if (ops::Child(y, dir)) {
    ops::SetParent(ops::Child(y, dir), x);
  }
  // Splice y into x's position under x's old parent.
  ops::SetParent(y, ops::Parent(x));
  if (!ops::Parent(x)) {
    root = y;
  } else {
    ops::Child(ops::Parent(x), ops::ChildDir(x)) = y;
  }
  ops::Child(y, dir) = x;
  ops::SetParent(x, y);
}
// Restores the red-black invariants after inserting the red node `z`
// (CLRS-style insert fixup, symmetric over both directions). Returns true
// iff the tree's black height increased, i.e. the root ended up red and is
// forced back to black here.
bool InsertFixup(NodeData*& root, NodeData* z) {
  assert(ops::IsRed(z));
  while (ops::IsRed(ops::Parent(z))) {
    Direction dir = ops::ChildDir(ops::Parent(z));
    if (NodeData* y = ops::Child(ops::Grandparent(z), !dir); ops::IsRed(y)) {
      // Case 1: red uncle -> recolor parent/uncle/grandparent and move the
      // potential violation two levels up.
      ops::SetColor(ops::Parent(z), kBlack);
      ops::SetColor(y, kBlack);
      ops::SetColor(ops::Grandparent(z), kRed);
      z = ops::Grandparent(z);
    } else {
      // Case 2: z is an "inner" child -> rotate it to the outside first.
      if (ops::ChildDir(z) == !dir) {
        z = ops::Parent(z);
        ops::Rotate(root, z, dir);
      }
      // Case 3: recolor and rotate the grandparent; this terminates.
      ops::SetColor(ops::Parent(z), kBlack);
      ops::SetColor(ops::Grandparent(z), kRed);
      ops::Rotate(root, ops::Grandparent(z), !dir);
      assert(!ops::IsRed(ops::Parent(z)));
      break;
    }
  }
  const Color existing_color = ops::GetColor(root);
  ops::SetColor(root, kBlack);
  return existing_color == kRed;
}
// A tree root paired with its black height (the number of black nodes on
// any root-to-leaf path).
struct TreeWithBlackHeight {
  NodeData* root = nullptr;
  size_t black_height = 0;
};
// Computes the black height of the tree rooted at `node` by counting black
// nodes along the leftmost path (equal on every path by the red-black
// invariant).
size_t BlackHeight(NodeData* node) {
  size_t black_height = 0;
  while (node) {
    if (ops::GetColor(node) == kBlack) ++black_height;
    node = ops::Child(node, kLeft);
  }
  return black_height;
}
// Joins `a_tree`, `center`, and `b_tree` into one tree in which a_tree's
// nodes end up on the `a_dir` side of `center` and b_tree's nodes on the
// other side. Classic red-black join: descend the taller tree's spine to a
// black node whose black height matches the shorter tree, graft `center`
// there as a red node, then fix up.
TreeWithBlackHeight Join(TreeWithBlackHeight a_tree, NodeData* center,
                         TreeWithBlackHeight b_tree, Direction a_dir) {
  assert(a_tree.black_height == ops::BlackHeight(a_tree.root));
  assert(b_tree.black_height == ops::BlackHeight(b_tree.root));
  // Normalize so that `a_tree` is the taller (or equal-height) tree.
  if (a_tree.black_height < b_tree.black_height) {
    a_dir = !a_dir;
    std::swap(a_tree, b_tree);
  }
  size_t difference = a_tree.black_height - b_tree.black_height;
  // Find the black node `a_graft` on a_tree's `!a_dir` spine whose black
  // height equals b_tree's.
  NodeData* a_graft = a_tree.root;
  NodeData* a_graft_parent = nullptr;
  while (true) {
    if (!ops::IsRed(a_graft)) {
      if (difference == 0) break;
      --difference;
    }
    a_graft_parent = a_graft;
    a_graft = ops::Child(a_graft, !a_dir);
  }
  assert(!ops::IsRed(a_graft));
  // Replace `a_graft` with a red `center` whose `a_dir` child is `a_graft`
  // and whose other child is b_tree's root.
  ops::SetColor(center, kRed);
  ops::SetParent(center, a_graft_parent);
  if (a_graft_parent) {
    ops::Child(a_graft_parent, !a_dir) = center;
  } else {
    a_tree.root = center;
  }
  ops::Child(center, a_dir) = a_graft;
  if (a_graft) {
    ops::SetParent(a_graft, center);
  }
  ops::Child(center, !a_dir) = b_tree.root;
  if (b_tree.root) {
    ops::SetParent(b_tree.root, center);
  }
  // The new red node may violate the red rule; InsertFixup repairs it and
  // reports whether the black height grew.
  a_tree.black_height += ops::InsertFixup(a_tree.root, center);
  return a_tree;
}
// Turns the subtree rooted at `child` into a standalone tree with a black
// root. `black_height` is the subtree's black height before recoloring; it
// is incremented when a red root is forced black.
TreeWithBlackHeight ExtractSubtreeWithBlackHeight(NodeData* child,
                                                  size_t black_height) {
  TreeWithBlackHeight tree{child, black_height};
  if (child) {
    ops::SetParent(child, nullptr);
    if (ops::GetColor(child) == kRed) {
      ++tree.black_height;
      ops::SetColor(child, kBlack);
    }
  }
  return tree;
}
// Returns the `dir`-most node in the (non-empty) subtree rooted at `x`.
NodeData* ExtremeNode(NodeData* x, Direction dir) {
  assert(x);
  while (auto* child = ops::Child(x, dir)) x = child;
  return x;
}
// Like ExtremeNode, but tolerates an empty tree (returns null).
NodeData* TreeExtremeNode(NodeData* root, Direction dir) {
  if (!root) return nullptr;
  return ops::ExtremeNode(root, dir);
}
// Returns the in-order neighbor of `x` in direction `dir` (successor for
// kRight, predecessor for kLeft), or null if `x` is the extreme node.
NodeData* Traverse(NodeData* x, Direction dir) {
  // If x has a `dir` subtree, the neighbor is its `!dir`-most node.
  if (auto* child = ops::Child(x, dir)) {
    return ops::ExtremeNode(child, !dir);
  }
  // Otherwise climb until we leave a `!dir` child edge.
  auto* y = ops::Parent(x);
  while (y && x == ops::Child(y, dir)) {
    x = y;
    y = ops::Parent(y);
  }
  return y;
}
// Inserts `new_node` as the `direction` child of `parent` (or as root when
// `parent` is null), then rebalances. If that child slot is occupied, the
// node is attached on the opposite side of parent's in-order neighbor in
// `direction`, which is then guaranteed to be free.
void Insert(NodeData*& root, NodeData* parent, Direction direction,
            NodeData* new_node) {
  if (!parent) {
    assert(!root);
    root = new_node;
  } else {
    if (ops::Child(parent, direction)) {
      parent = ops::Traverse(parent, direction);
      direction = !direction;
    }
    ops::Child(parent, direction) = new_node;
  }
  // New nodes start red with no children; InsertFixup restores invariants.
  ops::SetParent(new_node, parent);
  ops::Child(new_node, kLeft) = nullptr;
  ops::Child(new_node, kRight) = nullptr;
  ops::SetColor(new_node, kRed);
  ops::InsertFixup(root, new_node);
}
// Join overload that computes the black heights on demand.
NodeData* Join(NodeData* a_tree, NodeData* center, NodeData* b_tree,
               Direction a_dir) {
  return ops::Join({a_tree, ops::BlackHeight(a_tree)}, center,
                   {b_tree, ops::BlackHeight(b_tree)}, a_dir)
      .root;
}
// Joins two trees without an explicit center node: the `!a_dir`-most node
// of `a_tree` is removed and reused as the center.
NodeData* Join(NodeData* a_tree, NodeData* b_tree, Direction a_dir) {
  if (!a_tree) return b_tree;
  if (!b_tree) return a_tree;
  auto* center = ops::ExtremeNode(a_tree, !a_dir);
  ops::Remove(a_tree, center);
  return ops::Join(a_tree, center, b_tree, a_dir);
}
// Splits the tree at `center`, returning {nodes before center, nodes after
// center}; `center` itself belongs to neither result. Walks from `center`
// up to the root, joining each detached sibling subtree (plus the ancestor
// itself) onto the appropriate side.
std::array<NodeData*, 2> Split(NodeData* root, NodeData* center) {
  std::array<TreeWithBlackHeight, 2> split_trees;
  size_t center_black_height = ops::BlackHeight(center);
  size_t child_black_height =
      center_black_height - (ops::GetColor(center) == kBlack);
  // Seed the two halves with center's own subtrees.
  for (int dir = 0; dir < 2; ++dir) {
    split_trees[dir] = ops::ExtractSubtreeWithBlackHeight(
        ops::Child(center, static_cast<Direction>(dir)), child_black_height);
  }
  NodeData* parent = ops::Parent(center);
  while (parent) {
    Direction dir =
        static_cast<Direction>(ops::Child(parent, kRight) == center);
    NodeData* grandparent = ops::Parent(parent);
    auto parent_color = ops::GetColor(parent);
    // The parent and its other subtree join the half on the far side of
    // the path being climbed.
    split_trees[!dir] =
        ops::Join(split_trees[!dir], parent,
                  ops::ExtractSubtreeWithBlackHeight(ops::Child(parent, !dir),
                                                     center_black_height),
                  dir);
    center = parent;
    parent = grandparent;
    center_black_height += (parent_color == kBlack);
  }
  assert(center == root);
  return {{split_trees[0].root, split_trees[1].root}};
}
// Split variant for the result of a search that stopped at `center` while
// moving in `dir`. When `found` is false, `center` is reattached as the
// `dir`-most node of the `!dir` half and the out-param is reset to null.
std::array<NodeData*, 2> Split(NodeData* root, NodeData*& center, Direction dir,
                               bool found) {
  if (!center) return {{nullptr, nullptr}};
  auto split_trees = ops::Split(root, center);
  if (!found) {
    ops::InsertExtreme(split_trees[!dir], dir, center);
    center = nullptr;
  }
  return split_trees;
}
// Inserts `new_node` as the `dir`-most node of the tree.
void InsertExtreme(NodeData*& root, Direction dir, NodeData* new_node) {
  ops::Insert(root, ops::TreeExtremeNode(root, dir), dir, new_node);
}
// Removes `z` from the tree. Splices out either `z` itself (when it has at
// most one child) or its in-order successor, then runs the standard
// red-black delete fixup if the spliced-out node was black.
void Remove(NodeData*& root, NodeData* z) {
  NodeData* y;
  if (!ops::Child(z, kLeft) || !ops::Child(z, kRight)) {
    y = z;
  } else {
    // Two children: splice out the in-order successor instead.
    y = ops::Traverse(z, kRight);
  }
  // `x` is y's only (possibly null) child; `px` is its parent after the
  // splice.
  NodeData* x =
      ops::Child(y, static_cast<Direction>(ops::Child(y, kLeft) == nullptr));
  NodeData* px = ops::Parent(y);
  if (x) {
    ops::SetParent(x, px);
  }
  if (!px) {
    root = x;
  } else {
    ops::Child(px, ops::ChildDir(y)) = x;
  }
  const Color color_removed = ops::GetColor(y);
  if (y != z) {
    // Move `y` into z's structural position (rather than copying z's
    // payload into y), so external references to nodes stay valid.
    if (px == z) px = y;
    Replace(root, z, y);
  } else {
    z->rbtree_parent_ = ops::DisconnectedParentValue();
  }
  if (color_removed == kRed) {
    // Removing a red node never changes any path's black count.
    return;
  }
  // Delete fixup: `x` carries an "extra black" that is pushed upward or
  // resolved by recoloring/rotations (CLRS cases, symmetric in `dir`).
  while (px && !ops::IsRed(x)) {
    const Direction dir = static_cast<Direction>(x == ops::Child(px, kRight));
    NodeData* w = ops::Child(px, !dir);
    assert(w != nullptr);
    if (ops::GetColor(w) == kRed) {
      // Case 1: red sibling -> rotate to obtain a black sibling.
      ops::SetColor(w, kBlack);
      ops::SetColor(px, kRed);
      ops::Rotate(root, px, dir);
      w = ops::Child(px, !dir);
    }
    assert(ops::GetColor(w) == kBlack);
    if (!ops::IsRed(ops::Child(w, kLeft)) &&
        !ops::IsRed(ops::Child(w, kRight))) {
      // Case 2: black sibling with black children -> recolor and move the
      // extra black up.
      ops::SetColor(w, kRed);
      x = px;
      px = ops::Parent(x);
    } else {
      // Case 3: sibling's far child is black -> rotate toward it first.
      if (!ops::IsRed(ops::Child(w, !dir))) {
        ops::SetColor(ops::Child(w, dir), kBlack);
        ops::SetColor(w, kRed);
        ops::Rotate(root, w, !dir);
        w = ops::Child(px, !dir);
      }
      // Case 4: recolor and rotate; the extra black is absorbed.
      ops::SetColor(w, ops::GetColor(px));
      ops::SetColor(px, kBlack);
      ops::SetColor(ops::Child(w, !dir), kBlack);
      ops::Rotate(root, px, dir);
      x = root;
      px = nullptr;
    }
  }
  if (x) ops::SetColor(x, kBlack);
}
// Replaces `existing` with `replacement` in the tree, copying all of
// existing's links and color, then marks `existing` as disconnected.
void Replace(NodeData*& root, NodeData* existing, NodeData* replacement) {
  *replacement = *existing;
  // Re-point the children's parent links at the replacement.
  for (int dir = 0; dir < 2; ++dir) {
    if (ops::Child(replacement, static_cast<Direction>(dir))) {
      ops::SetParent(ops::Child(replacement, static_cast<Direction>(dir)),
                     replacement);
    }
  }
  if (!ops::Parent(existing)) {
    root = replacement;
  } else {
    ops::Child(ops::Parent(existing), ops::ChildDir(existing)) = replacement;
  }
  existing->rbtree_parent_ = ops::DisconnectedParentValue();
}
}
}
}
} | #include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/random/random.h"
#include "absl/types/compare.h"
#include "tensorstore/internal/compare.h"
namespace {
namespace rbtree = tensorstore::internal::intrusive_red_black_tree;
namespace ops = tensorstore::internal::intrusive_red_black_tree::ops;
// Recursively verifies the red-black structural invariants below `x`
// (consistent parent links, no red node with a red child, equal black
// heights of the two subtrees) and returns the subtree's black height.
int CheckInvariants(ops::NodeData* x) {
  if (!x) return 1;
  ops::NodeData* c1 = ops::Child(x, rbtree::kLeft);
  ops::NodeData* c2 = ops::Child(x, rbtree::kRight);
  if (c1) {
    EXPECT_EQ(x, ops::Parent(c1));
  }
  if (c2) {
    EXPECT_EQ(x, ops::Parent(c2));
  }
  if (ops::GetColor(x) == rbtree::kRed) {
    EXPECT_FALSE(ops::IsRed(c1));
    EXPECT_FALSE(ops::IsRed(c2));
  }
  int lh = CheckInvariants(c1);
  int rh = CheckInvariants(c2);
  EXPECT_EQ(lh, rh);
  if (ops::GetColor(x) == rbtree::kRed) {
    return lh;
  } else {
    return lh + 1;
  }
}
// Whole-tree check: the root must be black, the structure must satisfy the
// invariants above, and in-order traversal must be sorted under `compare`.
template <typename Node, typename Tag, typename Compare>
void CheckInvariants(rbtree::Tree<Node, Tag>& x, Compare compare) {
  auto* root = static_cast<rbtree::NodeBase<Tag>*>(x.root());
  if (!root) return;
  EXPECT_EQ(rbtree::kBlack, ops::GetColor(root));
  CheckInvariants(root);
  EXPECT_TRUE(std::is_sorted(
      x.begin(), x.end(), [&](Node& a, Node& b) { return compare(a, b) < 0; }));
}
// Test harness that mirrors every tree operation in a std::set and checks
// the rbtree invariants and element agreement after each mutation.
struct Set {
  struct Node : public rbtree::NodeBase<> {
    int value;
  };
  // Appends an ASCII rendering of `node` (value and color) and its
  // subtree, used to annotate test failures.
  static void FormatNode(std::string& out, const std::string& prefix,
                         Node* node, bool dir) {
    out += prefix;
    out += (dir == rbtree::kLeft) ? "|- " : " - ";
    if (!node) {
      out += "null";
    } else {
      out += std::to_string(node->value);
      out += ops::GetColor(node) == rbtree::kBlack ? "(blk)" : "(red)";
    }
    out += '\n';
    if (!node) return;
    std::string child_prefix =
        prefix + ((dir == rbtree::kLeft) ? "| " : " ");
    for (int dir = 0; dir < 2; ++dir) {
      FormatNode(out, child_prefix,
                 static_cast<Node*>(
                     ops::Child(node, static_cast<rbtree::Direction>(dir))),
                 static_cast<rbtree::Direction>(dir));
    }
  }
  static std::string FormatTree(rbtree::Tree<Node>& tree) {
    std::string out;
    FormatNode(out, "", tree.root(), rbtree::kRight);
    return out;
  }
  // Three-way comparison of an int key against a node's value.
  static auto CompareToKey(int key) {
    return [key](Node& node) -> absl::weak_ordering {
      return tensorstore::internal::DoThreeWayComparison(std::less<int>{}, key,
                                                         node.value);
    };
  }
  static auto CompareNodes() {
    return [](Node& a, Node& b) -> absl::weak_ordering {
      return tensorstore::internal::CompareResultAsWeakOrdering(a.value -
                                                                b.value);
    };
  }
  static std::vector<int> Elements(rbtree::Tree<Node>& tree) {
    std::vector<int> elements;
    for (auto& node : tree) {
      elements.push_back(node.value);
    }
    return elements;
  }
  using Tree = rbtree::Tree<Node>;
  Tree tree;
  // Reference model: must always contain exactly the tree's elements.
  std::set<int> golden_set;
  void CheckTreeInvariants() {
    SCOPED_TRACE("\n" + FormatTree(tree));
    CheckInvariants(tree, CompareNodes());
  }
  bool Contains(int key) {
    bool result = tree.Find(CompareToKey(key)).found;
    EXPECT_EQ(result, golden_set.count(key) == 1);
    return result;
  }
  Node* FindNode(int key) {
    auto* node = tree.Find(CompareToKey(key)).found_node();
    assert(node);
    return node;
  }
  // Inserts `key` if absent; returns whether an insertion happened and
  // checks agreement with the golden set.
  bool Insert(int key) {
    auto [node, inserted] = tree.FindOrInsert(CompareToKey(key), [&] {
      auto* n = new Node;
      n->value = key;
      return n;
    });
    EXPECT_EQ(key, node->value);
    CheckTreeInvariants();
    EXPECT_EQ(inserted, golden_set.insert(key).second);
    return inserted;
  }
  bool Erase(int key) {
    auto node = tree.Find(CompareToKey(key)).found_node();
    bool result;
    if (!node) {
      result = false;
    } else {
      tree.Remove(*node);
      delete node;
      CheckTreeInvariants();
      result = true;
    }
    EXPECT_EQ(static_cast<int>(result), golden_set.erase(key));
    return result;
  }
  void CheckElements() {
    EXPECT_THAT(Elements(), ::testing::ElementsAreArray(golden_set.begin(),
                                                        golden_set.end()));
  }
  // Splits the tree at `key`, verifies both halves plus the center account
  // for every element, then joins the halves back together.
  void CheckSplitJoin(int key) {
    auto orig_elements = Elements();
    auto split_result = tree.FindSplit([&](Node& node) -> absl::weak_ordering {
      return tensorstore::internal::DoThreeWayComparison(std::less<>{}, key,
                                                         node.value);
    });
    SCOPED_TRACE("Key=" + std::to_string(key) +
                 "\nLeft tree:\n" + FormatTree(split_result.trees[0]) +
                 "\nRight tree:\n" + FormatTree(split_result.trees[1]));
    for (int i = 0; i < 2; ++i) {
      CheckInvariants(split_result.trees[i], CompareNodes());
    }
    std::vector<int> elements_a = Elements(split_result.trees[0]);
    std::vector<int> elements_b = Elements(split_result.trees[1]);
    std::vector<int> combined_elements = elements_a;
    if (split_result.center) {
      EXPECT_EQ(key, split_result.center->value);
      combined_elements.push_back(split_result.center->value);
    }
    combined_elements.insert(combined_elements.end(), elements_b.begin(),
                             elements_b.end());
    EXPECT_THAT(combined_elements, ::testing::ElementsAreArray(orig_elements));
    if (split_result.center) {
      tree = Tree::Join(split_result.trees[0], *split_result.center,
                        split_result.trees[1]);
    } else {
      tree = Tree::Join(split_result.trees[0], split_result.trees[1]);
    }
    CheckTreeInvariants();
    CheckElements();
  }
  // Exercises CheckSplitJoin at every key from just below the minimum to
  // just above the maximum element.
  void CheckSplitJoin() {
    auto orig_elements = Elements();
    if (orig_elements.empty()) {
      CheckSplitJoin(0);
    } else {
      int min = orig_elements.front() - 1;
      int max = orig_elements.back() + 1;
      for (int x = min; x <= max; ++x) {
        SCOPED_TRACE(x);
        CheckSplitJoin(x);
      }
    }
  }
  std::vector<int> Elements() { return Elements(tree); }
  // Frees all heap-allocated nodes still owned by the tree.
  ~Set() {
    for (auto it = tree.begin(); it != tree.end();) {
      auto next = std::next(it);
      tree.Remove(*it);
      delete &*it;
      it = next;
    }
  }
};
// Ascending inserts with split/join checks after each one.
TEST(SetTest, SimpleInsert1) {
  Set rbtree_set;
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(1);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(2);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(3);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
}
// Out-of-order inserts; also exercises Tree::Range equality/iteration.
TEST(SetTest, SimpleInsert2) {
  Set rbtree_set;
  Set::Tree::Range empty_range = rbtree_set.tree;
  EXPECT_TRUE(empty_range.empty());
  EXPECT_EQ(empty_range, empty_range);
  rbtree_set.Insert(5);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(8);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(1);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(3);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(9);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(7);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  rbtree_set.Insert(0);
  rbtree_set.CheckElements();
  rbtree_set.CheckSplitJoin();
  Set::Tree::Range full_range = rbtree_set.tree;
  EXPECT_FALSE(full_range.empty());
  EXPECT_EQ(full_range, full_range);
  EXPECT_NE(full_range, empty_range);
  EXPECT_EQ(full_range.begin(), rbtree_set.tree.begin());
  EXPECT_EQ(full_range.end(), rbtree_set.tree.end());
  Set::Tree::Range partial_range(rbtree_set.FindNode(1),
                                 rbtree_set.FindNode(5));
  EXPECT_NE(partial_range, full_range);
  EXPECT_NE(partial_range, empty_range);
  std::set<int> partial_elements;
  for (auto& node : partial_range) {
    partial_elements.insert(node.value);
  }
  EXPECT_THAT(partial_elements, ::testing::ElementsAre(1, 3));
}
// Randomized inserts (with duplicates) against the golden std::set.
TEST(SetTest, RandomInsert) {
  Set rbtree_set;
  absl::BitGen gen;
  constexpr int kMaxKey = 10;
  for (int i = 0; i < 20; ++i) {
    const int key = absl::Uniform(gen, 0, kMaxKey);
    rbtree_set.Contains(key);
    rbtree_set.Insert(key);
    rbtree_set.CheckElements();
    rbtree_set.CheckSplitJoin();
  }
}
// Randomized mix of inserts and erases against the golden std::set.
TEST(SetTest, RandomInsertRemove) {
  Set rbtree_set;
  absl::BitGen gen;
  constexpr int kMaxKey = 10;
  for (int i = 0; i < 50; ++i) {
    const int key = absl::Uniform(gen, 0, kMaxKey);
    if (absl::Bernoulli(gen, 0.5)) {
      rbtree_set.Insert(key);
    } else {
      rbtree_set.Erase(key);
    }
  }
}
// Test harness for duplicate keys: compares only pair.first so equal keys
// coexist, mirrored against a std::multiset to verify stable ordering.
struct MultiSet {
  using Pair = std::pair<int, int>;
  struct Node : public rbtree::NodeBase<> {
    Pair value;
  };
  // Orders by key only; the second element is a payload used to observe
  // insertion order among equal keys.
  struct Compare {
    bool operator()(const Pair& a, const Pair& b) const {
      return a.first < b.first;
    }
  };
  using Tree = rbtree::Tree<Node>;
  Tree tree;
  std::multiset<Pair, Compare> golden_set;
  constexpr static auto ThreeWayCompare = [](Node& a, Node& b) {
    return tensorstore::internal::CompareResultAsWeakOrdering(a.value.first -
                                                              b.value.first);
  };
  void CheckTreeInvariants() { CheckInvariants(tree, ThreeWayCompare); }
  // Always inserts: the comparator never reports equality, so duplicates
  // of a key are appended after existing entries with that key.
  void Insert(Pair value) {
    tree.FindOrInsert(
        [&](Node& node) {
          return value.first < node.value.first ? absl::weak_ordering::less
                                                : absl::weak_ordering::greater;
        },
        [&] {
          auto* n = new Node;
          n->value = value;
          return n;
        });
    CheckTreeInvariants();
    golden_set.insert(value);
  }
  void CheckElements() {
    EXPECT_THAT(Elements(), ::testing::ElementsAreArray(golden_set.begin(),
                                                        golden_set.end()));
  }
  std::vector<Pair> Elements() {
    std::vector<Pair> elements;
    for (auto& node : tree) {
      elements.push_back(node.value);
    }
    return elements;
  }
  // Frees all heap-allocated nodes still owned by the tree.
  ~MultiSet() {
    for (auto it = tree.begin(); it != tree.end();) {
      auto next = std::next(it);
      tree.Remove(*it);
      delete &*it;
      it = next;
    }
  }
};
// Inserts pairs with duplicate keys and verifies that entries sharing a key
// are kept in insertion order relative to each other.
TEST(MultiSetTest, SimpleInsert1) {
  MultiSet multiset;
  const MultiSet::Pair pairs[] = {{1, 2}, {2, 0}, {1, 1}, {3, 0}, {3, 1}};
  for (const auto& pair : pairs) {
    multiset.Insert(pair);
    multiset.CheckElements();
  }
  EXPECT_THAT(
      multiset.Elements(),
      ::testing::ElementsAre(::testing::Pair(1, 2), ::testing::Pair(1, 1),
                             ::testing::Pair(2, 0), ::testing::Pair(3, 0),
                             ::testing::Pair(3, 1)));
}
// Inserts distinct keys in a scrambled order, validating the tree against the
// golden multiset after each insertion.
TEST(MultiSetTest, SimpleInsert2) {
  MultiSet multiset;
  for (int key : {5, 8, 1, 3, 9, 7, 0}) {
    multiset.Insert({key, 0});
    multiset.CheckElements();
  }
}
// Inserts 20 random (key, value) pairs, with keys drawn from a small range so
// duplicate keys are likely, and validates the contents after each insertion.
TEST(MultiSetTest, RandomInsert) {
  constexpr int kKeyLimit = 10;
  constexpr int kValueLimit = 100;
  MultiSet multiset;
  absl::BitGen rng;
  for (int step = 0; step < 20; ++step) {
    const int key = absl::Uniform(rng, 0, kKeyLimit);
    const int value = absl::Uniform(rng, 0, kValueLimit);
    multiset.Insert({key, value});
    multiset.CheckElements();
  }
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/intrusive_red_black_tree.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/container/intrusive_red_black_tree_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
22f6ce03-d609-4d36-bd27-5d70724af7e0 | cpp | tensorflow/tensorflow | cpu_gpu_shape_verifier | third_party/xla/xla/service/cpu_gpu_shape_verifier.cc | third_party/xla/xla/service/cpu_gpu_shape_verifier_test.cc | #include "xla/service/cpu_gpu_shape_verifier.h"
#include <array>
#include <string_view>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/primitive_util.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
// Returns true if `instruction` is a custom call whose target is on the
// allow-list of metadata-only custom calls permitted to carry s4/u4 values
// (see VerifyS4U4Usage's caller-side allow-list).
bool IsAllowedS4U4CustomCall(const HloInstruction* instruction) {
  // Kept as an array so additional metadata custom calls can be added.
  static constexpr std::array<std::string_view, 1> kMetadataCustomCalls = {
      "Sharding",
  };
  const std::string_view target = instruction->custom_call_target();
  for (std::string_view allowed : kMetadataCustomCalls) {
    if (allowed == target) {
      return true;
    }
  }
  return false;
}
// Verifies that sub-byte (s4/u4) element types only appear in the results of
// allow-listed instructions.  Returns InvalidArgument if `instruction` is not
// on the allow-list and any subshape of its result uses a sub-byte non-PRED
// element type; otherwise returns OK.
absl::Status VerifyS4U4Usage(HloInstruction* instruction) {
  // Rejects the instruction if any subshape of its result has a sub-byte
  // non-PRED element type (i.e. s4/u4).
  auto verify_subshape = [](const HloInstruction* instruction) {
    return ShapeUtil::ForEachSubshapeWithStatus(
        instruction->shape(), [&](const Shape& shape, const ShapeIndex&) {
          if (primitive_util::IsSubByteNonPredType(shape.element_type())) {
            return absl::InvalidArgumentError(absl::StrFormat(
                "%s is currently only supported in allow-listed instructions, "
                "but got instruction: %s",
                primitive_util::LowercasePrimitiveTypeName(
                    shape.element_type()),
                instruction->ToString()));
          }
          return absl::OkStatus();
        });
  };
  switch (instruction->opcode()) {
    // Structural / data-movement ops that are allowed to produce s4/u4
    // results; these skip the subshape check entirely.
    case HloOpcode::kBitcast:
    case HloOpcode::kBroadcast:
    case HloOpcode::kCall:
    case HloOpcode::kConstant:
    case HloOpcode::kConcatenate:
    case HloOpcode::kConvert:
    case HloOpcode::kCopy:
    case HloOpcode::kDynamicSlice:
    case HloOpcode::kDynamicUpdateSlice:
    case HloOpcode::kFusion:
    case HloOpcode::kGetTupleElement:
    case HloOpcode::kParameter:
    case HloOpcode::kSlice:
    case HloOpcode::kTuple:
    case HloOpcode::kWhile:
      break;
    case HloOpcode::kCustomCall:
      // Only metadata custom calls (e.g. "Sharding") may produce s4/u4; any
      // other custom call falls through to the subshape check below.
      if (IsAllowedS4U4CustomCall(instruction)) {
        break;
      }
      ABSL_FALLTHROUGH_INTENDED;
    default:
      return verify_subshape(instruction);
  }
  return absl::OkStatus();
}
}
// Checks backend-specific shape restrictions on `hlo` before the generic
// shape verification runs: the XLA CPU/GPU backends support neither sparse
// layouts nor custom element sizes on types that are not sub-byte, and s4/u4
// types are only permitted in allow-listed instructions.
absl::Status CpuGpuShapeVerifier::Preprocess(HloInstruction* hlo) {
  TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
      hlo->shape(), [&](const Shape& shape, const ShapeIndex&) {
        // Layout restrictions only apply to shapes that carry a layout.
        if (shape.has_layout()) {
          if (LayoutUtil::IsSparseArray(shape)) {
            return absl::InvalidArgumentError(absl::StrFormat(
                "The XLA CPU/GPU backend does not support sparse shapes: %s",
                hlo->ToString()));
          }
          // A nonzero element_size_in_bits denotes a custom element size,
          // which is only meaningful for sub-byte types (s4/u4).
          if (!primitive_util::IsSubByteNonPredType(shape.element_type()) &&
              shape.layout().element_size_in_bits() != 0) {
            return absl::InvalidArgumentError(absl::StrFormat(
                "The XLA CPU/GPU backend does not support custom element sizes "
                "on non-sub-byte-bit types: %s",
                hlo->ToString()));
          }
        }
        return absl::OkStatus();
      }));
  TF_RETURN_IF_ERROR(VerifyS4U4Usage(hlo));
  // Run the generic (target-independent) shape checks last.
  return ShapeVerifier::Preprocess(hlo);
}
} | #include "xla/service/cpu_gpu_shape_verifier.h"
#include <memory>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::HasSubstr;
// Test fixture whose verifier() runs HloVerifier with the CPU/GPU-specific
// shape checks (CpuGpuVerifierMetadata) installed in place of the default.
class CpuGpuShapeVerifierTest : public HloTestBase {
 public:
  CpuGpuShapeVerifierTest() {
    // Swap in a verifier configured with the CPU/GPU target metadata so the
    // tests below exercise CpuGpuShapeVerifier::Preprocess.
    HloVerifierOpts opts;
    std::unique_ptr<TargetVerifierMetadata> metadata =
        std::make_unique<CpuGpuVerifierMetadata>(std::move(opts));
    hlo_verifier_ = std::make_unique<HloVerifier>(std::move(metadata));
  }
};
// u4 may only appear in allow-listed instructions; an elementwise add is not
// one of them, so verification must fail with the sub-byte-type error.
TEST_F(CpuGpuShapeVerifierTest, Int4UnsupportedInstruction) {
  const char* const kHloText = R"(
  HloModule Module
  ENTRY main {
    p0 = u4[2,5] parameter(0)
    ROOT out = u4[2,5] add(p0, p0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kHloText));
  const auto verification_status = verifier().Run(hlo_module.get()).status();
  ASSERT_FALSE(verification_status.ok());
  EXPECT_THAT(
      verification_status.message(),
      HasSubstr("u4 is currently only supported in allow-listed instructions"));
}
// broadcast and call are on the s4/u4 allow-list, so a u4 module built from
// them must pass verification.
TEST_F(CpuGpuShapeVerifierTest, Int4SupportedInstruction) {
  const char* const kHloText = R"(
  HloModule Module
  bcast {
    p0 = u4[] parameter(0)
    ROOT out = u4[3, 3] broadcast(p0), dimensions={}
  }
  ENTRY main {
    p0 = u4[] parameter(0)
    ROOT out = u4[3, 3] call(p0), to_apply=bcast
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kHloText));
  TF_EXPECT_OK(verifier().Run(hlo_module.get()).status());
}
// The "Sharding" custom call is on the metadata custom-call allow-list, so a
// u4 result from it must be accepted.
TEST_F(CpuGpuShapeVerifierTest, Int4ShardingCustomCall) {
  const char* const kHloText = R"(
  HloModule Module
  ENTRY main {
    p0 = u4[] parameter(0)
    ROOT sharded = u4[] custom-call(p0), custom_call_target="Sharding"
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto hlo_module,
                          ParseAndReturnUnverifiedModule(kHloText));
  TF_EXPECT_OK(verifier().Run(hlo_module.get()).status());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu_gpu_shape_verifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu_gpu_shape_verifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e8206bd6-ec21-4c85-a0fb-f7ed53b0a6be | cpp | tensorflow/tensorflow | type_util | tensorflow/compiler/tf2xla/type_util.cc | tensorflow/compiler/tf2xla/type_util_test.cc | #include "tensorflow/compiler/tf2xla/type_util.h"
#include "absl/container/flat_hash_map.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
// Converts a TensorFlow DataType to the corresponding XLA PrimitiveType,
// writing the result through `type`.  Quantized DataTypes map onto the same
// primitive types as their unquantized counterparts (e.g. DT_QINT8 -> S8),
// so this mapping is not injective.  Returns InvalidArgument for DataTypes
// with no XLA equivalent (strings, resources, variants, ref types, ...).
Status DataTypeToPrimitiveType(DataType data_type, xla::PrimitiveType* type) {
  switch (data_type) {
    case tensorflow::DT_BOOL:
      *type = xla::PRED;
      return absl::OkStatus();
    case tensorflow::DT_INT4:
      *type = xla::S4;
      return absl::OkStatus();
    // Quantized integer types intentionally share primitive types with the
    // corresponding plain integers.
    case tensorflow::DT_INT8:
    case tensorflow::DT_QINT8:
      *type = xla::S8;
      return absl::OkStatus();
    case tensorflow::DT_INT16:
    case tensorflow::DT_QINT16:
      *type = xla::S16;
      return absl::OkStatus();
    case tensorflow::DT_INT32:
    case tensorflow::DT_QINT32:
      *type = xla::S32;
      return absl::OkStatus();
    case tensorflow::DT_INT64:
      *type = xla::S64;
      return absl::OkStatus();
    case tensorflow::DT_UINT4:
      *type = xla::U4;
      return absl::OkStatus();
    case tensorflow::DT_UINT8:
    case tensorflow::DT_QUINT8:
      *type = xla::U8;
      return absl::OkStatus();
    case tensorflow::DT_UINT16:
    case tensorflow::DT_QUINT16:
      *type = xla::U16;
      return absl::OkStatus();
    case tensorflow::DT_UINT32:
      *type = xla::U32;
      return absl::OkStatus();
    case tensorflow::DT_UINT64:
      *type = xla::U64;
      return absl::OkStatus();
    case tensorflow::DT_FLOAT8_E5M2:
      *type = xla::F8E5M2;
      return absl::OkStatus();
    case tensorflow::DT_FLOAT8_E4M3FN:
      *type = xla::F8E4M3FN;
      return absl::OkStatus();
    case tensorflow::DT_BFLOAT16:
      *type = xla::BF16;
      return absl::OkStatus();
    case tensorflow::DT_HALF:
      *type = xla::F16;
      return absl::OkStatus();
    case tensorflow::DT_FLOAT:
      *type = xla::F32;
      return absl::OkStatus();
    case tensorflow::DT_DOUBLE:
      *type = xla::F64;
      return absl::OkStatus();
    case tensorflow::DT_COMPLEX64:
      *type = xla::C64;
      return absl::OkStatus();
    case tensorflow::DT_COMPLEX128:
      *type = xla::C128;
      return absl::OkStatus();
    default:
      return errors::InvalidArgument(
          "Unsupported type in DataTypeToPrimitiveType: '",
          DataTypeString(data_type), "'");
  }
}
// Maps an XLA PrimitiveType back to the TensorFlow DataType used to encode
// it.  Inverse of DataTypeToPrimitiveType for non-quantized types; quantized
// DataTypes share primitive types with their unquantized counterparts and
// therefore decode to the unquantized DataType (see the companion tests).
// Returns InvalidArgument for primitive types with no DataType encoding.
absl::StatusOr<DataType> EncodePrimitiveTypeAsDataType(
    xla::PrimitiveType type) {
  // Lazily-initialized and intentionally leaked to avoid static-destruction
  // order issues at process shutdown.
  static const absl::flat_hash_map<xla::PrimitiveType, DataType>&
      data_type_map = *new absl::flat_hash_map<xla::PrimitiveType, DataType>({
          {xla::PRED, DT_BOOL},
          {xla::F8E5M2, DT_FLOAT8_E5M2},
          {xla::F8E4M3FN, DT_FLOAT8_E4M3FN},
          {xla::BF16, DT_BFLOAT16},
          {xla::F16, DT_HALF},
          {xla::F32, DT_FLOAT},
          {xla::F64, DT_DOUBLE},
          {xla::C64, DT_COMPLEX64},
          {xla::S4, DT_INT4},
          {xla::S8, DT_INT8},
          {xla::S16, DT_INT16},
          {xla::S32, DT_INT32},
          {xla::S64, DT_INT64},
          {xla::U4, DT_UINT4},
          {xla::U8, DT_UINT8},
          {xla::U16, DT_UINT16},
          {xla::U32, DT_UINT32},
          {xla::U64, DT_UINT64},
          {xla::C128, DT_COMPLEX128},
      });
  auto it = data_type_map.find(type);
  if (it == data_type_map.end()) {
    // Fixed: the message previously named a nonexistent function
    // ("PrimitiveTypeToDataType"), which made the error hard to trace.
    return errors::InvalidArgument(
        "Unsupported type in EncodePrimitiveTypeAsDataType ", type);
  }
  return it->second;
}
} | #include "tensorflow/compiler/tf2xla/type_util.h"
#include <array>
#include "absl/status/statusor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Returns true if `dt` is expected to have an XLA primitive-type equivalent:
// everything except strings, resources, variants, the invalid sentinel, and
// ref types.
bool DataTypeSupportsXlaConversion(DataType dt) {
  const bool is_unconvertible =
      dt == DataType::DT_STRING || dt == DataType::DT_RESOURCE ||
      dt == DataType::DT_VARIANT || dt == DataType::DT_INVALID;
  if (is_unconvertible) {
    return false;
  }
  // Ref types have no XLA representation either.
  return !IsRefType(dt);
}
// Verifies that every valid, convertible DataType maps to some XLA primitive
// type.  Fixed an off-by-one: proto-generated DataType_MAX is itself a valid
// enum value, so the loop bound must be inclusive (the previous `<` bound
// silently skipped the last DataType).
TEST(DataTypeToPrimitiveTypeTest, AllDataTypesSupported) {
  for (int i = tensorflow::DataType_MIN; i <= tensorflow::DataType_MAX; ++i) {
    if (tensorflow::DataType_IsValid(i)) {
      DataType dt = static_cast<DataType>(i);
      if (DataTypeSupportsXlaConversion(dt)) {
        // Conversion must succeed for every supported DataType.
        xla::PrimitiveType out_type;
        EXPECT_TRUE(DataTypeToPrimitiveType(dt, &out_type).ok());
      }
    }
  }
}
// Round-trip check: any DataType that converts to a primitive type must also
// decode back, and non-quantized types must round-trip to themselves.  Fixed
// an off-by-one: DataType_MAX is a valid enum value, so the loop bound is
// inclusive (the previous `<` bound skipped the last DataType).
TEST(EncodePrimitiveTypeAsDataType, AllPrimitiveTypesSupported) {
  for (int i = tensorflow::DataType_MIN; i <= tensorflow::DataType_MAX; ++i) {
    DataType dt = static_cast<DataType>(i);
    xla::PrimitiveType xla_type;
    if (DataTypeToPrimitiveType(dt, &xla_type).ok()) {
      absl::StatusOr<DataType> data_type_or =
          EncodePrimitiveTypeAsDataType(xla_type);
      EXPECT_TRUE(data_type_or.ok());
      // Quantized types decode to their unquantized counterparts, so only
      // unquantized types round-trip exactly.
      if (!DataTypeIsQuantized(dt)) {
        EXPECT_EQ(*data_type_or, dt);
      }
    }
  }
}
// Each quantized DataType shares a primitive type with its unquantized
// counterpart, so decoding that primitive type yields the unquantized
// DataType.
TEST(EncodePrimitiveTypeAsDataType, QuantizedTypesMapToUnquantized) {
  static std::array<DataType, 5> quantized_inputs = {
      DT_QINT8, DT_QINT16, DT_QINT32, DT_QUINT8, DT_QUINT16};
  static std::array<DataType, 5> expected_outputs = {
      DT_INT8, DT_INT16, DT_INT32, DT_UINT8, DT_UINT16};
  for (size_t idx = 0; idx < quantized_inputs.size(); ++idx) {
    xla::PrimitiveType primitive;
    EXPECT_TRUE(
        DataTypeToPrimitiveType(quantized_inputs[idx], &primitive).ok());
    absl::StatusOr<DataType> decoded = EncodePrimitiveTypeAsDataType(primitive);
    EXPECT_TRUE(decoded.ok());
    EXPECT_EQ(*decoded, expected_outputs[idx]);
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/type_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/type_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.