ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---
b1b7d91e-5a2a-4393-9417-f758eb6e6396 | cpp | tensorflow/tensorflow | rematerializer | tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc | tensorflow/compiler/mlir/lite/experimental/remat/rematerializer_test.cc | #include "tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h"
#include <algorithm>
#include <map>
#include <tuple>
#include <utility>
#include <vector>
namespace mlir {
namespace TFL {
namespace {
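// Helpers for maintaining sorted, duplicate-free vectors of indices: Find
// binary-searches a sorted vector and reports whether the item is present;
// Insert and Erase keep the vector sorted while adding or removing an item.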
std::tuple<std::vector<int>::iterator, bool> Find(const int item,
std::vector<int>& items) {
const auto iter = std::lower_bound(items.begin(), items.end(), item);
return std::make_tuple(iter, iter != items.end() && *iter == item);
}
void Insert(const int item, std::vector<int>& items) {
const auto [iter, found] = Find(item, items);
if (!found) items.insert(iter, item);
}
void Erase(const int item, std::vector<int>& items) {
const auto [iter, found] = Find(item, items);
if (found) items.erase(iter);
}
}
int Rematerializer::AddOperation(const bool is_stateful) {
operations_.emplace_back();
operations_.back().is_stateful = is_stateful;
return operations_.size() - 1;
}
int Rematerializer::AddTensor(const SizeT size) {
tensors_.emplace_back();
tensors_.back().size = size;
return tensors_.size() - 1;
}
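// Removes the use of tensor `itensor` by operation `ioperation` and updates
// the alloc/dealloc bookkeeping: if this was the tensor's first (last) use,
// the allocation (deallocation) is transferred to the new first (last) user,
// if any remains.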
void Rematerializer::DelUse(const int ioperation, const int itensor) {
auto& tensor = tensors_[itensor];
auto& operation = operations_[ioperation];
const auto& size = tensor.size;
const bool was_first_use =
(!tensor.operations.empty() && ioperation == tensor.first_use());
const bool was_last_use =
(!tensor.operations.empty() && ioperation == tensor.last_use());
Erase(ioperation, tensor.operations);
Erase(itensor, operation.tensors);
if (was_first_use) {
operation.alloc -= size;
if (!was_last_use) {
operations_[tensor.first_use()].alloc += size;
}
}
if (was_last_use) {
operation.dealloc -= size;
if (!was_first_use) {
operations_[tensor.last_use()].dealloc += size;
}
}
}
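// Registers a use of tensor `itensor` by operation `ioperation`, shifting the
// allocation/deallocation accounting if this use becomes the tensor's new
// first or last use.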
void Rematerializer::AddUse(const int ioperation, const int itensor) {
auto& tensor = tensors_[itensor];
auto& operation = operations_[ioperation];
const auto& size = tensor.size;
const bool will_be_first_use =
tensor.operations.empty() || ioperation < tensor.first_use();
const bool will_be_last_use =
tensor.operations.empty() || ioperation > tensor.last_use();
if (will_be_first_use) {
operation.alloc += size;
if (!will_be_last_use) {
operations_[tensor.first_use()].alloc -= size;
}
}
if (will_be_last_use) {
operation.dealloc += size;
if (!will_be_first_use) {
operations_[tensor.last_use()].dealloc -= size;
}
}
Insert(ioperation, tensor.operations);
Insert(itensor, operation.tensors);
}
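// Upper bound on the memory that could be saved at `peak_loc` by
// rematerializing the operations in [begin, end): sums the sizes of tensors
// that are created inside that range and are still alive past the peak.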
Rematerializer::SizeT Rematerializer::MaxSavings(const int begin, const int end,
const int peak_loc) const {
SizeT max_savings = 0;
for (int ioperation = begin; ioperation != end; ++ioperation) {
for (const int itensor : operations_[ioperation].tensors) {
if (const Tensor& tensor = tensors_[itensor];
tensor.first_use() == ioperation &&
          tensor.last_use() > peak_loc) {
max_savings += tensor.size;
}
}
}
return max_savings;
}
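// Slides windows of length [begin_len, end_len) that end no later than the
// peak and contain no stateful ops, pre-screens them by their maximum
// possible savings, then simulates the most promising candidates and returns
// the rematerialization that lowers peak memory the most.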
std::tuple<Rematerializer::SizeT, Rematerializer::RematSpec>
Rematerializer::FindBestRemat(const SizeT min_savings, const int begin_len,
const int end_len) const {
const auto peak = GetPeakMemory();
SizeT best_peak_mem = peak.size;
RematSpec best_remat = {};
for (int len = begin_len; len < end_len; ++len) {
std::vector<std::tuple<SizeT, int, int>> pre_screen;
for (int begin = 0, end = begin + len; end <= peak.op_index;
++begin, ++end) {
if (!std::any_of(operations_.begin() + begin, operations_.begin() + end,
[](const Operation& s) { return s.is_stateful; })) {
if (const auto max_savings = MaxSavings(begin, end, peak.op_index);
max_savings >= min_savings) {
pre_screen.emplace_back(max_savings, begin, end);
}
}
}
std::sort(pre_screen.begin(), pre_screen.end());
for (; !pre_screen.empty(); pre_screen.pop_back()) {
const auto& [max_savings, begin, end] = pre_screen.back();
const auto insert_before = FindBestRematPoint(begin, end, peak.op_index);
if (insert_before == operations_.size()) {
continue;
}
const RematSpec this_remat = {begin, end, insert_before};
if (const auto new_peak = GetPeakMemory(this_remat);
new_peak.size < best_peak_mem &&
peak.size >= new_peak.size + min_savings) {
best_peak_mem = new_peak.size;
best_remat = this_remat;
}
if (peak.size >= max_savings + best_peak_mem) {
break;
}
}
if (peak.size >= min_savings + best_peak_mem) {
break;
}
}
return std::make_tuple(best_peak_mem, best_remat);
}
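// Computes, without modifying the graph, the changes to the memory profile
// that applying `remat` would cause, as (op_index, size delta) pairs sorted
// by operation index.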
std::vector<Rematerializer::MemSpec> Rematerializer::GetDeltas(
const RematSpec& remat) const {
std::vector<MemSpec> deltas;
if (remat.begin == remat.end) {
return deltas;
}
const auto source_to_target = [&](int i) {
return i + (remat.insert - remat.begin);
};
struct TensorUse {
int first_use;
int last_use;
};
std::map<int, TensorUse> source_uses;
for (int ioperation = remat.begin; ioperation < remat.end; ++ioperation) {
const auto& operation = operations_[ioperation];
for (const int itensor : operation.tensors) {
const auto [iter, inserted] = source_uses.emplace(
itensor,
TensorUse{ioperation, ioperation});
if (!inserted) {
iter->second.last_use = ioperation;
}
}
}
deltas.reserve(2 * source_uses.size());
for (const auto& [itensor, source] : source_uses) {
auto& tensor = tensors_[itensor];
const TensorUse global = {tensor.first_use(), tensor.last_use()};
auto add_alloc = [&](int pos) { deltas.emplace_back(pos, tensor.size); };
auto add_dealloc = [&](int pos) {
deltas.emplace_back(pos + 1, -tensor.size);
};
auto del_dealloc = [&](int pos) {
deltas.emplace_back(pos + 1, tensor.size);
};
if (global.first_use < remat.begin) {
if (global.last_use < remat.insert) {
del_dealloc(global.last_use);
add_dealloc(source_to_target(source.last_use));
}
} else {
add_alloc(source_to_target(source.first_use));
if (global.last_use < remat.insert) {
add_dealloc(source_to_target(source.last_use));
} else {
add_dealloc(*std::partition_point(
tensor.operations.rbegin(), tensor.operations.rend(),
[&](int i) { return i >= remat.insert; }));
}
}
}
std::sort(deltas.begin(), deltas.end(), ByOpIndex);
return deltas;
}
Rematerializer::MemProfile Rematerializer::GetMemProfile(
const RematSpec& remat) const {
const auto num_inserted = remat.end - remat.begin;
std::vector<SizeT> profile(operations_.size() + num_inserted);
MapMem([&](const MemSpec& m) { profile[m.op_index] = m.size; }, remat);
return profile;
}
Rematerializer::MemSpec Rematerializer::GetPeakMemory(
const RematSpec& remat) const {
MemSpec peak;
MapMem([&](const MemSpec& m) { peak = std::max(m, peak, BySize); }, remat);
return peak;
}
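// Returns the earliest operation past `peak_loc` that consumes a tensor
// created in [begin, end) and still live after the peak -- the point before
// which the rematerialized block would be inserted. Returns
// operations_.size() if no such operation exists.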
int Rematerializer::FindBestRematPoint(const int begin, const int end,
const int peak_loc) const {
int best = operations_.size();
for (int ioperation = begin; ioperation < end; ++ioperation) {
for (const int itensor : operations_[ioperation].tensors) {
if (const auto& tensor = tensors_[itensor];
tensor.first_use() >= begin && tensor.first_use() < end &&
tensor.last_use() > peak_loc) {
for (const int ioperation : tensor.operations) {
if (ioperation > peak_loc && ioperation < best) {
best = ioperation;
break;
}
}
}
}
}
return best;
}
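// Applies `remat` to the internal representation: clones the operations of
// [begin, end) at `insert`, creates fresh copies of the tensors they define,
// and rewires all uses at or after the insertion point to those copies.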
void Rematerializer::Remat(const RematSpec& remat) {
const int num_inserted = remat.end - remat.begin;
for (auto& tensor : tensors_) {
std::for_each(std::lower_bound(tensor.operations.begin(),
tensor.operations.end(), remat.insert),
tensor.operations.end(),
[&](int& iop) { iop += num_inserted; });
}
operations_.insert(operations_.begin() + remat.insert, num_inserted, {});
std::vector<std::pair<int, int>> new_tensors;
for (int iop_old = remat.begin, iop_new = remat.insert; iop_old < remat.end;
++iop_old, ++iop_new) {
for (const auto itensor : operations_[iop_old].tensors) {
if (tensors_[itensor].first_use() == iop_old) {
new_tensors.emplace_back(itensor, AddTensor(tensors_[itensor].size));
}
AddUse(iop_new, itensor);
}
}
std::sort(new_tensors.begin(), new_tensors.end());
for (int iop = remat.insert; iop < operations_.size(); ++iop) {
for (const int old_tensor : std::vector<int>(operations_[iop].tensors)) {
const auto new_tensor =
std::lower_bound(new_tensors.begin(), new_tensors.end(),
std::make_pair(old_tensor, 0));
if (new_tensor != new_tensors.end() && new_tensor->first == old_tensor) {
DelUse(iop, old_tensor);
AddUse(iop, new_tensor->second);
}
}
}
}
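// Greedy driver: repeatedly finds and applies the best rematerialization,
// doubling the minimum block length once no further improvement is found,
// until the cost budget or the maximum block length is exhausted.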
void Rematerializer::RunGreedyAlgorithm(const int max_cost,
const int max_block_length,
const SizeT min_savings) {
const bool unlimited_cost = (max_cost < 0);
for (int min_block_length = 1, cost = 0;
min_block_length <= max_block_length &&
(unlimited_cost || cost <= max_cost);
min_block_length *= 2) {
while (unlimited_cost || cost <= max_cost) {
const auto [peak, remat] = FindBestRemat(
min_savings,
min_block_length,
std::min(1 + (unlimited_cost
? max_block_length
: std::min(max_block_length, max_cost - cost)),
2 * min_block_length));
if (remat.begin == remat.end) break;
Remat(remat);
ApplyRemat(remat);
cost += (remat.end - remat.begin);
}
}
}
}
} | #include "tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <initializer_list>
#include <random>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace mlir {
namespace TFL {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FieldsAre;
using ::testing::StrictMock;
class RematTest : public ::testing::Test {
protected:
class TestableRematerializer : public Rematerializer {
public:
using Rematerializer::AddOperation;
using Rematerializer::AddTensor;
using Rematerializer::AddUse;
using Rematerializer::DelUse;
using Rematerializer::Remat;
};
TestableRematerializer r_;
};
TEST_F(RematTest, TensorUseSimple) {
for (int i = 0; i < 6; ++i) {
r_.AddOperation(false);
r_.AddTensor(1 << i);
}
r_.AddUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(4)));
r_.AddUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(4)));
r_.AddUse(4, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 4, 4, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(2, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 4, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(4), Eq(4)));
r_.DelUse(4, 2);
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(0, 0, 0, 0, 0, 0));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(5), Eq(0)));
}
TEST_F(RematTest, TensorUseMany) {
constexpr int n = 6;
for (int i = 0; i < n; ++i) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1 << (n - i - 1)));
}
for (int i = 0; i < n; ++i) {
r_.AddUse(r_.AddOperation(false),
n - 1 - i);
}
EXPECT_THAT(r_.GetMemProfile(), ElementsAreArray({32, 48, 56, 60, 62, 63, 63,
62, 60, 56, 48, 32}));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(6), Eq(63)));
}
TEST_F(RematTest, PeakTiesAreBrokenInFavorOfLaterOperations) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
ASSERT_THAT(r_.GetMemProfile(), ElementsAreArray({100, 1, 100}));
EXPECT_THAT(r_.GetPeakMemory(), FieldsAre(Eq(2), Eq(100)));
}
TEST_F(RematTest, RematRecreatesOutput) {
r_.AddUse(r_.AddOperation(false), r_.AddTensor(100));
r_.AddOperation(false);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(100, 0));
EXPECT_THAT(r_.GetMemProfile({0, 1, 2}),
ElementsAre(100, 0, 100));
r_.Remat({0, 1, 2});
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(100, 0, 100));
EXPECT_THAT(r_.AddTensor(0), 2);
}
TEST_F(RematTest, RematExtendsInputAndRecreatesOutput) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(1, 0);
r_.AddOperation(false);
r_.AddOperation(false);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(1, 101, 0, 0));
EXPECT_THAT(r_.GetMemProfile({1, 2, 3}),
ElementsAre(1, 101, 1, 101, 0));
r_.Remat({1, 2, 3});
EXPECT_THAT(r_.GetMemProfile(), ElementsAre(1, 101, 1, 101, 0));
EXPECT_THAT(r_.AddTensor(0), 3);
}
TEST_F(RematTest, BlockRematDuplicatesIntraBlockValues) {
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(10));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(100));
r_.AddUse(r_.AddOperation(false),
r_.AddTensor(1000));
r_.AddOperation(false);
r_.AddUse(1, 0);
r_.AddUse(2, 0);
r_.AddUse(2, 1);
r_.AddUse(3, 0);
r_.AddUse(3, 1);
r_.AddUse(3, 2);
ASSERT_THAT(r_.GetMemProfile(), ElementsAre(1, 11, 111, 1111, 0));
EXPECT_THAT(r_.GetMemProfile({1, 4, 5}),
ElementsAre(1, 11, 111, 1111, 1, 11, 111, 1111));
r_.Remat({1, 4, 5});
EXPECT_THAT(r_.GetMemProfile(),
ElementsAre(1, 11, 111, 1111, 1, 11, 111, 1111));
EXPECT_THAT(r_.AddTensor(0), 7);
}
class RematSimulationTest : public testing::Test {
protected:
class RandomRemat : public Rematerializer {
public:
using Rematerializer::Remat;
RandomRemat(const int num_operations, const int num_tensors,
const int num_uses, std::mt19937& rng) {
std::uniform_int_distribution<int> some_size_log(0, 16);
std::uniform_int_distribution<int> some_tensor(0, num_tensors - 1);
std::uniform_int_distribution<int> some_operation(0, num_operations - 1);
for (int i = 0; i < num_tensors; ++i) {
AddTensor(SizeT{1} << some_size_log(rng));
}
for (int i = 0; i < num_operations; ++i) {
AddOperation(false);
}
for (int i = 0; i < num_uses; ++i) {
AddUse(some_operation(rng), some_tensor(rng));
}
}
};
};
TEST_F(RematSimulationTest, SimulationAgreesWithReality) {
constexpr int kNumOperations = 128;
constexpr int kNumTensors = 32;
constexpr int kNumUses = kNumOperations * kNumTensors / 4;
std::mt19937 rng;
for (int i = 0; i < 1024; ++i) {
RandomRemat remat(kNumOperations, kNumTensors, kNumUses, rng);
std::array<int, 3> randos;
const auto& [begin, end, insert] = randos;
for (int i = 0, num_operations = kNumOperations; i < 4;
++i, num_operations += end - begin) {
std::uniform_int_distribution<int> some_op(0, num_operations - 1);
for (auto& rando : randos) {
rando = some_op(rng);
}
std::sort(randos.begin(), randos.end());
const Rematerializer::RematSpec spec{begin, end, insert};
const auto simulated_profile = remat.GetMemProfile(spec);
remat.Remat(spec);
const auto actual_profile = remat.GetMemProfile();
EXPECT_THAT(simulated_profile, ElementsAreArray(actual_profile));
}
}
}
class GreedyRematTest : public testing::Test {
protected:
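// Builds chains of producers whose tensors all stay live until a mirrored
// chain of consumers at the end, so each inner vector yields a symmetric,
// "rainbow"-shaped memory profile. A negative size marks the producing
// operation as stateful; extra_ops/extra_size interleave additional small
// operations between producers.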
class RainbowRemat : public Rematerializer {
public:
explicit RainbowRemat(const std::vector<std::vector<int>>& sizes,
int extra_ops = 0, SizeT extra_size = 0) {
for (const auto& rainbow : sizes) {
int tensor = 0;
int op = 0;
for (const auto& size : rainbow) {
for (int i = 0; i < extra_ops; ++i) {
op = AddOperation(false);
if (i != 0) {
AddUse(op, tensor);
}
tensor = AddTensor(extra_size);
AddUse(op, tensor);
}
op = AddOperation(size < 0);
if (extra_ops > 0) {
AddUse(op, tensor);
}
tensor = AddTensor(std::abs(size));
AddUse(op, tensor);
}
for (int i = 0; i < rainbow.size(); ++i) {
op = AddOperation(false);
AddUse(op, tensor - i);
}
}
}
};
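// Simulates a simple MLP: a forward chain of operations, each producing one
// tensor and consuming the previous one, followed by a backward chain that
// consumes the forward tensors in reverse order. ApplyRemat is mocked so
// tests can verify which specs RunGreedyAlgorithm chooses.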
class MlpRemat : public Rematerializer {
public:
explicit MlpRemat(const std::vector<int>& sizes) {
int forward_tensor = -1;
int backward_tensor = -1;
int op = -1;
for (const int size : sizes) {
op = AddOperation(false);
if (forward_tensor >= 0) AddUse(op, forward_tensor);
forward_tensor = AddTensor(size);
AddUse(op, forward_tensor);
}
for (; forward_tensor >= 0; --forward_tensor) {
op = AddOperation(false);
AddUse(op, forward_tensor);
if (backward_tensor >= 0) AddUse(op, backward_tensor);
backward_tensor = AddTensor(sizes[forward_tensor]);
AddUse(op, backward_tensor);
}
}
MOCK_METHOD(void, ApplyRemat, (const RematSpec&));
};
};
TEST_F(GreedyRematTest, MlpBasic) {
StrictMock<MlpRemat> remat(std::vector<int>({1, 1, 1}));
ASSERT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 2, 3, 4, 4, 3}));
EXPECT_CALL(remat, ApplyRemat(FieldsAre(0,
1,
5)));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(), ElementsAreArray({1, 2, 2, 3, 3, 2, 3}));
}
TEST_F(GreedyRematTest, MlpBinary) {
StrictMock<MlpRemat> remat(std::vector<int>({1, 2, 4, 8}));
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 19, 9, 4}));
EXPECT_CALL(remat, ApplyRemat(FieldsAre(2,
3,
5)));
EXPECT_CALL(remat, ApplyRemat(FieldsAre(0,
1,
8)));
remat.RunGreedyAlgorithm(-1, 4,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 6, 14, 18, 14, 18, 8, 3, 4}));
}
TEST_F(GreedyRematTest, SimpleMax) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, SimpleMaxLongWindow) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 4,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, SimpleSizeThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 1,
4);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 11, 19, 19, 11, 11, 7, 7, 3, 1}));
}
TEST_F(GreedyRematTest, SimpleCostThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1}));
}
TEST_F(GreedyRematTest, SimpleForbiddenOps) {
RainbowRemat remat({{1, 2, -4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 31, 31, 15, 7, 3, 1}));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 12, 20, 20, 12, 12, 4, 2, 2, 1, 1}));
}
TEST_F(GreedyRematTest, DoubleMax) {
RainbowRemat remat({{1, 2, 4, 8, 16}, {4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray(
{1, 3, 7, 15, 31, 31, 15, 7, 3, 1, 4, 12, 28, 28, 12, 4}));
remat.RunGreedyAlgorithm(-1, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 2, 4, 8, 16, 16, 8, 8, 4, 4, 2,
2, 1, 1, 4, 8, 16, 16, 8, 8, 4, 4}));
}
TEST_F(GreedyRematTest, DoubleCostThreshold) {
RainbowRemat remat({{1, 2, 4, 8, 16}, {4, 8, 16}});
ASSERT_THAT(remat.GetMemProfile(),
ElementsAreArray(
{1, 3, 7, 15, 31, 31, 15, 7, 3, 1, 4, 12, 28, 28, 12, 4}));
remat.RunGreedyAlgorithm(2, 1,
1);
EXPECT_THAT(remat.GetMemProfile(),
ElementsAreArray({1, 3, 7, 15, 23, 23, 15, 15, 7, 3, 1, 4, 12, 20,
20, 12, 12, 4}));
}
TEST_F(GreedyRematTest, SingleLongerBlocksByWindowSize) {
std::vector<Rematerializer::SizeT> best_for_window_size;
for (int window_size : {0, 1, 2, 3, 4, 5}) {
RainbowRemat remat({{1, 2, 4, 8}}, 2, 16);
remat.RunGreedyAlgorithm(-1, window_size,
1);
best_for_window_size.push_back(remat.GetPeakMemory().size);
}
EXPECT_THAT(best_for_window_size, ElementsAreArray({44, 36, 36, 32, 32, 32}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/remat/rematerializer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/experimental/remat/rematerializer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0186f4b7-533c-45e9-9879-a8ec368823e9 | cpp | tensorflow/tensorflow | error_reporter | tensorflow/compiler/mlir/lite/core/api/error_reporter.cc | tensorflow/compiler/mlir/lite/core/api/error_reporter_test.cc | #include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include <cstdarg>
namespace tflite {
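// Both overloads collect C-style varargs into a va_list and forward to the
// subclass-implemented Report(const char* format, va_list args).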
int ErrorReporter::Report(const char* format, ...) {
va_list args;
va_start(args, format);
int code = Report(format, args);
va_end(args);
return code;
}
int ErrorReporter::ReportError(void*, const char* format, ...) {
va_list args;
va_start(args, format);
int code = Report(format, args);
va_end(args);
return code;
}
} | #include "tensorflow/compiler/mlir/lite/core/api/error_reporter.h"
#include <cstdio>
#include <gtest/gtest.h>
namespace tflite {
class MockErrorReporter : public ErrorReporter {
public:
MockErrorReporter() { buffer_[0] = 0; }
int Report(const char* format, va_list args) override {
vsnprintf(buffer_, kBufferSize, format, args);
return 0;
}
char* GetBuffer() { return buffer_; }
private:
static constexpr int kBufferSize = 256;
char buffer_[kBufferSize];
};
TEST(ErrorReporter, TestReport) {
MockErrorReporter mock_reporter;
ErrorReporter* reporter = &mock_reporter;
reporter->Report("Error: %d", 23);
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
}
TEST(ErrorReporter, TestReportMacro) {
MockErrorReporter mock_reporter;
#ifndef TF_LITE_STRIP_ERROR_STRINGS
ErrorReporter* reporter = &mock_reporter;
#endif
TF_LITE_REPORT_ERROR(reporter, "Error: %d", 23);
#ifndef TF_LITE_STRIP_ERROR_STRINGS
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), "Error: 23"));
#else
EXPECT_EQ(0, strcmp(mock_reporter.GetBuffer(), ""));
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/core/api/error_reporter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/core/api/error_reporter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4ea07cfb-2e0c-4af9-bde7-dfeec7e2fe36 | cpp | tensorflow/tensorflow | convert_type | tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc | tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include <limits>
#include "absl/strings/str_cat.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
using mlir::Builder;
using mlir::ShapedType;
using mlir::Type;
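// Maps a TensorFlow DataType to the corresponding MLIR builtin, complex, or
// TF-dialect type; types without an MLIR equivalent yield an Unimplemented
// error.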
Status ConvertDataType(DataType dtype, Builder builder, Type* type) {
switch (dtype) {
case DT_HALF:
*type = builder.getF16Type();
return absl::OkStatus();
case DT_FLOAT:
*type = builder.getF32Type();
return absl::OkStatus();
case DT_DOUBLE:
*type = builder.getF64Type();
return absl::OkStatus();
case DT_BOOL:
*type = builder.getIntegerType(1);
return absl::OkStatus();
case DT_INT8:
*type = builder.getIntegerType(8);
return absl::OkStatus();
case DT_INT16:
*type = builder.getIntegerType(16);
return absl::OkStatus();
case DT_INT32:
*type = builder.getIntegerType(32);
return absl::OkStatus();
case DT_INT64:
*type = builder.getIntegerType(64);
return absl::OkStatus();
case DT_UINT8:
*type = builder.getIntegerType(8, false);
return absl::OkStatus();
case DT_UINT16:
*type = builder.getIntegerType(16, false);
return absl::OkStatus();
case DT_UINT32:
*type = builder.getIntegerType(32, false);
return absl::OkStatus();
case DT_UINT64:
*type = builder.getIntegerType(64, false);
return absl::OkStatus();
case DT_BFLOAT16:
*type = builder.getBF16Type();
return absl::OkStatus();
case DT_COMPLEX64:
*type = mlir::ComplexType::get(builder.getF32Type());
return absl::OkStatus();
case DT_COMPLEX128:
*type = mlir::ComplexType::get(builder.getF64Type());
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E4M3FN:
*type = builder.getFloat8E4M3FNType();
return absl::OkStatus();
case tensorflow::DT_FLOAT8_E5M2:
*type = builder.getFloat8E5M2Type();
return absl::OkStatus();
case DT_INT4:
*type = builder.getIntegerType(4, true);
return absl::OkStatus();
case DT_UINT4:
*type = builder.getIntegerType(4, false);
return absl::OkStatus();
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
case DT_##enumerant: \
*type = builder.getType<mlir::tf_type::tftype##Type>(); \
return OkStatus();
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.def"
default:
return errors::Unimplemented(absl::StrCat(
"Converting DataType '", DataTypeString(dtype), "' to MLIR Type"));
}
}
Status ConvertScalarTypeToDataType(Type type, DataType* dtype) {
if (type.isF16()) {
*dtype = DT_HALF;
return absl::OkStatus();
} else if (type.isF32()) {
*dtype = DT_FLOAT;
return absl::OkStatus();
} else if (type.isF64()) {
*dtype = DT_DOUBLE;
return absl::OkStatus();
} else if (type.isBF16()) {
*dtype = DT_BFLOAT16;
return absl::OkStatus();
} else if (type.isFloat8E4M3FN()) {
*dtype = DT_FLOAT8_E4M3FN;
return absl::OkStatus();
} else if (type.isFloat8E5M2()) {
*dtype = DT_FLOAT8_E5M2;
return absl::OkStatus();
} else if (auto itype = mlir::dyn_cast<mlir::IntegerType>(type)) {
switch (itype.getWidth()) {
case 1:
*dtype = DT_BOOL;
return absl::OkStatus();
case 4:
*dtype = itype.isUnsigned() ? DT_UINT4 : DT_INT4;
return absl::OkStatus();
case 8:
*dtype = itype.isUnsigned() ? DT_UINT8 : DT_INT8;
return absl::OkStatus();
case 16:
*dtype = itype.isUnsigned() ? DT_UINT16 : DT_INT16;
return absl::OkStatus();
case 32:
*dtype = itype.isUnsigned() ? DT_UINT32 : DT_INT32;
return absl::OkStatus();
case 64:
*dtype = itype.isUnsigned() ? DT_UINT64 : DT_INT64;
return absl::OkStatus();
default:
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
} else if (auto complex_type = mlir::dyn_cast<mlir::ComplexType>(type)) {
auto etype = complex_type.getElementType();
if (etype.isF32()) {
*dtype = DT_COMPLEX64;
return absl::OkStatus();
} else if (etype.isF64()) {
*dtype = DT_COMPLEX128;
return absl::OkStatus();
}
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (type.isa<mlir::tf_type::tftype##Type>()) { \
*dtype = DT_##enumerant; \
return OkStatus(); \
}
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.def"
return errors::Unimplemented(
absl::StrCat("Converting ", debugString(type), " to DataType"));
}
Status ConvertToDataType(Type type, DataType* dtype) {
if (auto stype = mlir::dyn_cast<ShapedType>(type)) {
TF_RETURN_IF_ERROR(
ConvertScalarTypeToDataType(stype.getElementType(), dtype));
} else {
TF_RETURN_IF_ERROR(ConvertScalarTypeToDataType(type, dtype));
}
return absl::OkStatus();
}
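// Converts a TensorShape / TensorShapeProto to an MLIR shape, mapping
// kTFDynamicSize to ShapedType::kDynamic.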
void ConvertToMlirShape(const TensorShape& input_shape,
llvm::SmallVectorImpl<int64_t>* shape) {
shape->reserve(input_shape.dims());
for (const auto& d : input_shape) {
shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size);
}
}
Status ConvertToMlirShape(const TensorShapeProto& input_shape,
llvm::SmallVectorImpl<int64_t>* shape) {
shape->reserve(input_shape.dim_size());
auto& dims = input_shape.dim();
for (auto& d : dims) {
if (d.size() > std::numeric_limits<int64_t>::max()) {
return errors::InvalidArgument("Shape element overflows");
}
shape->push_back(d.size() == kTFDynamicSize ? ShapedType::kDynamic
: d.size());
}
return absl::OkStatus();
}
absl::StatusOr<mlir::Type> ConvertToMlirTensorType(
const TensorShapeProto& shape, DataType dtype, mlir::Builder* builder) {
mlir::Type element_type;
TF_RETURN_IF_ERROR(ConvertDataType(dtype, *builder, &element_type));
if (shape.unknown_rank()) {
return mlir::UnrankedTensorType::get(element_type);
}
llvm::SmallVector<int64_t, 4> shape_dims;
TF_RETURN_IF_ERROR(ConvertToMlirShape(shape, &shape_dims));
return GetTypeFromTFTensorShape(shape_dims, element_type);
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include <string>
#include <vector>
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace tensorflow {
namespace {
std::string ConvertToMlirString(const std::vector<int64_t>& dims,
bool unknown_rank, DataType dtype) {
TensorShapeProto shape;
shape.set_unknown_rank(unknown_rank);
for (int64_t dim : dims) {
shape.add_dim()->set_size(dim);
}
mlir::MLIRContext context;
mlir::Builder b(&context);
auto status_or = ConvertToMlirTensorType(shape, dtype, &b);
std::string buf;
llvm::raw_string_ostream os(buf);
status_or.value().print(os);
return os.str();
}
TEST(MlirConvertType, ConvertToMlirTensorType) {
EXPECT_EQ("tensor<4x8x16xi32>",
ConvertToMlirString({4, 8, 16}, false,
DataType::DT_INT32));
EXPECT_EQ("tensor<?x27x?xbf16>",
ConvertToMlirString({-1, 27, -1}, false,
DataType::DT_BFLOAT16));
EXPECT_EQ("tensor<*xf32>",
ConvertToMlirString({}, true, DataType::DT_FLOAT));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/convert_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6144cd52-9e2b-40c8-ba8c-c992924cf36d | cpp | tensorflow/tensorflow | tftext_utils | tensorflow/compiler/mlir/lite/utils/tftext_utils.cc | tensorflow/compiler/mlir/lite/utils/tftext_utils_test.cc | #include "tensorflow/compiler/mlir/lite/utils/tftext_utils.h"
#include <optional>
#include <string>
#include <vector>
#include "flatbuffers/flexbuffers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
namespace TFL {
namespace {
constexpr char kNgrams[] = "tftext:Ngrams";
constexpr char kWhitespaceTokenizer[] = "tftext:WhitespaceTokenizer";
constexpr char kCustomSgnnProjection[] = "tftext:custom:SgnnProjection";
constexpr char kTFImplements[] = "tf._implements";
using mlir::TF::FuncAttr;
using mlir::TF::StringType;
inline ConstBytesAttr CustomOption(OpBuilder* builder,
const std::string& content) {
return ConstBytesAttr::get(builder->getContext(),
StringRef(content.data(), content.size()));
}
inline TensorType GetInputType(func::FuncOp func, int idx) {
return mlir::dyn_cast_or_null<TensorType>(
func.getFunctionType().getInput(idx));
}
inline TensorType GetResultType(func::FuncOp func, int idx) {
return mlir::dyn_cast_or_null<TensorType>(
func.getFunctionType().getResult(idx));
}
inline bool RankEquals(const TensorType& type, int rank) {
return type && type.hasRank() && type.getRank() == rank;
}
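// Verifies the fused WhitespaceTokenizer signature: a ranked string input of
// rank 0-2 and, correspondingly, 1-3 results (rank-1 string token values plus
// one or two rank-1 int64 offset tensors).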
LogicalResult VerifyWhitespaceTokenizer(func::FuncOp func) {
auto input_type = GetInputType(func, 0);
if (!input_type || !mlir::isa<StringType>(input_type.getElementType()) ||
!input_type.hasRank()) {
return func.emitError() << "Input should be a string tensor";
}
const std::vector<int> kValidNumOfOutput = {1, 2, 3};
if (input_type.getRank() >= kValidNumOfOutput.size()) {
return func.emitError()
<< "Unrecognized input rank: " << input_type.getRank();
}
if (func.getNumResults() != kValidNumOfOutput[input_type.getRank()]) {
return func.emitError()
<< "Expect " << kValidNumOfOutput[input_type.getRank()]
<< "output(s) when input has rank " << input_type.getRank();
}
auto value_type = GetResultType(func, 0);
if (!RankEquals(value_type, 1) ||
!mlir::isa<StringType>(value_type.getElementType())) {
return func.emitError() << "1st output should be string tensor";
}
if (func.getNumResults() > 1) {
auto offset_type = GetResultType(func, 1);
if (!RankEquals(offset_type, 1) ||
!offset_type.getElementType().isInteger(64)) {
return func.emitError() << "2nd output should be int64 tensor";
}
}
if (func.getNumResults() > 2) {
auto offset_type = GetResultType(func, 2);
if (!RankEquals(offset_type, 1) ||
!offset_type.getElementType().isInteger(64)) {
return func.emitError() << "3rd output should be int64 tensor";
}
}
return success();
}
LogicalResult ConvertWhitespaceTokenizer(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
OpBuilder builder(func.getBody());
std::string empty_option_buffer;
auto op = builder.create<CustomOp>(
func.getLoc(), func.getFunctionType().getResults(), func.getArguments(),
api, CustomOption(&builder, empty_option_buffer));
builder.create<func::ReturnOp>(func.getLoc(), op.getResults());
return success();
}
LogicalResult VerifyNgrams(func::FuncOp func) {
constexpr int kValues = 0;
constexpr int kRowSplits = 1;
if (func.getFunctionType().getInputs().size() !=
func.getFunctionType().getResults().size()) {
return func.emitError() << "Mismatched number of inputs and outputs.";
}
int row_splits = func.getFunctionType().getInputs().size() - kRowSplits;
if (row_splits == 0) {
auto input_values = GetInputType(func, kValues);
if (!input_values ||
!mlir::isa<StringType>(input_values.getElementType())) {
return func.emitError()
<< "Input " << kValues << " should be a string tensor";
}
auto output_values = GetResultType(func, kValues);
if (!output_values ||
!mlir::isa<StringType>(output_values.getElementType())) {
return func.emitError()
<< "Output " << kValues << " should be a string tensor";
}
if (input_values.hasRank() && output_values.hasRank() &&
input_values.getRank() != output_values.getRank()) {
return func.emitError() << "Input " << kValues << " and output "
<< kValues << " should have the same rank";
}
} else {
auto input_values = GetInputType(func, kValues);
if (!RankEquals(input_values, 1) ||
!mlir::isa<StringType>(input_values.getElementType())) {
return func.emitError()
<< "Input " << kValues << " should be a 1D string tensor";
}
auto output_values = GetResultType(func, kValues);
if (!RankEquals(output_values, 1) ||
!mlir::isa<StringType>(output_values.getElementType())) {
return func.emitError()
<< "Output " << kValues << " should be a 1D string tensor";
}
for (int i = 0; i < row_splits; ++i) {
const int row_index = i + kRowSplits;
auto input_row_splits = GetInputType(func, row_index);
if (!RankEquals(input_row_splits, 1) ||
!input_row_splits.getElementType().isInteger(64)) {
return func.emitError()
<< "Input " << row_index << " should be a 1D int64 tensor";
}
auto output_row_splits = GetResultType(func, row_index);
if (!RankEquals(output_row_splits, 1) ||
!output_row_splits.getElementType().isInteger(64)) {
return func.emitError()
<< "Output " << row_index << " should be a 1D int64 tensor";
}
}
}
return success();
}
LogicalResult CreateNgramsCustomOption(func::FuncOp func, DictionaryAttr attrs,
std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
auto width = mlir::dyn_cast_or_null<IntegerAttr>(attrs.get("width"));
if (!width) {
return func.emitError() << "'width' attribute is not set or not an integer";
}
fbb.Int("width", width.getInt());
auto string_separator =
mlir::dyn_cast_or_null<StringAttr>(attrs.get("string_separator"));
if (!string_separator) {
return func.emitError()
<< "'string_separator' attribute is not set or not a string";
}
std::string string_separator_str(string_separator.getValue().data(),
string_separator.getValue().size());
fbb.String("string_separator", string_separator_str);
auto axis = mlir::dyn_cast_or_null<IntegerAttr>(attrs.get("axis"));
if (!axis) {
return func.emitError() << "'axis' attribute is not set or not an integer";
}
fbb.Int("axis", axis.getInt());
auto reduction_type =
mlir::dyn_cast_or_null<StringAttr>(attrs.get("reduction_type"));
if (!reduction_type) {
return func.emitError()
<< "'reduction_type' attribute is not set or not a string";
}
std::string reduction_type_str(reduction_type.getValue().data(),
reduction_type.getValue().size());
fbb.String("reduction_type", reduction_type_str);
fbb.EndMap(start_map);
fbb.Finish();
custom_option_buffer.assign(fbb.GetBuffer().begin(), fbb.GetBuffer().end());
return success();
}
LogicalResult ConvertNgrams(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
OpBuilder builder(func.getBody());
std::string custom_option_buffer;
if (failed(CreateNgramsCustomOption(func, attr.getAttrs(),
custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func.getLoc(), func.getFunctionType().getResults(), func.getArguments(),
api, CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func.getLoc(), op.getResults());
return success();
}
LogicalResult VerifySgnnProjection(func::FuncOp func, FuncAttr attr) {
if (func.getFunctionType().getNumInputs() != 2 ||
func.getFunctionType().getNumResults() != 1) {
return func.emitError() << "Mismatched number of inputs and outputs.";
}
auto values_type = GetInputType(func, 0);
if (!values_type || !mlir::isa<StringType>(values_type.getElementType())) {
return func.emitError() << "First input should be a string tensor";
}
auto row_splits_type = GetInputType(func, 1);
if (!row_splits_type ||
!mlir::isa<IntegerType>(row_splits_type.getElementType())) {
return func.emitError() << "Second input should be an integer tensor";
}
auto hash_seed =
mlir::dyn_cast_or_null<ArrayAttr>(attr.getAttrs().get("hash_seed"));
if (!hash_seed) {
return func.emitError()
<< "'hash_seed' attribute is not set or not an array";
}
auto output_type = GetResultType(func, 0);
if (!output_type || !mlir::isa<FloatType>(output_type.getElementType()) ||
!RankEquals(output_type, 2)) {
return func.emitError() << "Output should be a 2D float tensor.";
}
if (output_type.getDimSize(1) != hash_seed.size()) {
return func.emitError()
<< "Output 2nd dimension should be the num of hash seeds.";
}
auto buckets =
mlir::dyn_cast_or_null<IntegerAttr>(attr.getAttrs().get("buckets"));
if (!buckets) {
return func.emitError() << "'buckets' attribute is not set or not int";
}
return success();
}
LogicalResult CreateSgnnProjectionCustomOption(
func::FuncOp func, DictionaryAttr attrs,
std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
auto hash_seed = mlir::dyn_cast_or_null<ArrayAttr>(attrs.get("hash_seed"));
auto vector_start = fbb.StartVector("hash_seed");
for (int i = 0; i < hash_seed.size(); i++) {
fbb.Add(static_cast<int32_t>(
mlir::dyn_cast<IntegerAttr>(*(hash_seed.getValue().data() + i))
.getInt()));
}
fbb.EndVector(vector_start, true, false);
auto buckets = mlir::dyn_cast_or_null<IntegerAttr>(attrs.get("buckets"));
fbb.Int("buckets", buckets.getInt());
fbb.EndMap(start_map);
fbb.Finish();
custom_option_buffer.assign(fbb.GetBuffer().begin(), fbb.GetBuffer().end());
return success();
}
LogicalResult ConvertSgnnProjection(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
OpBuilder builder(func.getBody());
std::string custom_option_buffer;
if (failed(CreateSgnnProjectionCustomOption(func, attr.getAttrs(),
custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func.getLoc(), func.getFunctionType().getResults(), func.getArguments(),
api, CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func.getLoc(), op.getResults());
return success();
}
}
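// Dispatches on the tf._implements API name; if the fused function's
// signature verifies, its body is replaced by a single tfl.custom op carrying
// the serialized attributes as custom options.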
LogicalResult ConvertTFTextAPI(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
if (api.str() == kWhitespaceTokenizer) {
if (succeeded(VerifyWhitespaceTokenizer(func))) {
return ConvertWhitespaceTokenizer(func, api, attr);
}
} else if (api.str() == kNgrams) {
if (succeeded(VerifyNgrams(func))) {
return ConvertNgrams(func, api, attr);
}
} else if (api.str() == kCustomSgnnProjection) {
if (succeeded(VerifySgnnProjection(func, attr))) {
return ConvertSgnnProjection(func, api, attr);
}
}
return failure();
}
bool IsTFTextRegistered(const tensorflow::OpRegistry* op_registry) {
  const std::vector<std::string> kTFTextOps = {
      "WhitespaceTokenizeWithOffsets",
  };
  for (const auto& iter : kTFTextOps) {
    if (op_registry->LookUp(iter)) {
return true;
}
}
return false;
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/tftext_utils.h"
#include <memory>
#include <string>
#include "absl/status/status.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/status.h"
namespace mlir {
namespace TFL {
using tensorflow::OpRegistrationData;
using tensorflow::OpRegistry;
using tensorflow::Status;
namespace {
void Register(const std::string& op_name, OpRegistry* registry) {
registry->Register([op_name](OpRegistrationData* op_reg_data) -> Status {
op_reg_data->op_def.set_name(op_name);
return absl::OkStatus();
});
}
}
TEST(TfTextUtilsTest, TestTfTextRegistered) {
std::unique_ptr<OpRegistry> registry(new OpRegistry);
Register("WhitespaceTokenizeWithOffsets", registry.get());
EXPECT_TRUE(IsTFTextRegistered(registry.get()));
}
TEST(TfTextUtilsTest, TestTfTextNotRegistered) {
std::unique_ptr<OpRegistry> registry(new OpRegistry);
Register("Test", registry.get());
EXPECT_FALSE(IsTFTextRegistered(registry.get()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/tftext_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/tftext_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3501a4c3-1526-4991-a582-faa2a95b8622 | cpp | tensorflow/tensorflow | perception_ops_utils | tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc | tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc | #include "tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h"
#include <string>
#include "flatbuffers/base.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
namespace mlir {
namespace TFL {
namespace {
constexpr char kTFImplements[] = "tf._implements";
constexpr char kMaxUnpooling[] = "MaxUnpooling2D";
constexpr char kImageWarping[] = "DenseImageWarp";
inline ConstBytesAttr CustomOption(OpBuilder* builder,
const std::string& content) {
return ConstBytesAttr::get(builder->getContext(),
StringRef(content.data(), content.size()));
}
inline LogicalResult HasIntegerArrayWithSize(func::FuncOp* func,
const DictionaryAttr& attrs,
const std::string& attr_name,
int N) {
ArrayAttr array_attr =
mlir::dyn_cast_or_null<ArrayAttr>(attrs.get(attr_name));
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitWarning()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " must be set and has size of " << N;
}
for (Attribute integer_attr : array_attr.getValue()) {
IntegerAttr value = mlir::dyn_cast<IntegerAttr>(integer_attr);
if (!value) {
return func->emitWarning()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " does not contain integer values";
}
}
return success();
}
inline LogicalResult GetIntegerArraySafe(
func::FuncOp* func, const DictionaryAttr& attrs,
const std::string& attr_name, llvm::SmallVectorImpl<int32_t>* results,
int N) {
ArrayAttr array_attr =
mlir::dyn_cast_or_null<ArrayAttr>(attrs.get(attr_name));
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitError()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " must be set and has size of " << N;
}
results->reserve(N);
for (Attribute integer_attr : array_attr.getValue()) {
IntegerAttr value = mlir::dyn_cast<IntegerAttr>(integer_attr);
if (!value) {
return func->emitError()
<< "'" << attr_name << "' attribute for " << kMaxUnpooling
<< " does not contain integer values";
}
results->push_back(value.getInt());
}
return success();
}
}
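// Replaces the body of the fused MaxUnpooling2D function with a single
// tfl.custom op whose custom options are a byte-serialized TfLitePoolParams
// built from the pool_size, strides, and padding attributes.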
LogicalResult ConvertMaxUnpoolingFunc::RewriteFunc() {
func_.eraseBody();
func_.addEntryBlock();
func_->setAttr(kTFImplements,
StringAttr::get(func_.getContext(), kMaxUnpooling));
OpBuilder builder(func_.getBody());
std::string custom_option_buffer;
if (failed(CreateCustomOptions(custom_option_buffer))) {
return failure();
}
auto op = builder.create<CustomOp>(
func_.getLoc(), func_.getFunctionType().getResults(),
func_.getArguments(), kMaxUnpooling,
CustomOption(&builder, custom_option_buffer));
builder.create<func::ReturnOp>(func_.getLoc(), op.getResults());
return success();
}
LogicalResult ConvertMaxUnpoolingFunc::VerifySignature() {
if (func_.getNumArguments() != 2) {
return func_.emitWarning()
<< "Invalid number of arguments to " << kMaxUnpooling << ": "
<< func_.getNumArguments();
}
if (func_.getFunctionType().getNumResults() != 1) {
return func_.emitWarning()
<< "Invalid number of results from " << kMaxUnpooling << ": "
<< func_.getFunctionType().getNumResults();
}
auto attrs = attr_.getAttrs();
if (failed(HasIntegerArrayWithSize(&func_, attrs, "pool_size", 2))) {
return failure();
}
if (failed(HasIntegerArrayWithSize(&func_, attrs, "strides", 2))) {
return failure();
}
auto padding = mlir::dyn_cast_or_null<StringAttr>(attrs.get("padding"));
if (!padding) {
return func_.emitWarning() << "'padding' attribute for " << kMaxUnpooling
<< " is not set or not a string";
}
if (padding.getValue() != "VALID" && padding.getValue() != "SAME") {
return func_.emitWarning()
<< "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'";
}
return success();
}
LogicalResult ConvertMaxUnpoolingFunc::CreateCustomOptions(
std::string& custom_option_buffer) {
auto attrs = attr_.getAttrs();
TfLitePoolParams pool_params;
llvm::SmallVector<int32_t, 2> pool_size;
if (failed(GetIntegerArraySafe(&func_, attrs, "pool_size", &pool_size, 2))) {
return failure();
}
pool_params.filter_height = pool_size[0];
pool_params.filter_width = pool_size[1];
llvm::SmallVector<int32_t, 2> strides;
if (failed(GetIntegerArraySafe(&func_, attrs, "strides", &strides, 2))) {
return failure();
}
pool_params.stride_height = strides[0];
pool_params.stride_width = strides[1];
auto padding = mlir::dyn_cast_or_null<StringAttr>(attrs.get("padding"));
if (!padding) {
return func_.emitError() << "'padding' attribute for " << kMaxUnpooling
<< " is not set or not a string";
}
if (padding.getValue() == "VALID") {
pool_params.padding = kTfLitePaddingValid;
} else if (padding.getValue() == "SAME") {
pool_params.padding = kTfLitePaddingSame;
} else {
return func_.emitError()
<< "Padding for " << kMaxUnpooling << " must be 'SAME' or 'VALID'";
}
pool_params.activation = kTfLiteActNone;
pool_params.computed.padding = TfLitePaddingValues{0, 0, 0, 0};
#if FLATBUFFERS_LITTLEENDIAN == 0
int32_t* p = reinterpret_cast<int32_t*>(&pool_params);
for (size_t i = 0; i < sizeof(TfLitePoolParams) / 4; i++, p++)
*p = flatbuffers::EndianSwap(*p);
#endif
custom_option_buffer.assign(reinterpret_cast<char*>(&pool_params),
sizeof(TfLitePoolParams));
return success();
}
LogicalResult ConvertDenseImageWarpFunc::RewriteFunc() {
func_.eraseBody();
func_.addEntryBlock();
func_->setAttr(kTFImplements,
StringAttr::get(func_.getContext(), kImageWarping));
OpBuilder builder(func_.getBody());
auto op = builder.create<CustomOp>(func_.getLoc(),
func_.getFunctionType().getResults(),
func_.getArguments(), kImageWarping,
CustomOption(&builder, ""));
builder.create<func::ReturnOp>(func_.getLoc(), op.getResults());
return success();
}
LogicalResult ConvertDenseImageWarpFunc::VerifySignature() {
if (func_.getNumArguments() != 2) {
return func_.emitWarning()
<< "Invalid number of arguments to " << kImageWarping << ": "
<< func_.getNumArguments();
}
if (func_.getFunctionType().getNumResults() != 1) {
return func_.emitWarning()
<< "Invalid number of results from " << kImageWarping << ": "
<< func_.getFunctionType().getNumResults();
}
auto image_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getInput(0));
if (!image_type || !image_type.getElementType().isF32() ||
image_type.getRank() != 4) {
return func_.emitWarning() << "Image should be a 4D float tensor";
}
auto flow_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getInput(1));
if (!flow_type || !flow_type.getElementType().isF32() ||
flow_type.getRank() != 4) {
return func_.emitWarning() << "Flow should be a 4D float tensor";
}
auto output_type = mlir::dyn_cast_or_null<RankedTensorType>(
func_.getFunctionType().getResult(0));
if (!output_type || !output_type.getElementType().isF32() ||
output_type.getRank() != 4) {
return func_.emitWarning() << "Output should be a 4D float tensor";
}
return success();
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h"
#include <memory>
#include <string>
#include <vector>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
namespace {
template <int NInput, int NOutput>
func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<mlir::Type, NInput>& input_types,
const SmallVector<mlir::Type, NOutput>& output_types) {
auto func_type = builder->getFunctionType(input_types, output_types);
auto func = func::FuncOp::create(
mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
func_type, {});
func.addEntryBlock();
mlir::StringAttr attr_value = builder->getStringAttr("MaxUnpooling2D");
func->setAttr("tf._implements", attr_value);
return func;
}
func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<int64_t, 4>& input_shape,
const SmallVector<int64_t, 4>& output_shape) {
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto indices_type = RankedTensorType::get(input_shape, builder->getI64Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
SmallVector<mlir::Type, 2> input_types{input_type, indices_type};
SmallVector<mlir::Type, 1> output_types{output_type};
return createMaxUnpoolingFunc<2, 1>(builder, input_types, output_types);
}
template <int N>
ArrayAttr createInt32Array(mlir::Builder* builder, mlir::MLIRContext* context,
const SmallVector<int32_t, N>& values) {
SmallVector<Attribute, N> ret;
for (int32_t value : values) {
ret.push_back(builder->getI32IntegerAttr(value));
}
return ArrayAttr::get(context, ret);
}
template <int N>
ArrayAttr createInt64Array(mlir::Builder* builder, mlir::MLIRContext* context,
const SmallVector<int64_t, N>& values) {
SmallVector<Attribute, N> ret;
for (int64_t value : values) {
ret.push_back(builder->getI64IntegerAttr(value));
}
return ArrayAttr::get(context, ret);
}
mlir::TF::FuncAttr createMaxUnpoolingAttr(mlir::MLIRContext* context,
const std::string& padding,
const ArrayAttr& pool_size,
const ArrayAttr& strides) {
SmallVector<::mlir::NamedAttribute, 3> fields;
auto padding_id = ::mlir::StringAttr::get(context, "padding");
fields.emplace_back(padding_id, StringAttr::get(context, padding));
auto pool_size_id = ::mlir::StringAttr::get(context, "pool_size");
fields.emplace_back(pool_size_id, pool_size);
auto strides_id = ::mlir::StringAttr::get(context, "strides");
fields.emplace_back(strides_id, strides);
DictionaryAttr dict = DictionaryAttr::get(context, fields);
return TF::FuncAttr::get(context, "MaxUnpooling2D", dict);
}
}
class PerceptionUtilsTest : public ::testing::Test {
protected:
PerceptionUtilsTest() {}
void SetUp() override {
context_ = std::make_unique<mlir::MLIRContext>();
context_->loadDialect<mlir::arith::ArithDialect, mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect, TensorFlowLiteDialect>();
builder_ = std::make_unique<mlir::Builder>(context_.get());
fused_max_unpooling_func_ =
createMaxUnpoolingFunc(builder_.get(), {2, 4, 4, 2}, {2, 2, 2, 2});
func_attr_ = createMaxUnpoolingAttr(
context_.get(), "SAME",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
}
void TearDown() override {
fused_max_unpooling_func_.erase();
builder_.reset();
}
func::FuncOp fused_max_unpooling_func_;
mlir::TF::FuncAttr func_attr_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
TEST_F(PerceptionUtilsTest, VerifySignatureValid) {
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr_);
EXPECT_FALSE(failed(convert.VerifySignature()));
}
TEST_F(PerceptionUtilsTest, VerifySignatureInvalid) {
auto input_type = RankedTensorType::get({1, 2, 2, 1}, builder_->getF32Type());
auto output_type =
RankedTensorType::get({1, 2, 1, 1}, builder_->getF32Type());
SmallVector<mlir::Type, 1> input_types{input_type};
SmallVector<mlir::Type, 1> output_types{output_type};
auto max_unpooling_func =
createMaxUnpoolingFunc<1, 1>(builder_.get(), input_types, output_types);
mlir::TFL::ConvertMaxUnpoolingFunc convert(max_unpooling_func, func_attr_);
EXPECT_TRUE(failed(convert.VerifySignature()));
max_unpooling_func->erase();
}
TEST_F(PerceptionUtilsTest, RewriteValid) {
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr_);
EXPECT_FALSE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongPadding) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "INVALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongFilter) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "VALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
TEST_F(PerceptionUtilsTest, RewriteWrongStrides) {
auto func_attr = createMaxUnpoolingAttr(
context_.get(), "VALID",
createInt32Array<2>(builder_.get(), context_.get(), {2, 2}),
createInt32Array<2>(builder_.get(), context_.get(), {2, 2, 0}));
mlir::TFL::ConvertMaxUnpoolingFunc convert(fused_max_unpooling_func_,
func_attr);
EXPECT_TRUE(failed(convert.RewriteFunc()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9ce76e47-c64f-4c1a-ba7e-8254ed622e78 | cpp | tensorflow/tensorflow | size_utils | tensorflow/compiler/mlir/lite/utils/size_utils.cc | tensorflow/compiler/mlir/lite/utils/size_utils_test.cc | #include "tensorflow/compiler/mlir/lite/utils/size_utils.h"
#include <cstdint>
#include "mlir/IR/BuiltinTypeInterfaces.h"
namespace mlir {
namespace TFL {
int32_t ConvertToTfliteSize(int64_t size) {
return mlir::ShapedType::isDynamic(size) ? -1 : static_cast<int32_t>(size);
}
}
} | #include "tensorflow/compiler/mlir/lite/utils/size_utils.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace TFL {
namespace {
TEST(SizeUtilTest, TestConvertsSize) {
ASSERT_EQ(ConvertToTfliteSize(1), 1);
ASSERT_EQ(ConvertToTfliteSize(-1), -1);
ASSERT_EQ(ConvertToTfliteSize(mlir::ShapedType::kDynamic), -1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/size_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/utils/size_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
58dea728-e29e-444e-823b-bcc5f8ff23b4 | cpp | tensorflow/tensorflow | custom_call | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.cc | third_party/xla/xla/service/gpu/custom_call_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.h"
#include <optional>
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace odml {
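// Converts mhlo.custom_call ops whose target name starts with "custom_call."
// into tfl.custom, carrying any string backend config as the custom options.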
class ConvertCustomCallOp : public OpConversionPattern<mhlo::CustomCallOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::CustomCallOp mhlo_custom_call, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult ConvertCustomCallOp::matchAndRewrite(
mhlo::CustomCallOp mhlo_custom_call, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto call_target_name = mhlo_custom_call.getCallTargetName();
if (!call_target_name.starts_with("custom_call.")) {
return failure();
}
auto tfl_custom = rewriter.create<TFL::CustomOp>(
mhlo_custom_call.getLoc(), mhlo_custom_call.getResultTypes(),
mhlo_custom_call.getInputs());
tfl_custom.setCustomCodeAttr(rewriter.getStringAttr(call_target_name));
if (auto bc = mhlo_custom_call.getBackendConfig()) {
if (auto stringattr = mlir::dyn_cast_or_null<mlir::StringAttr>(*bc)) {
tfl_custom.setCustomOptionAttr(
TFL::ConstBytesAttr::get(rewriter.getContext(), stringattr));
}
} else {
tfl_custom.setCustomOptionAttr(
TFL::ConstBytesAttr::get(rewriter.getContext(), ""));
}
rewriter.replaceOp(mhlo_custom_call, tfl_custom);
return success();
}
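// Erases mhlo.custom_call ops whose target is "shape_assertion".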
class RemoveCustomCallWithShapeAssertion
: public OpRewritePattern<mhlo::CustomCallOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::CustomCallOp op,
PatternRewriter& rewriter) const final;
};
LogicalResult RemoveCustomCallWithShapeAssertion::matchAndRewrite(
mhlo::CustomCallOp op, PatternRewriter& rewriter) const {
if (op.getCallTargetName() != "shape_assertion") {
return mlir::failure();
}
rewriter.eraseOp(op);
return success();
}
std::optional<bool> IsCustomCallLegal(mhlo::CustomCallOp op) {
auto call_target_name = op.getCallTargetName();
if (call_target_name.starts_with("custom_call.")) {
auto bc = op.getBackendConfig();
if (!bc || mlir::isa<mlir::StringAttr>(*bc)) {
return false;
}
}
return true;
}
void PopulateCustomCallPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<ConvertCustomCallOp>(ctx);
target.addDynamicallyLegalOp<mhlo::CustomCallOp>(IsCustomCallLegal);
}
void PopulateCustomCallPreparePatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<RemoveCustomCallWithShapeAssertion>(ctx);
}
}
} | #include <cstddef>
#include <cstdint>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <string_view>
#include <vector>
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cuda/include/cuda_runtime_api.h"
#include "third_party/gpus/cuda/include/driver_types.h"
#define PLATFORM "CUDA"
#elif TENSORFLOW_USE_ROCM
#include "rocm/include/hip/hip_runtime.h"
#define PLATFORM "ROCM"
#endif
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/ffi/execution_context.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/custom_call_status.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/scratch_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#define gpuSuccess cudaSuccess
#define gpuMemcpyAsync cudaMemcpyAsync
#define gpuMemcpyDeviceToDevice cudaMemcpyDeviceToDevice
#define gpuMemcpy cudaMemcpy
#define gpuMemcpyDeviceToHost cudaMemcpyDeviceToHost
#define gpuMemcpyHostToDevice cudaMemcpyHostToDevice
#elif TENSORFLOW_USE_ROCM
#define gpuSuccess hipSuccess
#define gpuMemcpyAsync hipMemcpyAsync
#define gpuMemcpyDeviceToDevice hipMemcpyDeviceToDevice
#define gpuMemcpy hipMemcpy
#define gpuMemcpyDeviceToHost hipMemcpyDeviceToHost
#define gpuMemcpyHostToDevice hipMemcpyHostToDevice
#endif
namespace xla {
struct Range {
int64_t lo;
int64_t hi;
};
}
XLA_FFI_REGISTER_STRUCT_ATTR_DECODING(::xla::Range, StructMember<int64_t>("lo"),
StructMember<int64_t>("hi"));
namespace xla {
namespace {
class CustomCallTest : public ClientLibraryTestBase {};
bool is_invoked_called = false;
void Callback_IsInvoked(se::gpu::GpuStreamHandle , void** ,
const char* , size_t ) {
is_invoked_called = true;
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_IsInvoked, PLATFORM);
TEST_F(CustomCallTest, IsInvoked) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_IsInvoked", {},
ShapeUtil::MakeShape(F32, {}),
"");
EXPECT_FALSE(is_invoked_called);
TF_ASSERT_OK(Execute(&b, {}).status());
EXPECT_TRUE(is_invoked_called);
}
TEST_F(CustomCallTest, UnknownTarget) {
XlaBuilder b(TestName());
CustomCall(&b, "UnknownTarget", {},
ShapeUtil::MakeShape(F32, {}),
"");
ASSERT_FALSE(Execute(&b, {}).ok());
}
void Callback_Memcpy(se::gpu::GpuStreamHandle stream, void** buffers,
const char* , size_t ) {
void* src = buffers[0];
void* dst = buffers[1];
auto err = gpuMemcpyAsync(dst, src, sizeof(float) * 128,
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Memcpy, PLATFORM);
TEST_F(CustomCallTest, Memcpy) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_Memcpy",
{Broadcast(ConstantR0WithType(&b, F32, 42.0), {128})},
ShapeUtil::MakeShape(F32, {128}), "");
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>(), ::testing::Each(42));
}
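// Opaque string passed to the custom call; it contains an embedded NUL byte,
// so its length is tracked explicitly.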
std::string& kExpectedOpaque = *new std::string("abc\0def", 7);
void Callback_Opaque(se::gpu::GpuStreamHandle , void** ,
const char* opaque, size_t opaque_len) {
std::string opaque_str(opaque, opaque_len);
ASSERT_EQ(opaque_str, kExpectedOpaque);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Opaque, PLATFORM);
TEST_F(CustomCallTest, Opaque) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_Opaque", {},
ShapeUtil::MakeShape(F32, {}), kExpectedOpaque);
TF_ASSERT_OK(Execute(&b, {}).status());
}
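// Tuple operands and results are flattened into one buffer list:
// buffers[0..3] are the leaf inputs and buffers[4..7] the leaf outputs.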
void Callback_SubBuffers(se::gpu::GpuStreamHandle stream, void** buffers,
const char* , size_t ) {
auto err = gpuMemcpyAsync(buffers[4], buffers[3], 8 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
err = gpuMemcpyAsync(buffers[5], buffers[0], 128 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
err = gpuMemcpyAsync(buffers[6], buffers[1], 256 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
err = gpuMemcpyAsync(buffers[7], buffers[2], 1024 * sizeof(float),
gpuMemcpyDeviceToDevice, stream);
ASSERT_EQ(err, gpuSuccess);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_SubBuffers, PLATFORM);
TEST_F(CustomCallTest, SubBuffers) {
XlaBuilder b(TestName());
CustomCall(&b, "Callback_SubBuffers",
{
Tuple(&b,
{
Broadcast(ConstantR0WithType(&b, F32, 1), {128}),
Broadcast(ConstantR0WithType(&b, F32, 2), {256}),
}),
Tuple(&b,
{
Broadcast(ConstantR0WithType(&b, F32, 3), {1024}),
Broadcast(ConstantR0WithType(&b, F32, 4), {8}),
}),
},
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {8}),
ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {128}),
ShapeUtil::MakeShape(F32, {256}),
}),
ShapeUtil::MakeShape(F32, {1024}),
}),
"");
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>({0}), ::testing::Each(4));
EXPECT_THAT(result.data<float>({1, 0}), ::testing::Each(1));
EXPECT_THAT(result.data<float>({1, 1}), ::testing::Each(2));
EXPECT_THAT(result.data<float>({2}), ::testing::Each(3));
}
struct TokenTestCase {
std::string input;
std::string output;
std::string opaque;
};
std::ostream& operator<<(std::ostream& s, const TokenTestCase& tc) {
s << tc.input << "x" << tc.output << "x" << tc.opaque;
return s;
}
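// The opaque string spells out the flattened buffer list: 'A' marks a real
// array buffer, 'T' marks a token that is passed as nullptr.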
void Callback_Tokens(se::gpu::GpuStreamHandle stream, void** buffers,
const char* opaque, size_t opaque_len) {
for (int i = 0; i < opaque_len; ++i) {
char c = opaque[i];
ASSERT_TRUE(c == 'A' || c == 'T');
if (c == 'A') {
ASSERT_NE(buffers[i], nullptr);
} else {
ASSERT_EQ(buffers[i], nullptr);
}
}
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Tokens, PLATFORM);
std::vector<TokenTestCase> GetTokenTestCases() {
return {{"{AT}{AT}", "{A{AT}A}", "ATATAATA"},
{"{A}", "T", "AT"},
{"{{T}}", "A", "TA"},
{"AA", "{TA}", "AATA"},
{"TA{TA{TA}}", "{AA}", "TATATAAA"}};
}
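// Parses the bracketed pattern strings above: 'A' -> f32 array, 'T' -> token,
// '{...}' -> nested tuple.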
class CustomCallTokensTest
: public ::testing::WithParamInterface<TokenTestCase>,
public ClientLibraryTestBase {
public:
static std::vector<XlaOp> BuildInputs(XlaBuilder& b,
std::istringstream& str) {
std::vector<XlaOp> values;
while (!str.eof()) {
int ch = str.get();
if (ch == 'A') {
values.push_back(Broadcast(ConstantR0WithType(&b, F32, 1), {128}));
} else if (ch == 'T') {
values.push_back(CreateToken(&b));
} else if (ch == '{') {
std::vector<XlaOp> tuple_elements = BuildInputs(b, str);
values.push_back(Tuple(&b, tuple_elements));
} else if (ch == '}') {
break;
}
}
return values;
}
static std::vector<Shape> BuildOutputType(std::istringstream& str) {
std::vector<Shape> shapes;
while (!str.eof()) {
int ch = str.get();
if (ch == 'A') {
shapes.push_back(ShapeUtil::MakeShape(F32, {8}));
} else if (ch == 'T') {
shapes.push_back(ShapeUtil::MakeTokenShape());
} else if (ch == '{') {
std::vector<Shape> tuple_elements = BuildOutputType(str);
shapes.push_back(ShapeUtil::MakeTupleShape(tuple_elements));
} else if (ch == '}') {
break;
}
}
return shapes;
}
};
TEST_P(CustomCallTokensTest, TokensTest) {
const TokenTestCase& tc = GetParam();
XlaBuilder b("CustomCallTokens");
std::istringstream input(tc.input);
std::istringstream output(tc.output);
std::vector<XlaOp> call_inputs = BuildInputs(b, input);
std::vector<Shape> call_output = BuildOutputType(output);
ASSERT_EQ(call_output.size(), 1);
CustomCall(&b, "Callback_Tokens", call_inputs, call_output.front(),
tc.opaque);
TF_ASSERT_OK(Execute(&b, {}).status());
}
INSTANTIATE_TEST_CASE_P(CustomCallTokens, CustomCallTokensTest,
::testing::ValuesIn(GetTokenTestCases()));
void Callback_WithStatusSucceeded(se::gpu::GpuStreamHandle ,
void** , const char* ,
size_t ,
XlaCustomCallStatus* status) {
XlaCustomCallStatusSetSuccess(status);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_WithStatusSucceeded, PLATFORM);
TEST_F(CustomCallTest, WithStatusSucceeded) {
XlaBuilder b(TestName());
CustomCall(
&b, "Callback_WithStatusSucceeded", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_STATUS_RETURNING);
TF_ASSERT_OK(Execute(&b, {}).status());
}
void Callback_WithStatusFailed(se::gpu::GpuStreamHandle ,
void** , const char* ,
size_t ,
XlaCustomCallStatus* status) {
XlaCustomCallStatusSetFailure(status, "Failed", 6);
}
XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_WithStatusFailed, PLATFORM);
TEST_F(CustomCallTest, WithStatusFailed) {
XlaBuilder b(TestName());
CustomCall(
&b, "Callback_WithStatusFailed", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_STATUS_RETURNING);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("Failed"));
}
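// The tests below exercise the typed FFI custom-call API
// (API_VERSION_TYPED_FFI).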
static absl::Status AlwaysFail(ffi::Result<ffi::AnyBuffer>, int32_t value) {
return absl::InternalError(absl::StrCat("Uh oh, wrong value: ", value));
}
XLA_FFI_DEFINE_HANDLER(kAlwaysFail, AlwaysFail,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<int32_t>("value")
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$always_fail",
PLATFORM, kAlwaysFail);
TEST_F(CustomCallTest, RuntimeCustomCallAlwaysFail) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$always_fail", {},
ShapeUtil::MakeShape(F32, {}), "{value = 42 : i32}",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("Uh oh, wrong value: 42"));
}
static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src,
ffi::Result<ffi::AnyBuffer> dst) {
se::DeviceMemoryBase dst_mem = dst->device_memory();
se::DeviceMemoryBase src_mem = src.device_memory();
return stream->MemcpyD2D(&dst_mem, src_mem, src_mem.size());
}
XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", PLATFORM,
kMemcpy);
TEST_F(CustomCallTest, ExportedFfiMemcpy) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$memcpy",
{Broadcast(ConstantR0WithType(&b, F32, 42.0), {128})},
ShapeUtil::MakeShape(F32, {128}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>(), ::testing::Each(42));
}
static absl::Status HandleUserPointer(ffi::Result<ffi::AnyBuffer>,
const std::string* str) {
return absl::InternalError(*str);
}
XLA_FFI_DEFINE_HANDLER(kHandleUserPointer, HandleUserPointer,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<ffi::Pointer<std::string>>("message"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$user_data", PLATFORM,
kHandleUserPointer);
TEST_F(CustomCallTest, PassUserPointerWithAttrs) {
std::string message = "User-defined message";
auto ptr = reinterpret_cast<uintptr_t>(&message);
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$user_data", {},
ShapeUtil::MakeShape(F32, {}),
absl::StrFormat("{message = %d : i64}", ptr),
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kInternal);
EXPECT_THAT(status.message(), ::testing::HasSubstr("User-defined message"));
}
bool is_ffi_invoked = false;
static absl::Status IsInvoked(ffi::Result<ffi::AnyBuffer>) {
is_ffi_invoked = true;
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(
kIsInvoked, IsInvoked,
ffi::Ffi::Bind().Ret<ffi::AnyBuffer>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$isinvoked", PLATFORM,
kIsInvoked);
TEST_F(CustomCallTest, ExportedFfiIsInvoked) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$isinvoked", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_TRUE(is_ffi_invoked);
}
TEST_F(CustomCallTest, ExportedFfiUnknownTarget) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$unknown_target", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
auto status = Execute(&b, {}).status();
EXPECT_EQ(status.code(), absl::StatusCode::kUnimplemented);
EXPECT_THAT(status.message(),
::testing::HasSubstr("No registered implementation"));
}
static absl::Status Opaque(ffi::Result<ffi::AnyBuffer>,
const std::string* str) {
std::string opaque(*str);
if (opaque != kExpectedOpaque)
return absl::InternalError(absl::StrFormat(
"Opaque string does not match. Expected `%s` but got `%s`",
kExpectedOpaque, opaque));
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kOpaque, Opaque,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<ffi::Pointer<std::string>>("opaque"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$opaque", PLATFORM,
kOpaque);
TEST_F(CustomCallTest, ExportedFfiOpaque) {
XlaBuilder b(TestName());
const std::string opaque = absl::StrFormat(
"{opaque = %d : i64}", reinterpret_cast<uintptr_t>(&kExpectedOpaque));
CustomCall(&b, "__xla_test$$opaque", {},
ShapeUtil::MakeShape(F32, {}),
opaque,
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
static absl::Status CheckTokens(std::vector<PrimitiveType> args,
std::string_view pattern) {
if (args.size() != pattern.size()) {
return absl::InternalError("Incorrect number of arguments");
}
for (auto i = 0; i < pattern.size(); ++i) {
char c = pattern[i];
bool is_token = args[i] == PrimitiveType::TOKEN;
if (c == 'T') {
if (!is_token) {
return absl::InvalidArgumentError(
absl::StrFormat("Expected token at position %d", i));
}
} else if (c == 'A') {
if (is_token) {
return absl::InvalidArgumentError(
absl::StrFormat("Unexpected token at position %d", i));
}
} else {
return absl::InternalError(
absl::StrFormat("Unexpected character %c at position %d", c, i));
}
}
return absl::OkStatus();
}
static absl::Status FfiTokens(ffi::RemainingArgs inputs,
ffi::RemainingRets outputs,
std::string_view pattern) {
std::vector<PrimitiveType> types;
for (auto i = 0; i < inputs.size(); ++i) {
types.push_back(inputs.get<ffi::AnyBuffer>(i).value().element_type());
}
for (auto i = 0; i < outputs.size(); ++i) {
types.push_back(outputs.get<ffi::AnyBuffer>(i).value()->element_type());
}
return CheckTokens(types, pattern);
}
XLA_FFI_DEFINE_HANDLER(
kFfiTokens, FfiTokens,
ffi::Ffi::Bind().RemainingArgs().RemainingRets().Attr<std::string_view>(
"pattern"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$tokens", PLATFORM,
kFfiTokens);
TEST_P(CustomCallTokensTest, ExportedTokensTest) {
const TokenTestCase& tc = GetParam();
XlaBuilder b(TestName());
std::istringstream input(tc.input);
std::istringstream output(tc.output);
std::vector<XlaOp> call_inputs = BuildInputs(b, input);
std::vector<Shape> call_output = BuildOutputType(output);
ASSERT_GE(call_inputs.size(), 1);
ASSERT_LE(call_inputs.size(), 3);
ASSERT_EQ(call_output.size(), 1);
const std::string custom_call_name = "__xla_test$$tokens";
const std::string opaque = absl::StrFormat("{pattern = \"%s\"}", tc.opaque);
CustomCall(&b, custom_call_name, call_inputs,
call_output.front(),
opaque,
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
INSTANTIATE_TEST_SUITE_P(CustomCallTokensTest, CustomCallTokensTest,
::testing::ValuesIn(GetTokenTestCases()));
static absl::Status AlwaysSucceed(ffi::Result<ffi::AnyBuffer>) {
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kAlwaysSucceed, AlwaysSucceed,
ffi::Ffi::Bind().Ret<ffi::AnyBuffer>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$always_succeed",
PLATFORM, kAlwaysSucceed);
TEST_F(CustomCallTest, ExportedFfiWithStatusSucceeded) {
XlaBuilder b(TestName());
CustomCall(&b, "__xla_test$$always_succeed", {},
ShapeUtil::MakeShape(F32, {}), "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
static absl::Status FfiAttributes(ffi::Result<ffi::AnyBuffer>,
absl::Span<const int32_t> i32_arr,
Range range) {
if (i32_arr.size() != 4)
return absl::InternalError("i32_arr size does not match");
if (i32_arr[0] != 1 || i32_arr[1] != 2 || i32_arr[2] != 3 || i32_arr[3] != 4)
return absl::InternalError("i32_arr values do not match");
if (range.lo != 0 || range.hi != 42) {
return absl::InternalError("range values do not match");
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kFfiAttributes, FfiAttributes,
ffi::Ffi::Bind()
.Ret<ffi::AnyBuffer>()
.Attr<absl::Span<const int32_t>>("i32_arr")
.Attr<Range>("range"));
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "xla.gpu.ffi_attributes",
PLATFORM, kFfiAttributes);
TEST_F(CustomCallTest, FfiAttributes) {
XlaBuilder b(TestName());
CustomCall(&b, "xla.gpu.ffi_attributes", {},
ShapeUtil::MakeShape(F32, {}),
"{ i32_arr = array<i32: 1, 2, 3, 4>,"
" range = { lo = 0 : i64, hi = 42 : i64 } }",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
static absl::Status MemcpyWithCalledComputation(
se::Stream* stream, int32_t device_ordinal,
se::DeviceMemoryAllocator* allocator,
se::OwningScratchAllocator<> scratch_allocator, ffi::AnyBuffer src,
ffi::Result<ffi::AnyBuffer> dst, const HloComputation* called_computation) {
if (called_computation == nullptr)
return absl::InternalError("Called computation is not defined");
if (called_computation->instruction_count() != 1)
return absl::InternalError("Unexpected number of instructions");
if (!DynCast<HloParameterInstruction>(called_computation->root_instruction()))
return absl::InternalError("ROOT must be a paremeter");
auto scratch = scratch_allocator.AllocateBytes(1024);
if (!scratch.ok()) return scratch.status();
return Memcpy(stream, src, dst);
}
XLA_FFI_DEFINE_HANDLER(kMemcpyWithCalledComputation,
MemcpyWithCalledComputation,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Ctx<ffi::DeviceOrdinal>()
.Ctx<ffi::Allocator>()
.Ctx<ffi::ScratchAllocator>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::CalledComputation>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(),
"xla.gpu.ext.memcpy_with_called_computation", PLATFORM,
kMemcpyWithCalledComputation);
TEST_F(CustomCallTest, WithCalledComputation) {
auto shape = ShapeUtil::MakeShape(F32, {128});
XlaBuilder copy("copy");
  auto p0 = Parameter(&copy, 0, shape, "l_val");
Copy(p0);
auto copy_computation = copy.Build().value();
XlaBuilder b(TestName());
CustomCallWithComputation(
&b, "xla.gpu.ext.memcpy_with_called_computation",
{Broadcast(ConstantR0WithType(&b, F32, 42.0), {128})},
copy_computation, shape, "",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK_AND_ASSIGN(auto result, ExecuteAndTransfer(&b, {}));
EXPECT_THAT(result.data<float>(), ::testing::Each(42));
}
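// User-supplied context threaded through the FFI execution context; each
// stage handler marks the flag for its stage.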
struct SomeExtraContext {
explicit SomeExtraContext(int32_t value) : value(value) {}
int32_t value;
bool prepared = false;
bool initialized = false;
bool executed = false;
};
template <ffi::ExecutionStage stage>
static absl::Status ExecutionContext(ffi::Result<ffi::AnyBuffer>,
SomeExtraContext* ctx) {
if (ctx->value != 42) return absl::InternalError("Unexpected value");
if constexpr (stage == ffi::ExecutionStage::kPrepare) {
ctx->prepared = true;
} else if constexpr (stage == ffi::ExecutionStage::kInitialize) {
ctx->initialized = true;
} else if constexpr (stage == ffi::ExecutionStage::kExecute) {
ctx->executed = true;
} else {
return absl::InternalError("Unexpected stage");
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kExecutionContextPrepare,
ExecutionContext<ffi::ExecutionStage::kPrepare>,
ffi::Ffi::Bind<ffi::ExecutionStage::kPrepare>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::UserData<SomeExtraContext>>());
XLA_FFI_DEFINE_HANDLER(kExecutionContextInitialize,
ExecutionContext<ffi::ExecutionStage::kInitialize>,
ffi::Ffi::Bind<ffi::ExecutionStage::kInitialize>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::UserData<SomeExtraContext>>());
XLA_FFI_DEFINE_HANDLER(kExecutionContextExecute,
ExecutionContext<ffi::ExecutionStage::kExecute>,
ffi::Ffi::Bind<ffi::ExecutionStage::kExecute>()
.Ret<ffi::AnyBuffer>()
.Ctx<ffi::UserData<SomeExtraContext>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "xla.gpu.ffi_execution_context",
PLATFORM,
{
nullptr,
kExecutionContextPrepare,
kExecutionContextInitialize,
kExecutionContextExecute,
});
TEST_F(CustomCallTest, FfiExecutionContext) {
XlaBuilder b(TestName());
CustomCall(&b, "xla.gpu.ffi_execution_context", {},
ShapeUtil::MakeShape(F32, {}),
"",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
ffi::ExecutionContext execution_context;
TF_ASSERT_OK(execution_context.Emplace<SomeExtraContext>(42));
ffi::internal::ScopedExecutionContext scoped_execution_context(
&execution_context);
TF_ASSERT_OK(Execute(&b, {}).status());
TF_ASSERT_OK_AND_ASSIGN(auto* user_context,
execution_context.Lookup<SomeExtraContext>());
EXPECT_TRUE(user_context->prepared);
EXPECT_TRUE(user_context->initialized);
EXPECT_TRUE(user_context->executed);
}
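// Per-call execution state: instantiated by kInstantiateState and looked up
// again by kGetState.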
struct SomeState {
explicit SomeState(int32_t value) : value(value) {}
int32_t value = 0;
};
static absl::StatusOr<std::unique_ptr<SomeState>> InstantiateState() {
return std::make_unique<SomeState>(42);
}
static absl::Status GetState(ffi::Result<ffi::AnyBuffer>, SomeState* state) {
if (state->value != 42) {
return absl::InternalError("Unexpected value");
}
return absl::OkStatus();
}
XLA_FFI_DEFINE_HANDLER(kInstantiateState, InstantiateState,
ffi::Ffi::BindInstantiate());
XLA_FFI_DEFINE_HANDLER(
kGetState, GetState,
ffi::Ffi::Bind().Ret<ffi::AnyBuffer>().Ctx<ffi::State<SomeState>>());
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "xla.gpu.ffi_execution_state",
PLATFORM,
{
kInstantiateState,
nullptr,
nullptr,
kGetState,
});
TEST_F(CustomCallTest, FfiExecutionState) {
XlaBuilder b(TestName());
CustomCall(&b, "xla.gpu.ffi_execution_state", {},
ShapeUtil::MakeShape(F32, {}),
"",
false,
{}, nullptr,
CustomCallSchedule::SCHEDULE_NONE,
CustomCallApiVersion::API_VERSION_TYPED_FFI);
TF_ASSERT_OK(Execute(&b, {}).status());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/custom_call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/custom_call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf851bbb-4aa8-47fa-9034-b88394650fd3 | cpp | tensorflow/tensorflow | reduce_window | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window.cc | tensorflow/lite/kernels/reduce_window_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/op_util_common.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window_util.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
using TFLPoolAttrsT = std::tuple<IntegerAttr, IntegerAttr, IntegerAttr,
IntegerAttr, StringAttr, StringAttr>;
bool AreDilationsSupported(const ReduceWindowView& op) {
auto is_one = [](int64_t v) { return v == 1; };
return llvm::all_of(op.BaseDilations(), is_one) &&
llvm::all_of(op.WindowDilations(), is_one);
}
bool IsRankSupported(const ReduceWindowView& op) { return op.Rank() == 4; }
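// Returns the reduce_window view and its inferred layout only when the rank
// is 4, all dilations are 1, and the batch/channel dims have no padding.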
std::optional<std::tuple<ReduceWindowView, Layout>> GetViewIfAttrsSupported(
mhlo::ReduceWindowOp op) {
const ReduceWindowView view(op);
if (!IsRankSupported(view)) {
return std::nullopt;
}
if (!AreDilationsSupported(view)) {
return std::nullopt;
}
auto opt_layout = view.GuessLayout();
if (!opt_layout.has_value()) {
return std::nullopt;
}
auto layout = opt_layout.value();
const int64_t batch = layout.SpecialDim1();
if (!view.Paddings()[batch].Trivial()) {
return std::nullopt;
}
const int64_t chan = layout.SpecialDim2();
if (!view.Paddings()[chan].Trivial()) {
return std::nullopt;
}
return std::tuple(view, layout);
}
std::optional<bool> IsReduceWindowLegal(mhlo::ReduceWindowOp op) {
return std::nullopt;
}
std::optional<bool> IsDivideLegal(mhlo::DivOp op) { return std::nullopt; }
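// TFL pooling ops expect an NHWC-style layout: batch in dim 0, channels last,
// spatial dims in between.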
Layout TFLNativePoolingLayout(int64_t rank) {
return Layout(0, rank - 1, llvm::to_vector(llvm::seq<int64_t>(1, rank - 1)));
}
bool IsCstFloatZero(Value val) {
DenseFPElementsAttr initial_value;
return matchPattern(val, m_Constant(&initial_value)) &&
initial_value.getNumElements() == 1 &&
initial_value.getValues<APFloat>()[0].isZero();
}
bool IsCstIntZero(Value val) {
DenseIntElementsAttr initial_value;
return matchPattern(val, m_Constant(&initial_value)) &&
initial_value.getNumElements() == 1 &&
initial_value.getValues<APInt>()[0].isZero();
}
llvm::SmallVector<int64_t> Permute(llvm::ArrayRef<int64_t> data,
llvm::ArrayRef<int64_t> perm) {
llvm::SmallVector<int64_t> res(data.size());
for (int i = 0; i < data.size(); ++i) {
res[i] = data[perm[i]];
}
return res;
}
Value TransposeTensor(OpBuilder& b, Value tensor,
llvm::SmallVector<int64_t> perm) {
const int64_t perm_size = perm.size();
auto perm_attr_type = RankedTensorType::get({perm_size}, b.getI64Type());
auto perm_attr = DenseIntElementsAttr::get(perm_attr_type, perm);
return b.create<mhlo::TransposeOp>(tensor.getLoc(), tensor, perm_attr);
}
DenseIntElementsAttr BuildDenseI64(OpBuilder& b, ArrayRef<int64_t> shape,
ArrayRef<int64_t> data) {
return DenseIntElementsAttr::get(RankedTensorType::get(shape, b.getI64Type()),
data);
}
DenseIntElementsAttr BuildDenseI64(OpBuilder& b, ArrayRef<int64_t> data) {
const int64_t dim = data.size();
return BuildDenseI64(b, {dim}, data);
}
std::optional<std::tuple<Value, Value>> GetInputAndInitIfValid(
mhlo::ReduceWindowOp op) {
if (op->getNumResults() != 1) {
return std::nullopt;
}
if (op.getInputs().size() > 1) {
return std::nullopt;
}
if (op.getInitValues().size() > 1) {
return std::nullopt;
}
auto init_val = op.getInitValues().front();
if (llvm::dyn_cast<ShapedType>(init_val.getType()).getNumElements() != 1) {
return std::nullopt;
}
return std::tuple(op.getInputs().front(), op.getInitValues().front());
}
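// Maps per-dimension HLO padding to TFL's "SAME"/"VALID" strings; returns
// nullopt when the padding matches neither scheme.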
std::optional<std::string> GetTFLPadding(ArrayRef<DimPadding> paddings,
ArrayRef<int64_t> window_strides,
ArrayRef<int64_t> in_shape,
ArrayRef<int64_t> window_dims) {
const int64_t rank = paddings.size();
std::string tfl_padding = "VALID";
for (int i = 1; i < rank - 1; ++i) {
const auto& dim_pad = paddings[i];
if (dim_pad.Trivial()) {
continue;
}
if (!IsSamePaddingOnDim(in_shape[i], 1, window_strides[i], window_dims[i],
dim_pad)) {
return std::nullopt;
}
tfl_padding = "SAME";
}
return tfl_padding;
}
TFLPoolAttrsT BuildTFLPoolAttrs(OpBuilder& b, const ReduceWindowView& view,
StringRef padding) {
const int32_t filter_h = view.WindowDims()[1];
auto filter_h_attr = b.getI32IntegerAttr(filter_h);
const int32_t filter_w = view.WindowDims()[2];
auto filter_w_attr = b.getI32IntegerAttr(filter_w);
const int32_t stride_h = view.WindowStrides()[1];
auto stride_h_attr = b.getI32IntegerAttr(stride_h);
const int32_t stride_w = view.WindowStrides()[2];
auto stride_w_attr = b.getI32IntegerAttr(stride_w);
auto padding_attr = b.getStringAttr(padding);
auto faf_attr = b.getStringAttr("NONE");
return std::tuple(filter_h_attr, filter_w_attr, stride_h_attr, stride_w_attr,
padding_attr, faf_attr);
}
class RelayoutReduceWindow : public OpRewritePattern<mhlo::ReduceWindowOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(mhlo::ReduceWindowOp op,
PatternRewriter& rewriter) const final;
};
LogicalResult RelayoutReduceWindow::matchAndRewrite(
mhlo::ReduceWindowOp op, PatternRewriter& rewriter) const {
auto opt_view = GetViewIfAttrsSupported(op);
if (!opt_view.has_value()) {
return rewriter.notifyMatchFailure(
op, "Reduce window attributes not supported.");
}
const auto [view, layout] = opt_view.value();
auto opt_input_and_init = GetInputAndInitIfValid(op);
if (!opt_input_and_init.has_value()) {
return rewriter.notifyMatchFailure(
op, "Reduce window has wrong number of inputs or init values.");
}
auto [input, init_val] = opt_input_and_init.value();
const auto target_layout = TFLNativePoolingLayout(view.Rank());
if (layout == target_layout) {
return rewriter.notifyMatchFailure(
op, "Reduce window does not need layout change");
}
llvm::SmallVector<int64_t> perm_for_inputs =
layout.GetPermForReLayout(target_layout);
auto paddings = view.Paddings();
llvm::SmallVector<int64_t> new_paddings(paddings.size() * 2);
for (int i = 0; i < new_paddings.size() / 2; ++i) {
const auto& dim_pad = paddings[perm_for_inputs[i]];
new_paddings[2 * i] = dim_pad.Lo();
new_paddings[2 * i + 1] = dim_pad.Hi();
}
const int64_t new_paddings_size = paddings.size();
auto new_paddings_type =
RankedTensorType::get({new_paddings_size, 2}, rewriter.getI64Type());
auto new_paddings_attr =
DenseIntElementsAttr::get(new_paddings_type, new_paddings);
llvm::SmallVector<int64_t> new_window_dims =
Permute(view.WindowDims(), perm_for_inputs);
auto new_window_dims_attr = BuildDenseI64(rewriter, new_window_dims);
llvm::SmallVector<int64_t> new_window_strides =
Permute(view.WindowStrides(), perm_for_inputs);
auto new_window_strides_attr = BuildDenseI64(rewriter, new_window_strides);
llvm::SmallVector<int64_t> perm_for_outputs =
target_layout.GetPermForReLayout(layout);
auto cur_out_type = llvm::dyn_cast<ShapedType>(op.getResult(0).getType());
llvm::SmallVector<int64_t> new_rw_out_shape =
layout.PermuteShape(target_layout, cur_out_type.getShape());
auto new_out_type = cur_out_type.clone(new_rw_out_shape);
auto new_input = TransposeTensor(rewriter, input, perm_for_inputs);
auto new_rw = rewriter.create<mhlo::ReduceWindowOp>(
op.getLoc(), new_out_type, new_input, init_val, new_window_dims_attr,
new_window_strides_attr, BuildDenseI64(rewriter, view.BaseDilations()),
BuildDenseI64(rewriter, view.WindowDilations()), new_paddings_attr);
IRMapping ir_map;
op.getBody().cloneInto(&new_rw.getBody(), ir_map);
auto new_output =
TransposeTensor(rewriter, new_rw.getResult(0), perm_for_outputs);
rewriter.replaceOp(op, new_output);
return success();
}
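// Rewrites a reduce_window<add> whose window spans one full axis (with N - 1
// low padding on that axis) into tfl.cumsum.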
class LegalizeCumSum : public OpConversionPattern<mhlo::ReduceWindowOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeCumSum::matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto opt_input_init = GetInputAndInitIfValid(op);
if (!opt_input_init.has_value()) {
return rewriter.notifyMatchFailure(op,
"Must have 1 input, init and result.");
}
auto [input, init] = opt_input_init.value();
if (failed(MatchBinaryReduceFunction<mhlo::AddOp>(op.getBody()))) {
return rewriter.notifyMatchFailure(op, "Requires scalar add in region.");
}
if (!IsCstFloatZero(init) && !IsCstIntZero(init)) {
return rewriter.notifyMatchFailure(op, "Requires 0 for init value.");
}
const ReduceWindowView view(op);
auto trivial = [](int64_t v) { return v == 1; };
const bool trivial_window_dilate =
llvm::all_of(view.WindowDilations(), trivial);
const bool trivial_base_dilate = llvm::all_of(view.BaseDilations(), trivial);
const bool trivial_stride = llvm::all_of(view.WindowStrides(), trivial);
if (!trivial_window_dilate || !trivial_stride || !trivial_base_dilate) {
return rewriter.notifyMatchFailure(
op, "Requires trivial strides and dilations attributes.");
}
auto input_type = llvm::cast<ShapedType>(input.getType());
if (view.WindowDims().size() != input_type.getRank()) {
return rewriter.notifyMatchFailure(op, "Splat window dims not supported.");
}
int64_t axis = -1;
for (auto [ind, val] : llvm::enumerate(view.WindowDims())) {
if (val == 1) {
continue;
}
if (axis != -1) {
return rewriter.notifyMatchFailure(op, "Multiple non 1 dimensions.");
}
if (val != input_type.getShape()[ind]) {
return rewriter.notifyMatchFailure(
op, "Axis dimension requires size be same as input shape's.");
}
axis = ind;
}
if (axis == -1) {
return rewriter.notifyMatchFailure(op, "Could not identify axis.");
}
const int64_t axis_size = input_type.getShape()[axis];
for (const auto& [ind, dim_pad] : llvm::enumerate(view.Paddings())) {
if (dim_pad.Hi() != 0) {
return rewriter.notifyMatchFailure(op, "Has non trivial high padding.");
}
if (ind != axis) {
if (!dim_pad.Trivial()) {
return rewriter.notifyMatchFailure(
op, "Has non trivial padding on non axis dim.");
}
} else {
if (dim_pad.Lo() != axis_size - 1) {
return rewriter.notifyMatchFailure(
op, "Requires low padding on axis dim to be N - 1.");
}
}
}
auto axis_cst_attr = DenseIntElementsAttr::get(
RankedTensorType::get({}, rewriter.getI32Type()),
static_cast<int32_t>(axis));
auto axis_cst =
rewriter.create<arith::ConstantOp>(op->getLoc(), axis_cst_attr);
auto tfl_exclusive_attr = rewriter.getBoolAttr(false);
auto tfl_reverse_attr = rewriter.getBoolAttr(false);
rewriter.replaceOpWithNewOp<TFL::CumsumOp>(op, op->getResultTypes()[0], input,
axis_cst, tfl_exclusive_attr,
tfl_reverse_attr);
return success();
}
bool isFloatMinusInfinity(Value value) {
DenseFPElementsAttr float_value;
if (!matchPattern(value, m_Constant(&float_value))) {
return false;
}
if (float_value.getNumElements() != 1) {
return false;
}
APFloat element = float_value.getValues<APFloat>()[0];
return element.isInfinity() && element.isNegative();
}
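// Rewrites a floating-point reduce_window<max> initialized with -inf into
// tfl.max_pool_2d, inserting an explicit tfl.pad_v2 when the padding is
// neither SAME nor VALID.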
class LegalizeMaxPool : public OpConversionPattern<mhlo::ReduceWindowOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
private:
TFL::PadV2Op BuildExplicitPadOp(mhlo::ReduceWindowOp op, const Layout& layout,
const ShapedType& input_type,
const ShapedType& output_type, Value input,
Value init, const ReduceWindowView& view,
PatternRewriter& rewriter) const;
};
TFL::PadV2Op LegalizeMaxPool::BuildExplicitPadOp(
mhlo::ReduceWindowOp op, const Layout& layout, const ShapedType& input_type,
const ShapedType& output_type, Value input, Value init,
const ReduceWindowView& view, PatternRewriter& rewriter) const {
std::vector<int64_t> shape = {layout.Rank(), layout.NumSpatials()};
llvm::SmallVector<int64_t, 8> padding_values;
for (auto& padding : view.Paddings()) {
padding_values.push_back(padding.Lo());
padding_values.push_back(padding.Hi());
}
auto padding_dense_attr = mlir::DenseElementsAttr::get(
mlir::RankedTensorType::get(shape, rewriter.getIntegerType(64)),
llvm::ArrayRef<int64_t>(padding_values));
auto padding_values_op =
rewriter.create<arith::ConstantOp>(op.getLoc(), padding_dense_attr);
llvm::SmallVector<int64_t, 4> pad_output_shape_vector;
pad_output_shape_vector.push_back(input_type.getDimSize(0));
pad_output_shape_vector.push_back(input_type.getDimSize(1) +
view.Paddings()[1].Lo() +
view.Paddings()[1].Hi());
pad_output_shape_vector.push_back(input_type.getDimSize(2) +
view.Paddings()[2].Lo() +
view.Paddings()[2].Hi());
pad_output_shape_vector.push_back(input_type.getDimSize(3));
auto pad_output_type = mlir::RankedTensorType::get(
pad_output_shape_vector, output_type.getElementType());
return rewriter.create<TFL::PadV2Op>(op.getLoc(), pad_output_type, input,
padding_values_op, init);
}
LogicalResult LegalizeMaxPool::matchAndRewrite(
mhlo::ReduceWindowOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
const auto opt_view = GetViewIfAttrsSupported(op);
if (!opt_view.has_value()) {
return rewriter.notifyMatchFailure(op, "Reduce window is not valid.");
}
const auto [view, layout] = opt_view.value();
if (layout != TFLNativePoolingLayout(layout.Rank())) {
return rewriter.notifyMatchFailure(op, "Not tfl standard layout.");
}
if (failed(MatchBinaryReduceFunction<mhlo::MaxOp>(op.getBody()))) {
return rewriter.notifyMatchFailure(op, "Must be a max pool.");
}
auto type = mlir::dyn_cast<ShapedType>(op.getResult(0).getType());
if (!mlir::isa<FloatType>(type.getElementType())) {
return rewriter.notifyMatchFailure(op, "Not a floating point pool.");
}
auto opt_inputs_and_init = GetInputAndInitIfValid(op);
if (!opt_inputs_and_init.has_value()) {
return rewriter.notifyMatchFailure(op, "Too many inputs or inits.");
}
auto [input, init] = opt_inputs_and_init.value();
auto input_type = llvm::dyn_cast<ShapedType>(input.getType());
if (!isFloatMinusInfinity(init)) {
return rewriter.notifyMatchFailure(op, "Init not minus infinity.");
}
auto opt_tfl_padding =
GetTFLPadding(view.Paddings(), view.WindowStrides(),
input_type.getShape(), view.WindowDims());
Value max_pool_input;
std::string tfl_padding_attr;
if (opt_tfl_padding.has_value()) {
max_pool_input = input;
tfl_padding_attr = opt_tfl_padding.value();
} else {
max_pool_input = BuildExplicitPadOp(op, layout, input_type, type, input,
init, view, rewriter);
tfl_padding_attr = "VALID";
}
auto [fh, fw, sh, sw, p, faf] =
BuildTFLPoolAttrs(rewriter, view, tfl_padding_attr);
rewriter.replaceOpWithNewOp<TFL::MaxPool2DOp>(op, type, max_pool_input, p, sw,
sh, fw, fh, faf);
return success();
}
void ReplaceWithAvgPool(mhlo::DivOp op, Value rw_lhs_input,
const ReduceWindowView& lhs_view,
llvm::StringRef padding, PatternRewriter& rewriter,
mhlo::TransposeOp opt_final_tpose) {
Type out_type =
opt_final_tpose ? opt_final_tpose.getOperand().getType() : op.getType();
auto [fh, fw, sh, sw, p, faf] =
BuildTFLPoolAttrs(rewriter, lhs_view, padding);
Value final_op = rewriter.create<TFL::AveragePool2DOp>(
op->getLoc(), out_type, rw_lhs_input, fh, fw, p, sh, sw, faf);
if (opt_final_tpose) {
final_op = rewriter
.create<mhlo::TransposeOp>(final_op.getLoc(), final_op,
opt_final_tpose.getPermutation())
.getResult();
}
rewriter.replaceOp(op, final_op);
}
template <typename... Tys>
Value RecursivelyWalkUp(Value op) {
while (llvm::isa_and_nonnull<Tys...>(op.getDefiningOp())) {
Operation* producer = op.getDefiningOp();
op = producer->getOperand(0);
}
return op;
}
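// Rewrites div(reduce_window<add>(x), divisor) into tfl.average_pool_2d; the
// divisor may be a splat constant equal to the window size or a matching
// reduce_window over a tensor of ones.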
class LegalizeAvgPool : public OpConversionPattern<mhlo::DivOp> {
public:
using OpConversionPattern::OpConversionPattern;
explicit LegalizeAvgPool(MLIRContext* context)
: OpConversionPattern<mhlo::DivOp>(context, 10) {}
LogicalResult matchAndRewrite(
mhlo::DivOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeAvgPool::matchAndRewrite(
mhlo::DivOp div_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
auto div_lhs = div_op.getLhs();
mhlo::TransposeOp opt_final_tpose;
if (auto div_lhs_op = div_lhs.getDefiningOp()) {
opt_final_tpose = llvm::dyn_cast_or_null<mhlo::TransposeOp>(div_lhs_op);
}
auto rw_lhs_val = RecursivelyWalkUp<mhlo::TransposeOp>(div_lhs);
auto rw_lhs =
llvm::dyn_cast_or_null<mhlo::ReduceWindowOp>(rw_lhs_val.getDefiningOp());
if (!rw_lhs) {
return rewriter.notifyMatchFailure(
div_op, "Could not match lhs of div on reduce window.");
}
const auto opt_rw_lhs_view = GetViewIfAttrsSupported(rw_lhs);
if (!opt_rw_lhs_view.has_value()) {
return rewriter.notifyMatchFailure(div_op, "Lhs rw is not valid.");
}
const auto [rw_lhs_view, rw_lhs_layout] = opt_rw_lhs_view.value();
if (rw_lhs_layout != TFLNativePoolingLayout(rw_lhs_layout.Rank())) {
return rewriter.notifyMatchFailure(
div_op, "Lhs reduce window not tfl standard layout.");
}
if (failed(MatchBinaryReduceFunction<mhlo::AddOp>(rw_lhs.getBody()))) {
return rewriter.notifyMatchFailure(div_op,
"Failed to match rw lhs binary func.");
}
auto opt_rw_lhs_input_and_init = GetInputAndInitIfValid(rw_lhs);
if (!opt_rw_lhs_input_and_init.has_value()) {
return rewriter.notifyMatchFailure(
div_op, "Lhs reduce window has wrong number of inputs or init values.");
}
auto [rw_lhs_input, rw_lhs_init_val] = opt_rw_lhs_input_and_init.value();
auto rw_lhs_input_type = llvm::dyn_cast<ShapedType>(rw_lhs_input.getType());
auto rw_lhs_type =
mlir::dyn_cast<RankedTensorType>(rw_lhs.getResult(0).getType());
if (!mlir::isa<FloatType>(rw_lhs_type.getElementType())) {
return rewriter.notifyMatchFailure(div_op,
"Reduce window lhs most be float type.");
}
if (!IsCstFloatZero(rw_lhs_init_val)) {
return rewriter.notifyMatchFailure(
div_op, "Reduce window lhs init value is not zero.");
}
auto opt_tfl_padding =
GetTFLPadding(rw_lhs_view.Paddings(), rw_lhs_view.WindowStrides(),
rw_lhs_input_type.getShape(), rw_lhs_view.WindowDims());
if (!opt_tfl_padding.has_value()) {
return rewriter.notifyMatchFailure(div_op,
"Padding must be VALID or SAME.");
}
const auto& tfl_padding = opt_tfl_padding.value();
{
DenseFPElementsAttr divisor;
auto div_rhs = RecursivelyWalkUp<mhlo::BroadcastInDimOp, mhlo::TransposeOp>(
div_op.getRhs());
if (matchPattern(div_rhs, m_Constant(&divisor))) {
if (!divisor.isSplat()) {
return failure();
}
if (!divisor.getSplatValue<APFloat>().isExactlyValue(
rw_lhs_view.WindowSize())) {
return rewriter.notifyMatchFailure(
div_op, "Rhs splat const is not equal to window size.");
}
if (tfl_padding != "VALID") {
return rewriter.notifyMatchFailure(div_op,
"Matching on rhs splat const where "
"rw lhs has non-trivial padding.");
}
ReplaceWithAvgPool(div_op, rw_lhs_input, rw_lhs_view, tfl_padding,
rewriter, opt_final_tpose);
return success();
}
}
{
Value divisor = RecursivelyWalkUp<mhlo::BroadcastInDimOp, mhlo::ReshapeOp,
mhlo::TransposeOp>(div_op.getRhs());
auto rw_rhs =
dyn_cast_or_null<mhlo::ReduceWindowOp>(divisor.getDefiningOp());
if (!rw_rhs) {
return rewriter.notifyMatchFailure(
div_op, "Rhs of div op is not a reduce window.");
}
const auto opt_rw_rhs_view = GetViewIfAttrsSupported(rw_rhs);
if (!opt_rw_rhs_view.has_value()) {
return rewriter.notifyMatchFailure(div_op, "Rhs rw is not valid.");
}
const auto [rw_rhs_view, rw_rhs_layout] = opt_rw_rhs_view.value();
if (rw_rhs_layout != TFLNativePoolingLayout(rw_rhs_layout.Rank())) {
return rewriter.notifyMatchFailure(
div_op, "Rhs reduce window not tfl standard layout.");
}
if (failed(MatchBinaryReduceFunction<mhlo::AddOp>(rw_rhs.getBody()))) {
return rewriter.notifyMatchFailure(
div_op, "Rhs rw body function is not an add op.");
}
auto opt_rw_rhs_input_and_init = GetInputAndInitIfValid(rw_rhs);
if (!opt_rw_rhs_input_and_init.has_value()) {
return rewriter.notifyMatchFailure(
div_op,
"Rhs reduce window has wrong number of inputs or init values.");
}
auto [rw_rhs_input, rw_rhs_init_val] = opt_rw_rhs_input_and_init.value();
if (!IsCstFloatZero(rw_rhs_init_val)) {
return rewriter.notifyMatchFailure(div_op,
"Rhs rw init vals is not zero.");
}
rw_rhs_input = RecursivelyWalkUp<mhlo::BroadcastInDimOp, mhlo::TransposeOp>(
rw_rhs_input);
DenseFPElementsAttr rhs_input_data;
if (!matchPattern(rw_rhs_input, m_Constant(&rhs_input_data)) ||
!rhs_input_data.isSplat() ||
!rhs_input_data.getSplatValue<APFloat>().isExactlyValue(1.0)) {
return rewriter.notifyMatchFailure(div_op,
"Rw rhs input is not splat of 1.0.");
}
if (rw_lhs.getWindowDimensions() != rw_rhs.getWindowDimensions() ||
rw_lhs.getWindowStrides() != rw_rhs.getWindowStrides() ||
rw_lhs.getPadding() != rw_rhs.getPadding()) {
return rewriter.notifyMatchFailure(
div_op, "Lhs rw and Rhs rw do not have the same config.");
}
ReplaceWithAvgPool(div_op, rw_lhs_input, rw_lhs_view, tfl_padding, rewriter,
opt_final_tpose);
return success();
}
return failure();
}
}
void PopulateLegalizeReduceWindowPatterns(MLIRContext* ctx,
RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeAvgPool, LegalizeMaxPool, LegalizeCumSum>(ctx);
target.addDynamicallyLegalOp<mhlo::ReduceWindowOp>(IsReduceWindowLegal);
target.addDynamicallyLegalOp<mhlo::DivOp>(IsDivideLegal);
}
void PopulatePrepareReduceWindowPatterns(MLIRContext* ctx,
RewritePatternSet& patterns) {
patterns.add<RelayoutReduceWindow>(ctx);
}
} | #include <cstdint>
#include <functional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using testing::ElementsAre;
template <class T>
struct TensorTypeFor;
#define TENSOR_TYPE_ASSOC(CPP_TYPE, TENSORTYPE_VALUE) \
template <> \
struct TensorTypeFor<CPP_TYPE> { \
static constexpr TensorType value = TENSORTYPE_VALUE; \
};
TENSOR_TYPE_ASSOC(int8_t, TensorType_INT8);
TENSOR_TYPE_ASSOC(int16_t, TensorType_INT16);
TENSOR_TYPE_ASSOC(int32_t, TensorType_INT32);
TENSOR_TYPE_ASSOC(int64_t, TensorType_INT64);
TENSOR_TYPE_ASSOC(uint8_t, TensorType_UINT8);
TENSOR_TYPE_ASSOC(uint16_t, TensorType_UINT16);
TENSOR_TYPE_ASSOC(uint32_t, TensorType_UINT32);
TENSOR_TYPE_ASSOC(uint64_t, TensorType_UINT64);
TENSOR_TYPE_ASSOC(float, TensorType_FLOAT32);
static_assert(sizeof(float) == 4, "float type is expected to be 32 bit long");
TENSOR_TYPE_ASSOC(double, TensorType_FLOAT64);
static_assert(sizeof(double) == 8, "double type is expected to be 64 bit long");
template <class Container>
int32_t intsize(const Container& c) {
return static_cast<int32_t>(c.size());
}
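// Single-op model around the REDUCE_WINDOW builtin with an ADD reduction;
// when no data is given the input defaults to 1..N.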
template <class T>
class DilateOpModel : public SingleOpModel {
static constexpr TensorType kTensorType = TensorTypeFor<T>::value;
public:
void SetInput(absl::Span<const int32_t> shape,
absl::Span<const T> data = {}) {
input_shape_.assign(shape.begin(), shape.end());
if (data.empty()) {
input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<int>()));
absl::c_iota(input_data_, 1);
} else {
input_data_.assign(data.begin(), data.end());
}
}
void SetWindowShape(absl::Span<const int64_t> shape) {
window_shape_data_.assign(shape.begin(), shape.end());
}
void SetWindowStrides(absl::Span<const int64_t> strides) {
window_strides_data_.assign(strides.begin(), strides.end());
}
void SetWindowDilations(absl::Span<const int64_t> dilations) {
window_dilations_data_.assign(dilations.begin(), dilations.end());
}
void SetInitValue(const T& val) { init_value_data_ = val; }
void Build() {
input_ = AddInput({kTensorType, input_shape_});
init_value_ = AddConstInput(kTensorType, {init_value_data_}, {1});
window_shape_ = AddConstInput(TensorType_INT64, window_shape_data_,
{intsize(window_shape_data_)});
window_strides_ = AddConstInput(TensorType_INT64, window_strides_data_,
{intsize(window_strides_data_)});
window_dilations_ = AddConstInput(TensorType_INT64, window_dilations_data_,
{intsize(window_dilations_data_)});
output_ = AddOutput(kTensorType);
SetBuiltinOp(
BuiltinOperator_REDUCE_WINDOW, BuiltinOptions2_ReduceWindowOptions,
CreateReduceWindowOptions(builder_, ReduceWindowFunction_ADD).Union());
BuildInterpreter({input_shape_});
PopulateTensor(input_, input_data_);
}
TfLiteStatus BuildAndInvoke() {
Build();
return Invoke();
}
absl::Span<const T> GetOutputData() {
return absl::Span<const T>(interpreter_->typed_tensor<T>(output_),
GetTensorSize(output_));
}
absl::Span<const int> GetOutputShape() {
const TfLiteIntArray& shape = *(interpreter_->tensor(output_)->dims);
return absl::Span<const int>(shape.data, shape.size);
}
const std::vector<T>& GetInput() const { return input_data_; }
const std::vector<int32_t>& GetInputShape() const { return input_shape_; }
const std::vector<int64_t>& GetWindowShape() const {
return window_shape_data_;
}
const std::vector<int64_t>& GetWindowStrides() const {
return window_strides_data_;
}
const std::vector<int64_t>& GetWindowDilations() const {
return window_dilations_data_;
}
const T& GetInitValue() const { return init_value_data_; }
protected:
int input_ = -1;
int window_shape_ = -1;
int window_strides_ = -1;
int window_dilations_ = -1;
int init_value_ = -1;
int output_ = -1;
std::vector<T> input_data_;
T init_value_data_;
std::vector<int32_t> input_shape_;
std::vector<int64_t> window_shape_data_;
std::vector<int64_t> window_strides_data_;
std::vector<int64_t> window_dilations_data_;
};
template <class StorageType>
class ReduceWindowTest : public testing::Test {
protected:
DilateOpModel<StorageType> model_;
};
using TestList =
testing::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, float, double>;
TYPED_TEST_SUITE(ReduceWindowTest, TestList);
TYPED_TEST(ReduceWindowTest, FullWindow) {
auto& model = this->model_;
model.SetInput({3, 3});
model.SetWindowShape({3, 3});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(45));
}
TYPED_TEST(ReduceWindowTest, NoDilation) {
auto& model = this->model_;
model.SetInput({3, 3});
model.SetWindowShape({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(12, 16, 24, 28));
}
TYPED_TEST(ReduceWindowTest, FullWindowWithDilation) {
auto& model = this->model_;
model.SetInput({3, 3});
model.SetWindowShape({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({2, 2});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(1, 1));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(20));
}
TYPED_TEST(ReduceWindowTest, WithDilation) {
auto& model = this->model_;
model.SetInput({4, 4});
model.SetWindowShape({2, 2});
model.SetWindowStrides({1, 1});
model.SetWindowDilations({2, 2});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(24, 28, 40, 44));
}
TYPED_TEST(ReduceWindowTest, WithStrides) {
auto& model = this->model_;
model.SetInput({4, 4});
model.SetWindowShape({2, 2});
model.SetWindowStrides({2, 2});
model.SetWindowDilations({1, 1});
model.SetInitValue(0);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(14, 22, 46, 54));
}
TYPED_TEST(ReduceWindowTest, WithDilationAndStrides) {
auto& model = this->model_;
model.SetInput({5, 5});
model.SetWindowShape({2, 2});
model.SetWindowStrides({2, 2});
model.SetWindowDilations({2, 2});
model.SetInitValue(2);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(2, 2));
EXPECT_THAT(this->model_.GetOutputData(), ElementsAre(30, 38, 70, 78));
}
TYPED_TEST(ReduceWindowTest, OutputShapeRoundingIsCorrect) {
auto& model = this->model_;
model.SetInput({1, 64, 114, 114});
model.SetWindowShape({1, 1, 3, 3});
model.SetWindowStrides({1, 1, 2, 2});
model.SetWindowDilations({1, 1, 1, 1});
model.SetInitValue(2);
EXPECT_EQ(this->model_.BuildAndInvoke(), kTfLiteOk);
EXPECT_THAT(this->model_.GetOutputShape(), ElementsAre(1, 64, 56, 56));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce_window.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/reduce_window_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4bc119a0-e934-485a-b15c-e6be4ebd330b | cpp | tensorflow/tensorflow | gelu | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gelu.cc | tensorflow/lite/delegates/xnnpack/gelu_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gelu.h"
#include <cmath>
#include <cstdlib>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
namespace mlir::odml {
constexpr float kOne = 1.0;
const float kOneOverRoot2 = kOne / std::sqrt(2);
constexpr float kHalf = kOne / 2.0;
constexpr float kTolerance = kOne / 1000.0;
Operation* GetUserIfOnlyOne(Operation* op) {
if (op->getNumResults() != 1) return nullptr;
auto result = op->getResult(0);
if (!result.hasOneUse()) return nullptr;
return (*result.getUses().begin()).getOwner();
}
Operation* GetInputOpWithOneUse(Operation* op, int opr_num) {
if (opr_num >= op->getNumOperands()) return nullptr;
auto opr = op->getOperand(opr_num);
if (llvm::isa<BlockArgument>(opr)) return nullptr;
auto* res = opr.getDefiningOp();
if (!res->hasOneUse()) return nullptr;
return res;
}
bool HasSplatArg(Operation* op, float val, int opr_num) {
auto* cst_input = GetInputOpWithOneUse(op, opr_num);
if (!cst_input) return false;
auto cst_op = llvm::dyn_cast_or_null<stablehlo::ConstantOp>(cst_input);
if (!cst_op) return false;
ElementsAttr value = cst_op.getValue();
if (!value.isSplat()) return false;
if (!value.getElementType().isF32()) return false;
return std::abs(value.getSplatValue<float>() - val) < kTolerance;
}
bool MatchERF(Operation* op) {
if (auto custom_call = llvm::dyn_cast_or_null<stablehlo::CustomCallOp>(op)) {
return custom_call.getCallTargetName() == "mhlo.erf";
}
return llvm::isa<chlo::ErfOp>(op);
}
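// The pattern recognized below corresponds to the exact (erf-based) GELU:
//
//   gelu(x) = (x * 0.5) * (1 + erf(x * 1/sqrt(2)))
//
// i.e. an erf whose input is mul(x, 1/sqrt(2)), followed by add(..., 1),
// multiplied with mul(x, 0.5). The splat comparisons use kTolerance, so
// constants that are merely close to 0.5, 1.0 and 1/sqrt(2) still match.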
LogicalResult LowerGELU::matchAndRewrite(Operation* op,
PatternRewriter& rewriter) const {
if (!MatchERF(op)) return failure();
auto* erf_user = GetUserIfOnlyOne(op);
if (!erf_user) return failure();
auto* erf_user_user = GetUserIfOnlyOne(erf_user);
if (!erf_user_user) return failure();
auto* erf_input = GetInputOpWithOneUse(op, 0);
if (!erf_input) return failure();
auto* erf_user_user_input = GetInputOpWithOneUse(erf_user_user, 0);
if (!erf_user_user_input) return failure();
if (erf_user_user_input->getOperand(0) != erf_input->getOperand(0)) {
return failure();
}
auto rhs_mul = llvm::dyn_cast_or_null<stablehlo::MulOp>(erf_input);
if (!rhs_mul) return failure();
auto lhs_mul = llvm::dyn_cast_or_null<stablehlo::MulOp>(erf_user_user_input);
if (!lhs_mul) return failure();
auto output_mul = llvm::dyn_cast_or_null<stablehlo::MulOp>(erf_user_user);
if (!output_mul) return failure();
auto rhs_add = llvm::dyn_cast_or_null<stablehlo::AddOp>(erf_user);
if (!rhs_add) return failure();
if (!HasSplatArg(rhs_add, kOne, 1)) return failure();
if (!HasSplatArg(lhs_mul, kHalf, 1)) return failure();
if (!HasSplatArg(rhs_mul, kOneOverRoot2, 1)) return failure();
auto is_approx_attr = rewriter.getBoolAttr(false);
auto gelu = rewriter.create<TFL::GeluOp>(
output_mul.getLoc(), output_mul.getResult().getType(),
erf_input->getOperand(0), is_approx_attr);
rewriter.replaceAllOpUsesWith(output_mul, gelu);
rewriter.eraseOp(output_mul);
rewriter.eraseOp(rhs_add);
rewriter.eraseOp(op);
rewriter.eraseOp(lhs_mul);
rewriter.eraseOp(rhs_mul);
return success();
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/unary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Gelu, 4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(5)
.AbsoluteTolerance(10)
.Test(BuiltinOperator_GELU, xnnpack_delegate.get());
}
TEST(Gelu, 3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, width, channels})
.RelativeTolerance(5)
.AbsoluteTolerance(10)
.Test(BuiltinOperator_GELU, xnnpack_delegate.get());
}
TEST(Gelu, 2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, channels})
.RelativeTolerance(5)
.AbsoluteTolerance(10)
.Test(BuiltinOperator_GELU, xnnpack_delegate.get());
}
TEST(Gelu, 1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
UnaryElementwiseTester()
.Shape({batch})
.RelativeTolerance(5)
.AbsoluteTolerance(10)
.Test(BuiltinOperator_GELU, xnnpack_delegate.get());
}
TEST(Gelu, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
UnaryElementwiseTester()
.Shape({batch, height, width, channels})
.RelativeTolerance(5)
.AbsoluteTolerance(10)
.Test(BuiltinOperator_GELU, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/gelu.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/gelu_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aacfa9f6-1622-4f49-9337-a55a4bb65fda | cpp | tensorflow/tensorflow | sort | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/sort.cc | third_party/xla/xla/tests/sort_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/sort.h"
#include <cstdint>
#include "llvm/ADT/ilist.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Region.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/hlo_matchers.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
using OpListType = llvm::iplist<Operation>;
template <typename ReturnOpType>
bool MatchTopKComparator(Region& comparator) {
if (!comparator.hasOneBlock()) return false;
Block& comparator_blk = comparator.front();
OpListType& operations = comparator_blk.getOperations();
if (operations.size() != 2) return false;
auto compare_op =
llvm::dyn_cast_or_null<mhlo::CompareOp>(&operations.front());
auto return_op = llvm::dyn_cast_or_null<ReturnOpType>(&operations.back());
if (!compare_op || !return_op) return false;
if (compare_op.getComparisonDirection() != mhlo::ComparisonDirection::GT) {
return false;
}
if (compare_op.getOperands()[0] != comparator_blk.getArgument(0) ||
compare_op.getOperands()[1] != comparator_blk.getArgument(1)) {
return false;
}
return return_op.getOperands().front() == compare_op.getResult();
}
bool IsSortOpNotTopK(mhlo::SortOp op) {
if (op->getNumOperands() != 2) {
return true;
}
auto keys_opr = op.getInputs().front();
auto keys_type = llvm::cast<ShapedType>(keys_opr.getType());
if (!keys_type.hasStaticShape() ||
!keys_type.getElementType().isIntOrFloat()) {
return true;
}
auto indices_opr = op.getInputs().back();
auto indices_type = llvm::cast<ShapedType>(indices_opr.getType());
if (!indices_type.hasStaticShape() ||
!indices_type.getElementType().isInteger(32)) {
return true;
}
const int64_t sort_dim = op.getDimension();
const auto k = indices_type.getDimSize(sort_dim);
const auto rank = keys_type.getRank();
if (sort_dim != rank - 1 || k < 1) {
return true;
}
OpBuilder b(op->getContext());
if (!MatchIota(b.getI64TensorAttr({sort_dim}), indices_opr)) {
return true;
}
if (!MatchTopKComparator<mhlo::ReturnOp>(op.getComparator())) {
return true;
}
return false;
}
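// A sort op survives the checks above only when it encodes a TopK: two
// statically shaped operands (float/int values plus i32 iota indices),
// sorted on the last dimension with a plain GT comparator. The k value is
// then the static size of that last dimension. An illustrative sketch of the
// resulting rewrite (pseudo-IR; the actual op construction happens in
// LegalizeSortOp::matchAndRewrite below):
//
//   %k = arith.constant dense<k> : tensor<i32>
//   %values, %indices = "tfl.topk_v2"(%keys, %k)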
class LegalizeSortOp : public OpConversionPattern<mhlo::SortOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::SortOp sort_op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final;
};
LogicalResult LegalizeSortOp::matchAndRewrite(
mhlo::SortOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const {
if (IsSortOpNotTopK(op)) {
return failure();
}
auto keys = op.getInputs().front();
auto indices = op.getInputs().back();
auto indices_type = llvm::cast<ShapedType>(indices.getType());
const int32_t k = indices_type.getShape().back();
auto k_cst_attr = DenseIntElementsAttr::get(
RankedTensorType::get({}, rewriter.getI32Type()), k);
auto k_cst = rewriter.create<arith::ConstantOp>(op->getLoc(), k_cst_attr);
rewriter.replaceOpWithNewOp<TFL::TopKV2Op>(op, keys.getType(),
indices.getType(), keys, k_cst);
return success();
}
}
void PopulateSortPatterns(MLIRContext* ctx, RewritePatternSet& patterns,
ConversionTarget& target) {
patterns.add<LegalizeSortOp>(ctx);
target.addDynamicallyLegalOp<mhlo::SortOp>(IsSortOpNotTopK);
}
} | #include <string>
#include <string_view>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "xla/error_spec.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace {
class SortTest : public HloTestBase {};
XLA_TEST_F(SortTest, SortDim0) {
std::string_view hlo_text_module = R"(
HloModule sort
compare {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT lt = pred[] compare(p0, p1), direction=LT
}
ENTRY e {
x = f32[32,64] parameter(0)
ROOT sort = f32[32,64] sort(x), dimensions={0}, to_apply=compare
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
XLA_TEST_F(SortTest, SortDim1) {
std::string_view hlo_text_module = R"(
HloModule sort
compare {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT lt = pred[] compare(p0, p1), direction=LT
}
ENTRY e {
x = f32[32,64] parameter(0)
ROOT sort = f32[32,64] sort(x), dimensions={1}, to_apply=compare
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
XLA_TEST_F(SortTest, SortTwiceWithSameComparator) {
std::string_view hlo_text_module = R"(
HloModule sort
compare {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT lt = pred[] compare(p0, p1), direction=LT
}
ENTRY e {
x = f32[32,64] parameter(0)
y = f32[64,32] parameter(1)
sort_x = f32[32,64] sort(x), dimensions={0}, to_apply=compare
sort_y = f32[64,32] sort(y), dimensions={1}, to_apply=compare
ROOT tuple = (f32[32,64], f32[64,32]) tuple(sort_x, sort_y)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
class SortManyInputsTest : public SortTest,
public ::testing::WithParamInterface<int> {
public:
static std::string Name(const ::testing::TestParamInfo<int>& info) {
auto num_inputs = info.param;
return absl::StrFormat("Sort%dInputs", num_inputs);
}
};
XLA_TEST_P(SortManyInputsTest, SortManyInputs) {
int num_inputs = GetParam();
std::string_view hlo_text_module_template = R"(
HloModule sort
compare {
${COMPARE_DECLARATIONS}
ROOT lt = pred[] compare(p0, p1), direction=LT
}
ENTRY e {
${SORT_DECLARATIONS}
ROOT sort = (${SORT_SHAPE}) sort(${SORT_PARAMS}), dimensions={0},
to_apply=compare
}
)";
std::string sort_decls = "";
std::vector<std::string> param_names;
param_names.reserve(num_inputs * 2);
for (int i = 0; i < num_inputs; ++i) {
sort_decls += absl::StrFormat("p%d = f32[32,64] parameter(%d)\n", i, i);
param_names.emplace_back(absl::StrCat("p", i));
}
std::string sort_params = absl::StrJoin(param_names, ", ");
std::string sort_shape =
absl::StrJoin(std::vector<std::string>(num_inputs, "f32[32,64]"), ",");
std::string compare_decls = "";
for (int i = 0; i < num_inputs * 2; ++i) {
compare_decls += absl::StrFormat("p%d = f32[] parameter(%d)\n", i, i);
}
std::string compare_params = absl::StrJoin(param_names, ", ");
std::string hlo_text_module = absl::StrReplaceAll(
hlo_text_module_template, {{"${SORT_DECLARATIONS}", sort_decls},
{"${SORT_SHAPE}", sort_shape},
{"${SORT_PARAMS}", sort_params},
{"${COMPARE_DECLARATIONS}", compare_decls}});
EXPECT_TRUE(RunAndCompare(hlo_text_module, ErrorSpec{0.0, 0.0}));
}
INSTANTIATE_TEST_SUITE_P(ManyInputs, SortManyInputsTest,
::testing::Values(17, 20), SortManyInputsTest::Name);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/sort.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/sort_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
65c4ba39-beed-4193-b33f-f21f494b92c6 | cpp | tensorflow/tensorflow | get_dimension_size | tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/get_dimension_size.cc | third_party/xla/xla/tests/get_dimension_size_test.cc | #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/get_dimension_size.h"
#include <cstdint>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir::odml {
namespace {
class LegalizeDimensionSizeOp
: public OpConversionPattern<mhlo::GetDimensionSizeOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
mhlo::GetDimensionSizeOp op, OpAdaptor adaptor,
ConversionPatternRewriter& rewriter) const final {
ImplicitLocOpBuilder builder(op.getLoc(), rewriter);
auto operand_type = llvm::cast<ShapedType>(op.getOperand().getType());
auto shaped_op_type =
RankedTensorType::get({operand_type.getRank()}, rewriter.getI64Type());
Value shape_op = rewriter.create<TFL::ShapeOp>(op.getLoc(), shaped_op_type,
op.getOperand());
Value size = BuildIntArrayConstOp<arith::ConstantOp>(builder, rewriter, {1},
rewriter.getI64Type());
auto begin = BuildIntArrayConstOp<arith::ConstantOp>(
builder, rewriter,
llvm::SmallVector<int64_t>({static_cast<int64_t>(op.getDimension())}),
rewriter.getI64Type());
auto slice_type = RankedTensorType::get({1}, rewriter.getI64Type());
Value slice = rewriter.create<TFL::SliceOp>(op.getLoc(), slice_type,
shape_op, begin, size);
auto op_el_type = llvm::cast<ShapedType>(op.getType()).getElementType();
if (op_el_type != slice_type.getElementType()) {
slice = rewriter.create<TFL::CastOp>(op->getLoc(),
slice_type.clone(op_el_type), slice);
}
rewriter.replaceOpWithNewOp<TFL::SqueezeOp>(op, op.getType(), slice,
rewriter.getI64ArrayAttr({0}));
return success();
}
};
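// The conversion above lowers mhlo.get_dimension_size(%x, d) to a small TFL
// chain, roughly (illustrative pseudo-IR; the optional cast is emitted only
// when the result element type is not i64):
//
//   %shape = "tfl.shape"(%x)                      : tensor<rank x i64>
//   %slice = "tfl.slice"(%shape, [d], [1])        : tensor<1 x i64>
//   %size  = "tfl.squeeze"(%slice) {squeeze_dims = [0]}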
}
void PopulateGetDimensionSizePatterns(MLIRContext* ctx,
RewritePatternSet& patterns,
ConversionTarget& target) {
target.addIllegalOp<mhlo::GetDimensionSizeOp>();
patterns.add<LegalizeDimensionSizeOp>(ctx);
}
} | #include <utility>
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
void DisableAllHloPasses(HloModule& module) {
auto debug_options = module.config().debug_options();
debug_options.set_xla_disable_all_hlo_passes(true);
module.mutable_config().set_debug_options(debug_options);
}
class GetDimensionSizeTest : public HloTestBase {};
TEST_F(GetDimensionSizeTest, CorrectComputation) {
const char* const kModuleStr = R"(
HloModule a_inference_call_110__.55
ENTRY %a_inference_call_110__.55 (arg0.1: f32[1,8], arg1.2: f32[8], arg2.3: f32[8]) -> s32[] {
%constant.37 = f32[] constant(1e-12)
%broadcast.38 = f32[1,1]{1,0} broadcast(f32[] %constant.37), dimensions={}
%arg0.1 = f32[1,8]{1,0} parameter(0), parameter_replication={false}
%reshape.4 = f32[1,8]{1,0} reshape(f32[1,8]{1,0} %arg0.1)
%convert.5 = f32[1,8]{1,0} convert(f32[1,8]{1,0} %reshape.4)
%constant.6 = f32[] constant(0)
%convert.7 = f32[] convert(f32[] %constant.6)
ROOT %get-dimension-size.13 = s32[] get-dimension-size(f32[1,8]{1,0} %convert.5), dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.01, 0.01}));
}
TEST_F(GetDimensionSizeTest,
DISABLED_ON_INTERPRETER(DISABLED_ON_GPU(
DISABLED_ON_TPU(ReturnsErrorWhenHloPassesDisabled)))) {
const char* const kModuleStr = R"(
HloModule m
ENTRY %test {
%arg0 = f32[1,8] parameter(0)
ROOT %get-dimension-size.0 = s32[] get-dimension-size(%arg0),
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kModuleStr));
DisableAllHloPasses(*module);
Literal arg0 =
LiteralUtil::CreateR1<float>({0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0});
auto status_or_result = Execute(std::move(module), {&arg0});
EXPECT_EQ(status_or_result.status().code(), absl::StatusCode::kUnimplemented);
EXPECT_THAT(
status_or_result.status().message(),
::testing::HasSubstr("GetDimensionSize should be rewritten for CPU"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/get_dimension_size.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/get_dimension_size_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
31ca920e-06dc-41f7-8f63-bf1d9dda7801 | cpp | tensorflow/tensorflow | numerical_utils | tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc | tensorflow/compiler/mlir/lite/quantization/numerical_utils_test.cc | #include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <assert.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <optional>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
QuantizedMultiplier QuantizeMultiplier(double double_multiplier) {
if (double_multiplier < 1e-6) {
return {0, 0};
}
int32_t shift;
const double q = frexp(double_multiplier, &shift);
int64_t quantized_multiplier = round(q * (1LL << 31));
assert(quantized_multiplier <= (1LL << 31));
if (quantized_multiplier == (1LL << 31)) {
quantized_multiplier /= 2;
++shift;
}
assert(quantized_multiplier <= std::numeric_limits<int32_t>::max());
if (shift > 31 || shift < -31) {
return {0, 0};
}
return {static_cast<int32_t>(quantized_multiplier), shift};
}
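// The pair returned above recomposes the original multiplier as
// quantized_multiplier * 2^(shift - 31) (the ComposeScale identity used by
// the unit tests). A worked example: QuantizeMultiplier(0.5) yields
// {1 << 30, 0}, since frexp(0.5) gives q = 0.5 with shift = 0 and
// round(0.5 * 2^31) = 2^30. Inputs below the 1e-6 cutoff collapse to {0, 0}.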
QuantizedRange CalculateQuantizedRange(double scale, int32_t zero_point,
std::optional<double> rmin,
std::optional<double> rmax, int32_t qmin,
int32_t qmax) {
auto quantize = [scale, zero_point](float f) {
return zero_point + static_cast<int32_t>(std::round(f / scale));
};
if (rmin.has_value() && rmax.has_value()) {
return {std::max(qmin, quantize(rmin.value())),
std::min(qmax, quantize(rmax.value()))};
} else if (rmin.has_value()) {
return {std::max(qmin, quantize(rmin.value())), qmax};
} else if (rmax.has_value()) {
return {qmin, std::min(qmax, quantize(rmax.value()))};
} else {
return {qmin, qmax};
}
}
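// Illustrative calls (mirroring the unit tests, int8 range -128..127):
//   CalculateQuantizedRange(1e-6, 0, 0.0, std::nullopt, -128, 127) -> {0, 127}
//   CalculateQuantizedRange(1e-6, 100, 0.0, 6.0, -128, 127)        -> {100, 127}
// A known rmin/rmax is quantized and then clamped against qmin/qmax, so a
// non-negative rmin pins the lower bound at the zero point.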
}
} | #include "tensorflow/compiler/mlir/lite/quantization/numerical_utils.h"
#include <cmath>
#include <optional>
#include <gtest/gtest.h>
#include "absl/types/optional.h"
namespace mlir {
namespace quant {
namespace {
double ComposeScale(const QuantizedMultiplier& input) {
return input.first * exp2(-31 + input.second);
}
TEST(NumericalUtils, QuantizeMultiplier) {
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e6)), 1.0e6);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e3)), 1.0e3);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(10.)), 10.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(5.)), 5.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(2.)), 2.);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(0.0)), 0.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0)), 1.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-1)), 1.0e-1);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-2)), 1.0e-2);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-3)), 1.0e-3);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-4)), 1.0e-4);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-5)), 1.0e-5);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-6)), 1.0e-6);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-7)), 0.0);
ASSERT_FLOAT_EQ(ComposeScale(QuantizeMultiplier(1.0e-8)), 0.0);
}
TEST(NumericalUtils, ActivationRange) {
auto a =
CalculateQuantizedRange(1e-6, 0, std::nullopt, std::nullopt, -128, 127);
ASSERT_EQ(a.first, -128);
ASSERT_EQ(a.second, 127);
auto b = CalculateQuantizedRange(1e-6, 0, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(b.first, 0);
ASSERT_EQ(b.second, 127);
auto c = CalculateQuantizedRange(1e-6, 0, -1.0, 1.0, -128, 127);
ASSERT_EQ(c.first, -128);
ASSERT_EQ(c.second, 127);
auto d = CalculateQuantizedRange(1e-6, 0, 0.0, 6.0, -128, 127);
ASSERT_EQ(d.first, 0);
ASSERT_EQ(d.second, 127);
auto e =
CalculateQuantizedRange(1e-6, 100, std::nullopt, std::nullopt, -128, 127);
ASSERT_EQ(e.first, -128);
ASSERT_EQ(e.second, 127);
auto f = CalculateQuantizedRange(1e-6, 100, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(f.first, 100);
ASSERT_EQ(f.second, 127);
auto g = CalculateQuantizedRange(1e-6, 100, -1.0, 1.0, -128, 127);
ASSERT_EQ(g.first, -128);
ASSERT_EQ(g.second, 127);
auto h = CalculateQuantizedRange(1e-6, 100, 0.0, 6.0, -128, 127);
ASSERT_EQ(h.first, 100);
ASSERT_EQ(h.second, 127);
auto i = CalculateQuantizedRange(1e-6, -100, std::nullopt, std::nullopt, -128,
127);
ASSERT_EQ(i.first, -128);
ASSERT_EQ(i.second, 127);
auto j = CalculateQuantizedRange(1e-6, -100, 0.0, std::nullopt, -128, 127);
ASSERT_EQ(j.first, -100);
ASSERT_EQ(j.second, 127);
auto k = CalculateQuantizedRange(1e-6, -100, -1.0, 1.0, -128, 127);
ASSERT_EQ(k.first, -128);
ASSERT_EQ(k.second, 127);
auto l = CalculateQuantizedRange(1e-6, -100, 0.0, 6.0, -128, 127);
ASSERT_EQ(l.first, -100);
ASSERT_EQ(l.second, 127);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/numerical_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/numerical_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
33336fcd-e3c1-42aa-9201-e1fd29254023 | cpp | tensorflow/tensorflow | quantization | tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc | tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc | #include "tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h"
#include <string>
#include <unordered_set>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/static_range_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/weight_only_ptq.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/py_function_lib.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace {
using ::mlir::quant::stablehlo::StaticRangePtqComponent;
using ::mlir::quant::stablehlo::WeightOnlyPtqComponent;
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::SignatureDef;
using ::tensorflow::quantization::PyFunctionLibrary;
absl::flat_hash_map<std::string, SignatureDef> GetSignatureDefMapFromBundle(
const SavedModelBundle& saved_model_bundle) {
const protobuf::Map<std::string, SignatureDef>& signatures =
saved_model_bundle.GetSignatures();
absl::flat_hash_map<std::string, SignatureDef> signature_def_map(
signatures.begin(), signatures.end());
signature_def_map.erase(kSavedModelInitOpSignatureKey);
return signature_def_map;
}
absl::flat_hash_map<std::string, std::string> GetFunctionAliases(
const SavedModelBundle& saved_model_bundle) {
const protobuf::Map<std::string, std::string>& function_aliases =
saved_model_bundle.meta_graph_def.meta_info_def().function_aliases();
return absl::flat_hash_map<std::string, std::string>(function_aliases.begin(),
function_aliases.end());
}
}
absl::StatusOr<mlir::ModuleOp> RunQuantization(
const SavedModelBundle* saved_model_bundle,
const absl::string_view saved_model_dir,
const std::unordered_set<std::string>& saved_model_tags,
const QuantizationConfig& quantization_config,
const PyFunctionLibrary* quantization_py_function_lib,
mlir::ModuleOp module_op) {
if (saved_model_bundle == nullptr) {
return absl::InvalidArgumentError(
"Failed to run quantization. `saved_model_bundle` should not be "
"nullptr.");
}
if (quantization_py_function_lib == nullptr) {
return absl::InvalidArgumentError(
"Failed to run quantization. `quantization_py_function_lib` should not "
"be nullptr.");
}
LOG(INFO) << "User-provided quantization config: "
<< quantization_config.DebugString();
const QuantizationConfig updated_config =
ExpandPresets(PopulateDefaults(quantization_config));
LOG(INFO) << "Updated quantization config: " << updated_config.DebugString();
const absl::flat_hash_map<std::string, SignatureDef> signature_def_map =
GetSignatureDefMapFromBundle(*saved_model_bundle);
std::vector<std::string> exported_names;
for (const auto& [key, value_unused] : signature_def_map) {
exported_names.push_back(key);
}
if (failed(mlir::tf_saved_model::FreezeVariables(
module_op, saved_model_bundle->GetSession()))) {
return absl::InternalError("Failed to freeze variables.");
}
mlir::PassManager pm(module_op.getContext());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
mlir::odml::AddLegalizeTFToStablehloPasses(pm, true,
false,
false);
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::createRemoveShardingCustomCallPass());
if (failed(pm.run(module_op))) {
return absl::InternalError("Failed to run legalize TF to StableHLO.");
}
absl::StatusOr<mlir::ModuleOp> quantized_module_op;
if (HasQuantizationMethod(updated_config.specs(),
Method::MethodCase::kStaticRangePtq)) {
StaticRangePtqComponent static_range_ptq_component(
module_op.getContext(), quantization_py_function_lib, saved_model_dir,
exported_names, saved_model_tags, signature_def_map,
GetFunctionAliases(*saved_model_bundle));
quantized_module_op =
static_range_ptq_component.Run(module_op, updated_config);
} else if (HasQuantizationMethod(updated_config.specs(),
Method::MethodCase::kWeightOnlyPtq)) {
WeightOnlyPtqComponent weight_only_ptq_component(module_op.getContext());
quantized_module_op =
weight_only_ptq_component.Run(module_op, updated_config);
} else {
return absl::InvalidArgumentError(
"Quantization config must have either static_range_ptq_preset or "
"weight_only_ptq_preset.");
}
if (!quantized_module_op.ok()) {
return absl::InternalError("Failed to run quantization. Status msg: " +
quantized_module_op.status().ToString());
}
return quantized_module_op;
}
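// In short, RunQuantization freezes variables, runs shape inference and the
// TF -> StableHLO legalization pipeline, and then dispatches to either the
// static-range PTQ component or the weight-only PTQ component depending on
// which method the expanded QuantizationConfig specifies; any other config
// is rejected as invalid.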
} | #include "tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::CreateTmpDir;
using ::testing::HasSubstr;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(RunQuantizationTest,
WhenSavedModelBundleIsNullptrReturnsInvalidArgumentError) {
const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
ASSERT_THAT(tmp_saved_model_dir, IsOk());
QuantizationConfig config;
const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
nullptr, *tmp_saved_model_dir,
{}, config,
nullptr, {});
EXPECT_THAT(
quantized_module_op,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("`saved_model_bundle` should not be nullptr")));
}
TEST(RunQuantizationTest,
WhenPyFunctionLibIsNullptrReturnsInvalidArgumentError) {
const absl::StatusOr<std::string> tmp_saved_model_dir = CreateTmpDir();
ASSERT_THAT(tmp_saved_model_dir, IsOk());
SavedModelBundle bundle{};
QuantizationConfig config;
const absl::StatusOr<mlir::ModuleOp> quantized_module_op = RunQuantization(
&bundle, *tmp_saved_model_dir,
{}, config,
nullptr, {});
EXPECT_THAT(
quantized_module_op,
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("`quantization_py_function_lib` should not be nullptr")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/stablehlo/quantization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cf7afda-654e-4b2b-b84f-a2497f1f2c5d | cpp | tensorflow/tensorflow | allowlisted_flex_ops | tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.cc | tensorflow/lite/delegates/flex/allowlisted_flex_ops_test.cc | #include "tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.h"
#include <set>
#include <string>
#include "tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops_internal.h"
#include "tensorflow/core/framework/op.h"
namespace tflite {
namespace flex {
const std::set<std::string>& GetFlexAllowlist() {
static const std::set<std::string>* allowlisted_flex_ops =
new std::set<std::string>({
"Abort",
"Abs",
"Add",
"AddN",
"AddV2",
"AdjustContrast",
"AdjustContrastv2",
"AdjustHue",
"AdjustSaturation",
"All",
"Angle",
"Any",
"ApplyAdaMax",
"ApplyAdadelta",
"ApplyAdagrad",
"ApplyAdagradDA",
"ApplyAdagradV2",
"ApplyAdam",
"ApplyAddSign",
"ApplyCenteredRMSProp",
"ApplyFtrl",
"ApplyFtrlV2",
"ApplyGradientDescent",
"ApplyMomentum",
"ApplyPowerSign",
"ApplyProximalAdagrad",
"ApplyProximalGradientDescent",
"ApplyRMSProp",
"ApproximateEqual",
"ArgMax",
"ArgMin",
"AsString",
"Assert",
"Assign",
"AssignAdd",
"AssignAddVariableOp",
"AssignSub",
"AssignSubVariableOp",
"AssignVariableOp",
"Atan",
"Atan2",
"AudioSpectrogram",
"AvgPool",
"AvgPool3D",
"AvgPool3DGrad",
"AvgPoolGrad",
"BatchCholesky",
"BatchDatasetV2",
"BatchMatMul",
"BatchMatMulV2",
"BatchMatrixBandPart",
"BatchMatrixDeterminant",
"BatchMatrixDiag",
"BatchMatrixDiagPart",
"BatchMatrixInverse",
"BatchMatrixSetDiag",
"BatchMatrixTriangularSolve",
"BatchNormWithGlobalNormalization",
"BatchNormWithGlobalNormalizationGrad",
"BatchToSpace",
"BatchToSpaceND",
"BiasAdd",
"BiasAddGrad",
"BiasAddV1",
"Bincount",
"Bitcast",
"BitwiseAnd",
"BitwiseOr",
"BitwiseXor",
"BroadcastArgs",
"BroadcastGradientArgs",
"BroadcastTo",
"Bucketize",
"CTCBeamSearchDecoder",
"CTCGreedyDecoder",
"Case",
"Cast",
"Ceil",
"CheckNumerics",
"CheckNumericsV2",
"Cholesky",
"ClipByValue",
"CombinedNonMaxSuppression",
"Complex",
"ComplexAbs",
"Concat",
"ConcatOffset",
"ConcatV2",
"Conj",
"ConjugateTranspose",
"Const",
"ControlTrigger",
"Conv2D",
"Conv2DBackpropFilter",
"Conv2DBackpropInput",
"Conv3D",
"Conv3DBackpropFilter",
"Conv3DBackpropFilterV2",
"Conv3DBackpropInput",
"Conv3DBackpropInputV2",
"Cos",
"Cosh",
"CropAndResize",
"CropAndResizeGradBoxes",
"CropAndResizeGradImage",
"Cumprod",
"Cumsum",
"CumulativeLogsumexp",
"DataFormatDimMap",
"DataFormatVecPermute",
"DebugGradientIdentity",
"DebugGradientRefIdentity",
"DecodeAndCropJpeg",
"DecodeBase64",
"DecodeBmp",
"DecodeGif",
"DecodeImage",
"DecodeJpeg",
"DecodePaddedRaw",
"DecodePng",
"DecodeRaw",
"DecodeWav",
"DeepCopy",
"DeleteSessionTensor",
"DenseBincount",
"DenseToDenseSetOperation",
"DenseToSparseSetOperation",
"DepthToSpace",
"DepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropFilter",
"DepthwiseConv2dNativeBackpropInput",
"Dequantize",
"DestroyResourceOp",
"DestroyTemporaryVariable",
"Diag",
"DiagPart",
"Dilation2D",
"Dilation2DBackpropFilter",
"Dilation2DBackpropInput",
"Div",
"DivNoNan",
"DynamicPartition",
"DynamicStitch",
"Einsum",
"Elu",
"EluGrad",
"Empty",
"EmptyTensorList",
"EmptyTensorMap",
"EncodeBase64",
"EncodeJpeg",
"EncodeJpegVariableQuality",
"EncodePng",
"EncodeWav",
"EnsureShape",
"Enter",
"Equal",
"Erf",
"Exit",
"Exp",
"ExpandDims",
"ExtractImagePatches",
"FFT",
"FFT2D",
"FFT3D",
"FIFOQueue",
"FIFOQueueV2",
"FakeQuantWithMinMaxArgs",
"FakeQuantWithMinMaxArgsGradient",
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxVarsGradient",
"FakeQuantWithMinMaxVarsPerChannel",
"FakeQuantWithMinMaxVarsPerChannelGradient",
"FakeQueue",
"Fill",
"FilterDataset",
"FinalizeDataset",
"Fingerprint",
"FlatMapDataset",
"Floor",
"FloorDiv",
"FloorMod",
"FusedBatchNorm",
"FusedBatchNormGrad",
"FusedBatchNormGradV2",
"FusedBatchNormGradV3",
"FusedBatchNormV2",
"FusedBatchNormV3",
"FusedPadConv2D",
"FusedResizeAndPadConv2D",
"Gather",
"GatherNd",
"GatherV2",
"GetSessionHandle",
"GetSessionHandleV2",
"GetSessionTensor",
"Greater",
"GreaterEqual",
"HSVToRGB",
"HashTable",
"HashTableV2",
"HistogramSummary",
"IFFT",
"IFFT2D",
"IFFT3D",
"IRFFT",
"IRFFT2D",
"IRFFT3D",
"Identity",
"IdentityN",
"Imag",
"ImageProjectiveTransformV2",
"ImageProjectiveTransformV3",
"ImmutableConst",
"InTopK",
"InTopKV2",
"InitializeTable",
"InitializeTableFromDataset",
"InitializeTableFromTextFile",
"InitializeTableFromTextFileV2",
"InitializeTableV2",
"InplaceAdd",
"InplaceSub",
"InplaceUpdate",
"Inv",
"InvGrad",
"Invert",
"InvertPermutation",
"IsFinite",
"IsNan",
"IsVariableInitialized",
"LRN",
"LeakyRelu",
"LeakyReluGrad",
"LeftShift",
"Less",
"LessEqual",
"LinSpace",
"ListDiff",
"Log",
"LogMatrixDeterminant",
"LogSoftmax",
"LogicalAnd",
"LogicalNot",
"LogicalOr",
"LookupTableExport",
"LookupTableExportV2",
"LookupTableFind",
"LookupTableFindV2",
"LookupTableImport",
"LookupTableImportV2",
"LookupTableInsert",
"LookupTableInsertV2",
"LookupTableRemoveV2",
"LookupTableSize",
"LookupTableSizeV2",
"LoopCond",
"MapDataset",
"MatMul",
"MatrixBandPart",
"MatrixDeterminant",
"MatrixDiag",
"MatrixDiagPart",
"MatrixDiagPartV2",
"MatrixDiagPartV3",
"MatrixDiagV2",
"MatrixDiagV3",
"MatrixInverse",
"MatrixSetDiag",
"MatrixSetDiagV2",
"MatrixSetDiagV3",
"MatrixTriangularSolve",
"Max",
"MaxPool",
"MaxPool3D",
"MaxPool3DGrad",
"MaxPool3DGradGrad",
"MaxPoolGrad",
"MaxPoolGradGrad",
"MaxPoolGradGradV2",
"MaxPoolGradV2",
"MaxPoolGradWithArgmax",
"MaxPoolV2",
"MaxPoolWithArgmax",
"Maximum",
"Mean",
"Merge",
"MergeSummary",
"MergeV2Checkpoints",
"Mfcc",
"Min",
"Minimum",
"MirrorPad",
"MirrorPadGrad",
"ModelDataset",
"Mul",
"MulNoNan",
"Multinomial",
"MutableDenseHashTable",
"MutableDenseHashTableV2",
"MutableHashTable",
"MutableHashTableOfTensors",
"MutableHashTableOfTensorsV2",
"MutableHashTableV2",
"Neg",
"NextIteration",
"NoOp",
"NonMaxSuppression",
"NonMaxSuppressionV2",
"NonMaxSuppressionV3",
"NonMaxSuppressionV4",
"NonMaxSuppressionV5",
"NonMaxSuppressionWithOverlaps",
"NotEqual",
"OneHot",
"OnesLike",
"OptimizeDatasetV2",
"OptionalFromValue",
"OptionalGetValue",
"OptionalHasValue",
"OptionalNone",
"Pack",
"Pad",
"PadV2",
"PaddingFIFOQueue",
"PaddingFIFOQueueV2",
"ParallelConcat",
"ParallelDynamicStitch",
"ParseExample",
"ParseExampleV2",
"ParseSequenceExample",
"ParseSequenceExampleV2",
"ParseSingleExample",
"ParseSingleSequenceExample",
"Placeholder",
"PlaceholderV2",
"PlaceholderWithDefault",
"PopulationCount",
"Pow",
"PreventGradient",
"Print",
"PrintV2",
"Prod",
"Qr",
"QuantizeDownAndShrinkRange",
"QuantizeV2",
"QuantizedAdd",
"QuantizedAvgPool",
"QuantizedBatchNormWithGlobalNormalization",
"QuantizedBiasAdd",
"QuantizedConcat",
"QuantizedConv2D",
"QuantizedInstanceNorm",
"QuantizedMatMul",
"QuantizedMaxPool",
"QuantizedMul",
"QuantizedRelu",
"QuantizedRelu6",
"QuantizedReshape",
"QuantizedResizeBilinear",
"QueueClose",
"QueueCloseV2",
"QueueDequeue",
"QueueDequeueMany",
"QueueDequeueManyV2",
"QueueDequeueUpTo",
"QueueDequeueUpToV2",
"QueueDequeueV2",
"QueueEnqueue",
"QueueEnqueueMany",
"QueueEnqueueManyV2",
"QueueEnqueueV2",
"QueueIsClosed",
"QueueIsClosedV2",
"QueueSize",
"QueueSizeV2",
"RFFT",
"RFFT2D",
"RFFT3D",
"RGBToHSV",
"RaggedBincount",
"RaggedGather",
"RaggedRange",
"RaggedTensorFromVariant",
"RaggedTensorToSparse",
"RaggedTensorToTensor",
"RaggedTensorToVariant",
"RaggedTensorToVariantGradient",
"RandomGamma",
"RandomPoisson",
"RandomPoissonV2",
"RandomShuffle",
"RandomStandardNormal",
"RandomUniform",
"RandomUniformInt",
"Range",
"Rank",
"ReadFile",
"ReadVariableOp",
"Real",
"RealDiv",
"Reciprocal",
"ReciprocalGrad",
"Recv",
"ReduceDataset",
"ReduceJoin",
"RefEnter",
"RefExit",
"RefIdentity",
"RefMerge",
"RefNextIteration",
"RefSelect",
"RefSwitch",
"RegexFullMatch",
"RegexReplace",
"Relu",
"Relu6",
"Relu6Grad",
"ReluGrad",
"RemoteCall",
"RepeatDataset",
"RequantizationRange",
"Requantize",
"Reshape",
"ResizeBicubic",
"ResizeBicubicGrad",
"ResizeBilinear",
"ResizeBilinearGrad",
"ResizeNearestNeighbor",
"ResizeNearestNeighborGrad",
"ResourceApplyAdaMax",
"ResourceApplyAdadelta",
"ResourceApplyAdagrad",
"ResourceApplyAdagradDA",
"ResourceApplyAdagradV2",
"ResourceApplyAdam",
"ResourceApplyAdamWithAmsgrad",
"ResourceApplyAddSign",
"ResourceApplyCenteredRMSProp",
"ResourceApplyFtrl",
"ResourceApplyFtrlV2",
"ResourceApplyGradientDescent",
"ResourceApplyKerasMomentum",
"ResourceApplyMomentum",
"ResourceApplyPowerSign",
"ResourceApplyProximalAdagrad",
"ResourceApplyProximalGradientDescent",
"ResourceApplyRMSProp",
"ResourceGather",
"ResourceGatherNd",
"ResourceScatterAdd",
"ResourceScatterDiv",
"ResourceScatterMax",
"ResourceScatterMin",
"ResourceScatterMul",
"ResourceScatterNdAdd",
"ResourceScatterNdMax",
"ResourceScatterNdMin",
"ResourceScatterNdSub",
"ResourceScatterNdUpdate",
"ResourceScatterSub",
"ResourceScatterUpdate",
"ResourceSparseApplyAdadelta",
"ResourceSparseApplyAdagrad",
"ResourceSparseApplyAdagradDA",
"ResourceSparseApplyAdagradV2",
"ResourceSparseApplyCenteredRMSProp",
"ResourceSparseApplyFtrl",
"ResourceSparseApplyFtrlV2",
"ResourceSparseApplyKerasMomentum",
"ResourceSparseApplyMomentum",
"ResourceSparseApplyProximalAdagrad",
"ResourceSparseApplyProximalGradientDescent",
"ResourceSparseApplyRMSProp",
"ResourceStridedSliceAssign",
"Restore",
"RestoreSlice",
"RestoreV2",
"Reverse",
"ReverseSequence",
"ReverseV2",
"RightShift",
"Roll",
"Round",
"Rsqrt",
"RsqrtGrad",
"SampleDistortedBoundingBox",
"SampleDistortedBoundingBoxV2",
"Save",
"SaveSlices",
"SaveV2",
"ScalarSummary",
"ScatterNd",
"ScatterNdAdd",
"ScatterNdMax",
"ScatterNdMin",
"ScatterNdNonAliasingAdd",
"ScatterNdSub",
"ScatterNdUpdate",
"SegmentMax",
"SegmentMean",
"SegmentMin",
"SegmentProd",
"SegmentSum",
"Select",
"SelectV2",
"Selu",
"SeluGrad",
"Send",
"SerializeTensor",
"Shape",
"ShapeN",
"ShardedFilename",
"ShardedFilespec",
"Sigmoid",
"SigmoidGrad",
"Sign",
"Sin",
"Sinh",
"Size",
"Slice",
"Softmax",
"SoftmaxCrossEntropyWithLogits",
"Softplus",
"SoftplusGrad",
"Softsign",
"SoftsignGrad",
"SpaceToBatch",
"SpaceToBatchND",
"SpaceToDepth",
"SparseAdd",
"SparseApplyAdadelta",
"SparseApplyAdagrad",
"SparseApplyAdagradDA",
"SparseApplyAdagradV2",
"SparseApplyCenteredRMSProp",
"SparseApplyFtrl",
"SparseApplyFtrlV2",
"SparseApplyMomentum",
"SparseApplyProximalAdagrad",
"SparseApplyProximalGradientDescent",
"SparseApplyRMSProp",
"SparseBincount",
"SparseCross",
"SparseCrossHashed",
"SparseCrossV2",
"SparseFillEmptyRows",
"SparseFillEmptyRowsGrad",
"SparseReduceSum",
"SparseReorder",
"SparseReshape",
"SparseSegmentMean",
"SparseSegmentMeanGrad",
"SparseSegmentMeanWithNumSegments",
"SparseSegmentSqrtN",
"SparseSegmentSqrtNGrad",
"SparseSegmentSqrtNWithNumSegments",
"SparseSegmentSum",
"SparseSegmentSumGrad",
"SparseSegmentSumWithNumSegments",
"SparseSlice",
"SparseSoftmaxCrossEntropyWithLogits",
"SparseTensorDenseMatMul",
"SparseToDense",
"SparseToSparseSetOperation",
"Split",
"SplitV",
"Sqrt",
"SqrtGrad",
"Square",
"SquaredDifference",
"Squeeze",
"Stack",
"StackClose",
"StackCloseV2",
"StackPop",
"StackPopV2",
"StackPush",
"StackPushV2",
"StackV2",
"StatelessMultinomial",
"StatelessRandomGammaV2",
"StatelessRandomGammaV3",
"StatelessRandomGetAlg",
"StatelessRandomGetKeyCounter",
"StatelessRandomGetKeyCounterAlg",
"StatelessRandomNormal",
"StatelessRandomNormalV2",
"StatelessRandomPoisson",
"StatelessRandomUniform",
"StatelessRandomUniformFullInt",
"StatelessRandomUniformFullIntV2",
"StatelessRandomUniformInt",
"StatelessRandomUniformIntV2",
"StatelessRandomUniformV2",
"StatelessSampleDistortedBoundingBox",
"StatelessTruncatedNormal",
"StatelessTruncatedNormalV2",
"StaticRegexFullMatch",
"StaticRegexReplace",
"StopGradient",
"StridedSlice",
"StridedSliceAssign",
"StridedSliceGrad",
"StringFormat",
"StringJoin",
"StringLength",
"StringLower",
"StringSplit",
"StringSplitV2",
"StringStrip",
"StringToHashBucket",
"StringToHashBucketFast",
"StringToHashBucketStrong",
"StringToNumber",
"Sub",
"Substr",
"Sum",
"Switch",
"SymbolicGradient",
"TakeDataset",
"TakeWhileDataset",
"Tan",
"Tanh",
"TanhGrad",
"TemporaryVariable",
"TensorArray",
"TensorArrayClose",
"TensorArrayCloseV2",
"TensorArrayCloseV3",
"TensorArrayConcat",
"TensorArrayConcatV2",
"TensorArrayConcatV3",
"TensorArrayGather",
"TensorArrayGatherV2",
"TensorArrayGatherV3",
"TensorArrayGrad",
"TensorArrayGradV2",
"TensorArrayGradV3",
"TensorArrayGradWithShape",
"TensorArrayPack",
"TensorArrayRead",
"TensorArrayReadV2",
"TensorArrayReadV3",
"TensorArrayScatter",
"TensorArrayScatterV2",
"TensorArrayScatterV3",
"TensorArraySize",
"TensorArraySizeV2",
"TensorArraySizeV3",
"TensorArraySplit",
"TensorArraySplitV2",
"TensorArraySplitV3",
"TensorArrayUnpack",
"TensorArrayV2",
"TensorArrayV3",
"TensorArrayWrite",
"TensorArrayWriteV2",
"TensorArrayWriteV3",
"TensorListConcat",
"TensorListConcatLists",
"TensorListConcatV2",
"TensorListElementShape",
"TensorListFromTensor",
"TensorListGather",
"TensorListGetItem",
"TensorListLength",
"TensorListPopBack",
"TensorListPushBack",
"TensorListPushBackBatch",
"TensorListReserve",
"TensorListResize",
"TensorListScatter",
"TensorListScatterIntoExistingList",
"TensorListScatterV2",
"TensorListSetItem",
"TensorListSplit",
"TensorListStack",
"TensorMapErase",
"TensorMapHasKey",
"TensorMapInsert",
"TensorMapLookup",
"TensorMapSize",
"TensorMapStackKeys",
"TensorScatterAdd",
"TensorScatterMax",
"TensorScatterMin",
"TensorScatterSub",
"TensorScatterUpdate",
"TensorSliceDataset",
"TensorStridedSliceUpdate",
"Tile",
"TileGrad",
"Timestamp",
"TopK",
"TopKV2",
"Transpose",
"TruncateDiv",
"TruncatedNormal",
"UnicodeDecode",
"UnicodeDecodeWithOffsets",
"UnicodeEncode",
"UnicodeTranscode",
"Unique",
"UniqueV2",
"UniqueWithCounts",
"UniqueWithCountsV2",
"Unpack",
"UnsortedSegmentJoin",
"UnsortedSegmentMax",
"UnsortedSegmentMin",
"UnsortedSegmentProd",
"UnsortedSegmentSum",
"UnwrapDatasetVariant",
"UpperBound",
"VarHandleOp",
"VarIsInitializedOp",
"Variable",
"VariableShape",
"VariableV2",
"Where",
"WrapDatasetVariant",
"WriteFile",
"Xdivy",
"Xlog1py",
"Xlogy",
"ZerosLike",
"_Arg",
"_ArrayToList",
"_DeviceArg",
"_DeviceRetval",
"_FusedConv2D",
"_HostCast",
"_HostRecv",
"_HostSend",
"_ListToArray",
"_ParallelConcatStart",
"_ParallelConcatUpdate",
"_ReadVariablesOp",
"_Recv",
"_Retval",
"_Send",
"_SwitchN",
"_VarHandlesOp",
});
return *allowlisted_flex_ops;
}
const std::set<std::string>& GetTFTextFlexAllowlist() {
static const std::set<std::string>* tftext_flex_ops =
new std::set<std::string>({
"CaseFoldUTF8",
"ConstrainedSequence",
"MaxSpanningTree",
"NormalizeUTF8",
"NormalizeUTF8WithOffsetsMap",
"RegexSplitWithOffsets",
"RougeL",
"SentenceFragments",
"SentencepieceOp",
"SentencepieceTokenizeOp",
"SentencepieceTokenizeWithOffsetsOp",
"SentencepieceDetokenizeOp",
"SentencepieceVocabSizeOp",
"SplitMergeTokenizeWithOffsets",
"TFText>NgramsStringJoin",
"TFText>WhitespaceTokenizeWithOffsetsV2",
"TokenizerFromLogits",
"UnicodeScriptTokenizeWithOffsets",
"WhitespaceTokenizeWithOffsets",
"WordpieceTokenizeWithOffsets",
});
return *tftext_flex_ops;
}
bool IsAllowedTFTextOpForFlex(const std::string& op_name) {
if (GetTFTextFlexAllowlist().count(op_name) == 0) return false;
return tensorflow::OpRegistry::Global()->LookUp(op_name) != nullptr;
}
const std::set<std::string>& GetSentencePieceFlexAllowlist() {
static const std::set<std::string>* sentencepiece_flex_ops =
new std::set<std::string>({
"SentencepieceGetPieceSize",
"SentencepiecePieceToId",
"SentencepieceIdToPiece",
"SentencepieceEncodeDense",
"SentencepieceEncodeSparse",
"SentencepieceDecode",
});
return *sentencepiece_flex_ops;
}
bool IsAllowedSentencePieceOpForFlex(const std::string& op_name) {
if (GetSentencePieceFlexAllowlist().count(op_name) == 0) return false;
return tensorflow::OpRegistry::Global()->LookUp(op_name) != nullptr;
}
bool IsAllowlistedFlexOp(const std::string& tensorflow_op_name) {
if (GetFlexAllowlist().count(tensorflow_op_name) != 0) return true;
return IsAllowedTFTextOpForFlex(tensorflow_op_name) ||
IsAllowedSentencePieceOpForFlex(tensorflow_op_name);
}
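// Illustrative behaviour: ops named directly in GetFlexAllowlist() (e.g.
// "Add", "Conv2D") are always allowed, while TF Text and SentencePiece ops
// such as "CaseFoldUTF8" are only allowed when the corresponding op is also
// registered in the global tensorflow::OpRegistry at runtime.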
}
} | #include "tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.h"
#include <set>
#include <string>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops_internal.h"
#include "tensorflow/core/framework/op_kernel.h"
namespace tflite {
namespace flex {
std::set<std::string> GetAllCpuKernels() {
auto is_cpu_kernel = [](const tensorflow::KernelDef& def) {
return (def.device_type() == "CPU" || def.device_type() == "DEFAULT");
};
tensorflow::KernelList kernel_list =
tensorflow::GetFilteredRegisteredKernels(is_cpu_kernel);
std::set<std::string> result;
for (int i = 0; i < kernel_list.kernel_size(); ++i) {
tensorflow::KernelDef kernel_def = kernel_list.kernel(i);
result.insert(kernel_def.op());
}
return result;
}
TEST(AllowlistedFlexOpsTest, EveryOpHasKernel) {
const std::set<std::string>& allowlist = GetFlexAllowlist();
std::set<std::string> all_kernels = GetAllCpuKernels();
for (const std::string& op_name : allowlist) {
EXPECT_EQ(all_kernels.count(op_name), 1)
<< op_name << " op is added to flex allowlist "
<< "but its kernel is not found.";
}
}
TEST(TfTextUtilsTest, TestFlexOpAllowed) {
EXPECT_FALSE(IsAllowedTFTextOpForFlex("ConstrainedSequence"));
}
TEST(TfTextUtilsTest, TestFlexOpNotAllowed) {
EXPECT_FALSE(IsAllowedTFTextOpForFlex("ngrams"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/delegates/flex/allowlisted_flex_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/flex/allowlisted_flex_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
785c1424-b0f8-4811-be48-bce25417681f | cpp | tensorflow/tensorflow | legalize_tf_to_hlo | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/log/log.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using metrics::IncrementTfMlirBridgeSecondPhaseCounter;
using metrics::MlirBridgeSecondPhaseMetric;
using tpu::MlirToHloArgs;
absl::StatusOr<XlaCompilationResult> LegalizeTfToHlo(
const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
xla::CompileOnlyClient* client, XlaCompilationResult* compilation_result) {
LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
"Combined MLIR Tf2Xla Bridge.";
absl::StatusOr<std::string> mlir_compilation =
internal::CompileFromMlirToXlaHlo(
false, computation, metadata, device_type,
shape_determination_fns, use_tuple_args, compilation_result,
custom_legalization_passes, arg_shapes, arg_core_mapping,
per_core_arg_shapes);
if (!mlir_compilation.ok()) {
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedMlirFailure);
return mlir_compilation.status();
}
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedMlirSuccess);
Status old_bridge_status = v1::CompileTensorflowGraphToHlo(
MlirToHloArgs{mlir_compilation.value()}, metadata, use_tuple_args,
shape_determination_fns, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result);
if (!old_bridge_status.ok()) {
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedOldFailure);
return old_bridge_status;
}
IncrementTfMlirBridgeSecondPhaseCounter(
MlirBridgeSecondPhaseMetric::kMlirCombinedOldSuccess);
return *compilation_result;
}
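// The function above therefore runs two bridges back to back: the MLIR-based
// phase (CompileFromMlirToXlaHlo) produces a serialized module string, which
// is then fed through the graph-based phase (v1::CompileTensorflowGraphToHlo);
// the kMlirCombined* counters record which of the two phases succeeded or
// failed, for monitoring purposes.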
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.h"
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using ::tensorflow::monitoring::testing::CellReader;
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kMlirLegalizeCount[] =
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_count";
static constexpr char kMlirLegalizeErrors[] =
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count";
static constexpr char kBridgeStatusCounter[] =
"/tensorflow/core/tf2xla/api/v2/phase2_compilation_status";
constexpr char kMlirCombinedMlirSuccess[] = "kMlirCombinedMlirSuccess";
constexpr char kMlirCombinedOldSuccess[] = "kMlirCombinedOldSuccess";
constexpr char kMlirCombinedOldFailure[] = "kMlirCombinedOldFailure";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0 : tensor<1xf32>) -> tensor<1xf32> {
%0 = "tf.Acos"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
func.return %0 : tensor<1xf32>
}
})";
static constexpr char kBadMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
absl::StatusOr<XlaCompiler::CompilationResult> CompileMlirModule(
const char* module_str) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_UNSPECIFIED;
mlir_to_hlo_args.mlir_module = module_str;
se::Platform* platform =
se::PlatformManager::PlatformWithName("Host").value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
std::vector<TensorShape> arg_shapes = {{1}};
TPUCompileMetadataProto metadata_proto;
auto arg = metadata_proto.add_args();
arg->set_dtype(DataType::DT_FLOAT);
arg->set_kind(TPUCompileMetadataProto::Arg::PARAMETER);
metadata_proto.add_retvals();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
auto compilation_result = std::make_unique<XlaCompilationResult>();
return LegalizeTfToHlo(mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
{}, arg_shapes,
&arg_core_mapping, &per_core_arg_shapes,
custom_legalization_passes, client,
compilation_result.get());
}
TEST(LegalizeWithCombinedBridge, DoesNotUseMlirLowering) {
CellReader<int64_t> mlir_bridge_legalize_count(kMlirLegalizeCount);
CellReader<int64_t> counts(kBridgeStatusCounter);
auto result = CompileMlirModule(kMlirModuleStr);
ASSERT_THAT(result, IsOkOrFiltered());
EXPECT_EQ(mlir_bridge_legalize_count.Delta("tf.Acos"), 0);
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedMlirSuccess), 1));
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedOldSuccess), 1));
}
TEST(LegalizeWithCombinedBridge,
CorrectlyCountsMlirBridgePassingAndGraphBridgeFailing) {
CellReader<int64_t> legalize_failure_count(kMlirLegalizeErrors);
CellReader<int64_t> counts(kBridgeStatusCounter);
auto result = CompileMlirModule(kBadMlirModuleStr);
ASSERT_FALSE(result.ok());
EXPECT_EQ(legalize_failure_count.Read("tf.DoesntExist", "Unknown"), 0);
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedMlirSuccess), 1));
EXPECT_THAT(result,
IncrementedOrFiltered(counts.Delta(kMlirCombinedOldFailure), 1));
}
TEST(LegalizeWithCombinedBridge, RecordsDynamicOps) {
static constexpr char kDynamismFunctionCounterStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/dynamism_function_counter";
constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
CellReader<int64_t> dynamic_function_op_count(
kDynamismFunctionCounterStreamzName);
auto result = CompileMlirModule(kMlirModuleStr);
ASSERT_TRUE(result.ok());
EXPECT_EQ(dynamic_function_op_count.Delta(kNotDynamicFunctionName), 1);
}
};
};
}; | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_to_hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e95af84-26d7-4b74-b1e2-81c690cb5511 | cpp | tensorflow/tensorflow | clustering_bridge_passes | tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc | tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include <string>
#include "absl/log/log.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::OpPassManager;
using mlir::func::FuncOp;
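// Builds the replicated (TPU) clustering pipeline: executor graph pruning and
// functional conversion, shape inference, TPU cluster formation, resource-op
// lifting, outside-compilation extraction, cluster outlining, sharding
// identification, and a final clustering verification pass.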
void AddReplicatedBridgeClusteringPipelinePasses(OpPassManager& pm,
llvm::StringRef module_name) {
const llvm::SmallVector<std::string, 4> ops_to_preserve = {
"tf.TPUReplicateMetadata", "tf.TPUCompilationResult",
"tf.TPUReplicatedOutput"};
bool strict_clusters =
tensorflow::GetMlirCommonFlags()->tf_mlir_enable_strict_clusters;
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
pm.addNestedPass<FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUReorderReplicateAndPartitionedInputsPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateDecomposeReduceDatasetPass());
pm.addPass(mlir::TFDevice::CreateEmbeddingPipeliningPass());
pm.addPass(mlir::TFDevice::CreateEmbeddingSequencingPass());
pm.addPass(tensorflow::tf2xla::internal::CreateTPUClusterFormationPass(
strict_clusters));
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateDeviceAttributeToLaunchPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsInClusterPass());
{
OpPassManager& func_pm = pm.nest<FuncOp>();
func_pm.addPass(mlir::TFTPU::CreateTPUHostComputationExpansionPass());
func_pm.addPass(mlir::TFTPU::CreateTPUUpdateEmbeddingEnqueueOpInputsPass());
}
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateLaunchToDeviceAttributePass());
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantInDeviceClusterPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
pm.addPass(mlir::TFDevice::CreateResourceOpLiftingPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<FuncOp>(mlir::createCSEPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_merge_control_flow_pass) {
pm.addPass(mlir::TFDevice::CreateMergeControlFlowPass());
}
pm.addPass(
tensorflow::tf2xla::internal::CreateMarkOpsForOutsideCompilationPass());
pm.addPass(tensorflow::tf2xla::internal::
CreateExtractHeadTailOutsideCompilationPass());
pm.addPass(
tensorflow::tf2xla::internal::CreateExtractOutsideCompilationPass());
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateVerifyNoOutsideCompilationMarkersPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateClusterConstantSinkingPass());
pm.addPass(mlir::TF::CreateResourceDeviceInferencePass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateHoistBroadcastReadPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateXlaBroadcastPass());
pm.addPass(mlir::TFDevice::CreateClusterOutliningPass());
pm.addPass(mlir::TFTPU::CreateTPUResourceReadForWritePass());
pm.addPass(mlir::TFDevice::CreateMarkInputOutputAliasesPass());
pm.addPass(
tensorflow::tf2xla::internal::CreateTPUShardingIdentificationPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass());
pm.addPass(mlir::TFDevice::CreateAnnotateParameterReplicationPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateRewriteTPUEmbeddingOpsPass());
pm.addPass(mlir::TFTPU::CreateTPUAnnotateDynamicShapeInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateHoistReplicateInvariantResourceWritesPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateVerifyClusteringPass());
}
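// Inliner callback that intentionally adds no canonicalization passes.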
void NoCanonicalization(OpPassManager& pm) {}
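// Builds the non-replicated clustering pipeline: input validation, executor
// graph pruning and functional conversion, XLA cluster formation, resource-op
// decomposition and lifting, inlining, cluster outlining, and clustering
// verification.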
void AddNonReplicatedBridgeClusteringPipelinePasses(OpPassManager& pm) {
VLOG(2) << "Create TF XLA Bridge pipeline";
pm.addPass(mlir::TFDevice::CreateXlaValidateInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateCanonicalizeCompileAndReplicateAttributesPass());
const llvm::SmallVector<std::string, 4> ops_to_preserve = {};
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
pm.addNestedPass<FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(tensorflow::tf2xla::internal::CreateXlaClusterFormationPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsInClusterPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::createInlinerPass({}, NoCanonicalization));
pm.addPass(mlir::TFDevice::CreateResourceOpLiftingPass());
pm.addPass(mlir::TFDevice::CreateClusterOutliningPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateVerifyClusteringPass());
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include <gtest/gtest.h>
#include "mlir/Pass/PassManager.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::OpPassManager;
TEST(ClusteringBridgePassesTest, AddsBridgePasses) {
OpPassManager pass_manager;
AddReplicatedBridgeClusteringPipelinePasses(pass_manager);
EXPECT_EQ(pass_manager.size(), 45);
}
TEST(ClusteringBridgePassesTest, AddsNonTPUBridgePasses) {
OpPassManager pass_manager;
AddNonReplicatedBridgeClusteringPipelinePasses(pass_manager);
EXPECT_EQ(pass_manager.size(), 15);
}
};
};
}; | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d75b907d-df98-4296-ba6f-3db885a2d24f | cpp | tensorflow/tensorflow | mlir_pass_instrumentation | tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.cc | tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include <algorithm>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/platform/logging.h"
namespace mlir {
class MlirPassInstrumentationRegistry {
public:
static MlirPassInstrumentationRegistry& Instance() {
static MlirPassInstrumentationRegistry* r =
new MlirPassInstrumentationRegistry;
return *r;
}
std::unordered_map<std::string,
std::function<std::unique_ptr<PassInstrumentation>()>>
instrumentors_;
};
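// Adds a named instrumentor factory to the process-wide registry; a duplicate
// name keeps the original entry and logs the collision.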
void RegisterPassInstrumentor(
const std::string& name,
std::function<std::unique_ptr<PassInstrumentation>()> creator) {
MlirPassInstrumentationRegistry& r =
MlirPassInstrumentationRegistry::Instance();
auto result = r.instrumentors_.emplace(name, creator);
if (!result.second) {
VLOG(1) << "Duplicate MLIR pass instrumentor registration";
}
}
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>>
GetPassInstrumentors() {
MlirPassInstrumentationRegistry& r =
MlirPassInstrumentationRegistry::Instance();
std::vector<std::function<std::unique_ptr<PassInstrumentation>()>> result;
result.reserve(r.instrumentors_.size());
std::transform(r.instrumentors_.begin(), r.instrumentors_.end(),
std::back_inserter(result), [](auto v) { return v.second; });
return result;
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include <cstddef>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
namespace mlir {
namespace {
static const char* kTestInstrumentationName = "test-intrumentatron";
static const char* kTestInstrumentationSearch = "tf.Identity";
struct StringStream : public llvm::raw_ostream {
StringStream() { SetUnbuffered(); }
~StringStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
ss.write(ptr, size);
}
std::stringstream ss;
};
class TestPassInstrumentation : public ::testing::Test {
public:
void SetPassThatChangedIdentity(absl::string_view pass_name) {
pass_that_changed_identity_ = pass_name;
}
absl::string_view GetPassThatChangedIdentity() {
return pass_that_changed_identity_;
}
private:
std::string pass_that_changed_identity_;
friend class TestInstrumentor;
};
class TestInstrumentor : public PassInstrumentation {
public:
explicit TestInstrumentor(TestPassInstrumentation* test) : test_(test) {}
private:
void runBeforePass(Pass* pass, Operation* op) override {
StringStream stream;
op->print(stream, mlir::OpPrintingFlags().useLocalScope());
ops_seen_by_pass_[pass] = stream.ss.str();
}
void runAfterPass(Pass* pass, Operation* op) override {
StringStream stream;
op->print(stream, mlir::OpPrintingFlags().useLocalScope());
if (!absl::StrContains(stream.ss.str(), kTestInstrumentationSearch) &&
absl::StrContains(ops_seen_by_pass_[pass],
kTestInstrumentationSearch)) {
test_->SetPassThatChangedIdentity(pass->getName().str());
}
}
private:
TestPassInstrumentation* test_;
std::unordered_map<mlir::Pass*, std::string> ops_seen_by_pass_;
};
TEST_F(TestPassInstrumentation, CreatedCalledAndSetsPassName) {
RegisterPassInstrumentor(kTestInstrumentationName, [&]() {
return std::make_unique<TestInstrumentor>(this);
});
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
SetPassThatChangedIdentity("");
std::vector<::tensorflow::TensorShape> arg_shapes = {{1}};
auto compilation_result = tensorflow::XlaCompilationResult();
TF_EXPECT_OK(tensorflow::CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
true, false,
{}, &compilation_result)
.status());
EXPECT_FALSE(GetPassThatChangedIdentity().empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a8c62eca-f2f6-4cb4-a2bd-14bcec19ffd3 | cpp | tensorflow/tensorflow | mlir_bridge_pass_util | tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc | tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/tf2xla/tf2xla_defs.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/graph/graph.h"
#include "tsl/platform/status.h"
namespace tensorflow {
using ::mlir::failure;
using ::mlir::LogicalResult;
using ::mlir::success;
namespace {
constexpr absl::string_view kPartitionedCall = "TPUPartitionedCall";
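// Returns success if `predicate` holds for `graph` itself or for any function
// reachable from it through `function_library`.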
LogicalResult HasAttr(
const Graph& graph, const FunctionLibraryDefinition* function_library,
const std::function<bool(const Graph& graph)>& predicate) {
if (predicate(graph)) {
return success();
}
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (!function_library) return failure();
for (const std::string& func_name :
function_library->ReachableDefinitions(graph_def).ListFunctionNames()) {
const FunctionDef* func_def = function_library->Find(func_name);
std::unique_ptr<FunctionBody> func_body;
absl::Status status = FunctionDefToBodyHelper(
*func_def, AttrSlice(&func_def->attr()), function_library, &func_body);
if (!status.ok()) {
LOG(ERROR) << "Failed to parse " << func_name << ": "
<< absl::StatusMessageAsCStr(status);
return failure();
}
if (predicate(*func_body->graph)) {
return success();
}
}
return failure();
}
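// True if the graph has a DT_RESOURCE _Arg node assigned to a "ps" job, i.e.
// parameter-server hosted resource variables.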
bool HasPsWithResourceVariable(const Graph& graph) {
const std::string jobType = "ps";
const std::string nodeType = "_Arg";
const std::string attrKey = "T";
for (const Node* node : graph.nodes()) {
if (node->type_string() == nodeType) {
auto device_name = node->assigned_device_name();
DeviceNameUtils::ParsedName device;
if (DeviceNameUtils::ParseFullName(device_name, &device) &&
device.has_job && device.job == jobType) {
for (const auto& attr : node->attrs()) {
auto attr_key = attr.first;
auto attr_value = attr.second;
if (attr_key == attrKey &&
attr_value.value_case() == AttrValue::kType &&
attr_value.type() == DT_RESOURCE) {
return true;
}
}
}
}
}
return false;
}
bool IsNonReplicatedGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
auto predicate = [](const Graph& graph) {
const std::string kStatefulPartitionedCallOp = "StatefulPartitionedCall";
for (const Node* node : graph.nodes()) {
auto node_op = node->type_string();
if (node_op == kStatefulPartitionedCallOp) {
auto attr = node->attrs().FindByString(std::string(kMustCompileAttr));
if (attr != nullptr && attr->b() == true) {
return true;
}
}
}
return false;
};
return HasAttr(graph, function_library, predicate).succeeded();
}
bool IsReplicatedGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
auto predicate = [](const Graph& graph) {
for (const Node* node : graph.nodes()) {
if (node->attrs().FindByString(std::string(kTpuReplicateAttr))) {
return true;
}
}
return false;
};
return HasAttr(graph, function_library, predicate).succeeded();
}
bool IsReplicatedGraph(mlir::ModuleOp module) {
auto walk_result = module.walk([&](mlir::Operation* op) {
const llvm::StringRef tpu_replicate_attr_name(kTpuReplicateAttr.data(),
kTpuReplicateAttr.size());
auto replicate_attr =
op->getAttrOfType<mlir::StringAttr>(tpu_replicate_attr_name);
if (replicate_attr) return mlir::WalkResult::interrupt();
return mlir::WalkResult::advance();
});
return walk_result.wasInterrupted();
}
bool DoesGraphContainTPUPartitionedCall(const Graph& graph) {
for (const Node* node : graph.nodes()) {
if (node->type_string() == kPartitionedCall) return true;
}
return false;
}
bool DoReachableFuncsContainTPUPartitionedCall(
const GraphDef& graph_def, const FunctionLibraryDefinition& flib_def) {
for (const std::string& func_name :
flib_def.ReachableDefinitions(graph_def).ListFunctionNames()) {
const FunctionDef* func_def = flib_def.Find(func_name);
std::unique_ptr<FunctionBody> func_body;
if (!FunctionDefToBodyHelper(*func_def, AttrSlice(&func_def->attr()),
&flib_def, &func_body)
.ok())
return false;
if (DoesGraphContainTPUPartitionedCall(*func_body->graph)) return true;
}
return false;
}
bool AreFunctionsFromFlibDefInference(
const FunctionLibraryDefinition& flib_def) {
for (const std::string& func_name : flib_def.ListFunctionNames()) {
const FunctionDef* func_def = flib_def.Find(func_name);
for (const NodeDef& node_def : func_def->node_def()) {
if (node_def.op() == kPartitionedCall) return true;
}
}
return false;
}
}
bool IsSupportedByNonReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library) {
return IsNonReplicatedGraph(graph, function_library) &&
HasPsWithResourceVariable(graph);
}
bool IsSupportedByReplicatedBridge(
const Graph& graph, const FunctionLibraryDefinition* function_library) {
return IsReplicatedGraph(graph, function_library);
}
bool IsSupportedByReplicatedBridge(mlir::ModuleOp module) {
return IsReplicatedGraph(module);
}
bool HasTPUPartitionedCallOpInModule(mlir::ModuleOp module) {
bool has_tpu_partitioned_call = false;
for (auto func_op : module.getOps<mlir::func::FuncOp>()) {
func_op->walk([&](mlir::TF::TPUPartitionedCallOp op) {
has_tpu_partitioned_call = true;
});
if (has_tpu_partitioned_call) break;
}
return has_tpu_partitioned_call;
}
bool IsInferenceGraph(const Graph& graph,
const FunctionLibraryDefinition* function_library) {
if (DoesGraphContainTPUPartitionedCall(graph)) return true;
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
if (DoReachableFuncsContainTPUPartitionedCall(graph_def, graph.flib_def()))
return true;
if (AreFunctionsFromFlibDefInference(graph.flib_def())) return true;
if (function_library == nullptr) return false;
if (DoReachableFuncsContainTPUPartitionedCall(graph_def, *function_library))
return true;
if (AreFunctionsFromFlibDefInference(*function_library)) return true;
return false;
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.h"
#include <vector>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/tpu_functional_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/tf2xla/tf2xla_defs.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/platform/enable_tf2_utils.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
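// Function that forwards a resource through tf.Identity; used to build a
// graph the non-replicated bridge check should accept.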
FunctionDef PassThroughResource() {
return FunctionDefHelper::Define(
"PassThroughResource",
{"in: resource"},
{"out: resource"},
{},
{{{"out"}, "Identity", {"in"}, {{"T", DataType::DT_RESOURCE}}}});
}
TEST(IsSupportedByNonReplicatedBridge, NonReplicatedGraph) {
const FunctionDef& fd = PassThroughResource();
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kEagerRuntime);
tensorflow::set_tf2_execution(true);
ConfigProto config = ConfigProto();
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::_Arg(root.WithOpName("A"), DT_RESOURCE, 0);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* call;
NameAttrList f_name_attr;
f_name_attr.set_name(fd.signature().name());
TF_ASSERT_OK(
NodeBuilder("B", "StatefulPartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_RESOURCE})
.Attr("Tout", {DT_RESOURCE})
.Attr("f", f_name_attr)
.Finalize(root.graph(), &call));
call->AddAttr(std::string(kMustCompileAttr), true);
TF_ASSERT_OK(root.ToGraph(&graph));
for (Node* node : graph.nodes()) {
node->set_assigned_device_name("/job:ps/replica:0/task:0/device:GPU:0");
}
EXPECT_TRUE(
IsSupportedByNonReplicatedBridge(graph, nullptr));
}
TEST(IsSupportedByReplicatedBridge, ReplicatedGraph) {
const FunctionDef& fd = test::function::XTimesTwo();
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kEagerRuntime);
tensorflow::set_tf2_execution(true);
ConfigProto config = ConfigProto();
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::_Arg(root.WithOpName("A"), DT_FLOAT, 0);
std::vector<NodeBuilder::NodeOut> inputs({NodeBuilder::NodeOut(a.node())});
Node* call;
NameAttrList f_name_attr;
f_name_attr.set_name(fd.signature().name());
TF_ASSERT_OK(
NodeBuilder("B", "StatefulPartitionedCall", &root.graph()->flib_def())
.Input(inputs)
.Attr("Tin", {DT_FLOAT})
.Attr("Tout", {DT_FLOAT})
.Attr("f", f_name_attr)
.Finalize(root.graph(), &call));
call->AddAttr(std::string(kTpuReplicateAttr), "cluster");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(
IsSupportedByReplicatedBridge(graph, nullptr));
}
TEST(IsSupportedByReplicatedBridge, ReplicatedModule) {
const char* const code = R"mlir(
func.func @entry_func_1(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.Identity"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_TRUE(IsSupportedByReplicatedBridge(*module));
}
TEST(HasTPUPartitionedCallOpInModule, HasTPUPartitionedCallModule) {
const char* const code = R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() {
%outputs_0 = "tf.TPUOrdinalSelector"() {device = ""} : () -> tensor<?xi32>
"tf.TPUPartitionedCall"(%outputs_0) {f = @reachable_func} : (tensor<?xi32>) -> ()
func.return
}
func.func @reachable_func() {
func.return
}
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_TRUE(HasTPUPartitionedCallOpInModule(*module));
}
TEST(HasTPUPartitionedCallOpInModule, HasNotTPUPartitionedCallModule) {
const char* const code = R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() {
"tf.StatefulPartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @reachable_func} : () -> ()
func.return
}
func.func @reachable_func() {
func.return
}
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
EXPECT_FALSE(HasTPUPartitionedCallOpInModule(*module));
}
TEST(IsInferenceGraph, GraphContrainsTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
ops::TPUPartitionedCall f(root.WithOpName("f"), {x}, 0,
{DT_FLOAT}, f_name_attr);
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(IsInferenceGraph(graph, nullptr));
}
TEST(IsInferenceGraph, GraphDoesNotContrainTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_FALSE(IsInferenceGraph(graph, nullptr));
}
TEST(IsInferenceGraph, FlibDefIsNotNullptrAndContainsTPUPartitionedCall) {
FunctionDef fd = FunctionDefHelper::Define(
"XTimesTwoFloat",
{"x: float"},
{"y: float"},
{},
{
{{"two"},
"Const",
{},
{{"value", test::AsScalar<int32>(2)}, {"dtype", DT_INT64}}},
{{"scale"},
"Cast",
{"two"},
{{"SrcT", DT_INT64}, {"DstT", DT_FLOAT}}},
{{"y"}, "Mul", {"x", "scale"}, {{"T", DT_FLOAT}}},
{{"tpu_op"}, "TPUPartitionedCall", {}, {{"Tout", DT_FLOAT}}},
});
tensorflow::set_tf2_execution(true);
FunctionDefLibrary flib;
*flib.add_function() = fd;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
Graph graph(flib_def);
graph.SetConstructionContext(ConstructionContext::kDirectSession);
Scope root = Scope::NewRootScope().ExitOnError();
Output x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
NameAttrList f_name_attr;
f_name_attr.set_name("XTimesTwoFloat");
TF_ASSERT_OK(root.ToGraph(&graph));
EXPECT_TRUE(IsInferenceGraph(graph, &flib_def));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/mlir_bridge_pass_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b4f0a6ed-ff45-41a0-bf78-a11ec8f1d3c1 | cpp | tensorflow/tensorflow | logging_hooks | tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.cc | tensorflow/compiler/mlir/tf2xla/internal/logging_hooks_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include <memory>
#include <string>
#include "llvm/ADT/StringRef.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/core/util/debug_data_dumper.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::PassManager;
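// Dumps IR before and after each pass via DEBUG_DATA_DUMPER under
// `dump_group_name`; disables multithreading so per-pass printing stays
// deterministic, and enables pass timing.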
void EnablePassIRPrinting(PassManager& pm, const std::string& dump_group_name,
llvm::StringRef module_name) {
pm.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name, dump_group_name](const std::string& pass_tag_name,
mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), dump_group_name, pass_tag_name);
},
"",
true));
pm.enableTiming();
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/file_statistics.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::DialectRegistry;
using mlir::LogicalResult;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using mlir::PassManager;
using mlir::func::FuncOp;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tf2xla/internal/testdata/");
}
class LoggingHooksTest : public ::testing::Test {
public:
LoggingHooksTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
env_ = Env::Default();
test_group_name_ = "TestGroup";
test_dir_ = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir_.c_str(), 1);
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
Env* env_;
std::string test_dir_;
std::string test_group_name_;
};
TEST_F(LoggingHooksTest, DumpsPassData) {
std::vector<std::string> files;
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::IsEmpty());
TF_ASSERT_OK(CreateMlirModule("dead_const.mlir"));
PassManager pass_manager(&context_);
pass_manager.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
EnablePassIRPrinting(pass_manager, test_group_name_);
LogicalResult pass_status = pass_manager.run(mlir_module_.get());
EXPECT_TRUE(pass_status.succeeded());
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::SizeIs(2));
}
};
};
};
}; | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/logging_hooks_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
50fe09d5-ed31-4976-9a59-0f902604663b | cpp | tensorflow/tensorflow | legalize_tf_mlir | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc | tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/log/log.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/Pass.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/compilation_timer.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/tpu/tpu_compile.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
constexpr char kBridgeComponent[] = "TFXLABridge";
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
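// Deserializes the MLIR module, sets TPU infeed layouts, lowers it to XLA HLO
// (or stops earlier when `lower_to_xla_hlo` is false), and fills in per-core
// argument shapes and sharding information.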
absl::StatusOr<std::string> CompileFromMlirToXlaHlo(
bool lower_to_xla_hlo, const MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, llvm::StringRef device_type,
const XlaShapeLayoutHelpers::ShapeDeterminationFns& shape_determination_fns,
bool use_tuple_args, XlaCompiler::CompilationResult* compilation_result,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
const std::vector<TensorShape>& arg_shapes,
std::vector<ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes) {
LOG_FIRST_N(INFO, 1)
<< "Compiling MLIR computation to XLA HLO using MLIR tf2xla bridge in "
"the op by op fallback mode. This is Phase 2 of the TF2XLA Bridge. "
"Old (non-MLIR) bridge may be used in case of unsupported feature "
"or compilation failure from the MLIR bridge (full fallback mode).";
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
TF_RETURN_IF_ERROR(
DeserializeMlirModule(computation.mlir_module, &context, &mlir_module));
if (!mlir::SetTPUInfeedLayout(mlir_module))
return errors::Internal("Failed to set layouts attribute");
TF_ASSIGN_OR_RETURN(
auto compiled_mlir,
CompileSerializedMlirToXlaHlo(
SerializeMlirModule(mlir_module.get()), arg_shapes, device_type,
use_tuple_args, true, shape_determination_fns, compilation_result,
custom_legalization_passes, metadata.module_name(),
lower_to_xla_hlo));
auto sharding_result =
tpu::GetShardingInfo(metadata, arg_shapes, shape_determination_fns,
arg_core_mapping, per_core_arg_shapes);
if (!sharding_result.ok()) {
return sharding_result;
}
return compiled_mlir;
}
absl::StatusOr<XlaCompilationResult> LegalizeWithMlirBridge(
const tpu::MlirToHloArgs& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
llvm::StringRef device_type,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
std::vector<std::unique_ptr<mlir::Pass>>& custom_legalization_passes,
XlaCompilationResult* compilation_result) {
absl::StatusOr<std::string> mlir_bridge_status = CompileFromMlirToXlaHlo(
true, computation, metadata, device_type,
shape_determination_fns, use_tuple_args, compilation_result,
custom_legalization_passes, arg_shapes, arg_core_mapping,
per_core_arg_shapes);
if (mlir_bridge_status.ok()) {
VLOG(1) << "Successfully compiled MLIR computation to XLA HLO using MLIR "
"tf2xla bridge";
return *compilation_result;
}
tsl::error_logging::Log(kBridgeComponent,
"TFXLA_API_V2_BRIDGE_WITH_FALLBACK_FAIL",
mlir_bridge_status.status().ToString())
.IgnoreError();
return mlir_bridge_status.status();
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.h"
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/shape.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using tpu::MlirToHloArgs;
using tpu::ShardingAndIndex;
using tpu::TPUCompileMetadataProto;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
absl::StatusOr<std::string> CompileMlirModule(bool compile_to_xla_hlo,
const char* module_str) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.mlir_module = module_str;
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
auto compilation_result = std::make_unique<XlaCompilationResult>();
return CompileFromMlirToXlaHlo(
compile_to_xla_hlo, mlir_to_hlo_args, metadata_proto,
"XLA_TPU_JIT",
{}, use_tuple_args, compilation_result.get(),
custom_legalization_passes, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes);
}
absl::StatusOr<XlaCompiler::CompilationResult> LegalizeMlirModule(
const char* module_str) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.mlir_module = module_str;
std::vector<TensorShape> arg_shapes;
TPUCompileMetadataProto metadata_proto;
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
auto compilation_result = std::make_unique<XlaCompilationResult>();
return LegalizeWithMlirBridge(
mlir_to_hlo_args, metadata_proto, use_tuple_args,
"XLA_TPU_JIT",
{}, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes, custom_legalization_passes,
compilation_result.get());
}
TEST(LegalizeWithMlirBridge, LegalizesToMhloProto) {
auto result = LegalizeMlirModule(kMlirModuleStr);
ASSERT_THAT(result, IsOkOrFiltered());
EXPECT_THAT(result, ComputationProtoContains("opcode.*constant"));
}
TEST(CompileFromMlir, ReturnsModuleAsString) {
auto result = CompileMlirModule(true, kMlirModuleStr);
ASSERT_THAT(result, IsOkOrFiltered());
EXPECT_THAT(result, HasMlirModuleWith("mhlo.constant"));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/legalize_tf_mlir_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
486e1eb3-0d28-4e21-863a-18166ad5e866 | cpp | tensorflow/tensorflow | verify_clustering_pass | tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass.cc | tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass_test.cc | #include <memory>
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
#define GEN_PASS_DEF_VERIFYCLUSTERINGPASS
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h.inc"
using mlir::Operation;
using mlir::WalkResult;
class VerifyClusteringPass
: public impl::VerifyClusteringPassBase<VerifyClusteringPass> {
public:
void runOnOperation() override;
};
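// Fails the pass if any op lies outside the bridge-acceptable dialects or
// still carries the _xla_outside_compilation attribute after clustering.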
void VerifyClusteringPass::runOnOperation() {
Operation* func_op = getOperation();
auto walk_result = func_op->walk([&](Operation* op) {
if (!tensorflow::tf2xla::internal::IsInBridgeAcceptableDialects(op)) {
std::string error = "op is in dialect " +
op->getDialect()->getNamespace().str() +
" not in tf functional dialect";
op->emitError() << error;
return WalkResult::interrupt();
}
if (op->hasAttr(mlir::TF::kXlaOutsideCompilationAttr)) {
std::string error =
"op has outside compilation attribute _xla_outside_compilation which "
"is not allowed after clustering";
op->emitError() << error;
return mlir::WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
signalPassFailure();
}
}
}
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateVerifyClusteringPass() {
return std::make_unique<VerifyClusteringPass>();
}
}
}
} | #include <memory>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::mhlo::test::GetMlirModuleFromString;
class VerifyClusteringPassTest : public testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(CreateVerifyClusteringPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(VerifyClusteringPassTest, OnlyTfFunctionalPasses) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
return %0 : tensor<1xi32>
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
}
TEST_F(VerifyClusteringPassTest, NotTfFunctionalFails) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<3x32x32x3xf32> {
%0 = mhlo.constant dense<2.550000e+02> : tensor<3x32x32x3xf32>
return %0 : tensor<3x32x32x3xf32>
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/verify_clustering_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5302a1e6-ab88-4a50-a323-5dace045e104 | cpp | tensorflow/tensorflow | input_lowering_metrics_pass | tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass.cc | tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass_test.cc | #include <memory>
#include "llvm/ADT/DenseSet.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::Operation;
using mlir::WalkResult;
#define GEN_PASS_DEF_INPUTLOWERINGMETRICSPASS
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h.inc"
auto* dynamism_op_counter = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/dynamism_op_counter",
"Counts how many ops are dynamic", "op_name");
auto* dynamism_function_counter = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/dynamism_function_counter",
"Counts how many functions are dynamic", "has_dynamism");
constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
constexpr char kDynamicFunctionName[] = "kDynamicFunction";
class InputMetricsLoweringPass
: public impl::InputLoweringMetricsPassBase<InputMetricsLoweringPass> {
public:
void runOnOperation() override;
};
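// Counts each op that requires the XLA dynamic padder and records whether the
// function contains any such dynamic op at all.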
void InputMetricsLoweringPass::runOnOperation() {
bool has_dynamic_op = false;
Operation* func_op = getOperation();
func_op->walk([&](Operation* op) {
auto abstractOp = op->getRegisteredInfo();
if (!abstractOp) return WalkResult::advance();
if (mlir::mhlo::IsDynamicPadderOp(abstractOp->getTypeID())) {
has_dynamic_op = true;
dynamism_op_counter->GetCell(op->getName().getStringRef().str())
->IncrementBy(1);
}
return WalkResult::advance();
});
if (has_dynamic_op) {
dynamism_function_counter->GetCell(kDynamicFunctionName)->IncrementBy(1);
} else {
dynamism_function_counter->GetCell(kNotDynamicFunctionName)->IncrementBy(1);
}
}
}
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateInputLoweringMetricsPass() {
return std::make_unique<InputMetricsLoweringPass>();
}
}
}
} | #include <cstdint>
#include <memory>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::mhlo::test::GetMlirModuleFromString;
using ::tensorflow::monitoring::testing::CellReader;
constexpr char kNotDynamicFunctionName[] = "kNotDynamicFunction";
constexpr char kDynamicFunctionName[] = "kDynamicFunction";
static constexpr char kDynamismOpCounterStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/dynamism_op_counter";
static constexpr char kDynamismFunctionCounterStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/dynamism_function_counter";
class InputLoweringMetricsPassTest : public testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(CreateInputLoweringMetricsPass());
}
bool ModulesEqual(const ModuleOp& module_before,
const ModuleOp& module_after) {
return mlir::OperationEquivalence::isEquivalentTo(
module_before, module_after, mlir::OperationEquivalence::None);
}
mlir::LogicalResult Run() {
mlir::OwningOpRef<mlir::ModuleOp> module_before = module_->clone();
LogicalResult run_result = pm_->run(module_.get());
EXPECT_TRUE(ModulesEqual(*module_before, *module_));
return run_result;
}
private:
mlir::MLIRContext context_;
mlir::OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(InputLoweringMetricsPassTest, CountsNoDynamicOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> dynamism_op_counter(kDynamismOpCounterStreamzName);
CellReader<int64_t> dynamism_function_counter(
kDynamismFunctionCounterStreamzName);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(dynamism_function_counter.Delta(kNotDynamicFunctionName), 1);
}
TEST_F(InputLoweringMetricsPassTest, CountsDynamicOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%cst0 = "tf.Const"(){ value = dense<0> : tensor<3x5xi1>} : () -> tensor<3x5xi1>
%0 = "tf.Where"(%cst0) : (tensor<3x5xi1>) -> tensor<?x2xi64>
func.return
}
})";
CellReader<int64_t> dynamism_counter(kDynamismOpCounterStreamzName);
CellReader<int64_t> dynamism_function_counter(
kDynamismFunctionCounterStreamzName);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(dynamism_counter.Delta("tf.Where"), 1);
EXPECT_EQ(dynamism_function_counter.Delta(kDynamicFunctionName), 1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/passes/input_lowering_metrics_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0b7f7a99-2880-427e-b15f-c6805f0b8c16 | cpp | tensorflow/tensorflow | dialect_detection_utils | tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.cc | tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.h"
#include <set>
#include <string>
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
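// An op is acceptable to the bridge if its dialect is one of the builtin/func
// namespaces or one of the TF dialects (tf, tf_device).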
bool IsInBridgeAcceptableDialects(mlir::Operation* op) {
const std::set<std::string> kBuiltinNamespaces = {"func", "return",
"builtin"};
const std::set<std::string> kBridgeAcceptableNamespaces = {"tf", "tf_device"};
  bool isInDefaultNamespaces =
kBuiltinNamespaces.find(op->getDialect()->getNamespace().str()) !=
kBuiltinNamespaces.end();
bool isInBridgeAcceptableNamespaces =
kBridgeAcceptableNamespaces.find(
op->getDialect()->getNamespace().str()) !=
kBridgeAcceptableNamespaces.end();
  return isInDefaultNamespaces || isInBridgeAcceptableNamespaces;
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.h"
#include <gtest/gtest.h>
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "stablehlo/dialect/ChloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
namespace {
using mlir::MLIRContext;
using mlir::OpBuilder;
using mlir::Operation;
using mlir::OperationState;
using mlir::UnknownLoc;
using mlir::chlo::ChloDialect;
using mlir::TF::TensorFlowDialect;
using tensorflow::tf2xla::internal::IsInBridgeAcceptableDialects;
class SharedUtilsTest : public ::testing::Test {};
TEST_F(SharedUtilsTest, IsInFunctionalDialectPasses) {
MLIRContext context;
context.loadDialect<TensorFlowDialect>();
OpBuilder opBuilder(&context);
OperationState state(UnknownLoc::get(opBuilder.getContext()),
"tf.Const");
mlir::Operation* op = Operation::create(state);
bool result = IsInBridgeAcceptableDialects(op);
EXPECT_TRUE(result);
op->destroy();
}
TEST_F(SharedUtilsTest, IsInFunctionalDialectFails) {
MLIRContext context;
context.loadDialect<ChloDialect>();
OpBuilder opBuilder(&context);
OperationState state(UnknownLoc::get(opBuilder.getContext()),
"chlo.broadcast_add");
Operation* op = Operation::create(state);
bool result = IsInBridgeAcceptableDialects(op);
EXPECT_FALSE(result);
op->destroy();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/utils/dialect_detection_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2c29f282-0a54-4a41-a0c4-1696c6c8eba3 | cpp | tensorflow/tensorflow | inference_metrics_pass | tensorflow/compiler/mlir/tf2xla/internal/inference/inference_metrics_pass.cc | tensorflow/compiler/mlir/tf2xla/internal/inference/inference_metrics_pass_test.cc | #include <memory>
#include <string>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace mlir {
namespace tf2xla {
namespace internal {
auto* has_tpu_partitioned_call_streamz =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/internal/inference/tpu_partitioned_call",
"Whether the model has TPUPartitionedCallOp.",
"has_tpu_partitioned_call");
namespace {
#define GEN_PASS_DEF_INFERENCEMETRICSPASS
#include "tensorflow/compiler/mlir/tf2xla/internal/inference/inference_passes.h.inc"
class InferenceMetricsPass
: public impl::InferenceMetricsPassBase<InferenceMetricsPass> {
public:
void runOnOperation() override;
};
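// Increments the streamz counter with "true" or "false" depending on whether
// any function in the module contains a TPUPartitionedCallOp.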
void InferenceMetricsPass::runOnOperation() {
bool has_tpu_partitioned_call = false;
ModuleOp module = getOperation();
for (auto func_op : module.getOps<func::FuncOp>()) {
func_op->walk(
[&](TF::TPUPartitionedCallOp op) { has_tpu_partitioned_call = true; });
if (has_tpu_partitioned_call) break;
}
std::string has_tpu_partitioned_call_str =
has_tpu_partitioned_call ? "true" : "false";
has_tpu_partitioned_call_streamz->GetCell(has_tpu_partitioned_call_str)
->IncrementBy(1);
}
}
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
CreateInferenceMetricsPass() {
return std::make_unique<InferenceMetricsPass>();
}
}
}
} | #include <cstdint>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/inference/inference_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace tf2xla {
namespace internal {
namespace {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::mhlo::test::GetMlirModuleFromString;
using ::tensorflow::monitoring::testing::CellReader;
static constexpr char kHasTpuPartitionedCallStreamzName[] =
"/tensorflow/core/tf2xla/internal/inference/tpu_partitioned_call";
class InferenceMetricsPassTest : public ::testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addPass(CreateInferenceMetricsPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(InferenceMetricsPassTest, RecordsTrueForTPUPartitionedCallOp) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @tpu_partitioned_call_func(%arg0: tensor<?xi32>) -> (tensor<?xi32>) {
func.return %arg0 : tensor<?xi32>
}
func.func @main(%arg0: tensor<20xi32>, %arg1: tensor<?xi32>) -> tensor<*xi32> {
%2 = "tf.TPUPartitionedCall"(%arg0, %arg1) {f = @tpu_partitioned_call_func} : (tensor<20xi32>, tensor<?xi32>) -> tensor<*xi32>
func.return %2 : tensor<*xi32>
}
})";
CellReader<int64_t> error(kHasTpuPartitionedCallStreamzName);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(error.Delta("true"), 1);
EXPECT_EQ(error.Delta("false"), 0);
}
TEST_F(InferenceMetricsPassTest, RecordsFalseForNonTPUPartitionedCallOp) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kHasTpuPartitionedCallStreamzName);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(error.Delta("false"), 1);
EXPECT_EQ(error.Delta("true"), 0);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/inference/inference_metrics_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/inference/inference_metrics_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5652e31a-425e-4615-b061-8fea0c9b6ba5 | cpp | tensorflow/tensorflow | legalization_op_config | tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc | tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config_test.cc | #include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "llvm/ADT/DenseSet.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tpu_embedding_ops_registry.h"
namespace mlir {
namespace mhlo {
namespace {
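// Ops that are always legalized using the MLIR-based (MHLO) lowering patterns.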
const llvm::DenseSet<mlir::TypeID>& MlirAlwaysOps() {
static const llvm::DenseSet<mlir::TypeID>* ops = new llvm::DenseSet<
mlir::TypeID>{
TypeID::get<TF::FusedBatchNormV3Op>(),
TypeID::get<TF::FusedBatchNormGradV3Op>(),
TypeID::get<TF::XlaReduceScatterOp>(),
TypeID::get<TF::ModOp>(),
TypeID::get<TF::MatrixDiagPartV3Op>(),
TypeID::get<TF::AbsOp>(),
TypeID::get<TF::AtanOp>(),
TypeID::get<TF::AvgPool3DOp>(),
TypeID::get<TF::BiasAddGradOp>(),
TypeID::get<TF::CeilOp>(),
TypeID::get<TF::CheckNumericsOp>(),
TypeID::get<TF::CosOp>(),
TypeID::get<TF::TanOp>(),
TypeID::get<TF::DiagPartOp>(),
TypeID::get<TF::EinsumOp>(),
TypeID::get<TF::ExpOp>(),
TypeID::get<TF::Expm1Op>(),
TypeID::get<TF::FakeQuantWithMinMaxArgsOp>(),
TypeID::get<TF::FloorOp>(),
TypeID::get<TF::IFFTOp>(),
TypeID::get<TF::ImagOp>(),
TypeID::get<TF::IsFiniteOp>(),
TypeID::get<TF::IsInfOp>(),
TypeID::get<TF::IsNanOp>(),
TypeID::get<TF::LgammaOp>(),
TypeID::get<TF::Log1pOp>(),
TypeID::get<TF::LogSoftmaxOp>(),
TypeID::get<TF::MatrixBandPartOp>(),
TypeID::get<TF::MaxPool3DGradOp>(),
TypeID::get<TF::PreventGradientOp>(),
TypeID::get<TF::RandomShuffleOp>(),
TypeID::get<TF::RealOp>(),
TypeID::get<TF::ReciprocalOp>(),
TypeID::get<TF::ReluOp>(),
TypeID::get<TF::Relu6Op>(),
TypeID::get<TF::ReluGradOp>(),
TypeID::get<TF::RsqrtOp>(),
TypeID::get<TF::SelectOp>(),
TypeID::get<TF::SigmoidOp>(),
TypeID::get<TF::SignOp>(),
TypeID::get<TF::SoftmaxOp>(),
TypeID::get<TF::SqrtOp>(),
TypeID::get<TF::TanhOp>(),
TypeID::get<TF::XlaConvV2Op>(),
TypeID::get<TF::XlaDotOp>(),
TypeID::get<TF::XlaDotV2Op>(),
TypeID::get<TF::XlaDynamicSliceOp>(),
TypeID::get<TF::XlaEinsumOp>(),
TypeID::get<TF::XlaReduceWindowOp>(),
TypeID::get<TF::XlaReplicaIdOp>(),
TypeID::get<TF::XlaRngBitGeneratorOp>(),
TypeID::get<TF::XlaSelectAndScatterOp>(),
TypeID::get<TF::XlaSortOp>(),
TypeID::get<TF::XlaVariadicReduceV2Op>(),
TypeID::get<TF::XlaVariadicSortOp>(),
TypeID::get<TF::RiscAddOp>(),
TypeID::get<TF::RiscDotOp>(),
TypeID::get<TF::ConstOp>(),
TypeID::get<TF::AssertOp>(),
TypeID::get<TF::CrossReplicaSumOp>(),
TypeID::get<TF::InfeedDequeueTupleOp>(),
TypeID::get<TF::OutfeedEnqueueTupleOp>(),
TypeID::get<TF::XlaShardingOp>(),
TypeID::get<TF::IfRegionOp>(),
TypeID::get<TF::WhileRegionOp>(),
TypeID::get<TF::CaseRegionOp>(),
TypeID::get<TF::YieldOp>(),
};
return *ops;
}
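// Ops that may be lowered through the tf2xla kernel fallback; TPU embedding
// ops registered at runtime are added to the set below.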
bool IsOpTypeAllowedTf2XlaFallback(const TypeID& type_id) {
static auto* ops = [] {
llvm::SmallDenseSet<mlir::TypeID, 512>* ops_set = new llvm::SmallDenseSet<
mlir::TypeID, 512>{
TypeID::get<TF::AcoshOp>(),
TypeID::get<TF::AcosOp>(),
TypeID::get<TF::AddNOp>(),
TypeID::get<TF::AddV2Op>(),
TypeID::get<TF::AngleOp>(),
TypeID::get<TF::AdjustContrastv2Op>(),
TypeID::get<TF::AdjustHueOp>(),
TypeID::get<TF::AdjustSaturationOp>(),
TypeID::get<TF::ApproximateEqualOp>(),
TypeID::get<TF::ApproxTopKOp>(),
TypeID::get<TF::ArgMaxOp>(),
TypeID::get<TF::ArgMinOp>(),
TypeID::get<TF::AsinhOp>(),
TypeID::get<TF::AsinOp>(),
TypeID::get<TF::Atan2Op>(),
TypeID::get<TF::AtanhOp>(),
TypeID::get<TF::BatchMatMulV2Op>(),
TypeID::get<TF::BatchMatMulV3Op>(),
TypeID::get<TF::BatchToSpaceOp>(),
TypeID::get<TF::BesselI0eOp>(),
TypeID::get<TF::BesselI1eOp>(),
TypeID::get<TF::BetaincOp>(),
TypeID::get<TF::BiasAddOp>(),
TypeID::get<TF::BitwiseAndOp>(),
TypeID::get<TF::BitwiseOrOp>(),
TypeID::get<TF::BitwiseXorOp>(),
TypeID::get<TF::BucketizeOp>(),
TypeID::get<TF::CaseOp>(),
TypeID::get<TF::CastOp>(),
TypeID::get<TF::ClipByValueOp>(),
TypeID::get<TF::CholeskyOp>(),
TypeID::get<TF::CollectiveReduceV2Op>(),
TypeID::get<TF::ComplexAbsOp>(),
TypeID::get<TF::ConjugateTransposeOp>(),
TypeID::get<TF::ConcatV2Op>(),
TypeID::get<TF::ConvOp>(),
TypeID::get<TF::CoshOp>(),
TypeID::get<TF::CrossOp>(),
TypeID::get<TF::CumulativeLogsumexpOp>(),
TypeID::get<TF::DataFormatDimMapOp>(),
TypeID::get<TF::DataFormatVecPermuteOp>(),
TypeID::get<TF::DepthToSpaceOp>(),
TypeID::get<TF::DepthwiseConv2dNativeBackpropFilterOp>(),
TypeID::get<TF::DepthwiseConv2dNativeBackpropInputOp>(),
TypeID::get<TF::DiagOp>(),
TypeID::get<TF::DigammaOp>(),
TypeID::get<TF::DivNoNanOp>(),
TypeID::get<TF::DynamicPartitionOp>(),
TypeID::get<TF::EluGradOp>(),
TypeID::get<TF::EluOp>(),
TypeID::get<TF::EnsureShapeOp>(),
TypeID::get<TF::EqualOp>(),
TypeID::get<TF::ErfcOp>(),
TypeID::get<TF::ErfinvOp>(),
TypeID::get<TF::ErfOp>(),
TypeID::get<TF::ExtractImagePatchesOp>(),
TypeID::get<TF::FFT2DOp>(),
TypeID::get<TF::FFT3DOp>(),
TypeID::get<TF::FFTOp>(),
TypeID::get<TF::FakeParamOp>(),
TypeID::get<TF::FakeQuantWithMinMaxArgsGradientOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsGradientOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsPerChannelOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsPerChannelGradientOp>(),
TypeID::get<TF::FloorDivOp>(),
TypeID::get<TF::FloorModOp>(),
TypeID::get<TF::GetMinibatchesInCsrWithPhysicalReplicaOp>(),
TypeID::get<TF::GetMinibatchSplitsWithPhysicalReplicaOp>(),
TypeID::get<TF::GreaterOp>(),
TypeID::get<TF::HSVToRGBOp>(),
TypeID::get<TF::IFFT2DOp>(),
TypeID::get<TF::IFFT3DOp>(),
TypeID::get<TF::IRFFT2DOp>(),
TypeID::get<TF::IRFFT3DOp>(),
TypeID::get<TF::IgammaOp>(),
TypeID::get<TF::IgammacOp>(),
TypeID::get<TF::IgammaGradAOp>(),
TypeID::get<TF::InplaceAddOp>(),
TypeID::get<TF::InTopKV2Op>(),
TypeID::get<TF::InvertOp>(),
TypeID::get<TF::InvOp>(),
TypeID::get<TF::KthOrderStatisticOp>(),
TypeID::get<TF::LRNOp>(),
TypeID::get<TF::LRNGradOp>(),
TypeID::get<TF::LeakyReluGradOp>(),
TypeID::get<TF::LeakyReluOp>(),
TypeID::get<TF::LeftShiftOp>(),
TypeID::get<TF::LessOp>(),
TypeID::get<TF::ListDiffOp>(),
TypeID::get<TF::LogicalAndOp>(),
TypeID::get<TF::LogicalNotOp>(),
TypeID::get<TF::LogOp>(),
TypeID::get<TF::LowerBoundOp>(),
TypeID::get<TF::MakeUniqueOp>(),
TypeID::get<TF::MatMulOp>(),
TypeID::get<TF::MatrixDiagV3Op>(),
TypeID::get<TF::MatrixInverseOp>(),
TypeID::get<TF::MatrixSetDiagV3Op>(),
TypeID::get<TF::MatrixSolveOp>(),
TypeID::get<TF::MatrixTriangularSolveOp>(),
TypeID::get<TF::MaxPool3DGradGradOp>(),
TypeID::get<TF::MaxPoolGradOp>(),
TypeID::get<TF::MaxPoolGradGradOp>(),
TypeID::get<TF::MirrorPadOp>(),
TypeID::get<TF::MirrorPadGradOp>(),
TypeID::get<TF::MulOp>(),
TypeID::get<TF::MultinomialOp>(),
TypeID::get<TF::NdtriOp>(),
TypeID::get<TF::NegOp>(),
TypeID::get<TF::NextAfterOp>(),
TypeID::get<TF::NonMaxSuppressionV4Op>(),
TypeID::get<TF::NotEqualOp>(),
TypeID::get<TF::PadOp>(),
TypeID::get<TF::ParameterizedTruncatedNormalOp>(),
TypeID::get<TF::PlaceholderWithDefaultOp>(),
TypeID::get<TF::PolygammaOp>(),
TypeID::get<TF::PopulationCountOp>(),
TypeID::get<TF::PowOp>(),
TypeID::get<TF::QrOp>(),
TypeID::get<TF::QuantizeAndDequantizeOp>(),
TypeID::get<TF::QuantizeAndDequantizeV2Op>(),
TypeID::get<TF::QuantizeAndDequantizeV3Op>(),
TypeID::get<TF::QuantizeAndDequantizeV4Op>(),
TypeID::get<TF::RFFT2DOp>(),
TypeID::get<TF::RFFT3DOp>(),
TypeID::get<TF::RGBToHSVOp>(),
TypeID::get<TF::RandomUniformIntOp>(),
TypeID::get<TF::RandomUniformOp>(),
TypeID::get<TF::RealDivOp>(),
TypeID::get<TF::ReciprocalGradOp>(),
TypeID::get<TF::Relu6GradOp>(),
TypeID::get<TF::ResizeBilinearOp>(),
TypeID::get<TF::ResizeBilinearGradOp>(),
TypeID::get<TF::ResizeNearestNeighborOp>(),
TypeID::get<TF::ResizeNearestNeighborGradOp>(),
TypeID::get<TF::ReverseSequenceOp>(),
TypeID::get<TF::RightShiftOp>(),
TypeID::get<TF::RintOp>(),
TypeID::get<TF::RollOp>(),
TypeID::get<TF::RoundOp>(),
TypeID::get<TF::SegmentSumV2Op>(),
TypeID::get<TF::SegmentProdV2Op>(),
TypeID::get<TF::SegmentMinV2Op>(),
TypeID::get<TF::SegmentMaxV2Op>(),
TypeID::get<TF::SelectV2Op>(),
TypeID::get<TF::SelfAdjointEigV2Op>(),
TypeID::get<TF::SeluGradOp>(),
TypeID::get<TF::SeluOp>(),
TypeID::get<TF::SigmoidGradOp>(),
TypeID::get<TF::SinOp>(),
TypeID::get<TF::SliceOp>(),
TypeID::get<TF::SoftplusGradOp>(),
TypeID::get<TF::SoftsignGradOp>(),
TypeID::get<TF::SoftsignOp>(),
TypeID::get<TF::SpaceToBatchNDOp>(),
TypeID::get<TF::SpaceToBatchOp>(),
TypeID::get<TF::SpaceToDepthOp>(),
TypeID::get<TF::SparseToDenseOp>(),
TypeID::get<TF::SquareOp>(),
TypeID::get<TF::StatelessMultinomialOp>(),
TypeID::get<TF::StatelessParameterizedTruncatedNormalOp>(),
TypeID::get<TF::StatelessRandomGetAlgOp>(),
TypeID::get<TF::StatelessRandomGetKeyCounterOp>(),
TypeID::get<TF::StatelessRandomGetKeyCounterAlgOp>(),
TypeID::get<TF::StatelessRandomNormalOp>(),
TypeID::get<TF::StatelessRandomNormalV2Op>(),
TypeID::get<TF::StatelessRandomUniformOp>(),
TypeID::get<TF::StatelessRandomUniformFullIntOp>(),
TypeID::get<TF::StatelessRandomUniformFullIntV2Op>(),
TypeID::get<TF::StatelessRandomUniformV2Op>(),
TypeID::get<TF::StatelessRandomUniformIntOp>(),
TypeID::get<TF::StatelessRandomUniformIntV2Op>(),
TypeID::get<TF::StatelessTruncatedNormalOp>(),
TypeID::get<TF::StatelessTruncatedNormalV2Op>(),
TypeID::get<TF::StoreMinibatchStatisticsInFdoOp>(),
TypeID::get<TF::StridedSliceOp>(),
TypeID::get<TF::SubOp>(),
TypeID::get<TF::SvdOp>(),
TypeID::get<TF::TanOp>(),
TypeID::get<TF::TensorScatterAddOp>(),
TypeID::get<TF::TensorScatterSubOp>(),
TypeID::get<TF::TPUEmbeddingActivationsOp>(),
TypeID::get<TF::TopKUniqueOp>(),
TypeID::get<TF::TopKWithUniqueOp>(),
TypeID::get<TF::TransposeOp>(),
TypeID::get<TF::TridiagonalSolveOp>(),
TypeID::get<TF::TridiagonalMatMulOp>(),
TypeID::get<TF::TruncateDivOp>(),
TypeID::get<TF::TruncatedNormalOp>(),
TypeID::get<TF::TruncateModOp>(),
TypeID::get<TF::UniqueOp>(),
TypeID::get<TF::UnpackOp>(),
TypeID::get<TF::UpperBoundOp>(),
TypeID::get<TF::WhereOp>(),
TypeID::get<TF::XlaSendTPUEmbeddingGradientsOp>(),
TypeID::get<TF::XlaBroadcastHelperOp>(),
TypeID::get<TF::XlaCallModuleOp>(),
TypeID::get<TF::XlaCustomCallV2Op>(),
TypeID::get<TF::XlaDynamicUpdateSliceOp>(),
TypeID::get<TF::XlaKeyValueSortOp>(),
TypeID::get<TF::XlaPadOp>(),
TypeID::get<TF::XlaSetBoundOp>(),
TypeID::get<TF::XlaSetDynamicDimensionSizeOp>(),
TypeID::get<TF::XlaSparseCoreAdagradMomentumOp>(),
TypeID::get<TF::XlaSparseCoreAdagradOp>(),
TypeID::get<TF::XlaSparseCoreAdamOp>(),
TypeID::get<TF::XlaSparseCoreFtrlOp>(),
TypeID::get<TF::XlaSparseCoreSgdOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithAdagradAndCsrInputOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdagradMomentumAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithAdamAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithFtrlAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithSgdAndCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulWithCsrInputOp>(),
TypeID::get<TF::XlaSparseDenseMatmulWithStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdagradAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdagradMomentumAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithAdamAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithFtrlAndStaticBufferSizeOp>(),
TypeID::get<
TF::XlaSparseDenseMatmulGradWithSgdAndStaticBufferSizeOp>(),
TypeID::get<TF::XlaSparseDenseMatmulGradWithCsrInputOp>(),
TypeID::get<TF::XlaSpmdFullToShardShapeOp>(),
TypeID::get<TF::XlaSpmdShardToFullShapeOp>(),
TypeID::get<TF::XlaSvdOp>(),
};
for (auto op_type_id :
TF::TPUEmbeddingOpsRegistry::Global().GetOpsTypeIds()) {
ops_set->insert(op_type_id);
}
return ops_set;
}();
return ops->count(type_id);
}
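// Ops for which the tf2xla kernel fallback lowering is preferred.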
bool IsOpTypeAllowedTf2XlaPreferred(const TypeID& type_id) {
static auto* ops =
new llvm::SmallDenseSet<mlir::TypeID, 512>{
TypeID::get<TF::AllOp>(),
TypeID::get<TF::AllToAllOp>(),
TypeID::get<TF::AnyOp>(),
TypeID::get<TF::AvgPoolOp>(),
TypeID::get<TF::AvgPool3DGradOp>(),
TypeID::get<TF::AvgPoolGradOp>(),
TypeID::get<TF::BatchToSpaceNDOp>(),
TypeID::get<TF::BitcastOp>(),
TypeID::get<TF::BroadcastToOp>(),
TypeID::get<TF::CollectivePermuteOp>(),
TypeID::get<TF::ComplexOp>(),
TypeID::get<TF::ConcatV2Op>(),
TypeID::get<TF::ConjOp>(),
TypeID::get<TF::Conv2DOp>(),
TypeID::get<TF::Conv2DBackpropFilterOp>(),
TypeID::get<TF::Conv2DBackpropInputOp>(),
TypeID::get<TF::Conv3DOp>(),
TypeID::get<TF::Conv3DBackpropFilterV2Op>(),
TypeID::get<TF::Conv3DBackpropInputV2Op>(),
TypeID::get<TF::CumprodOp>(),
TypeID::get<TF::CumsumOp>(),
TypeID::get<TF::DepthwiseConv2dNativeOp>(),
TypeID::get<TF::DivOp>(),
TypeID::get<TF::DynamicStitchOp>(),
TypeID::get<TF::_EagerConstOp>(),
TypeID::get<TF::EmptyOp>(),
TypeID::get<TF::ExpandDimsOp>(),
TypeID::get<TF::FakeQuantWithMinMaxVarsOp>(),
TypeID::get<TF::FillOp>(),
TypeID::get<TF::FusedBatchNormOp>(),
TypeID::get<TF::FusedBatchNormGradOp>(),
TypeID::get<TF::FusedBatchNormGradV2Op>(),
TypeID::get<TF::FusedBatchNormV2Op>(),
TypeID::get<TF::_FusedConv2DOp>(),
TypeID::get<TF::GatherNdOp>(),
TypeID::get<TF::GatherV2Op>(),
TypeID::get<TF::GreaterEqualOp>(),
TypeID::get<TF::IdentityOp>(),
TypeID::get<TF::IdentityNOp>(),
TypeID::get<TF::InplaceUpdateOp>(),
TypeID::get<TF::InvertPermutationOp>(),
TypeID::get<TF::IRFFTOp>(),
TypeID::get<TF::L2LossOp>(),
TypeID::get<TF::LegacyCallOp>(),
TypeID::get<TF::LessEqualOp>(),
TypeID::get<TF::LinSpaceOp>(),
TypeID::get<TF::LogicalOrOp>(),
TypeID::get<TF::MaxOp>(),
TypeID::get<TF::MaximumOp>(),
TypeID::get<TF::MaxPoolOp>(),
TypeID::get<TF::MaxPool3DOp>(),
TypeID::get<TF::MeanOp>(),
TypeID::get<TF::MinOp>(),
TypeID::get<TF::MinimumOp>(),
TypeID::get<TF::MulNoNanOp>(),
TypeID::get<TF::OneHotOp>(),
TypeID::get<TF::OnesLikeOp>(),
TypeID::get<TF::PackOp>(),
TypeID::get<TF::PadV2Op>(),
TypeID::get<TF::ParallelDynamicStitchOp>(),
TypeID::get<TF::PartitionedCallOp>(),
TypeID::get<TF::ProdOp>(),
TypeID::get<TF::QrOp>(),
TypeID::get<TF::RandomStandardNormalOp>(),
TypeID::get<TF::RandomUniformOp>(),
TypeID::get<TF::RangeOp>(),
TypeID::get<TF::ReshapeOp>(),
TypeID::get<TF::ReverseV2Op>(),
TypeID::get<TF::RFFTOp>(),
TypeID::get<TF::RsqrtGradOp>(),
TypeID::get<TF::ScatterNdOp>(),
TypeID::get<TF::ShapeOp>(),
TypeID::get<TF::SinhOp>(),
TypeID::get<TF::SizeOp>(),
TypeID::get<TF::SliceOp>(),
TypeID::get<TF::SoftmaxCrossEntropyWithLogitsOp>(),
TypeID::get<TF::SoftplusOp>(),
TypeID::get<TF::SparseMatMulOp>(),
TypeID::get<TF::SparseSoftmaxCrossEntropyWithLogitsOp>(),
TypeID::get<TF::SplitOp>(),
TypeID::get<TF::SplitVOp>(),
TypeID::get<TF::SqrtGradOp>(),
TypeID::get<TF::SquaredDifferenceOp>(),
TypeID::get<TF::SqueezeOp>(),
TypeID::get<TF::StatelessParameterizedTruncatedNormalOp>(),
TypeID::get<TF::StatefulPartitionedCallOp>(),
TypeID::get<TF::StopGradientOp>(),
TypeID::get<TF::StridedSliceOp>(),
TypeID::get<TF::StridedSliceGradOp>(),
TypeID::get<TF::SumOp>(),
TypeID::get<TF::TanhGradOp>(),
TypeID::get<TF::TensorScatterUpdateOp>(),
TypeID::get<TF::TileOp>(),
TypeID::get<TF::TopKV2Op>(),
TypeID::get<TF::_UnaryOpsCompositionOp>(),
TypeID::get<TF::UnsortedSegmentMaxOp>(),
TypeID::get<TF::UnsortedSegmentMinOp>(),
TypeID::get<TF::UnsortedSegmentProdOp>(),
TypeID::get<TF::UnsortedSegmentSumOp>(),
TypeID::get<TF::XdivyOp>(),
TypeID::get<TF::XlaSendTPUEmbeddingGradientsOp>(),
TypeID::get<TF::XlaAllReduceOp>(),
TypeID::get<TF::XlaGatherOp>(),
TypeID::get<TF::Xlog1pyOp>(),
TypeID::get<TF::XlogyOp>(),
TypeID::get<TF::ZerosLikeOp>(),
TypeID::get<TF::ZetaOp>(),
};
return ops->contains(type_id);
}
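// Ops whose lowering produces dynamically shaped results that the dynamic
// padder must handle.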
const llvm::DenseSet<mlir::TypeID>& DynamicTensorflowOps() {
static const llvm::DenseSet<mlir::TypeID>* ops =
new llvm::DenseSet<mlir::TypeID>{
TypeID::get<mlir::TF::DynamicPartitionOp>(),
TypeID::get<mlir::TF::UniqueOp>(),
TypeID::get<mlir::TF::WhereOp>(),
TypeID::get<mlir::TF::XlaSetDynamicDimensionSizeOp>(),
};
return *ops;
}
}
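// Returns true if the op type can be legalized through the tf2xla kernel
// fallback (either allowed or preferred).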
bool HasTf2XlaFallback(const TypeID& type_id) {
return IsOpTypeAllowedTf2XlaFallback(type_id) ||
IsOpTypeAllowedTf2XlaPreferred(type_id);
}
bool IsOpLegalizedWithMlir(Operation& op) {
auto abstractOp = op.getRegisteredInfo();
if (!abstractOp) return false;
return IsTypeLegalizedWithMlir(abstractOp->getTypeID());
}
bool IsTypeLegalizedWithMlir(const TypeID& type_id) {
return MlirAlwaysOps().contains(type_id);
}
bool IsOpAllowedTf2xlaFallback(const TypeID& type_id) {
return IsOpTypeAllowedTf2XlaFallback(type_id);
}
bool IsOpAllowedTf2xlaPreferred(const TypeID& type_id) {
return IsOpTypeAllowedTf2XlaPreferred(type_id);
}
bool IsDynamicPadderOp(const TypeID& type_id) {
return DynamicTensorflowOps().contains(type_id);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include <optional>
#include <set>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/TypeID.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
using func::FuncOp;
using mlir::ModuleOp;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<3xi64> {tf._user_specified_name = "resource", tf.aliasing_output = 3 : i64}) -> () attributes {tf.entry_function = {control_outputs = "stateful_normal/RngReadAndSkip,stateful_uniform/RngReadAndSkip,stateful_uniform_full_int/RngReadAndSkip", inputs = "stateful_normal_rngreadandskip_resource", outputs = "identity_RetVal,identity_1_RetVal,identity_2_RetVal"}} {
%0:3 = "tf.Unpack"(%arg0) {axis = 0 : i64} : (tensor<3xi64>) -> (tensor<i64>, tensor<i64>, tensor<i64>)
return
}
})";
class LegalizationOpConfigTest : public ::testing::Test {
public:
absl::Status CreateMlirModule(std::string module_string = kMlirModuleStr) {
TF_ASSIGN_OR_RETURN(
module_, test::GetMlirModuleFromString(module_string, &context_));
context_.loadAllAvailableDialects();
return absl::OkStatus();
}
absl::StatusOr<FuncOp> GetMain() {
func::FuncOp main = module_->lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return absl::NotFoundError("Could not find main function");
}
return main;
}
protected:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
};
TEST_F(LegalizationOpConfigTest, FailsWithExpectsLegalizationWithMlir) {
TF_EXPECT_OK(CreateMlirModule());
EXPECT_FALSE(IsOpLegalizedWithMlir(*module_->getOperation()));
}
TEST_F(LegalizationOpConfigTest, ExpectsFalseForNonMlirOps) {
TF_EXPECT_OK(CreateMlirModule());
TF_ASSERT_OK_AND_ASSIGN(FuncOp main, GetMain());
main.walk([&](Operation* op) { EXPECT_FALSE(IsOpLegalizedWithMlir(*op)); });
}
TEST_F(LegalizationOpConfigTest, ExpectsTrueForMlirTypeID) {
EXPECT_TRUE(IsTypeLegalizedWithMlir(TypeID::get<TF::ModOp>()));
EXPECT_FALSE(HasTf2XlaFallback(TypeID::get<TF::ModOp>()));
EXPECT_FALSE(IsOpAllowedTf2xlaFallback(TypeID::get<TF::ModOp>()));
EXPECT_FALSE(IsOpAllowedTf2xlaPreferred(TypeID::get<TF::ModOp>()));
}
TEST_F(LegalizationOpConfigTest, ExpectsTrueForTF2XLATypeID) {
EXPECT_TRUE(HasTf2XlaFallback(TypeID::get<TF::AllOp>()));
EXPECT_TRUE(IsOpAllowedTf2xlaPreferred(TypeID::get<TF::AllOp>()));
EXPECT_FALSE(IsTypeLegalizedWithMlir(TypeID::get<TF::AllOp>()));
}
TEST_F(LegalizationOpConfigTest, ChecksDynamicPadderOps) {
EXPECT_TRUE(
IsDynamicPadderOp(TypeID::get<TF::XlaSetDynamicDimensionSizeOp>()));
EXPECT_FALSE(IsDynamicPadderOp(TypeID::get<TF::ConstOp>()));
}
TEST_F(LegalizationOpConfigTest, CountLoweringsSet) {
int mlir_lowering_count = 0;
int tf2xla_fallback_count = 0;
int non_categorized_count = 0;
DialectRegistry dialect_registry;
dialect_registry.insert<mlir::TF::TensorFlowDialect>();
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
for (auto operation : context.getRegisteredOperations()) {
if (IsTypeLegalizedWithMlir(operation.getTypeID())) {
mlir_lowering_count++;
} else if (HasTf2XlaFallback(operation.getTypeID())) {
tf2xla_fallback_count++;
} else {
non_categorized_count++;
}
}
EXPECT_EQ(mlir_lowering_count, 67);
EXPECT_EQ(tf2xla_fallback_count, 323);
EXPECT_EQ(non_categorized_count, 430);
}
TEST_F(LegalizationOpConfigTest, CountTypesWhichHaveBothMlirAndTf2xlaFallback) {
int double_lowering_count = 0;
DialectRegistry dialect_registry;
dialect_registry.insert<mlir::TF::TensorFlowDialect>();
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
for (auto operation : context.getRegisteredOperations()) {
if (IsTypeLegalizedWithMlir(operation.getTypeID()) &&
HasTf2XlaFallback(operation.getTypeID())) {
double_lowering_count++;
}
}
EXPECT_EQ(double_lowering_count, 1);
}
TEST_F(LegalizationOpConfigTest, CountAllMlirLoweringPatterns) {
DialectRegistry dialect_registry;
mlir::RegisterCommonToolingDialects(dialect_registry);
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
RewritePatternSet mlir_legalize_lower_patterns(&context);
PopulateLegalizeTfPatterns(&context, &mlir_legalize_lower_patterns);
int mlir_only_patterns = 0;
for (auto& pattern : mlir_legalize_lower_patterns.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
if (!pat_op_name) {
continue;
}
if (!HasTf2XlaFallback(pat_op_name->getRegisteredInfo()->getTypeID())) {
mlir_only_patterns++;
}
}
EXPECT_EQ(mlir_only_patterns, 63);
}
TEST_F(LegalizationOpConfigTest, MlirLoweringWithoutXlaKernel) {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
std::vector<const tensorflow::KernelDef*> kernel_defs =
tensorflow::XlaOpRegistry::DeviceKernels(
tensorflow::DEVICE_CPU_XLA_JIT,
true);
std::set<std::string> xla_op_kernels;
for (auto kernel_def : kernel_defs) {
std::string tf_name = "tf." + kernel_def->op();
xla_op_kernels.insert(tf_name);
}
DialectRegistry dialect_registry;
mlir::RegisterCommonToolingDialects(dialect_registry);
MLIRContext context(dialect_registry);
context.loadAllAvailableDialects();
RewritePatternSet mlir_legalize_lower_patterns(&context);
PopulateLegalizeTfPatterns(&context, &mlir_legalize_lower_patterns);
int mlir_without_xla_count = 0;
for (auto& pattern : mlir_legalize_lower_patterns.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
if (!pat_op_name) {
continue;
}
if (xla_op_kernels.find(pat_op_name->getStringRef().str()) ==
xla_op_kernels.end()) {
mlir_without_xla_count++;
}
}
EXPECT_EQ(mlir_without_xla_count, 13);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
aa86dfbf-cb18-4df6-841b-8d5d3aa12dbf | cpp | tensorflow/tensorflow | tf2xla_rewriter | tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc | tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc | #include "tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h"
#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tpu_embedding_ops_registry.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "tensorflow/compiler/tf2xla/xla_expression.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_function_importer.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/service/hlo.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/common_runtime/process_function_library_runtime.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
namespace {
using ::mlir::ModuleOp;
using ::tensorflow::Tensor;
using ::tsl::StatusOr;
using ::xla::XlaComputation;
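// Name mapper that rewrites characters which are invalid in NodeDef names;
// ';' is replaced with '.'.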
class OpOrArgLocNameMapperWithoutInvalidCharacters
: public tensorflow::OpOrArgLocNameMapper {
public:
OpOrArgLocNameMapperWithoutInvalidCharacters() = default;
~OpOrArgLocNameMapperWithoutInvalidCharacters() override = default;
protected:
std::string GetName(tensorflow::OpOrVal op_or_val) override {
std::string name = OpOrArgLocNameMapper::GetName(op_or_val);
return absl::StrReplaceAll(name, {{";", "."}});
}
};
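// Creates a device manager that owns a single XlaCompilationDevice of the
// requested device type.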
static std::unique_ptr<tensorflow::StaticDeviceMgr> CreateDeviceMgr(
const std::string& device_type) {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
auto device = std::make_unique<tensorflow::XlaCompilationDevice>(
tensorflow::SessionOptions(), tensorflow::DeviceType(device_type));
return std::make_unique<tensorflow::StaticDeviceMgr>(std::move(device));
}
bool RootInstructionIsTuple(const xla::HloModule& hlo_module) {
xla::HloInstruction* root_instruction =
hlo_module.entry_computation()->root_instruction();
return root_instruction->opcode() == xla::HloOpcode::kTuple;
}
};
LogicalResult Tf2XlaRewriter::RewriteOp(Operation* op,
PatternRewriter& rewriter,
const std::string& device_type) {
Tf2XlaRewriter tf2xla_rewriter(op, rewriter, device_type);
return tf2xla_rewriter.LegalizeOp();
}
Tf2XlaRewriter::Tf2XlaRewriter(Operation* op, PatternRewriter& rewriter,
const std::string& device_type)
: op_(op),
device_type_(device_type),
rewriter_(rewriter),
name_mapper_(
std::make_unique<OpOrArgLocNameMapperWithoutInvalidCharacters>()),
context_(nullptr),
xla_builder_(op_->getName().getStringRef().str()) {}
Tf2XlaRewriter::~Tf2XlaRewriter() {
if (context_) context_->Unref();
}
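// Imports the XlaComputation produced by the tf2xla kernel back into the
// enclosing MLIR module as MHLO and returns the root tuple; the entry
// computation must be rooted at a tuple and take one parameter per op operand.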
absl::StatusOr<mhlo::TupleOp> Tf2XlaRewriter::ImportXlaComputation(
XlaComputation& computation) {
xla::DebugOptions debug_options;
TF_ASSIGN_OR_RETURN(auto hlo_module_config,
xla::HloModule::CreateModuleConfigFromProto(
computation.proto(), debug_options));
TF_ASSIGN_OR_RETURN(
std::unique_ptr<xla::HloModule> hlo_module,
xla::HloModule::CreateFromProto(computation.proto(), hlo_module_config));
if (!RootInstructionIsTuple(*hlo_module)) {
return tsl::errors::InvalidArgument("Imported XLA Root is not a tuple op");
}
if (op_->getNumOperands() !=
hlo_module->entry_computation()->num_parameters()) {
return tsl::errors::InvalidArgument(
"Entry computation does not have equal number of parameters to op "
"operands");
}
ModuleOp mlir_module = op_->getParentOfType<ModuleOp>();
mlir::OpBuilder builder(op_);
mlir::SymbolTable symbol_table(mlir_module);
llvm::SmallVector<mlir::Value> arguments;
for (int i = 0; i < op_->getNumOperands(); i++) {
arguments.push_back(op_->getOperand(i));
}
TF_ASSIGN_OR_RETURN(
mlir::Value root_value,
xla::HloFunctionImporter::ImportInstructions(
*hlo_module->entry_computation(), arguments, symbol_table, &builder));
mhlo::TupleOp root_tuple =
mlir::dyn_cast_or_null<mhlo::TupleOp>(root_value.getDefiningOp());
if (!root_tuple) {
return tsl::errors::InvalidArgument(
"Imported XLA Root Value is not a tuple op");
}
return root_tuple;
}
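// Sets up the XlaContext, compilation device, step container, and function
// library runtime used to run the tf2xla kernel.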
LogicalResult Tf2XlaRewriter::PrepareParams() {
context_ = new tensorflow::XlaContext(nullptr, &xla_builder_,
nullptr);
context_->Ref();
device_mgr_ = CreateDeviceMgr(device_type_);
if (!device_mgr_) return failure();
device_ = device_mgr_->ListDevices().front();
params_.device = device_;
params_.resource_manager = device_->resource_manager();
auto cleanup = [](const std::string& name) {};
step_container_ = std::make_unique<tensorflow::ScopedStepContainer>(
0, cleanup);
absl::Status status = step_container_->Create(
device_->resource_manager(),
tensorflow::XlaContext::kXlaContextResourceName, context_);
if (!status.ok()) {
return emitRemark(op_->getLoc())
<< "failed to create XlaContext resource: " << status.ToString();
}
params_.step_container = step_container_.get();
absl::StatusOr<int64_t> version_or = tensorflow::GetTfGraphProducerVersion(
op_->getParentOfType<mlir::ModuleOp>());
if (!version_or.ok()) {
return emitError(op_->getLoc()) << version_or.status().ToString();
}
flib_def_ = std::make_unique<tensorflow::FunctionLibraryDefinition>(
tensorflow::OpRegistry::Global(), tensorflow::FunctionDefLibrary());
pflr_ = std::make_unique<tensorflow::ProcessFunctionLibraryRuntime>(
device_mgr_.get(), tensorflow::Env::Default(), nullptr,
version_or.value(), flib_def_.get(), tensorflow::OptimizerOptions());
params_.function_library = pflr_->GetFLR(device_->name());
return success();
}
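// A type is bounded if it is ranked and every dynamic dimension has an upper
// bound in its encoding.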
bool IsBounded(Type ty) {
auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty);
if (!ranked_ty) return false;
if (ranked_ty.hasStaticShape()) return true;
auto encoding =
mlir::dyn_cast_or_null<TypeExtensionsAttr>(ranked_ty.getEncoding());
if (!encoding) return false;
for (int i = 0; i < ranked_ty.getRank(); ++i) {
if (ranked_ty.isDynamicDim(i) &&
encoding.getBounds()[i] == ShapedType::kDynamic) {
return false;
}
}
return true;
}
bool HasSymbolRefAttr(Operation* op) {
for (const auto& attr : op->getAttrs()) {
Attribute attr_value = attr.getValue();
if (mlir::isa<SymbolRefAttr>(attr_value)) {
return true;
} else if (auto array_attr = mlir::dyn_cast<ArrayAttr>(attr_value)) {
if (!array_attr.empty() &&
mlir::isa<SymbolRefAttr>(*array_attr.begin())) {
return true;
}
}
}
return false;
}
LogicalResult Tf2XlaRewriter::PrepareKernelInputs(
const llvm::SmallDenseSet<int>& required_consts,
std::vector<tensorflow::XlaExpression>& expressions,
std::vector<tensorflow::Tensor>& tensors,
std::vector<tensorflow::TensorValue>& inputs) {
for (auto it : llvm::enumerate(op_->getOperands())) {
Value operand = it.value();
size_t idx = it.index();
tensorflow::XlaExpression expr = GetExprForOperand(operand, op_, idx);
tensorflow::XlaExpression::Kind kind = expr.kind();
if (kind == tensorflow::XlaExpression::Kind::kInvalid) return failure();
expressions.push_back(expr);
if (!tensorflow::DataTypeCanUseMemcpy(expr.dtype())) {
return op_->emitRemark()
<< "skipping legalization due to unsupported type "
<< operand.getType();
}
auto shape_or = expr.GetShape();
if (!shape_or.ok()) {
return op_->emitRemark()
<< "failed to get shape for expression. " << expr.HumanString();
}
tensors.emplace_back(
device_->GetAllocator(tensorflow::AllocatorAttributes()), expr.dtype(),
shape_or.value());
tensorflow::Tensor& tensor = tensors.back();
tensorflow::XlaExpression::AssignExpressionToTensor(expr, &tensor);
inputs.emplace_back(&tensor);
}
return success();
}
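// Converts the TF op to a NodeDef, executes the matching tf2xla kernel on an
// XlaCompilationDevice, and replaces the op with the results imported back as
// MHLO.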
LogicalResult Tf2XlaRewriter::LegalizeOp() {
for (Type ty : op_->getOperandTypes()) {
auto ranked_ty = mlir::dyn_cast<ShapedType>(ty);
if (!IsBounded(ranked_ty)) {
return op_->emitRemark()
<< "lowering requires bounded tensor operands " << ranked_ty;
}
}
if (HasSymbolRefAttr(op_)) {
return op_->emitRemark() << "ops with symbol references are not supported";
}
auto nodedef_or = tensorflow::ConvertTFDialectOpToNodeDef(
op_, name_mapper_->GetUniqueName(op_),
true);
if (!nodedef_or.ok()) {
return op_->emitRemark() << "failed to convert op to NodeDef: "
<< nodedef_or.status().ToString();
}
if (failed(PrepareParams())) return failure();
std::shared_ptr<const tensorflow::NodeProperties> props;
absl::Status status = tensorflow::NodeProperties::CreateFromNodeDef(
*nodedef_or.value(),
params_.function_library->GetFunctionLibraryDefinition(), &props);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to create NodeProperties: " << status.ToString();
}
tensorflow::OpKernel* op_kernel_raw;
status = params_.function_library->CreateKernel(props, &op_kernel_raw);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to create tf2xla kernel: " << status.ToString();
}
auto op_kernel = absl::WrapUnique(op_kernel_raw);
std::vector<int> required_constants;
status = tensorflow::XlaOpRegistry::CompileTimeConstantInputs(
*op_kernel, &required_constants);
if (!status.ok()) {
return op_->emitRemark()
<< "failed to compute required constants: " << status.ToString();
}
llvm::SmallDenseSet<int> required_consts;
required_consts.insert(required_constants.begin(), required_constants.end());
std::vector<tensorflow::XlaExpression> expressions;
std::vector<tensorflow::Tensor> tensors;
std::vector<tensorflow::TensorValue> inputs;
expressions.reserve(op_->getNumOperands());
tensors.reserve(op_->getNumOperands());
inputs.reserve(op_->getNumOperands());
if (failed(
PrepareKernelInputs(required_consts, expressions, tensors, inputs)))
return failure();
params_.inputs = inputs;
params_.op_kernel = op_kernel.get();
llvm::SmallVector<tensorflow::AllocatorAttributes, 4> output_attr(
op_->getNumResults());
params_.output_attr_array = output_attr.data();
tensorflow::OpKernelContext op_context(¶ms_, op_->getNumResults());
device_->Compute(params_.op_kernel, &op_context);
status = op_context.status();
if (!status.ok()) {
return op_->emitRemark()
<< "compilation to HLO failed: " << status.ToString();
}
if (failed(VerifyOpResults(op_context))) return failure();
absl::StatusOr<mhlo::TupleOp> tuple_result_or_status =
CompileWithHloImporter(op_context);
if (!tuple_result_or_status.ok()) {
return op_->emitRemark() << tuple_result_or_status.status().ToString();
}
mhlo::TupleOp tuple_result = tuple_result_or_status.value();
llvm::SmallVector<Value> output_values;
if (failed(GetKernelOutputs(op_context, tuple_result, output_values))) {
return failure();
}
rewriter_.replaceOp(op_, output_values);
return success();
}
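// Tuples the kernel's XlaOp outputs, builds the XlaComputation, and imports
// it into the module via the HLO importer.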
absl::StatusOr<mhlo::TupleOp> Tf2XlaRewriter::CompileWithHloImporter(
tensorflow::OpKernelContext& op_context) {
std::vector<xla::XlaOp> output_values;
for (int i = 0, e = op_->getNumResults(); i < e; i++) {
tensorflow::Tensor* output = op_context.mutable_output(i);
const tensorflow::XlaExpression* expr =
tensorflow::XlaExpression::CastExpressionFromTensor(*output);
output_values.push_back(expr->AsXlaOp(&xla_builder_));
}
absl::Span<const xla::XlaOp> return_values(output_values);
xla::XlaOp root_value = xla::Tuple(&xla_builder_, return_values);
TF_ASSIGN_OR_RETURN(XlaComputation computation,
xla_builder_.Build(root_value,
false));
return ImportXlaComputation(computation);
}
mlir::LogicalResult Tf2XlaRewriter::VerifyOpResults(
tensorflow::OpKernelContext& op_context) {
for (int i = 0, e = op_->getNumResults(); i < e; i++) {
tensorflow::Tensor* output = op_context.mutable_output(i);
const tensorflow::XlaExpression* expr =
tensorflow::XlaExpression::CastExpressionFromTensor(*output);
if (expr->kind() != tensorflow::XlaExpression::Kind::kXlaOp &&
expr->kind() != tensorflow::XlaExpression::Kind::kConstant) {
return op_->emitRemark(absl::StrCat(
"expects XlaExpression of kind kXlaOp or kConstant in compiled "
"output index ",
i));
}
}
return success();
}
mlir::LogicalResult Tf2XlaRewriter::UnpackTupleResults(
mhlo::TupleOp tuple_result, llvm::SmallVector<Value>& outputs) {
if (tuple_result->getNumOperands() != op_->getNumResults()) {
return op_->emitRemark() << "Translated TF2XLA tuple has different "
"number of results than original op";
}
for (int i = 0; i < tuple_result->getNumOperands(); i++) {
outputs.push_back(tuple_result->getOperand(i));
}
tuple_result.getOperation()->erase();
return success();
}
mlir::LogicalResult Tf2XlaRewriter::GetKernelOutputs(
tensorflow::OpKernelContext& op_context, mhlo::TupleOp tuple_results,
llvm::SmallVector<Value>& outputs) {
outputs.reserve(op_->getNumResults());
return UnpackTupleResults(tuple_results, outputs);
}
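// Wraps an operand as an XlaExpression: constant-producing operands become
// literal constants, everything else becomes an xla::Parameter of the builder.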
tensorflow::XlaExpression Tf2XlaRewriter::GetExprForOperand(
Value operand, Operation* op, int64_t operand_index) {
ElementsAttr const_attr;
auto defining_op = operand.getDefiningOp();
::xla::XlaOp xla_op = xla::Parameter(&xla_builder_, operand_index,
xla::TypeToShape(operand.getType()),
std::to_string(operand_index));
if (defining_op && matchPattern(defining_op, m_Constant(&const_attr))) {
tensorflow::Tensor tensor;
auto status = tensorflow::ConvertToTensor(const_attr, &tensor);
if (!status.ok()) {
op->emitRemark() << "skipping legalization due to failed const conversion"
<< status.ToString();
return tensorflow::XlaExpression::Invalid();
}
return tensorflow::XlaExpression::Constant(tensor);
}
tensorflow::DataType dtype;
auto status = tensorflow::ConvertToDataType(operand.getType(), &dtype);
if (!status.ok()) {
op->emitRemark() << "skipping legalization due to " << status.ToString();
return tensorflow::XlaExpression::Invalid();
}
return tensorflow::XlaExpression::XlaOp(xla_op, dtype);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace mhlo {
using ::mlir::LogicalResult;
using ::mlir::ModuleOp;
using ::mlir::OpBuilder;
using ::mlir::Operation;
using ::mlir::func::FuncOp;
using ::tsl::Status;
using ::tsl::StatusOr;
using ::xla::ReplicaGroup;
using ::xla::ShapeUtil;
using ::xla::XlaBuilder;
using ::xla::XlaComputation;
using ::xla::XlaOp;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<3xi64> {tf._user_specified_name = "resource", tf.aliasing_output = 3 : i64}) -> () attributes {tf.entry_function = {control_outputs = "stateful_normal/RngReadAndSkip,stateful_uniform/RngReadAndSkip,stateful_uniform_full_int/RngReadAndSkip", inputs = "stateful_normal_rngreadandskip_resource", outputs = "identity_RetVal,identity_1_RetVal,identity_2_RetVal"}} {
%0:3 = "tf.Unpack"(%arg0) {axis = 0 : i64} : (tensor<3xi64>) -> (tensor<i64>, tensor<i64>, tensor<i64>)
return
}
})";
XlaComputation GetTestXlaComputation() {
XlaBuilder xla_builder("test");
auto param =
Parameter(&xla_builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a");
XlaOp add = xla::Add(param, xla::ConstantR0<float>(&xla_builder, 2.0));
std::vector<XlaOp> tuple_values;
tuple_values.push_back(add);
xla::Tuple(&xla_builder, tuple_values);
return xla_builder.Build().value();
}
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& other_builder)
: mlir::PatternRewriter(other_builder) {}
~EmptyPatternRewriter() override = default;
};
class Tf2XlaRewriterTestPeer {
public:
explicit Tf2XlaRewriterTestPeer() = delete;
explicit Tf2XlaRewriterTestPeer(mlir::Operation* op)
: op_builder_(op),
empty_rewriter_(op_builder_),
tf2xla_rewriter_(op, empty_rewriter_,
"XLA_CPU_JIT") {}
absl::StatusOr<TupleOp> ImportXlaComputationIntoModule(
XlaComputation& computation) {
return tf2xla_rewriter_.ImportXlaComputation(computation);
}
private:
OpBuilder op_builder_;
EmptyPatternRewriter empty_rewriter_;
Tf2XlaRewriter tf2xla_rewriter_;
};
class Tf2XlaRewriterTest : public ::testing::Test {
public:
void SetUp() override {
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
}
Status CreateMlirModule(std::string module_string = kMlirModuleStr) {
TF_ASSIGN_OR_RETURN(
module_, test::GetMlirModuleFromString(module_string, &context_));
context_.loadAllAvailableDialects();
return absl::OkStatus();
}
Status LegalizeSingleOp(Operation& op) {
SourceMgrDiagnosticHandler sourceMgrHandler(source_manager_, &context_);
OpBuilder op_builder(&op);
EmptyPatternRewriter pattern_rewriter(op_builder);
LogicalResult result =
Tf2XlaRewriter::RewriteOp(&op, pattern_rewriter,
"XLA_CPU_JIT");
if (!result.succeeded()) {
return tsl::errors::Internal("Failed to rewrite op");
}
return absl::OkStatus();
}
Status LegalizeModule(std::string module_string = kMlirModuleStr) {
TF_EXPECT_OK(CreateMlirModule(module_string));
FuncOp main = module_->lookupSymbol<mlir::func::FuncOp>("main");
if (!main) {
return tsl::errors::InvalidArgument("Could not find a main function");
}
WalkResult walk_result = main.walk([&](Operation* op) {
if (op->getDialect()->getNamespace() !=
TF::TensorFlowDialect::getDialectNamespace()) {
return WalkResult::advance();
}
if (!LegalizeSingleOp(*op).ok()) {
return WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
return tsl::errors::Internal("Could not legalize all ops");
}
return absl::OkStatus();
}
mlir::func::FuncOp GetMainFunc() {
func::FuncOp main_func = module_->lookupSymbol<mlir::func::FuncOp>("main");
EXPECT_TRUE(main_func);
return main_func;
}
mlir::Operation& GetFirstOpFromMain() {
mlir::func::FuncOp main_func = GetMainFunc();
return main_func.getBody().front().front();
}
absl::StatusOr<TupleOp> ImportXlaComputationIntoModule(
XlaComputation& computation) {
SourceMgrDiagnosticHandler sourceMgrHandler(source_manager_, &context_);
mlir::Operation& first_op = GetFirstOpFromMain();
Tf2XlaRewriterTestPeer test_peer(&first_op);
return test_peer.ImportXlaComputationIntoModule(computation);
}
protected:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
llvm::SourceMgr source_manager_;
};
TEST_F(Tf2XlaRewriterTest, LegalizesOpWithTf2xlaHloImporter) {
TF_EXPECT_OK(LegalizeModule());
int num_tuple_ops = 0;
module_->walk([&num_tuple_ops](TupleOp tuple_op) { num_tuple_ops += 1; });
EXPECT_EQ(num_tuple_ops, 0);
}
TEST_F(Tf2XlaRewriterTest, ImportsXlaComputationIntoModule) {
TF_ASSERT_OK(CreateMlirModule());
XlaComputation computation = GetTestXlaComputation();
TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple,
ImportXlaComputationIntoModule(computation));
ModuleOp parent_module =
root_tuple.getOperation()->getParentOfType<ModuleOp>();
EXPECT_EQ(parent_module, *module_);
}
TEST_F(Tf2XlaRewriterTest, FailsWithoutRootTuple) {
TF_ASSERT_OK(CreateMlirModule());
XlaBuilder xla_builder("test_fail");
xla::Add(xla::ConstantR0<float>(&xla_builder, 1.0),
xla::ConstantR0<float>(&xla_builder, 2.0));
XlaComputation bad_computation = xla_builder.Build().value();
EXPECT_FALSE(ImportXlaComputationIntoModule(bad_computation).ok());
}
TEST_F(Tf2XlaRewriterTest, ImportsSingleComputation) {
XlaBuilder builder("test_builder");
XlaComputation to_apply;
{
auto sub_builder = builder.CreateSubBuilder("add");
auto arg0 = Parameter(sub_builder.get(), 0,
ShapeUtil::MakeScalarShape(xla::F32), "x");
auto arg1 = Parameter(sub_builder.get(), 1,
ShapeUtil::MakeScalarShape(xla::F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto x = Parameter(&builder, 0, ShapeUtil::MakeShape(xla::F32, {4, 16}), "x");
ReplicaGroup group;
group.add_replica_ids(0);
group.add_replica_ids(1);
XlaOp reduce_scatter =
ReduceScatter(x, to_apply, 1, 2,
{group});
std::vector<XlaOp> tuple_values;
tuple_values.push_back(reduce_scatter);
xla::Tuple(&builder, tuple_values);
TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
EXPECT_EQ(computation.proto().computations_size(), 2);
TF_ASSERT_OK(CreateMlirModule());
TF_ASSERT_OK_AND_ASSIGN(TupleOp root_tuple,
ImportXlaComputationIntoModule(computation));
EXPECT_TRUE(root_tuple);
int num_func_ops = 0;
module_->walk([&num_func_ops](func::FuncOp func_op) { num_func_ops++; });
EXPECT_EQ(num_func_ops, 1);
}
TEST_F(Tf2XlaRewriterTest, InsertsConstantParameters) {
static constexpr char kModuleWithConstParam[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1442 : i32}} {
func.func @main(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = "tf.Const"() {value = dense<1.42> : tensor<2xf32>} : () -> tensor<2xf32>
%1 = "tf.Atan2"(%arg0, %0) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
func.return %0 : tensor<2xf32>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithConstParam));
}
TEST_F(Tf2XlaRewriterTest, DoesntEnforceCompileTimeConstantCheck) {
static constexpr char kModuleWithNonConstParam[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main(%arg0: tensor<3x3x10xbf16>, %arg1: tensor<3xi32>) -> tensor<1x?x4xbf16> attributes {allow_soft_placement = false, tf.entry_function = {control_outputs = "", inputs = "_arg0,_arg1,_arg2", outputs = "_retval0"}} {
%cst = "tf.Const"() {value = dense<[1, -1, 4]> : tensor<3xi32>} : () -> tensor<3xi32>
%0 = "tf.Slice"(%arg0, %arg1, %cst) {_XlaHasReferenceVars = false, _xla_inferred_shapes = [#tf_type.shape<1x?x4>], device = "/job:localhost/replica:0/task:0/device:TPU:0"} : (tensor<3x3x10xbf16>, tensor<3xi32>, tensor<3xi32>) -> tensor<1x?x4xbf16>
return %0 : tensor<1x?x4xbf16>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithNonConstParam));
}
TEST_F(Tf2XlaRewriterTest, CreatesDefaultValues) {
static constexpr char kModuleWithOpWithoutValuesThatShouldBeDefaulted[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main() -> tensor<1x2x3x4xf32> attributes {allow_soft_placement = false, tf.entry_function = {control_outputs = "", inputs = "_arg0,_arg1,_arg2", outputs = "_retval0"}} {
%cst = "tf.Const"() {value = dense<[1, 2, 3, 4]> : tensor<4xi32>} : () -> tensor<4xi32>
%0 = "tf.RandomUniform"(%cst) : (tensor<4xi32>) -> tensor<1x2x3x4xf32>
return %0 : tensor<1x2x3x4xf32>
}
})";
TF_ASSERT_OK(LegalizeModule(kModuleWithOpWithoutValuesThatShouldBeDefaulted));
}
TEST_F(Tf2XlaRewriterTest, OpWithLocationDoesntBreakNodeDefName) {
static constexpr char kModuleWithOpWithoutValuesThatShouldBeDefaulted[] =
R"mlir(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1610 : i32}} {
func.func @main(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = "tf.Exp"(%arg0) : (tensor<2xf32>) -> tensor<2xf32> loc(fused["exp"("exp"), "exp"])
func.return %0 : tensor<2xf32>
}
})mlir";
TF_ASSERT_OK(LegalizeModule(kModuleWithOpWithoutValuesThatShouldBeDefaulted));
}
TEST_F(Tf2XlaRewriterTest, ErrorsWithInvalidNumberOfParametersToArgs) {
XlaBuilder builder("test_builder");
XlaComputation to_apply;
{
auto sub_builder = builder.CreateSubBuilder("add");
auto arg0 = Parameter(sub_builder.get(), 0,
ShapeUtil::MakeScalarShape(xla::F32), "x");
auto arg1 = Parameter(sub_builder.get(), 1,
ShapeUtil::MakeScalarShape(xla::F32), "y");
Add(arg0, arg1);
TF_ASSERT_OK_AND_ASSIGN(to_apply, sub_builder->Build());
}
auto a = Parameter(&builder, 0, ShapeUtil::MakeScalarShape(xla::F32), "a");
auto b = Parameter(&builder, 1, ShapeUtil::MakeScalarShape(xla::F32), "b");
XlaOp call_op = xla::Call(&builder, to_apply, {a, b});
std::vector<XlaOp> tuple_values;
tuple_values.push_back(call_op);
xla::Tuple(&builder, tuple_values);
TF_ASSERT_OK_AND_ASSIGN(XlaComputation computation, builder.Build());
EXPECT_EQ(computation.proto().computations_size(), 2);
TF_ASSERT_OK(CreateMlirModule());
absl::StatusOr<TupleOp> status_or_tuple_op =
ImportXlaComputationIntoModule(computation);
EXPECT_FALSE(status_or_tuple_op.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
52944e6e-66f7-4103-9d2a-a920c7a49650 | cpp | tensorflow/tensorflow | verify_tfxla_legalization | tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization.cc | tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc | #include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/platform/errors.h"
namespace mlir {
namespace mhlo {
namespace {
#define GEN_PASS_DEF_VERIFYTFXLALEGALIZATION
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
auto* mlir_failed_legalization_op_count =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/"
"mlir_second_phase_failed_legalization_op_count",
"Counts which op fails to legalize", "op_name");
auto* mlir_non_static_op_count = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/"
"mlir_second_phase_non_static_op_count",
"Counts which ops do not have static results", "op_name");
auto* mlir_non_static_op_skip_count = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/"
"mlir_second_phase_non_static_op_skip_count",
"Counts skipped ops which do not have static results", "op_name");
static const char* kMustBeConstantError =
"must have compile-time constant inputs and outputs.\n\n"
"XLA compilation requires that operator arguments that represent shapes or "
"dimensions be evaluated to concrete values at compile time. This error "
"means that a shape or dimension argument could not be evaluated at "
"compile time, usually because the value of the argument depends on a "
"parameter to the computation, on a variable, or on a stateful operation "
"such as a random number generator.";
static const DenseSet<mlir::TypeID>* operations_to_skip =
new DenseSet<mlir::TypeID>{mlir::TypeID::get<mhlo::EinsumOp>()};
class VerifyTFXLALegalization
: public impl::VerifyTFXLALegalizationBase<VerifyTFXLALegalization> {
public:
explicit VerifyTFXLALegalization(bool legalize_chlo) {
legalize_chlo_ = legalize_chlo;
}
void runOnOperation() override;
};
static void IncrementCounterFor(tensorflow::monitoring::Counter<1>* counter,
Operation* op) {
counter->GetCell(op->getName().getStringRef().str())->IncrementBy(1);
}
bool HasBounds(RankedTensorType type) {
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
type.getEncoding());
return (encoding && !encoding.getBounds().empty());
}
bool HasStaticShapeOrBounded(Value val) {
auto type = val.getType();
if (mlir::isa<UnrankedTensorType>(type)) {
return false;
}
if (mlir::isa<RankedTensorType>(type)) {
auto ranked_tensor = mlir::dyn_cast<RankedTensorType>(type);
if (ranked_tensor.hasStaticShape()) {
return true;
}
return HasBounds(ranked_tensor);
}
return true;
}
bool EmitMustBeConstantError(Operation* op) {
if (operations_to_skip->contains(op->getRegisteredInfo()->getTypeID())) {
IncrementCounterFor(mlir_non_static_op_skip_count, op);
return true;
}
emitError(op->getLoc()) << absl::StrCat(
"Node `", op->getName().getStringRef().str(), "` ", kMustBeConstantError);
return false;
}
bool IsStaticOperation(Operation* op) {
for (auto o : op->getResults()) {
if (!HasStaticShapeOrBounded(o)) {
return EmitMustBeConstantError(op);
}
}
return true;
}
bool IsMhloAndStatic(Operation* op) {
if (!llvm::isa<mlir::mhlo::MhloDialect>(op->getDialect())) {
return true;
}
return IsStaticOperation(op);
}
bool IsDefaultConversionLegal(
Operation* op, const ConversionTarget& default_conversion_target) {
if (!default_conversion_target.isLegal(op)) {
emitError(op->getLoc()) << "Could not legalize op: " << op->getName();
return false;
}
return true;
}
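// Fails the pass if any MHLO op has results that are neither statically
// shaped nor bounded (unless the op is in the skip set), or if any op is not
// legal for the default conversion target.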
void VerifyTFXLALegalization::runOnOperation() {
Operation* func_op = getOperation();
ConversionTarget default_conversion_target =
GetDefaultLegalConversionTargets(getContext(), legalize_chlo_);
bool has_invalid_ops = false;
func_op->walk([&](Operation* op) {
if (!IsMhloAndStatic(op)) {
has_invalid_ops = true;
IncrementCounterFor(mlir_non_static_op_count, op);
return WalkResult::interrupt();
}
if (!IsDefaultConversionLegal(op, default_conversion_target)) {
has_invalid_ops = true;
IncrementCounterFor(mlir_failed_legalization_op_count, op);
}
return WalkResult::advance();
});
if (has_invalid_ops) signalPassFailure();
}
}
std::unique_ptr<mlir::OperationPass<mlir::func::FuncOp>>
CreateVerifyTFXLALegalizationPass(bool legalize_chlo) {
return std::make_unique<VerifyTFXLALegalization>(legalize_chlo);
}
}
} | #include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::mhlo::test::GetMlirModuleFromString;
using ::tensorflow::monitoring::testing::CellReader;
static constexpr char kFailedLegalizationStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_failed_legalization_op_count";
static constexpr char kNonStaticOpStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_non_static_op_count";
static constexpr char kNonStaticOpSkipStreamz[] =
"/tensorflow/core/tf2xla/mlir_second_phase_non_static_op_skip_count";
class VerifyTfxlaLegalizationTest : public ::testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateVerifyTFXLALegalizationPass(false));
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(VerifyTfxlaLegalizationTest, RecordsStreamzFailedVerification) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kFailedLegalizationStreamz);
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(error.Delta("tf.BadValue"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, ErrorsNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32>
%4 = mhlo.multiply %3, %3 : tensor<?xi32>
return %4 : tensor<?xi32>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(legal_error.Delta("mhlo.dynamic_iota"), 0);
EXPECT_EQ(static_error.Delta("mhlo.dynamic_iota"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, SkipsSpecificNonStaticInputs) {
static constexpr char kNonStaticFailure[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main(%a : tensor<5x14x1xf32>, %b : tensor<1x14x32xf32>) -> tensor<?x?x?xf32> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%c = "mhlo.einsum"(%a, %b) {einsum_config = "bji,bjk->bik"} : (tensor<5x14x1xf32>, tensor<1x14x32xf32>) -> tensor<?x?x?xf32>
return %c : tensor<?x?x?xf32>
}
})";
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CellReader<int64_t> skipped(kNonStaticOpSkipStreamz);
CreateModule(kNonStaticFailure);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(static_error.Delta("mhlo.einsum"), 0);
EXPECT_EQ(skipped.Delta("mhlo.einsum"), 1);
}
TEST_F(VerifyTfxlaLegalizationTest, SkipsNonStaticInputsWithBounds) {
static constexpr char kNonStaticWithBoundsSuccess[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1504 : i32}} {
func.func @main() -> tensor<?xi32, #mhlo.type_extensions<bounds = [4]>> attributes {tf.entry_function = {control_outputs = "", inputs = "i,j", outputs = "identity_RetVal"}} {
%0 = mhlo.constant dense<1.000000e+00> : tensor<f64>
%1 = mhlo.convert %0 : (tensor<f64>) -> tensor<i64>
%2 = mhlo.reshape %1 : (tensor<i64>) -> tensor<1xi64>
%3 = "mhlo.dynamic_iota"(%2) {iota_dimension = 0 : i64} : (tensor<1xi64>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
%4 = mhlo.multiply %3, %3 : tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
return %4 : tensor<?xi32, #mhlo.type_extensions<bounds = [4]>>
}
})";
CellReader<int64_t> legal_error(kFailedLegalizationStreamz);
CellReader<int64_t> static_error(kNonStaticOpStreamz);
CreateModule(kNonStaticWithBoundsSuccess);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(legal_error.Delta("mhlo.multiply"), 0);
EXPECT_EQ(static_error.Delta("mhlo.multiply"), 0);
}
TEST_F(VerifyTfxlaLegalizationTest, RecordsMultipleFailures) {
static constexpr char kMultipleFailures[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.BadValue"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
%1 = "tf.AlsoBad"() {value = dense<10> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> error(kFailedLegalizationStreamz);
CreateModule(kMultipleFailures);
auto result = Run();
EXPECT_TRUE(result.failed());
EXPECT_EQ(error.Delta("tf.BadValue"), 1);
EXPECT_EQ(error.Delta("tf.AlsoBad"), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
650091b6-2adc-4acd-a30c-698f8f8cc66e | cpp | tensorflow/tensorflow | xla_legalize_targets | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.cc | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets_test.cc | #include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
namespace mlir {
namespace mhlo {
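// Builds the conversion target shared by the TF->XLA legalization passes:
// MHLO, arith, func, tensor and shape ops are always legal, while CHLO and
// StableHLO are marked illegal only when `legalize_chlo` is set (otherwise
// CHLO is allowed to pass through unchanged).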
ConversionTarget GetDefaultLegalConversionTargets(MLIRContext& mlir_context,
bool legalize_chlo) {
ConversionTarget target(mlir_context);
if (legalize_chlo) {
target.addIllegalDialect<chlo::ChloDialect>();
target.addIllegalDialect<stablehlo::StablehloDialect>();
} else {
target.addLegalDialect<chlo::ChloDialect>();
}
target.addLegalDialect<MhloDialect>();
target.addLegalDialect<arith::ArithDialect>();
target.addLegalDialect<func::FuncDialect>();
target.addLegalDialect<tensor::TensorDialect>();
target.addLegalDialect<shape::ShapeDialect>();
target.addLegalOp<func::CallOp>();
target.addLegalOp<TF::_XlaHostComputeMlirOp, TF::XlaSendToHostOp,
TF::XlaRecvFromHostOp>();
return target;
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace mhlo {
namespace {
mlir::DialectRegistry GetDefaultDialectRegistry() {
mlir::DialectRegistry registry;
registry.insert<arith::ArithDialect>();
registry.insert<func::FuncDialect>();
registry.insert<tensor::TensorDialect>();
registry.insert<shape::ShapeDialect>();
registry.insert<TF::TensorFlowDialect>();
registry.insert<chlo::ChloDialect>();
return registry;
}
class XlaLegalizeTargetsTest : public testing::Test {
public:
XlaLegalizeTargetsTest()
: context_(GetDefaultDialectRegistry()),
module_(mlir::ModuleOp::create(mlir::UnknownLoc::get(&context_))),
builder_(&module_->getBodyRegion()) {
context_.loadAllAvailableDialects();
}
protected:
mlir::MLIRContext context_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
mlir::OpBuilder builder_;
};
TEST_F(XlaLegalizeTargetsTest, CreatesConversionTargets) {
auto const_int = builder_.create<mlir::arith::ConstantIntOp>(
builder_.getUnknownLoc(), 10, builder_.getI32Type());
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, false);
EXPECT_TRUE(target.isLegal(const_int));
}
TEST_F(XlaLegalizeTargetsTest, AllowsCHLODialect) {
auto const_int = builder_.create<chlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32TensorAttr({42}));
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, true);
EXPECT_TRUE(target.isIllegal(const_int));
}
TEST_F(XlaLegalizeTargetsTest, DontAllowCHLODialect) {
auto const_int = builder_.create<chlo::ConstantOp>(
builder_.getUnknownLoc(), builder_.getI32TensorAttr({42}));
ConversionTarget target =
GetDefaultLegalConversionTargets(context_, false);
EXPECT_TRUE(target.isLegal(const_int));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
770b6edd-c724-4e1f-9105-36781d3f9b9e | cpp | tensorflow/tensorflow | xla_legalize_tf | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf.cc | tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_test.cc | #include <memory>
#include <optional>
#include <string>
#include <utility>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "stablehlo/dialect/ChloOps.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_with_tf2xla_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_targets.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/rewriters.h"
#include "xla/mlir_hlo/mhlo/utils/type_conversion.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace mlir {
namespace mhlo {
namespace {
#define GEN_PASS_DEF_LEGALIZETF
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
auto *mlir_legalization_count = tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_count",
"Counts the attempts of legalization of ops", "op_name");
auto *mlir_failed_legalization_count = tensorflow::monitoring::Counter<2>::New(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count",
"Counts the failure of legalization of ops", "op_name", "legality");
class LegalizeTF : public impl::LegalizeTFBase<LegalizeTF> {
public:
explicit LegalizeTF(bool legalize_chlo,
std::optional<StringRef> tf2xla_fallback_device_type,
bool prefer_tf2xla) {
legalize_chlo_ = legalize_chlo;
prefer_tf2xla_ = prefer_tf2xla;
use_tf2xla_fallback_ = tf2xla_fallback_device_type.has_value();
if (tf2xla_fallback_device_type.has_value()) {
device_type_ = tf2xla_fallback_device_type.value().str();
}
}
void runOnOperation() override;
};
#define GEN_PASS_DEF_LEGALIZETFMODULEPASS
#include "tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_passes.h.inc"
RewritePatternSet PatternsIncludeOps(RewritePatternSet &from) {
RewritePatternSet to(from.getContext());
for (auto &pattern : from.getNativePatterns()) {
std::optional<OperationName> pat_op_name = pattern->getRootKind();
bool include =
!pat_op_name ||
IsTypeLegalizedWithMlir(pat_op_name->getRegisteredInfo()->getTypeID());
if (include) to.add(std::move(pattern));
}
to.add(std::move(from.getPDLPatterns()));
return to;
}
std::string OperationLegalityString(Operation *op,
const ConversionTarget &target) {
auto op_name = op->getName();
auto action = target.getOpAction(op_name);
if (!action.has_value()) {
return "Unknown";
}
switch (action.value_or(ConversionTarget::LegalizationAction::Legal)) {
case ConversionTarget::LegalizationAction::Legal:
return "Legal";
case ConversionTarget::LegalizationAction::Dynamic:
return "Dynamic";
case ConversionTarget::LegalizationAction::Illegal:
return "Illegal";
default:
return "Invalid";
}
}
void IncrementFailedLegalizationCount(Operation *op,
const ConversionTarget &target) {
auto op_name = op->getName();
auto name_string = op_name.getStringRef().str();
auto op_legality = OperationLegalityString(op, target);
mlir_failed_legalization_count->GetCell(name_string, op_legality)
->IncrementBy(1);
}
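// Runs a partial conversion against the default legality targets and records
// a failure metric for the root op and for every op left unlegalized.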
mlir::LogicalResult ApplyPatterns(Operation *op, RewritePatternSet &patterns,
bool legalize_chlo) {
ConversionTarget target =
GetDefaultLegalConversionTargets(*op->getContext(), legalize_chlo);
DenseSet<Operation *> unconverted_ops;
ConversionConfig config;
config.unlegalizedOps = &unconverted_ops;
auto result = applyPartialConversion(op, target, std::move(patterns), config);
if (failed(result)) {
IncrementFailedLegalizationCount(op, target);
}
for (const auto &unconverted_op : unconverted_ops) {
IncrementFailedLegalizationCount(unconverted_op, target);
}
return result;
}
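// Assembles the legalization pattern set. Depending on the arguments this is
// (a) native MLIR patterns only, (b) native patterns plus the TF2XLA
// fallback, or (c) a partitioned set that prefers the TF2XLA fallback per op,
// as logged by the VLOG messages below. CHLO decomposition patterns are
// appended when `legalize_chlo` is true.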
LogicalResult legalizeTF(Operation *op, bool legalize_chlo,
std::optional<StringRef> tf2xla_fallback_device_type,
bool prefer_tf2xla) {
MLIRContext *context = op->getContext();
RewritePatternSet legalize_lower_patterns(context);
PopulateLegalizeTfPatterns(context, &legalize_lower_patterns);
TF::PopulateTFLoweringBeforeHLOPatterns(context, &legalize_lower_patterns);
if (tf2xla_fallback_device_type && prefer_tf2xla) {
VLOG(1) << "TF to XLA legalization patterns are partitioned by op into "
"either native MLIR legalization, or TF2XLA fallback "
"legalzation, with a preference toward TF2XLA.";
} else if (tf2xla_fallback_device_type) {
VLOG(1) << "TF to XLA legalization patterns include all native patterns "
"and TF2XLA fallback patterns.";
} else {
VLOG(1) << "TF to XLA legalization patterns are native patterns only.";
}
RewritePatternSet patterns = (tf2xla_fallback_device_type && prefer_tf2xla)
? PatternsIncludeOps(legalize_lower_patterns)
: std::move(legalize_lower_patterns);
Tf2XlaTypeConverter converter;
if (tf2xla_fallback_device_type) {
PopulateLegalizeTfWithTf2XlaPatterns(tf2xla_fallback_device_type.value(),
patterns, context, converter,
prefer_tf2xla);
}
stablehlo::StablehloToHloTypeConverter hlo_converter;
if (legalize_chlo) {
chlo::populateChloToHloPatterns(context, &hlo_converter, &patterns);
}
chlo::ConstantLikeOp::getCanonicalizationPatterns(patterns, context);
return ApplyPatterns(op, patterns, legalize_chlo);
}
void LegalizeTF::runOnOperation() {
auto op = getOperation();
auto op_name = op->getName().getStringRef().str();
mlir_legalization_count->GetCell(op_name)->IncrementBy(1);
std::optional<StringRef> tf2xla_fallback_device_type = std::nullopt;
if (use_tf2xla_fallback_) {
tf2xla_fallback_device_type = device_type_;
}
if (failed(legalizeTF(op, legalize_chlo_, tf2xla_fallback_device_type,
prefer_tf2xla_))) {
signalPassFailure();
}
}
}
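// Pass factory. A usage sketch mirroring the accompanying unit test (the
// device type below is the test's placeholder string, not a required value):
//
//   pm.addPass(mlir::mhlo::createLegalizeTFPass(
//       /*legalize_chlo=*/true,
//       /*tf2xla_fallback_device_type=*/llvm::StringRef("gpu/xpu"),
//       /*prefer_tf2xla=*/false));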
std::unique_ptr<OperationPass<ModuleOp>> createLegalizeTFPass(
bool legalize_chlo, std::optional<StringRef> tf2xla_fallback_device_type,
bool prefer_tf2xla) {
return std::make_unique<LegalizeTF>(
legalize_chlo, tf2xla_fallback_device_type, prefer_tf2xla);
}
}
} | #include <functional>
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::PassManager;
using ::tensorflow::monitoring::testing::CellReader;
absl::StatusOr<OwningOpRef<ModuleOp>> GetMlirModuleFromString(
absl::string_view module_string, MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
OwningOpRef<ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(module_string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
bool BuildAndRunPipeline(absl::string_view module_string,
const std::function<void(PassManager*)>& passes) {
mlir::registerPassManagerCLOptions();
MLIRContext context;
OwningOpRef<ModuleOp> module =
GetMlirModuleFromString(module_string, &context).value();
PassManager pm(&context);
if (mlir::failed(mlir::applyPassManagerCLOptions(pm))) return false;
passes(&pm);
return pm.run(module.get()).succeeded();
}
std::function<void(PassManager*)> legalizeTFPasses() {
return [](PassManager* pm) {
pm->addPass(mlir::mhlo::createLegalizeTFPass(
true, llvm::StringRef("gpu/xpu"),
false));
};
}
TEST(XlaLegalizeTest, IllegalOp) {
constexpr char kMlirIllegalOpStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> legalize_failure_count(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");
auto status = BuildAndRunPipeline(kMlirIllegalOpStr, legalizeTFPasses());
EXPECT_TRUE(status);
EXPECT_EQ(legalize_failure_count.Read("tf.DoesntExist", "Unknown"), 1);
}
TEST(XlaLegalizeTest, LegalOp) {
static constexpr char kMlirLegalOpStr[] = R"(
func.func @infeed_dequeue_tuple_dynamic_error() -> (tensor<3x3xf32>, tensor<4x?xf32>) {
%0:2 = "tf.InfeedDequeueTuple"() : () -> (tensor<3x3xf32>, tensor<4x?xf32>) func.return %0#0, %0#1 : tensor<3x3xf32>, tensor<4x?xf32>
})";
CellReader<int64_t> legalize_failure_count(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");
auto status = BuildAndRunPipeline(kMlirLegalOpStr, legalizeTFPasses());
EXPECT_TRUE(status);
EXPECT_EQ(legalize_failure_count.Read("tf.InfeedDequeueTuple", "Unknown"), 1);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/transforms/xla_legalize_tf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b2be4701-2268-40b8-a256-ebbc5fad8a74 | cpp | tensorflow/tensorflow | cluster_tf | tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.cc | tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLFunctionalExtras.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/device_type.pb.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/stacktrace.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::LogicalResult;
using mlir::ModuleOp;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
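// Runs the clustering pipeline produced by `pipeline_builder` on `module`
// with verification enabled, dumping the module before and after (and
// per-pass IR at higher verbosity) when VLOG or the debug data dumper request
// it. Returns the status accumulated by the diagnostic handler.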
tensorflow::Status RunTFXLABridge(
ModuleOp module,
llvm::function_ref<void(OpPassManager &pm)> pipeline_builder,
llvm::StringRef module_name = llvm::StringRef(),
llvm::StringRef dump_prefix = "tf_xla_bridge_v2") {
if (!mlir::TF::TensorFlowDialect::HasConstantFoldHook()) {
return tensorflow::errors::Internal(
"TensorFlow dialect missing constant fold hook in TFXLA bridge phase "
"1; this could happen if the binary doesn't link the constant fold "
"hook registration library.");
}
PassManager bridge(module.getContext());
bridge.enableVerifier();
::tensorflow::applyTensorflowAndCLOptions(bridge);
pipeline_builder(bridge);
mlir::StatusScopedDiagnosticHandler diag_handler(
module.getContext(), false,
!VLOG_IS_ON(1));
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
dump_prefix.str() + "_before"),
module, llvm::StringRef(), &bridge);
}
if (VLOG_IS_ON(2) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(),
kDebugGroupBridgePhase1Clustering)) {
::tensorflow::tf2xla::internal::EnablePassIRPrinting(
bridge, kDebugGroupBridgePhase1Clustering, module_name);
}
LogicalResult result = bridge.run(module);
(void)result;
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
dump_prefix.str() + "_after"),
module, llvm::StringRef(), &bridge);
}
return diag_handler.ConsumeStatus();
}
tensorflow::Status RecordIfErrorStatus(const std::string error_prefix,
bool fallback_enabled,
std::string bridge_type,
std::string device_type,
absl::Status status) {
if (status.ok()) {
return status;
}
VLOG(2) << error_prefix << " " << status;
tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
bridge_type, "v2", device_type,
fallback_enabled,
"failure");
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
std::string bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_TPU_BRIDGE";
if (device_type != "tpu") {
bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_CPU/GPU_BRIDGE";
}
tsl::error_logging::Log(mlir::TF::kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
void CreateReplicatedClusteringPipeline(OpPassManager &pm,
llvm::StringRef module_name) {
pm.addPass(mlir::TFTPU::CreateTPUValidateInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateCanonicalizeCompileAndReplicateAttributesPass());
tensorflow::tf2xla::internal::AddReplicatedBridgeClusteringPipelinePasses(
pm, module_name);
}
void CreateReplicatedClusteringPipelineV2(OpPassManager &pm) {
CreateReplicatedClusteringPipeline(pm, "");
}
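// Phase-1 entry point: picks the replicated (TPU-style) or non-replicated
// clustering pipeline, records a success/failure metric tagged with the
// bridge and device type, and surfaces any clustering error.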
tensorflow::Status RunFunctionTf2xlaClusteringBridge(
    ModuleOp module, bool is_supported_by_replicated_bridge,
bool is_in_fallback_enabled_mode, llvm::StringRef module_name) {
  std::string device_type = is_supported_by_replicated_bridge
? mlir::TF::kMlirPh1BridgeCounterTpu
: mlir::TF::kMlirPh1BridgeCounterNonTpu;
VLOG(2)
      << (is_supported_by_replicated_bridge ? "Replicated" : "NonReplicated")
      << " Bridge call stack trace is "
<< "(NOTE: this is not an error; rather the stack trace for debugging) : "
<< tensorflow::CurrentStackTrace();
Status clustering_status =
          is_supported_by_replicated_bridge
? RunTFXLABridge(
module,
[module_name](OpPassManager &pm) {
CreateReplicatedClusteringPipeline(pm, module_name);
},
module_name, "tf_xla_bridge_v2_replicated")
: RunTFXLABridge(
module,
[](OpPassManager &pm) {
tensorflow::tf2xla::internal::
AddNonReplicatedBridgeClusteringPipelinePasses(pm);
},
module_name, "tf_xla_bridge_v2_nonreplicated");
  std::string bridge_type = is_supported_by_replicated_bridge
? mlir::TF::kMlirPh1BridgeCounterReplicated
: mlir::TF::kMlirPh1BridgeCounterNonReplicated;
TF_RETURN_IF_ERROR(RecordIfErrorStatus(
"clustering_v2", is_in_fallback_enabled_mode,
bridge_type, device_type, clustering_status));
tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
bridge_type, "v2", device_type,
is_in_fallback_enabled_mode,
"success");
return absl::OkStatus();
}
mlir::PassPipelineRegistration<> replicated_clustering_bridge_v2(
"tf-replicated-clustering-bridge-v2",
"Run all the passes involved in transforming a TensorFlow 2 graph before "
"execution so that it is suitable for targeting devices.",
CreateReplicatedClusteringPipelineV2);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/testing/utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
using ::mlir::DialectRegistry;
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::WalkResult;
using ::mlir::func::FuncOp;
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::tf2xla::v2::testing::TestDataPath;
static constexpr char kCompilationStreamz[] =
"/tensorflow/core/tf_mlir_bridge_first_phase_v2_count";
class FunctionClusterTensorflowDialectTest : public ::testing::Test {
public:
FunctionClusterTensorflowDialectTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(FunctionClusterTensorflowDialectTest, ClustersTfReplicatedBridge) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
*mlir_module_, true,
false));
FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
ASSERT_TRUE(main);
EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterTpu,
"fallback_disabled", "success"),
1);
}
TEST_F(FunctionClusterTensorflowDialectTest,
RunsOutsideCompilationReplicatedBridge) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("outside_compilation.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
*mlir_module_, true,
false));
FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterFuncOp cluster_op) {
has_cluster_op = true;
return WalkResult::advance();
});
EXPECT_TRUE(has_cluster_op);
EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterTpu,
"fallback_disabled", "success"),
1);
}
TEST_F(FunctionClusterTensorflowDialectTest, ClustersTFNonReplicatedBridge) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
*mlir_module_, false,
false));
FuncOp main = mlir_module_->lookupSymbol<mlir::func::FuncOp>("main");
ASSERT_TRUE(main);
EXPECT_EQ(
compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterNonReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterNonTpu,
"fallback_disabled", "success"),
1);
}
TEST_F(FunctionClusterTensorflowDialectTest, LogsFallbackMode) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunFunctionTf2xlaClusteringBridge(
*mlir_module_, true,
true));
EXPECT_EQ(compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2,
mlir::TF::kMlirPh1BridgeCounterTpu,
"fallback_enabled", "success"),
1);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2794a58a-3290-4fc0-a55b-0df11748c080 | cpp | tensorflow/tensorflow | compile_mlir_util | tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc | tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include "tensorflow/compiler/mlir/tf2xla/mlir_bridge_rollout_policy.h"
#include "absl/status/status.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/mlir_pass_instrumentation.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/lowering_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/tf2xla/type_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_computation.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/translate/mhlo_to_hlo/layout_util.h"
#include "xla/hlo/translate/mhlo_to_hlo/mlir_hlo_to_hlo.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/protobuf/core_platform_payloads.pb.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
constexpr absl::string_view kGroupSizeAttrName =
"tf2xla.collective_info.group_size";
constexpr absl::string_view kGroupKeyAttrName =
"tf2xla.collective_info.group_key";
absl::StatusOr<TensorShape> GetTensorShapeFromXlaArgument(
const XlaArgument& arg) {
if (std::holds_alternative<xla::Shape>(arg.shape)) {
TensorShape arg_shape;
TF_RETURN_IF_ERROR(
XLAShapeToTensorShape(std::get<xla::Shape>(arg.shape), &arg_shape));
return arg_shape;
} else {
return std::get<TensorShape>(arg.shape);
}
}
Status MaybeRewriteLayoutWithShardedShape(
mlir::StringAttr sharding,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* shape) {
if (!sharding) return absl::OkStatus();
xla::OpSharding op_sharding;
if (tensorflow::DecodeShardingAttribute(sharding, op_sharding).failed()) {
return errors::InvalidArgument("failed to parse sharding '",
sharding.getValue().str(), "'");
}
std::optional<xla::HloSharding> hlo_sharding;
TF_ASSIGN_OR_RETURN(hlo_sharding, xla::HloSharding::FromProto(op_sharding));
TF_RETURN_IF_ERROR(RewriteLayoutWithShardedShape(
hlo_sharding, false, shape_determination_fns, shape));
return absl::OkStatus();
}
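// Computes the XLA shapes for the arguments of the module's `main` function,
// applying the caller-provided layout/shape determination functions and any
// `mhlo.sharding` argument attributes; packs them into a single tuple shape
// when `use_tuple_args` is set.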
Status GetXlaInputShapes(
mlir::ModuleOp module, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
std::vector<xla::Shape>* xla_input_shapes) {
xla_input_shapes->clear();
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
TF_RET_CHECK(main_func != nullptr) << "No main function found";
mlir::FunctionType func_type = main_func.getFunctionType();
int num_args = func_type.getNumInputs();
xla_input_shapes->reserve(num_args);
std::vector<xla::Shape> individual_arg_shapes;
individual_arg_shapes.reserve(num_args);
for (int i = 0; i < num_args; ++i) {
individual_arg_shapes.emplace_back();
xla::Shape& xla_shape = individual_arg_shapes.back();
DataType arg_dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(func_type.getInput(i), &arg_dtype));
auto layout_preference = shape_determination_fns.layout_preference_fn(
arg_shapes[i].shape, arg_dtype, std::nullopt);
TF_ASSIGN_OR_RETURN(xla_shape,
shape_determination_fns.shape_representation_fn(
arg_shapes[i].shape, arg_dtype,
false, layout_preference));
auto sharding =
main_func.getArgAttrOfType<mlir::StringAttr>(i, "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &xla_shape));
}
if (use_tuple_args) {
xla_input_shapes->push_back(
xla::ShapeUtil::MakeTupleShape(individual_arg_shapes));
} else {
*xla_input_shapes = individual_arg_shapes;
}
return absl::OkStatus();
}
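// Returns a static "buffer" type for `ty`: dynamic dimensions are replaced by
// the bounds recorded in the mhlo.type_extensions encoding, if present.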
mlir::RankedTensorType GetBufferType(mlir::Type ty) {
auto ranked_ty = mlir::dyn_cast_or_null<mlir::RankedTensorType>(ty);
if (!ranked_ty) return {};
int64_t rank = ranked_ty.getRank();
llvm::SmallVector<int64_t, 4> dims = llvm::to_vector<4>(ranked_ty.getShape());
auto encoding = mlir::dyn_cast_or_null<mlir::mhlo::TypeExtensionsAttr>(
ranked_ty.getEncoding());
if (encoding && !encoding.getBounds().empty()) {
for (int64_t dim = 0; dim < rank; ++dim) {
if (dims[dim] == mlir::ShapedType::kDynamic) {
dims[dim] = encoding.getBounds()[dim];
}
}
}
return GetTypeFromTFTensorShape(dims, ranked_ty.getElementType());
}
Status GetOutputInfo(
mlir::ModuleOp module, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
xla::Shape* xla_output_shape, std::vector<XlaOutputDescription>* outputs,
std::vector<XlaResourceUpdate>* resource_updates) {
auto shape_representation_fn_no_fast_memory =
[shape_determination_fns](
const xla::Shape& xla_shape) -> absl::StatusOr<xla::Shape> {
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
TF_ASSIGN_OR_RETURN(DataType dtype, EncodePrimitiveTypeAsDataType(
xla_shape.element_type()));
auto layout_preference = shape_determination_fns.layout_preference_fn(
shape, dtype, std::nullopt);
return shape_determination_fns.shape_representation_fn(
shape, dtype, false, layout_preference);
};
mlir::func::FuncOp main_func =
module.lookupSymbol<mlir::func::FuncOp>("main");
mlir::FunctionType func_type = main_func.getFunctionType();
outputs->clear();
outputs->reserve(func_type.getNumResults());
resource_updates->clear();
resource_updates->reserve(func_type.getNumResults());
std::vector<xla::Shape> shapes;
shapes.reserve(func_type.getNumResults());
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
for (unsigned i = 0; i < main_func.getNumArguments(); ++i)
if (auto aliasing_output = main_func.getArgAttrOfType<mlir::IntegerAttr>(
i, "tf.aliasing_output"))
output_to_input_alias[aliasing_output.getInt()] = i;
auto return_op = main_func.begin()->getTerminator();
for (const auto& type_and_idx : llvm::enumerate(func_type.getResults())) {
size_t idx = type_and_idx.index();
auto result_ty = mlir::cast<mlir::RankedTensorType>(type_and_idx.value());
mlir::RankedTensorType buffer_ty = result_ty;
if (!buffer_ty.hasStaticShape()) {
mlir::Value return_val = return_op->getOperand(idx);
if (auto owner = mlir::dyn_cast_or_null<mlir::tensor::CastOp>(
return_val.getDefiningOp())) {
buffer_ty = GetBufferType(owner.getOperand().getType());
if (!buffer_ty || !buffer_ty.hasStaticShape()) {
return errors::InvalidArgument(
"results needs to be static or bounded");
}
}
}
xla::Shape shape = xla::TypeToShape(buffer_ty);
if (shape.element_type() == xla::PRIMITIVE_TYPE_INVALID) {
return errors::InvalidArgument("XLA conversion failed for MLIR type.");
}
TF_ASSIGN_OR_RETURN(shape, shape_representation_fn_no_fast_memory(shape));
if (!result_ty.hasStaticShape()) {
int64_t rank = result_ty.getRank();
for (int64_t dim = 0; dim < rank; ++dim) {
if (result_ty.isDynamicDim(dim)) {
shape.set_dynamic_dimension(dim, true);
}
}
}
auto sharding = main_func.getResultAttrOfType<mlir::StringAttr>(
type_and_idx.index(), "mhlo.sharding");
TF_RETURN_IF_ERROR(MaybeRewriteLayoutWithShardedShape(
sharding, shape_determination_fns, &shape));
auto tensor_type =
mlir::dyn_cast<mlir::RankedTensorType>(type_and_idx.value());
shapes.push_back(shape);
auto it = output_to_input_alias.find(type_and_idx.index());
if (it != output_to_input_alias.end() && use_resource_updates_for_aliases) {
resource_updates->emplace_back();
XlaResourceUpdate& resource_update = resource_updates->back();
resource_update.input_index = it->getSecond();
resource_update.modified = true;
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &resource_update.type));
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &resource_update.shape));
continue;
}
outputs->emplace_back();
XlaOutputDescription& out_desc = outputs->back();
TF_RETURN_IF_ERROR(ConvertToDataType(tensor_type, &out_desc.type));
out_desc.is_constant = false;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(shape, &out_desc.shape));
out_desc.input_index =
it != output_to_input_alias.end() ? it->getSecond() : -1;
out_desc.is_tensor_list = false;
}
*xla_output_shape = xla::ShapeUtil::MakeTupleShape(shapes);
return absl::OkStatus();
}
void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) {
input_mapping->resize(num_inputs, 0);
std::iota(input_mapping->begin(), input_mapping->end(), 0);
}
static void RegisterDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::stablehlo::registerAllDialects(registry);
}
bool CanInlineFunctionsPostLegalization(llvm::StringRef device_type) {
return device_type == DEVICE_TPU_XLA_JIT;
}
void AddLegalizationPasses(mlir::OpPassManager& pm, bool legalize_chlo,
llvm::StringRef device_type, bool enable_op_fallback,
bool lower_to_xla_hlo) {
if (lower_to_xla_hlo) {
mlir::quant::stablehlo::AddQuantizationLoweringPasses(pm);
pm.addPass(mlir::mhlo::createLegalizeTFPass(
legalize_chlo,
device_type, enable_op_fallback));
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateInfeedsOpsXlaAdjustLayoutPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
}
}
void CreateMlirLoweringPassesPipeline(mlir::PassManager& pm) {
applyTensorflowAndCLOptions(pm);
pm.addNestedPass<mlir::func::FuncOp>(mlir::TF::CreateLowerQuantizedPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::CreateConvertTFQuantTypesPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
pm.addPass(mlir::mhlo::CreateLegalizeTFCollectivePass());
mlir::quant::stablehlo::AddQuantizationLoweringPasses(pm);
pm.addPass(mlir::mhlo::createLegalizeTFPass(
true,
std::nullopt, false));
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(mlir::mhlo::CreateLegalizeTFCommunicationPass());
auto pass_instrumentors = mlir::GetPassInstrumentors();
for (const auto& creator : pass_instrumentors) {
pm.addInstrumentation(creator());
}
}
void MaybeDumpMlirModuleAndPasses(mlir::PassManager& pm,
mlir::ModuleOp module_op,
std::string module_name, std::string tag) {
if (DEBUG_DATA_DUMPER()->ShouldDump(module_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name, kDebugGroupMain, tag),
module_op, "", &pm);
}
if (VLOG_IS_ON(2) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name, kDebugGroupBridgePhase2)) {
module_op.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name](const std::string& pass_tag_name, mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name, kDebugGroupBridgePhase2, pass_tag_name);
},
"",
true));
}
}
absl::Status RunMlirPipelineAndMaybeDumpResults(mlir::PassManager& pm,
mlir::ModuleOp module_op,
std::string module_name = "") {
mlir::StatusScopedDiagnosticHandler error_handler(module_op.getContext());
if (failed(pm.run(module_op))) {
auto status = absl::InvalidArgumentError("TF to XLA legalization failed: ");
tensorflow::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_2,
status);
return error_handler.Combine(status);
}
if (DEBUG_DATA_DUMPER()->ShouldDump(module_name, kDebugGroupMain) ||
VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name, kDebugGroupMain,
"legalize_hlo_after"),
module_op, "", &pm);
}
Status status = error_handler.ConsumeStatus();
tensorflow::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_2,
status);
return status;
}
}
void CreateConvertMlirToXlaHloPipeline(
mlir::OpPassManager& pm, llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo, bool allow_partial_conversion) {
bool legalize_chlo = true;
pm.addNestedPass<mlir::func::FuncOp>(
tf2xla::internal::CreateInputLoweringMetricsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateTFXLADeviceSpecificTransformsPass(device_type));
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantPass());
if (lower_to_xla_hlo) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::CreateReplicateTensorListInitOpsPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::createSCCPPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(mlir::createSCCPPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorListOpsDecompositionPass());
}
pm.addPass(mlir::TF::CreateStackOpsDecompositionPass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::TF::CreateTensorArrayOpsDecompositionPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TFDevice::CreateDecomposeResourceOpsPass());
pm.addPass(mlir::TF::CreatePromoteResourcesToArgsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::createStablehloLegalizeToHloPass());
}
pm.addNestedPass<mlir::func::FuncOp>(mlir::TF::CreateLowerQuantizedPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::stablehlo::CreateConvertTFQuantTypesPass());
if (lower_to_xla_hlo) {
for (auto& target_pass : custom_legalization_passes) {
pm.addNestedPass<mlir::func::FuncOp>(std::move(target_pass));
}
pm.addPass(mlir::mhlo::CreateLegalizeTFCollectivePass());
}
AddLegalizationPasses(pm, legalize_chlo, device_type, enable_op_fallback,
lower_to_xla_hlo);
if (lower_to_xla_hlo) {
pm.addPass(mlir::mhlo::CreateLegalizeTFCommunicationPass());
if (!allow_partial_conversion) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::CreateVerifyTFXLALegalizationPass(legalize_chlo));
}
}
if (CanInlineFunctionsPostLegalization(device_type)) {
pm.addPass(mlir::createInlinerPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::mhlo::createSinkConstantsToControlFlowPass());
}
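// Runs MLIR shape inference on the `main` function, seeding it with the
// concrete argument shapes (resource arguments contribute an empty shape).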
Status RefineShapes(llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
mlir::ModuleOp module) {
auto producer_or = GetTfGraphProducerVersion(module);
if (!producer_or.ok()) return producer_or.status();
int64_t producer_version = producer_or.value();
llvm::SmallVector<int64_t, 16> shape_backing;
llvm::SmallVector<llvm::ArrayRef<int64_t>, 4> arg_shapes_copy;
{
size_t count = 0;
for (const TensorOrResourceShape& tensor_resource_shape : arg_shapes) {
if (tensor_resource_shape.is_resource) continue;
count += tensor_resource_shape.shape.dims();
}
shape_backing.resize(count);
arg_shapes_copy.reserve(arg_shapes.size());
size_t offset = 0;
for (const TensorOrResourceShape& tensor_resource_shape : arg_shapes) {
if (tensor_resource_shape.is_resource) {
arg_shapes_copy.push_back(llvm::ArrayRef<int64_t>());
continue;
}
size_t start = offset;
for (tensorflow::TensorShapeDim dim : tensor_resource_shape.shape) {
shape_backing[offset] = dim.size;
++offset;
}
if (offset == start) {
arg_shapes_copy.push_back(llvm::ArrayRef<int64_t>());
} else {
arg_shapes_copy.push_back(
llvm::ArrayRef<int64_t>(&shape_backing[start], offset - start));
}
}
}
auto main_func = module.lookupSymbol<mlir::func::FuncOp>("main");
mlir::StatusScopedDiagnosticHandler error_handler(module.getContext());
mlir::LogicalResult result = mlir::TF::InferShapeForFunction(
main_func, arg_shapes_copy, producer_version);
if (failed(result)) {
return error_handler.Combine(
errors::Internal("MLIR Shape refinement failed"));
}
return error_handler.ConsumeStatus();
}
Status CreateAndRunMlirBridge(mlir::ModuleOp module_op,
llvm::StringRef device_type,
bool enable_op_fallback,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
bool lower_to_xla_hlo,
llvm::StringRef module_name = llvm::StringRef()) {
mlir::PassManager tf2xla(module_op.getContext());
applyTensorflowAndCLOptions(tf2xla);
CreateConvertMlirToXlaHloPipeline(tf2xla, device_type, enable_op_fallback,
custom_legalization_passes,
lower_to_xla_hlo,
false);
auto pass_instrumentors = mlir::GetPassInstrumentors();
for (const auto& creator : pass_instrumentors) {
tf2xla.addInstrumentation(creator());
}
MaybeDumpMlirModuleAndPasses(tf2xla, module_op, module_name.str(),
"legalize_hlo_before");
return RunMlirPipelineAndMaybeDumpResults(tf2xla, module_op,
module_name.str());
}
Status BuildHloFromTfInner(mlir::ModuleOp module_op, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
llvm::StringRef device_type,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes) {
TF_RETURN_IF_ERROR(CreateAndRunMlirBridge(module_op, device_type,
false,
custom_legalization_passes,
true));
mlir::Block& block =
module_op.lookupSymbol<mlir::func::FuncOp>("main").front();
return mlir::BuildHloFromMlirHlo(block, builder, xla_params, returns);
}
Status ConvertMLIRWithOptionalXlaComputation(
mlir::ModuleOp module_op, llvm::StringRef device_type,
xla::XlaComputation* xla_computation, bool use_tuple_args,
bool enable_op_fallback, bool return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name, bool lower_to_xla_hlo) {
TF_RETURN_IF_ERROR(CreateAndRunMlirBridge(
module_op, device_type, enable_op_fallback, custom_legalization_passes,
lower_to_xla_hlo, module_name));
mlir::MlirToHloConversionOptions options;
options.layout_preference_fn = [&](const xla::Shape& xla_shape)
-> absl::StatusOr<mlir::XlaLayoutPreference> {
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
TF_ASSIGN_OR_RETURN(DataType dtype, EncodePrimitiveTypeAsDataType(
xla_shape.element_type()));
return shape_determination_fns.layout_preference_fn(shape, dtype,
std::nullopt);
};
options.shape_representation_fn =
[&](const xla::Shape& xla_shape, bool fast_mem,
mlir::XlaLayoutPreference layout_preference)
-> absl::StatusOr<xla::Shape> {
TensorShape shape;
TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape));
TF_ASSIGN_OR_RETURN(DataType dtype, EncodePrimitiveTypeAsDataType(
xla_shape.element_type()));
return shape_determination_fns.shape_representation_fn(
shape, dtype, fast_mem, layout_preference);
};
xla::HloProto hlo_proto;
if (lower_to_xla_hlo) {
TF_RETURN_IF_ERROR(mlir::ConvertMlirHloToHlo(
module_op, &hlo_proto, use_tuple_args, return_tuple, options));
*xla_computation = xla::XlaComputation(hlo_proto.hlo_module());
}
return absl::OkStatus();
}
Status ConvertMLIRToXlaComputation(
mlir::ModuleOp module_op, llvm::StringRef device_type,
xla::XlaComputation* xla_computation, bool use_tuple_args,
bool enable_op_fallback, bool return_tuple,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name) {
return ConvertMLIRWithOptionalXlaComputation(
module_op, device_type, xla_computation, use_tuple_args,
enable_op_fallback, return_tuple, shape_determination_fns,
custom_legalization_passes, module_name, true);
}
Status CompileMlirSetup(mlir::ModuleOp module_op,
llvm::ArrayRef<TensorOrResourceShape> arg_shapes) {
TF_RETURN_IF_ERROR(RefineShapes(arg_shapes, module_op));
if (VLOG_IS_ON(2))
tensorflow::DumpMlirOpToFile("compile_mlir_shape_refiner", module_op);
return absl::OkStatus();
}
Status BuildHloFromTf(mlir::ModuleOp module_op, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes) {
if (VLOG_IS_ON(2))
tensorflow::DumpMlirOpToFile("build_hlo_tf_before", module_op);
TF_RETURN_IF_ERROR(CompileMlirSetup(module_op, arg_shapes));
TF_RETURN_IF_ERROR(BuildHloFromTfInner(module_op, builder, xla_params,
returns, device_type,
custom_legalization_passes));
if (VLOG_IS_ON(2))
tensorflow::DumpMlirOpToFile("build_hlo_tf_after", module_op);
return absl::OkStatus();
}
Status PopulateCollectiveInfo(mlir::ModuleOp module_op,
XlaCompilationResult* compilation_result) {
mlir::IntegerAttr group_key_attr =
module_op->getAttrOfType<mlir::IntegerAttr>(
mlir::StringRef(kGroupKeyAttrName.data(), kGroupKeyAttrName.size()));
mlir::IntegerAttr group_size_attr =
module_op->getAttrOfType<mlir::IntegerAttr>(mlir::StringRef(
kGroupSizeAttrName.data(), kGroupSizeAttrName.size()));
if (group_key_attr == nullptr && group_size_attr == nullptr) {
return absl::OkStatus();
}
DCHECK(group_key_attr != nullptr)
<< "module attribute " << kGroupKeyAttrName
<< " is required for CollectiveInfo but not found.";
DCHECK(group_size_attr != nullptr)
<< "module attribute " << kGroupSizeAttrName
<< " is required for CollectiveInfo but not found.";
int32_t group_key = group_key_attr.getInt();
int32_t group_size = group_size_attr.getInt();
VLOG(2) << "Populating CollectiveInfo: group_key=" << group_key
<< " group_size=" << group_size;
compilation_result->collective_info = {group_key, group_size, 0};
return absl::OkStatus();
}
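// Fills the compilation result's input mapping, XLA input shapes, and
// output/resource-update descriptions from the lowered module.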
Status PopulateResultIOInfo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
bool use_tuple_args, bool use_resource_updates_for_aliases,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result) {
GetInputMappingForMlir(arg_shapes.size(), &compilation_result->input_mapping);
TF_RETURN_IF_ERROR(GetXlaInputShapes(module_op, arg_shapes, use_tuple_args,
shape_determination_fns,
&compilation_result->xla_input_shapes));
return GetOutputInfo(
module_op, use_resource_updates_for_aliases, shape_determination_fns,
&compilation_result->xla_output_shape, &compilation_result->outputs,
&compilation_result->resource_updates);
}
absl::StatusOr<std::string> CompileMlirToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<TensorOrResourceShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple, bool use_resource_updates_for_aliases,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name, bool lower_to_xla_hlo) {
if (enable_op_fallback && lower_to_xla_hlo &&
GetMlirBridge2ndPhaseRolloutPolicy(module_op) ==
MlirBridgeRolloutPolicy::kDisabledAfterGraphAnalysis) {
return CompileToHloGraphAnalysisFailedError();
}
TF_RETURN_IF_ERROR(CompileMlirSetup(module_op, arg_shapes));
compilation_result->computation = std::make_shared<xla::XlaComputation>();
TF_RETURN_IF_ERROR(ConvertMLIRWithOptionalXlaComputation(
module_op, device_type, compilation_result->computation.get(),
use_tuple_args, enable_op_fallback, use_return_tuple,
shape_determination_fns, custom_legalization_passes, module_name,
lower_to_xla_hlo));
auto mlir_compilation = SerializeMlirModule(module_op);
if (lower_to_xla_hlo) {
TF_RETURN_IF_ERROR(PopulateCollectiveInfo(module_op, compilation_result));
auto populate_result = PopulateResultIOInfo(
module_op, arg_shapes, use_tuple_args, use_resource_updates_for_aliases,
shape_determination_fns, compilation_result);
if (!populate_result.ok()) {
llvm::errs() << "Failed to populate result io info";
return populate_result;
}
}
return mlir_compilation;
}
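// Deserializes an MLIR module string and forwards to CompileMlirToXlaHlo,
// treating every argument as a plain (non-resource) tensor shape.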
absl::StatusOr<std::string> CompileSerializedMlirToXlaHlo(
llvm::StringRef mlir_module_string, llvm::ArrayRef<TensorShape> arg_shapes,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
const XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes,
llvm::StringRef module_name, bool lower_to_xla_hlo) {
mlir::DialectRegistry mlir_registry;
RegisterDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
TF_RETURN_IF_ERROR(
DeserializeMlirModule(mlir_module_string, &mlir_context, &mlir_module));
llvm::SmallVector<TensorOrResourceShape, 4> tensor_or_resource_shapes;
tensor_or_resource_shapes.reserve(arg_shapes.size());
for (const auto& arg_shape : arg_shapes)
tensor_or_resource_shapes.push_back({arg_shape});
return CompileMlirToXlaHlo(
mlir_module.get(), tensor_or_resource_shapes, device_type, use_tuple_args,
      enable_op_fallback, /*use_return_tuple=*/true,
      /*use_resource_updates_for_aliases=*/false, shape_determination_fns,
compilation_result, custom_legalization_passes, module_name,
lower_to_xla_hlo);
}
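// Rewrites the main function's arguments in place: resource arguments get a
// resource element type, constant arguments are materialized as tf.Const ops
// and erased. Returns the indices of the remaining parameter arguments.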
static absl::StatusOr<std::vector<int>> RewriteWithArgs(
mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args) {
mlir::func::FuncOp main_fn =
module_op.lookupSymbol<mlir::func::FuncOp>("main");
std::vector<int> params;
bool has_resource_args = false;
auto builder = mlir::OpBuilder(main_fn.getBody());
std::vector<int> args_to_erase;
for (int idx = 0; idx < args.size(); idx++) {
const XlaArgument& xla_arg = args[idx];
mlir::BlockArgument mlir_arg = main_fn.getArgument(idx);
if (xla_arg.kind == XlaArgument::kResource) {
mlir::Type element_type;
if (xla_arg.type == DT_INVALID) {
return errors::Unimplemented(absl::StrCat(
"Argument ", idx,
" is an uninitialized resource variable which is currently"
" unsupported in the MLIR-based TPU bridge"));
}
TF_RETURN_IF_ERROR(ConvertDataType(xla_arg.type, builder, &element_type));
TF_ASSIGN_OR_RETURN(TensorShape arg_shape,
GetTensorShapeFromXlaArgument(xla_arg));
auto resource_shape = arg_shape.dim_sizes();
llvm::SmallVector<int64_t, 4> resource_subtype_shape(
resource_shape.begin(), resource_shape.end());
auto resource_subtype =
GetTypeFromTFTensorShape(resource_subtype_shape, element_type);
auto resource_type =
mlir::TF::ResourceType::get({resource_subtype}, builder.getContext());
auto tensor_type = mlir::cast<mlir::TensorType>(mlir_arg.getType());
if (tensor_type.hasRank()) {
mlir_arg.setType(
GetTypeFromTFTensorShape(tensor_type.getShape(), resource_type));
} else {
mlir_arg.setType(mlir::UnrankedTensorType::get(resource_type));
}
has_resource_args = true;
}
if (xla_arg.kind != XlaArgument::kConstant) {
params.push_back(idx);
continue;
}
TF_ASSIGN_OR_RETURN(auto value_attr,
ConvertTensor(xla_arg.constant_value, &builder));
auto constant = builder.create<mlir::TF::ConstOp>(
mlir::UnknownLoc::get(module_op.getContext()), value_attr);
mlir_arg.replaceAllUsesWith(constant);
args_to_erase.push_back(idx);
}
if (has_resource_args) {
llvm::SmallVector<mlir::Type, 4> updated_argument_types;
updated_argument_types.reserve(main_fn.getNumArguments());
for (mlir::BlockArgument& arg : main_fn.getArguments())
updated_argument_types.push_back(arg.getType());
main_fn.setType(
mlir::FunctionType::get(main_fn.getContext(), updated_argument_types,
main_fn.getFunctionType().getResults()));
}
for (int idx : llvm::reverse(args_to_erase)) main_fn.eraseArgument(idx);
return params;
}
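// Rewrites the module arguments, records the shapes of the remaining
// parameters, and runs the standard TF pipeline over the module.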
Status CompileGraphSetup(
mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args,
std::vector<int>* remaining_params,
llvm::SmallVector<TensorOrResourceShape, 4>& arg_shapes) {
TF_ASSIGN_OR_RETURN(*remaining_params, RewriteWithArgs(module_op, args));
arg_shapes.reserve(remaining_params->size());
for (unsigned idx : *remaining_params) {
const auto& arg = args[idx];
TF_ASSIGN_OR_RETURN(TensorShape arg_shape,
GetTensorShapeFromXlaArgument(arg));
arg_shapes.push_back({arg_shape,
arg.kind == XlaArgument::kResource});
}
mlir::PassManager pm(module_op.getContext());
applyTensorflowAndCLOptions(pm);
mlir::TF::StandardPipelineOptions tf_options;
mlir::TF::CreateTFStandardPipeline(pm, tf_options);
if (VLOG_IS_ON(1))
tensorflow::DumpMlirOpToFile("compile_graph_setup_before", module_op);
if (VLOG_IS_ON(2)) {
module_op.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<tensorflow::BridgeLoggerConfig>(
        /*print_module_scope=*/true));
}
mlir::StatusScopedDiagnosticHandler diag_handler(module_op.getContext());
if (failed(pm.run(module_op))) return diag_handler.ConsumeStatus();
if (VLOG_IS_ON(1))
tensorflow::DumpMlirOpToFile("compile_graph_setup_after", module_op);
return absl::OkStatus();
}
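// Lowers a TF module described by XlaArguments to HLO on `builder`: rewrites
// the arguments, runs the MLIR lowering pipeline, and emits HLO from the
// lowered main function body.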
Status BuildHloFromModule(mlir::ModuleOp module_op, xla::XlaBuilder& builder,
llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns,
llvm::ArrayRef<XlaArgument> args,
llvm::StringRef device_type) {
std::vector<int> remaining_params;
llvm::SmallVector<TensorOrResourceShape, 4> arg_shapes;
TF_RETURN_IF_ERROR(
CompileGraphSetup(module_op, args, &remaining_params, arg_shapes));
llvm::SmallVector<xla::XlaOp, 2> remaining_xla_params;
for (auto i : remaining_params) remaining_xla_params.push_back(xla_params[i]);
TF_RETURN_IF_ERROR(CompileMlirSetup(module_op, arg_shapes));
mlir::PassManager tf2xla(module_op.getContext());
CreateMlirLoweringPassesPipeline(tf2xla);
MaybeDumpMlirModuleAndPasses(tf2xla, module_op, "",
"legalize_hlo_before");
TF_RETURN_IF_ERROR(RunMlirPipelineAndMaybeDumpResults(tf2xla, module_op));
mlir::Block& block =
module_op.lookupSymbol<mlir::func::FuncOp>("main").front();
TF_RETURN_IF_ERROR(
mlir::BuildHloFromMlirHlo(block, builder, remaining_xla_params, returns));
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("build_hlo_tf_after", module_op);
}
return absl::OkStatus();
}
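// Compiles a TF module described by XlaArguments to XLA HLO and records the
// input mapping of the remaining parameters on the compilation result.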
Status CompileGraphToXlaHlo(
mlir::ModuleOp module_op, llvm::ArrayRef<XlaArgument> args,
llvm::StringRef device_type, bool use_tuple_args, bool enable_op_fallback,
bool use_return_tuple,
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns,
XlaCompilationResult* compilation_result,
llvm::MutableArrayRef<std::unique_ptr<mlir::Pass>>
custom_legalization_passes) {
std::vector<int> remaining_params;
llvm::SmallVector<TensorOrResourceShape, 4> arg_shapes;
TF_RETURN_IF_ERROR(
CompileGraphSetup(module_op, args, &remaining_params, arg_shapes));
auto compile_mlir_result = CompileMlirToXlaHlo(
module_op, arg_shapes, device_type, use_tuple_args, enable_op_fallback,
      use_return_tuple,
      /*use_resource_updates_for_aliases=*/true, shape_determination_fns,
compilation_result, custom_legalization_passes);
compilation_result->input_mapping = remaining_params;
return compile_mlir_result.status();
}
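// Imports a tensorflow::Graph (treated as a function) into a TF dialect MLIR
// module.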
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GraphToModule(
bool unconditionally_use_set_output_shapes, const Graph& graph,
llvm::ArrayRef<std::string> control_rets,
const FunctionLibraryDefinition& flib_def, mlir::MLIRContext* context) {
mlir::DialectRegistry registry;
RegisterDialects(registry);
context->appendDialectRegistry(registry);
GraphImportConfig config;
config.graph_as_function = true;
config.control_outputs = control_rets;
config.enable_shape_inference = false;
config.unconditionally_use_set_output_shapes =
unconditionally_use_set_output_shapes;
GraphDebugInfo debug_info;
return ConvertGraphToMlir(graph, debug_info, flib_def, config, context);
}
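// Imports a tensorflow::Graph into MLIR and lowers it to HLO ops on `builder`.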
Status BuildHloFromGraph(
const Graph& graph, xla::XlaBuilder& builder,
mlir::MLIRContext& mlir_context, llvm::ArrayRef<xla::XlaOp> xla_params,
std::vector<xla::XlaOp>& returns, bool unconditionally_use_output_shapes,
llvm::ArrayRef<XlaArgument> args, llvm::ArrayRef<std::string> control_rets,
llvm::StringRef device_type, const FunctionLibraryDefinition& flib_def) {
TF_ASSIGN_OR_RETURN(mlir::OwningOpRef<mlir::ModuleOp> module,
GraphToModule(unconditionally_use_output_shapes, graph,
control_rets, flib_def, &mlir_context));
return BuildHloFromModule(module.get(), builder, xla_params, returns, args,
device_type);
}
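// Registers the TF-to-HLO lowering pipeline under the name
// "tf-to-hlo-pipeline" using the XLA_CPU_JIT device type.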
void RegisterConvertMlirToXlaHloPipelineWithDefaults() {
static mlir::PassPipelineRegistration<> pipeline(
"tf-to-hlo-pipeline",
"Convert TF dialect to HLO dialect (used for compilation in bridge).",
[](mlir::OpPassManager& pm) {
tensorflow::CreateConvertMlirToXlaHloPipeline(
pm, "XLA_CPU_JIT", false,
{});
});
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.h"
#include <initializer_list>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/shape_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/types.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
using ::mlir::OpPassManager;
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::HasSubstr;
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
TEST(LegalizeMlirTest, LegalizesModule) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("mhlo.const"));
}
TEST(LegalizeMlirTest, FailsLegalizesModule) {
constexpr char failed_legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.DoesntExist"() : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CellReader<int64_t> count(
"/tensorflow/core/tf2xla/v1/mlir_failed_xla_legalize_tf_pass_count");
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
failed_legalization, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result);
EXPECT_FALSE(status.ok());
EXPECT_EQ(count.Delta("tf.DoesntExist", "Unknown"), 1);
}
TEST(CompileMlirUtil, CreatesPipeline) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
false,
{});
EXPECT_FALSE(pass_manager.getPasses().empty());
}
TEST(CompileMlirUtil, HasLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
true,
{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kLegalizeTfPass));
}
TEST(CompileMlirUtil, DoesNotHaveLegalizationPass) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kLegalizeTfPass = "xla-legalize-tf";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
false,
{},
false);
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, Not(HasSubstr(kLegalizeTfPass)));
}
TEST(CompileMlirUtil, DoesNotLowerWhenTold) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
std::vector<tensorflow::TensorShape> arg_shapes;
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
kMlirModuleStr, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result,
      /*custom_legalization_passes=*/{},
      /*module_name=*/"",
      /*lower_to_xla_hlo=*/false);
EXPECT_TRUE(status.ok());
EXPECT_THAT(status.value(), HasSubstr("tf.Const"));
}
TEST(CompileMlirUtil, CanonicalizationIsExplicitDuringInlining) {
OpPassManager pass_manager;
llvm::StringRef device_type = "XLA_CPU_JIT";
absl::string_view kInlinePass =
"inline{default-pipeline=canonicalize "
"inlining-threshold=4294967295 max-iterations=4 }";
CreateConvertMlirToXlaHloPipeline(pass_manager, device_type,
true,
{});
std::string pass_description;
llvm::raw_string_ostream raw_stream(pass_description);
pass_manager.printAsTextualPipeline(raw_stream);
EXPECT_THAT(pass_description, HasSubstr(kInlinePass));
}
TEST(LegalizeMlirTest, LegalizesModuleWithDynamicShape) {
constexpr char legalization[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>> {
%0 = "tf.Identity"(%arg0) : (tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>) -> tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
func.return %0 : tensor<?xi32, #mhlo.type_extensions<bounds = [1]>>
}
})";
std::vector<tensorflow::TensorShape> arg_shapes = {{1}};
XlaCompilationResult compilation_result;
auto status = CompileSerializedMlirToXlaHlo(
legalization, arg_shapes, "XLA_TPU_JIT",
      /*use_tuple_args=*/true, /*enable_op_fallback=*/false,
      /*shape_determination_fns=*/{}, &compilation_result);
EXPECT_TRUE(status.ok());
}
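// Builds a single-node graph holding a 2x3x4x5 Const whose _output_shapes
// attribute deliberately disagrees with the tensor's actual shape.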
absl::StatusOr<std::unique_ptr<Graph>> BuildConstOpGraphWithOutputShapes() {
DataType data_type = DT_INT32;
std::initializer_list<int64_t> dims = {2, 3, 4, 5};
Tensor tensor(data_type, TensorShape(dims));
for (int i = 0; i < 2 * 3 * 4 * 5; ++i) {
tensor.flat<int32>()(i) = i;
}
NodeDef node;
auto builder = NodeDefBuilder("some_node", "Const")
.Attr("dtype", data_type)
.Attr("value", tensor);
AttrValue shape_attr;
TensorShapeProto* shape_proto = shape_attr.mutable_list()->add_shape();
shape_proto->add_dim()->set_size(1);
builder.Attr("_output_shapes", shape_attr);
TF_RETURN_IF_ERROR(builder.Finalize(&node));
return CreateSingleOpGraph(node, {}, {data_type});
}
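// Builds a single-node graph with an "Empty" op fed by one int32 parameter
// argument; used to exercise ops that this pipeline does not lower.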
absl::StatusOr<std::unique_ptr<Graph>> BuildEmptyOpGraph(
std::vector<XlaCompiler::Argument>& xla_args) {
DataType data_type = DT_INT32;
XlaCompiler::Argument arg;
arg.type = DT_INT32;
arg.shape = xla::ShapeUtil::MakeShape(xla::S32, {});
arg.name = "arg0";
arg.kind = XlaCompiler::Argument::kParameter;
xla_args.push_back(arg);
NodeDef node;
auto builder = NodeDefBuilder("some_node", "Empty")
.Input(FakeInput(DT_INT32))
.Attr("dtype", data_type);
TF_RETURN_IF_ERROR(builder.Finalize(&node));
return CreateSingleOpGraph(node, xla_args, {data_type});
}
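// Test helper: lowers `graph` to an XlaComputation via BuildHloFromGraph,
// creating one XLA parameter per argument.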
absl::StatusOr<xla::XlaComputation> BuildHloFromGraph(
Graph& graph, std::vector<XlaCompiler::Argument>& xla_args,
bool use_output_shapes) {
xla::XlaBuilder builder(
::testing::UnitTest::GetInstance()->current_test_info()->name());
mlir::MLIRContext mlir_context;
llvm::SmallVector<xla::XlaOp, 4> xla_params;
for (int i = 0; i < xla_args.size(); ++i) {
xla_params.push_back(Parameter(&builder, i, std::get<1>(xla_args[i].shape),
"arg" + std::to_string(i)));
}
std::vector<xla::XlaOp> returns(1);
TF_RETURN_IF_ERROR(
BuildHloFromGraph(graph, builder, mlir_context, xla_params, returns,
use_output_shapes, xla_args,
                        /*control_rets=*/{}, DEVICE_TPU,
FunctionLibraryDefinition(OpRegistry::Global())));
return builder.Build();
}
TEST(CompileMlirUtil, UsesCorrectOriginalShapeWithoutOutputShapes) {
std::vector<XlaCompiler::Argument> xla_args;
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildConstOpGraphWithOutputShapes());
auto build_result =
      BuildHloFromGraph(*graph, xla_args, /*use_output_shapes=*/false);
TF_ASSERT_OK(build_result);
EXPECT_THAT(build_result,
XlaComputationProtoContains("opcode: \"constant\""));
}
TEST(CompileMlirUtil, UsesIncorrectOutputShapesWhenPresent) {
std::vector<XlaCompiler::Argument> xla_args;
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildConstOpGraphWithOutputShapes());
auto build_result =
      BuildHloFromGraph(*graph, xla_args, /*use_output_shapes=*/true);
ASSERT_FALSE(build_result.ok());
EXPECT_THAT(build_result.status().message(),
HasSubstr("op operand type 'tensor<2x3x4x5xi32>' and result type "
"'tensor<1xi32>' are cast incompatible"));
}
TEST(CompileMlirUtil, DoesNotLowerFallbackOps) {
std::vector<XlaCompiler::Argument> xla_args;
TF_ASSERT_OK_AND_ASSIGN(auto graph, BuildEmptyOpGraph(xla_args));
auto build_result =
      BuildHloFromGraph(*graph, xla_args, /*use_output_shapes=*/true);
ASSERT_FALSE(build_result.ok());
EXPECT_THAT(build_result.status().message(),
HasSubstr("'tf.Empty' op unsupported op"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d03b60b5-e83c-4edb-98fa-654bafca637a | cpp | tensorflow/tensorflow | compile_tf_graph | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc | tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/compile_only_client.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/monitoring/sampler.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/profile_utils/cpu_utils.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tensorflow/core/tpu/tpu_compile.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
using ::tensorflow::tpu::FunctionToHloArgs;
using ::tensorflow::tpu::GuaranteedConsts;
using ::tensorflow::tpu::MlirToHloArgs;
using ::tensorflow::tpu::ShardingAndIndex;
auto* phase2_bridge_compilation_status =
tensorflow::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v1/"
"phase2_compilation_status",
"Tracks the compilation status of the non-mlir bridge",
"status" );
auto* phase2_bridge_compilation_time = tsl::monitoring::Sampler<1>::New(
{"/tensorflow/core/tf2xla/api/v1/phase2_compilation_time",
"The wall-clock time spent on executing graphs in milliseconds.",
"configuration"},
{tsl::monitoring::Buckets::Exponential(1, 1.5, 45)});
constexpr char kOldBridgeNoMlirSuccess[] = "kOldBridgeNoMlirSuccess";
constexpr char kOldBridgeNoMlirFailure[] = "kOldBridgeNoMlirFailure";
namespace {
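// Measures elapsed wall-clock time (via CPU clock cycles) for the
// compilation-time metric.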
struct CompilationTimer {
uint64 start_cycles = profile_utils::CpuUtils::GetCurrentClockCycle();
uint64 ElapsedCycles() {
return profile_utils::CpuUtils::GetCurrentClockCycle() - start_cycles;
}
int64_t ElapsedCyclesInMilliseconds() {
std::chrono::duration<double> duration =
profile_utils::CpuUtils::ConvertClockCycleToTime(ElapsedCycles());
return std::chrono::duration_cast<std::chrono::milliseconds>(duration)
.count();
}
};
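// Reads "tf.aliasing_output" argument attributes from the entry function and
// records the corresponding input/output aliasing on the compiled HLO module
// proto.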
Status PopulateInputOutputAliasing(
mlir::func::FuncOp main_fn,
XlaCompiler::CompilationResult* compilation_result, bool use_tuple_args) {
constexpr char kAliasingAttr[] = "tf.aliasing_output";
llvm::SmallDenseMap<unsigned, unsigned> output_to_input_alias;
unsigned num_arguments = main_fn.getNumArguments();
for (unsigned arg_index = 0; arg_index < num_arguments; ++arg_index) {
if (auto aliasing_output = main_fn.getArgAttrOfType<mlir::IntegerAttr>(
arg_index, kAliasingAttr))
output_to_input_alias[aliasing_output.getInt()] = arg_index;
}
if (output_to_input_alias.empty()) return absl::OkStatus();
xla::HloModuleProto* module_proto =
compilation_result->computation->mutable_proto();
absl::StatusOr<xla::ProgramShape> program_shape_or_status =
compilation_result->computation->GetProgramShape();
TF_RET_CHECK(program_shape_or_status.ok());
xla::ProgramShape& program_shape = program_shape_or_status.value();
if (!program_shape.result().IsTuple())
return errors::Internal("Expect result to have tuple shape");
xla::HloInputOutputAliasConfig config(program_shape.result());
for (auto alias : output_to_input_alias) {
if (use_tuple_args) {
TF_RETURN_IF_ERROR(config.SetUpAlias(
xla::ShapeIndex({alias.first}), 0, xla::ShapeIndex({alias.second}),
xla::HloInputOutputAliasConfig::AliasKind::kMayAlias));
} else {
TF_RETURN_IF_ERROR(config.SetUpAlias(
xla::ShapeIndex({alias.first}), alias.second, xla::ShapeIndex({}),
xla::HloInputOutputAliasConfig::AliasKind::kMayAlias));
}
}
*module_proto->mutable_input_output_alias() = config.ToProto();
return absl::OkStatus();
}
bool failed(const absl::Status& status) { return !status.ok(); }
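// Runs the passes needed to export the module back to a Graph (region control
// flow to functional, shape inference, executor dialect conversion) and then
// converts the result into `flib_def`.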
Status PrepareAndExportToLibrary(mlir::ModuleOp module,
FunctionLibraryDefinition* flib_def) {
mlir::PassManager manager(module.getContext());
applyTensorflowAndCLOptions(manager);
manager.addPass(mlir::TF::CreatePrepareTpuComputationForTfExportPass());
manager.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
manager.addPass(mlir::TF::CreateTFShapeInferencePass());
manager.addNestedPass<mlir::func::FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
manager.addPass(mlir::CreateBreakUpIslandsPass());
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
if (VLOG_IS_ON(2)) {
llvm::StringRef module_name = llvm::StringRef();
constexpr const char* kDebugGroupBridgePhase2 =
"v1_prepare_and_export_to_library";
internal::EnablePassIRPrinting(manager, kDebugGroupBridgePhase2,
module_name);
}
auto prepare_status = manager.run(module);
auto diag_handler_status = diag_handler.ConsumeStatus();
if (failed(prepare_status) || failed(diag_handler_status)) {
return diag_handler_status;
}
GraphExportConfig config;
config.export_entry_func_to_flib = true;
absl::flat_hash_set<Node*> control_ret_nodes;
return tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module, config, nullptr, flib_def, &control_ret_nodes);
}
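// Compiles a GraphDef-based TF function to HLO with the old bridge and
// records success or failure in the compilation-status metric.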
absl::Status CompileTFFunctionWithoutMlir(
FunctionToHloArgs function_computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
Status comp_status = CompileTFFunctionToHlo(
*function_computation.flib_def, function_computation.graph_def_version,
shape_determination_funcs, arg_shapes,
function_computation.guaranteed_constants, *function_computation.function,
metadata, client, arg_core_mapping, per_core_arg_shapes, use_tuple_args,
compilation_result);
if (comp_status.ok()) {
phase2_bridge_compilation_status->GetCell(kOldBridgeNoMlirSuccess)
->IncrementBy(1);
} else {
phase2_bridge_compilation_status->GetCell(kOldBridgeNoMlirFailure)
->IncrementBy(1);
}
return comp_status;
}
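// Compiles a serialized MLIR computation with the old bridge: deserializes
// the module, exports it into a function library, compiles the entry
// function to HLO, and then sets up input/output aliasing.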
absl::Status CompileMLIRTFFunction(
tpu::MlirToHloArgs mlir_computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
TF_RETURN_IF_ERROR(DeserializeMlirModule(mlir_computation.mlir_module,
&context, &mlir_module));
if (!mlir::SetTPUInfeedLayout(mlir_module))
return errors::Internal("Failed to set layouts attribute");
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("legalize_with_old_bridge", mlir_module.get());
}
constexpr char kEntryFuncName[] = "main";
auto main_fn = mlir_module->lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!main_fn) {
return errors::Internal(
"TPU compile op requires module with a entry function main");
}
auto flib_def = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
TF_RETURN_IF_ERROR(PrepareAndExportToLibrary(*mlir_module, flib_def.get()));
if (VLOG_IS_ON(2)) {
tensorflow::DumpMlirOpToFile("legalize_with_old_bridge_post_transform",
mlir_module.get());
}
VersionDef versions;
if (mlir::failed(ExtractTfVersions(*mlir_module, &versions))) {
return errors::Internal(
"module attribute in _TPUCompileMlir op is missing tf versions.");
}
NameAttrList func;
func.set_name(kEntryFuncName);
GuaranteedConsts consts;
*compilation_result = {};
TF_RETURN_IF_ERROR(CompileTFFunctionToHlo(
*flib_def, versions.producer(), shape_determination_funcs, arg_shapes,
consts, func, metadata, client, arg_core_mapping, per_core_arg_shapes,
use_tuple_args, compilation_result));
return PopulateInputOutputAliasing(main_fn, compilation_result,
use_tuple_args);
}
}
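// Dispatches to the MLIR or GraphDef compilation path depending on which
// variant alternative is set, and records the elapsed time metric.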
absl::Status CompileTensorflowGraphToHlo(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>& computation,
const tpu::TPUCompileMetadataProto& metadata, bool use_tuple_args,
const XlaShapeLayoutHelpers::ShapeDeterminationFns
shape_determination_funcs,
const std::vector<tensorflow::TensorShape>& arg_shapes,
std::vector<tpu::ShardingAndIndex>* arg_core_mapping,
std::vector<std::vector<xla::Shape>>* per_core_arg_shapes,
xla::CompileOnlyClient* client,
XlaCompiler::CompilationResult* compilation_result) {
LOG_FIRST_N(INFO, 1) << "Compiling MLIR computation to XLA HLO using the "
"old (non-MLIR) tf2xla bridge";
CompilationTimer timer;
*compilation_result = {};
bool has_mlir = computation.index() == 0;
std::string mlir_string = has_mlir ? "has_mlir" : "has_function_to_hlo";
const std::string kBridgePhase2Config =
absl::StrCat("graph_old_bridge_", mlir_string);
if (has_mlir) {
TF_RETURN_IF_ERROR(CompileMLIRTFFunction(
std::get<0>(computation), metadata, use_tuple_args,
shape_determination_funcs, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result));
} else {
FunctionToHloArgs function_computation = std::get<1>(computation);
TF_RETURN_IF_ERROR(CompileTFFunctionWithoutMlir(
function_computation, metadata, use_tuple_args,
shape_determination_funcs, arg_shapes, arg_core_mapping,
per_core_arg_shapes, client, compilation_result));
}
phase2_bridge_compilation_time->GetCell(kBridgePhase2Config)
->Add(timer.ElapsedCyclesInMilliseconds());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.h"
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tf2xla/internal/test_matchers.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/utils/test_metadata_config.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/hlo/translate/mhlo_to_hlo/type_to_shape.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/lib/monitoring/test_utils.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tf2xla {
namespace v1 {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::tpu::FunctionToHloArgs;
using ::tensorflow::tpu::MlirToHloArgs;
using ::tensorflow::tpu::ShardingAndIndex;
using ::tsl::monitoring::testing::Histogram;
static constexpr char kCompilationTimeStreamzName[] =
"/tensorflow/core/tf2xla/api/v1/phase2_compilation_time";
static constexpr char kCompilationStatusStreamzName[] =
"/tensorflow/core/tf2xla/api/v1/phase2_compilation_status";
static constexpr char kPlatformName[] = "Host";
constexpr char kEntryFuncName[] = "main";
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
func.return
}
})";
MlirToHloArgs CreateTestMlirToHloArgs(const char* module_str = kMlirModuleStr) {
MlirToHloArgs mlir_to_hlo_args;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
mlir_to_hlo_args.mlir_module = module_str;
return mlir_to_hlo_args;
}
class CompileTFGraphTest : public ::testing::Test {
public:
absl::StatusOr<XlaCompilationResult> CompileWithComputation(
const std::variant<tpu::MlirToHloArgs, tpu::FunctionToHloArgs>
computation) {
XlaCompilationResult compilation_result;
se::Platform* platform =
se::PlatformManager::PlatformWithName(kPlatformName).value();
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform).value();
bool use_tuple_args = true;
std::vector<ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
tpu::TPUCompileMetadataProto metadata_proto;
std::vector<TensorShape> arg_shapes;
if (computation.index() == 0) {
TF_RETURN_IF_ERROR(tensorflow::tf2xla::internal::ConfigureMetadata(
std::get<0>(computation).mlir_module, arg_shapes, metadata_proto));
}
XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns;
absl::Status compilation_status =
tensorflow::tf2xla::v1::CompileTensorflowGraphToHlo(
computation, metadata_proto, use_tuple_args,
shape_determination_fns, arg_shapes, &arg_core_mapping,
&per_core_arg_shapes, client, &compilation_result);
if (!compilation_status.ok()) return compilation_status;
return compilation_result;
}
};
TEST_F(CompileTFGraphTest, RecordsStreamzForMlirFallback) {
CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
MlirToHloArgs mlir_to_hlo_args = CreateTestMlirToHloArgs();
TF_EXPECT_OK(CompileWithComputation(mlir_to_hlo_args).status());
Histogram histogram = compilation_time.Delta("graph_old_bridge_has_mlir");
EXPECT_EQ(histogram.num(), 1);
}
TEST_F(CompileTFGraphTest, RecordsStreamzForFunctionToHlo) {
CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
CellReader<int64_t> compilation_status(kCompilationStatusStreamzName);
FunctionDef empty_function =
tensorflow::FunctionDefHelper::Create("empty", {}, {}, {}, {}, {});
tensorflow::FunctionDefLibrary fdef;
*(fdef.add_function()) = empty_function;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), fdef);
OpInputList guaranteed_constants;
NameAttrList function;
function.set_name("empty");
FunctionToHloArgs function_to_hlo_args = {&function,
&flib_def,
                                            /*graph_def_version=*/0,
{&guaranteed_constants}};
TF_EXPECT_OK(CompileWithComputation(function_to_hlo_args).status());
Histogram histogram =
compilation_time.Delta("graph_old_bridge_has_function_to_hlo");
EXPECT_EQ(histogram.num(), 1);
EXPECT_EQ(compilation_status.Delta("kOldBridgeNoMlirSuccess"), 1);
}
TEST_F(CompileTFGraphTest, SuccessfullyCompilesWithManualSharding) {
constexpr char kSupportedManualSharding[] = R"(
module @module___inference_tpu_function_41 attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 1617 : i32}} {
func.func @main(%arg0: tensor<2x2xf32>) -> (tensor<2x2xf32> {mhlo.sharding = "\08\03\1A\02\02\01\22\02\00\01"}) {
%0 = tf_executor.graph {
%outputs, %control = tf_executor.island wraps "tf.XlaSharding"(%arg0) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01"} : (tensor<2x2xf32>) -> tensor<2x2xf32>
%outputs_0, %control_1 = tf_executor.island wraps "tf.XlaSharding"(%outputs) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
%outputs_2, %control_3 = tf_executor.island wraps "tf.XlaSpmdFullToShardShape"(%outputs_0) {dim = -1 : i64, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<1x2xf32>
%control_4 = tf_executor.island wraps "tf._XlaHostComputeMlir"(%outputs_2) {host_mlir_module = "", manual_sharding = true, recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"} : (tensor<1x2xf32>) -> ()
%outputs_5, %control_6 = tf_executor.island(%control_4) wraps "tf._XlaHostComputeMlir"() {host_mlir_module = "module {\0A func.func @host_func() -> tensor<1x2xf32> {\0A %0 = \22tf.Const\22() {value = dense<0.1> : tensor<1x2xf32>} : () -> tensor<1x2xf32> \0A return %0 : tensor<1x2xf32>}}", manual_sharding = true, recv_key = "host_compute_channel_1_retvals", send_key = "host_compute_channel_1_args"} : () -> tensor<1x2xf32>
%outputs_7, %control_8 = tf_executor.island wraps "tf.XlaSpmdShardToFullShape"(%outputs_5) {dim = -1 : i64, full_shape = #tf_type.shape<2x2>, manual_sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<1x2xf32>) -> tensor<2x2xf32>
%outputs_9, %control_10 = tf_executor.island wraps "tf.XlaSharding"(%outputs_7) {_XlaSharding = "\08\03\1A\02\02\01\22\02\00\01", sharding = "\08\03\1A\02\02\01\22\02\00\01", unspecified_dims = []} : (tensor<2x2xf32>) -> tensor<2x2xf32>
tf_executor.fetch %outputs_9 : tensor<2x2xf32>
}
return %0 : tensor<2x2xf32>
}
}
)";
auto mlir_to_hlo_args = CreateTestMlirToHloArgs(kSupportedManualSharding);
auto result = CompileWithComputation(mlir_to_hlo_args);
EXPECT_TRUE(result.ok());
}
TEST_F(CompileTFGraphTest, DoesNotInlineStatelessRandomOps) {
static constexpr char kHasReturnValues[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> (tensor<32x64xf32> {mhlo.sharding = "\08\01\1A\01\01\22\01\00"}) {
%cst = "tf.Const"() {value = dense<[524170, 523952]> : tensor<2xi32>} : () -> tensor<2xi32>
%cst_0 = "tf.Const"() {value = dense<[32, 64]> : tensor<2xi32>} : () -> tensor<2xi32>
%0 = "tf.StatelessRandomNormal"(%cst_0, %cst) : (tensor<2xi32>, tensor<2xi32>) -> tensor<32x64xf32>
return %0 : tensor<32x64xf32>
}
})";
auto compilation_result =
CompileWithComputation(CreateTestMlirToHloArgs(kHasReturnValues));
EXPECT_TRUE(compilation_result.ok());
EXPECT_THAT(compilation_result,
ComputationProtoContains("tf.StatelessRandomNormal"));
}
TEST_F(CompileTFGraphTest, TestRunsShapeInference) {
static constexpr char kShapeInferenceModule[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = "tf.Const"() <{value = dense<-1> : tensor<3360x8xi32>}> : () -> tensor<3360x8xi32>
%cst_33 = "tf.Const"() <{value = dense<[1120, -1]> : tensor<2xi32>}> : () -> tensor<2xi32>
%cst_34 = "tf.Const"() <{value = dense<[3, 1120, -1]> : tensor<3xi32>}> : () -> tensor<3xi32>
%cst_63 = "tf.Const"() <{value = dense<0> : tensor<i32>}> : () -> tensor<i32>
%1965:4 = "tf._XlaHostComputeMlir"(%0, %cst_34, %cst_63, %cst_33) <{host_mlir_module = "#loc1 = loc(\22Reshape:\22)\0A#loc2 = loc(\22Reshape_4\22)\0A#loc3 = loc(\22Reshape\22)\0A#loc9 = loc(fused[#loc1, #loc2, #loc3])\0Amodule {\0A func.func @host_func(%arg0: tensor<3360x?xi32> loc(fused[#loc1, #loc2, #loc3]), %arg1: tensor<3xi32> loc(fused[#loc1, #loc2, #loc3]), %arg2: tensor<i32> loc(fused[#loc1, #loc2, #loc3]), %arg3: tensor<2xi32> loc(fused[#loc1, #loc2, #loc3])) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>) {\0A %0 = \22tf.Reshape\22(%arg0, %arg1) {_xla_outside_compilation = \220\22} : (tensor<3360x?xi32>, tensor<3xi32>) -> tensor<3x1120x?xi32> loc(#loc9)\0A %1:3 = \22tf.Split\22(%arg2, %0) {_xla_outside_compilation = \220\22} : (tensor<i32>, tensor<3x1120x?xi32>) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1x1120x?xi32>) loc(#loc10)\0A %2 = \22tf.Reshape\22(%1#0, %arg3) {_xla_outside_compilation = \220\22} : (tensor<1x1120x?xi32>, tensor<2xi32>) -> tensor<1120x?xi32> loc(#loc11)\0A %3 = \22tf.Shape\22(%2) {_xla_outside_compilation = \220\22} : (tensor<1120x?xi32>) -> tensor<2xi32> loc(#loc12)\0A return %1#1, %1#2, %2, %3 : tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32> loc(#loc9)\0A } loc(#loc9)\0A} loc(#loc)\0A#loc = loc(unknown)\0A#loc4 = loc(\22Split:\22)\0A#loc5 = loc(\22split\22)\0A#loc6 = loc(\22Reshape_5\22)\0A#loc7 = loc(\22Shape:\22)\0A#loc8 = loc(\22Shape_4\22)\0A#loc10 = loc(fused[#loc4, #loc5])\0A#loc11 = loc(fused[#loc1, #loc6])\0A#loc12 = loc(fused[#loc7, #loc8])\0A", recv_key = "host_compute_channel_0_retvals", send_key = "host_compute_channel_0_args"}> : (tensor<3360x8xi32>, tensor<3xi32>, tensor<i32>, tensor<2xi32>) -> (tensor<1x1120x?xi32>, tensor<1x1120x?xi32>, tensor<1120x?xi32>, tensor<2xi32>)
return
}
}
)";
auto compilation_result =
CompileWithComputation(CreateTestMlirToHloArgs(kShapeInferenceModule));
EXPECT_TRUE(compilation_result.ok());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c2c26fb1-265a-4feb-9f4d-8261e92daeb4 | cpp | tensorflow/tensorflow | tf_dialect_to_executor | tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc | tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h"
#include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/logging_hooks.h"
#include "xla/tsl/lib/monitoring/counter.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::LogicalResult;
using mlir::ModuleOp;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
auto *tf_dialect_to_executor_dialect_status = tsl::monitoring::Counter<1>::New(
"/tensorflow/core/tf2xla/api/v2/tf_dialect_to_executor_dialect_status",
"Counts how often a successful export from TF Dialect to Executor Dialect "
"is",
"status");
constexpr char kExportSuccess[] = "success";
constexpr char kExportFailed[] = "failed";
namespace {
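// Assembles the pass pipeline that converts a TF dialect module into the
// tf_executor dialect in preparation for export back to a TF Graph.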
void AddTfDialectToExecutorPasses(OpPassManager &pm) {
pm.addPass(mlir::TF::CreateTFRegionControlFlowToFunctional());
pm.addNestedPass<FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateSplitIntoIslandPerOpPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateReplicateToIslandPass(
      /*legacy_graph_export=*/false));
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateReplicaIDToDeviceOrdinalPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateParallelExecuteToIslandsPass(
      /*legacy_graph_export=*/false));
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateLaunchToDeviceAttributePass(
      /*legacy_graph_export=*/false));
pm.addPass(
mlir::tf_executor::CreateTFExecutorUpdateControlDependenciesPass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUDevicePropagationPass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUColocateSplitsPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_convert_control_to_data_outputs_pass) {
bool composite_tpuexecute_side_effects =
tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_composite_tpuexecute_side_effects;
pm.addPass(
mlir::tf_executor::CreateTFExecutorConvertControlToDataOutputsPass(
composite_tpuexecute_side_effects));
}
pm.addPass(mlir::TF::CreateVerifySuitableForExportPass());
}
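// On failure, increments the failure counter, logs the error, and attaches
// the bridge error payload before returning; OK statuses pass through.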
tensorflow::Status RecordStatusIfError(absl::Status status) {
if (status.ok()) {
return absl::OkStatus();
}
tf_dialect_to_executor_dialect_status->GetCell(kExportFailed)->IncrementBy(1);
VLOG(1) << "Failed to export from TF Dialect to TF Executor Dialect. "
<< status;
constexpr char bridge_subcomponent[] =
"TFXLA_TF_FUNCTIONAL_TO_EXECUTOR_EXPORT_v2";
constexpr char kBridgeComponent[] = "TFXLABridge";
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
tsl::error_logging::Log(kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
}
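// Runs the TF-dialect-to-executor pipeline on `module`, dumping the module
// before and after when dumping is enabled, and updates the success/failure
// counters.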
tensorflow::Status ExportFromTensorflowDialectToExecutor(
ModuleOp module, llvm::StringRef module_name) {
PassManager tf_to_executor(module.getContext());
::tensorflow::applyTensorflowAndCLOptions(tf_to_executor);
tf_to_executor.enableVerifier();
AddTfDialectToExecutorPasses(tf_to_executor);
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), kDebugGroupMain,
"tfxla_bridge_v2_tfdialect_to_executor_before"),
module, llvm::StringRef(), &tf_to_executor);
if (VLOG_IS_ON(2) ||
DEBUG_DATA_DUMPER()->ShouldDump(
module_name.str(), kDebugGroupBridgePhase1ExecutorExport)) {
internal::EnablePassIRPrinting(
tf_to_executor, kDebugGroupBridgePhase1ExecutorExport, module_name);
}
}
LogicalResult result = tf_to_executor.run(module);
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), kDebugGroupMain,
"tfxla_bridge_v2_tfdialect_to_executor_after"),
module, llvm::StringRef(), &tf_to_executor);
}
if (result.failed()) {
return RecordStatusIfError(
absl::InternalError("Failed to export from TF Dialect to TF Executor "
"Dialect. Read LLVM Pipeline Error"));
}
tf_dialect_to_executor_dialect_status->GetCell(kExportSuccess)
->IncrementBy(1);
return absl::OkStatus();
}
mlir::PassPipelineRegistration<> tf_dialect_to_executor_pipeline(
"tf-dialect-to-executor-v2",
"Run passes to convert from TF Dialect to Executor in preparation for "
"exporting module back to TF Graph.",
AddTfDialectToExecutorPasses);
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.h"
#include <stdlib.h>
#include <cstdint>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/testing/utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/resource_loader.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
constexpr char kExportStreamzName[] =
"/tensorflow/core/tf2xla/api/v2/tf_dialect_to_executor_dialect_status";
constexpr char kExportSuccess[] = "success";
constexpr char kExportFailed[] = "failed";
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::tf2xla::v2::testing::TestDataPath;
size_t CountSubstring(absl::string_view str, absl::string_view substr) {
size_t count = 0;
size_t idx = str.find(substr);
while (idx != std::string::npos) {
count++;
idx = str.find(substr, idx + 1);
}
return count;
}
class TensorflowDialectToExecutorTest : public ::testing::Test {
public:
TensorflowDialectToExecutorTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TensorflowDialectToExecutorTest, ConvertsToExecutor) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_));
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 0);
}
TEST_F(TensorflowDialectToExecutorTest, ErrorsWhenCannotConvert) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("invalid_executor.mlir"));
EXPECT_FALSE(ExportFromTensorflowDialectToExecutor(*mlir_module_).ok());
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 0);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 1);
}
TEST_F(TensorflowDialectToExecutorTest, PrunesDeadOps) {
CellReader<int64_t> compilation_status(kExportStreamzName);
TF_ASSERT_OK(CreateMlirModule("func_with_dead_ops.mlir"));
TF_EXPECT_OK(ExportFromTensorflowDialectToExecutor(*mlir_module_));
std::string module_dump;
llvm::raw_string_ostream raw_stream(module_dump);
mlir_module_->print(raw_stream);
EXPECT_EQ(compilation_status.Delta(kExportSuccess), 1);
EXPECT_EQ(compilation_status.Delta(kExportFailed), 0);
EXPECT_EQ(
CountSubstring(module_dump, "tf_executor.island wraps \"tf.Concat\""), 2);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21725ba1-c718-4024-a3ae-27f4d851549c | cpp | tensorflow/tensorflow | tf_executor_to_graph | tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc | tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/export_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/verify_suitable_for_graph_export.h"
#include "tensorflow/compiler/mlir/utils/name_utils.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::BlockArgument;
using mlir::Dialect;
using mlir::Operation;
using mlir::SymbolTable;
using mlir::Value;
using mlir::func::FuncOp;
using tsl::StatusOr;
namespace {
constexpr char kDeviceAttr[] = "tf.device";
constexpr char kResourceArgUniqueIdAttr[] = "tf._resource_arg_unique_id";
constexpr char kEntryFuncAttr[] = "tf.entry_function";
constexpr char kAliasingAttr[] = "tf.aliasing_output";
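// Name mapper that legalizes every generated name so it is a valid TF node
// name.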
class LegalizedOpOrValLocNameMapper : public OpOrArgLocNameMapper {
private:
std::string GetName(OpOrVal op_or_val) override {
std::string name = OpOrArgLocNameMapper::GetName(op_or_val);
assert(!name.empty() && "expected non-empty name");
mlir::LegalizeNodeName(name);
return name;
}
};
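// Returns the op wrapped by an island, or the op itself if it is not an
// island.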
Operation* GetIslandInnerOpOrSelf(mlir::Operation* op) {
auto island = llvm::dyn_cast<mlir::tf_executor::IslandOp>(op);
if (island) return &island.GetBody().front();
return op;
}
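// Converts a tf_executor dialect module (or a single function) into a
// tensorflow::Graph and its associated function library.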
class Exporter {
public:
static Status Convert(mlir::ModuleOp module, const GraphExportConfig& configs,
std::unique_ptr<Graph>* graph,
FunctionLibraryDefinition* flib_def,
absl::flat_hash_set<Node*>* control_ret_nodes);
static Status ConvertLibFunction(
const GraphExportConfig& configs, const Dialect* tf_dialect,
const SymbolTable& symbol_table, FuncOp function,
FunctionLibraryDefinition* flib_def,
llvm::SmallDenseSet<FuncOp>& visited_functions);
static absl::StatusOr<std::unique_ptr<Graph>> Convert(
const GraphExportConfig& configs, const Dialect* tf_dialect,
const SymbolTable& symbol_table, FuncOp function,
FunctionLibraryDefinition* flib_def,
llvm::SmallDenseSet<FuncOp>& visited_functions,
absl::flat_hash_set<Node*>* control_ret_nodes);
private:
explicit Exporter(const GraphExportConfig* configs, Graph* graph,
const Dialect* tf_dialect, const SymbolTable* symbol_table)
: configs_(*configs),
graph_(graph),
tf_dialect_(tf_dialect),
symbol_table_(*symbol_table) {
graph_->ToGraphDef(&graphdef_);
}
Status AddArgumentNode(BlockArgument arg, unsigned index,
llvm::StringRef name);
Status AddFetchNode(FuncOp function, mlir::tf_executor::FetchOp fetch,
llvm::ArrayRef<llvm::StringRef> names);
Status AddInstructionNode(Operation* inst);
void UseOriginalFunctionNames(NodeDef& node_def);
Status AddEdge(Operation* inst);
absl::StatusOr<std::unique_ptr<NodeDef>> GetArgumentNode(
BlockArgument arg, unsigned index, llvm::StringRef name);
absl::StatusOr<std::unique_ptr<NodeDef>> GetReturnNode(FuncOp function,
Value operand,
unsigned index,
llvm::StringRef name);
Status GetControlRetNodes(mlir::tf_executor::FetchOp fetch,
absl::flat_hash_set<Node*>* control_ret_nodes);
Status AddEdgeBetweenNodes(Value src, Node* dst_node, unsigned dst_index);
const GraphExportConfig& configs_;
Graph* graph_;
GraphDef graphdef_;
LegalizedOpOrValLocNameMapper op_to_name_;
absl::flat_hash_map<Operation*, Node*> nodes_;
llvm::DenseMap<BlockArgument, Node*> args_;
typedef absl::InlinedVector<Node*, 4> NodeVector;
absl::flat_hash_map<Operation*, NodeVector> returns_;
const mlir::Dialect* tf_dialect_;
const SymbolTable& symbol_table_;
};
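// Prefers the original TF function name attribute when the export config
// requests it; otherwise uses the MLIR function's symbol name.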
std::string FindFunctionName(const GraphExportConfig& configs, FuncOp func) {
if (auto original_func_name =
func->getAttrOfType<mlir::StringAttr>("tf._original_func_name");
configs.export_original_tf_func_name && original_func_name) {
return original_func_name.str();
}
return func.getName().str();
}
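// Builds the _Arg NodeDef for a block argument, including its type, index,
// resource handle dtypes/shapes, device, and remaining argument attributes.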
absl::StatusOr<std::unique_ptr<NodeDef>> Exporter::GetArgumentNode(
BlockArgument arg, unsigned index, llvm::StringRef name) {
auto func = arg.getParentRegion()->getParentOfType<FuncOp>();
auto node_def = std::make_unique<NodeDef>();
if (!name.empty())
node_def->set_name(std::string(ParseTensorName(name.str()).node()));
else
node_def->set_name(
std::string(op_to_name_.GetUniqueName(func.getName().str())));
node_def->set_op(FunctionLibraryDefinition::kArgOp);
mlir::TensorType arg_type = mlir::cast<mlir::TensorType>(arg.getType());
if (auto resource_type =
mlir::dyn_cast<mlir::TF::ResourceType>(arg_type.getElementType())) {
llvm::ArrayRef<mlir::TensorType> subtypes = resource_type.getSubtypes();
if (!subtypes.empty()) {
AttrValue handle_dtypes_attr;
AttrValue handle_shapes_attr;
for (mlir::TensorType subtype : subtypes) {
DataType dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(subtype.getElementType(), &dtype));
handle_dtypes_attr.mutable_list()->add_type(dtype);
SetTensorShapeProto(subtype,
handle_shapes_attr.mutable_list()->add_shape());
}
(*node_def->mutable_attr())["_handle_dtypes"] = handle_dtypes_attr;
(*node_def->mutable_attr())["_handle_shapes"] = handle_shapes_attr;
}
}
TF_RETURN_IF_ERROR(
SetShapeAttribute("_output_shapes", arg_type, node_def->mutable_attr()));
DataType dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(arg_type.getElementType(), &dtype));
AttrValue type_attr;
type_attr.set_type(dtype);
(*node_def->mutable_attr())["T"] = type_attr;
AttrValue index_attr;
index_attr.set_i(index);
(*node_def->mutable_attr())["index"] = index_attr;
if (auto device_attr =
func.getArgAttrOfType<mlir::StringAttr>(index, kDeviceAttr))
*node_def->mutable_device() = device_attr.getValue().str();
llvm::ArrayRef<mlir::NamedAttribute> func_arg_i_attrs =
mlir::function_interface_impl::getArgAttrs(func, index);
absl::flat_hash_set<absl::string_view> attrs_to_ignore = {kDeviceAttr,
kAliasingAttr};
TF_RETURN_IF_ERROR(ConvertAttributes(func_arg_i_attrs, attrs_to_ignore,
                                       /*remove_ref_type=*/false,
node_def->mutable_attr()));
return node_def;
}
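// Builds a _Retval NodeDef for a returned value, forwarding type, index,
// device and result attributes.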
absl::StatusOr<std::unique_ptr<NodeDef>> Exporter::GetReturnNode(
FuncOp function, Value operand, unsigned index, llvm::StringRef name) {
auto node_def = std::make_unique<NodeDef>();
if (!name.empty())
node_def->set_name(std::string(ParseTensorName(name.str()).node()));
else
node_def->set_name(
std::string(op_to_name_.GetUniqueName(function.getName().str())));
node_def->set_op(FunctionLibraryDefinition::kRetOp);
DataType dtype;
TF_RETURN_IF_ERROR(ConvertToDataType(
mlir::cast<mlir::TensorType>(operand.getType()).getElementType(),
&dtype));
AttrValue type_attr;
type_attr.set_type(dtype);
(*node_def->mutable_attr())["T"] = type_attr;
AttrValue index_attr;
index_attr.set_i(index);
(*node_def->mutable_attr())["index"] = index_attr;
if (auto device_attr =
function.getResultAttrOfType<mlir::StringAttr>(index, kDeviceAttr))
*node_def->mutable_device() = device_attr.getValue().str();
llvm::ArrayRef<mlir::NamedAttribute> func_res_i_attrs =
function.getResultAttrs(index);
absl::flat_hash_set<absl::string_view> attrs_to_ignore = {kDeviceAttr};
TF_RETURN_IF_ERROR(ConvertAttributes(func_res_i_attrs, attrs_to_ignore,
                                       /*remove_ref_type=*/false,
node_def->mutable_attr()));
return node_def;
}
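// Adds a data or control edge from the node producing `src` to `dst_node` at
// input `dst_index`.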
Status Exporter::AddEdgeBetweenNodes(Value src, Node* dst_node,
unsigned dst_index) {
if (auto input_result = mlir::dyn_cast<mlir::OpResult>(src)) {
auto* input_inst = GetIslandInnerOpOrSelf(input_result.getOwner());
if (auto next_iter_source =
llvm::dyn_cast<mlir::tf_executor::NextIterationSourceOp>(
input_inst))
input_inst = next_iter_source.GetSink();
auto node_it = nodes_.find(input_inst);
TF_RET_CHECK(node_it != nodes_.end())
<< "Use of OpResult encountered before def!";
if (mlir::isa<mlir::tf_executor::ControlType>(input_result.getType())) {
graph_->AddControlEdge(node_it->second, dst_node,
                             /*allow_duplicates=*/true);
} else {
graph_->AddEdge(node_it->second, input_result.getResultNumber(), dst_node,
dst_index);
}
return absl::OkStatus();
}
auto input_arg = mlir::cast<BlockArgument>(src);
auto input_node_it = args_.find(input_arg);
TF_RET_CHECK(input_node_it != args_.end())
<< "Use of BlockArgument encounted before def!";
graph_->AddEdge(input_node_it->second, 0, dst_node, dst_index);
return absl::OkStatus();
}
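// Adds all incoming edges for `inst`, with special handling for fetch ops,
// NextIteration source/sink and islands.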
Status Exporter::AddEdge(Operation* inst) {
if (auto fetch = llvm::dyn_cast<mlir::tf_executor::FetchOp>(inst)) {
for (auto operand_and_idx : llvm::enumerate(fetch.getOperands())) {
Value operand = operand_and_idx.value();
if (mlir::isa<mlir::tf_executor::ControlType>(operand.getType())) break;
auto* dst_node = returns_[fetch][operand_and_idx.index()];
TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(operand, dst_node, 0));
}
return absl::OkStatus();
}
if (auto next_iter_sink =
llvm::dyn_cast<mlir::tf_executor::NextIterationSinkOp>(inst)) {
auto* dst_node = nodes_[inst];
TF_RETURN_IF_ERROR(
AddEdgeBetweenNodes(next_iter_sink.getInput(), dst_node, 0));
for (auto control_and_idx :
llvm::enumerate(next_iter_sink.getControlInputs()))
TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(control_and_idx.value(), dst_node,
control_and_idx.index() + 1));
return absl::OkStatus();
}
if (llvm::isa<mlir::tf_executor::NextIterationSourceOp>(inst)) {
assert(inst->getNumOperands() == 0);
return absl::OkStatus();
}
Operation* op = GetIslandInnerOpOrSelf(inst);
auto* dst_node = nodes_[op];
int operand_offset = 0;
if (auto island = llvm::dyn_cast<mlir::tf_executor::IslandOp>(inst)) {
for (auto operand_and_idx : llvm::enumerate(op->getOperands()))
TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(operand_and_idx.value(), dst_node,
operand_and_idx.index()));
operand_offset = op->getNumOperands();
}
for (auto operand_and_idx : llvm::enumerate(inst->getOperands()))
TF_RETURN_IF_ERROR(
AddEdgeBetweenNodes(operand_and_idx.value(), dst_node,
operand_and_idx.index() + operand_offset));
return absl::OkStatus();
}
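// Rewrites function references in the NodeDef (op name and function-valued
// attributes) back to their original TF function names when the config
// requests it.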
void Exporter::UseOriginalFunctionNames(NodeDef& node_def) {
if (!configs_.export_original_tf_func_name) return;
auto& attrs = *node_def.mutable_attr();
auto try_use_original_func_name = [this](std::string* name) {
if (auto func = symbol_table_.lookup<FuncOp>(*name)) {
if (auto original_func_name =
func->getAttrOfType<mlir::StringAttr>("tf._original_func_name")) {
*name = original_func_name.str();
}
}
};
try_use_original_func_name(node_def.mutable_op());
for (auto& iter : attrs) {
auto& attr = iter.second;
if (attr.has_func()) {
try_use_original_func_name(attr.mutable_func()->mutable_name());
} else if (attr.has_list()) {
for (auto& func_attr : *attr.mutable_list()->mutable_func()) {
try_use_original_func_name(func_attr.mutable_name());
}
}
}
}
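// Converts a TF dialect op into a NodeDef and inserts the resulting node into
// the graph.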
Status Exporter::AddInstructionNode(Operation* inst) {
std::unique_ptr<NodeDef> node_def;
int graph_hash_value = graph_regularization::ComputeHash(graphdef_);
auto name = op_to_name_.GetUniqueName(inst, graph_hash_value);
TF_ASSIGN_OR_RETURN(node_def,
ConvertTFDialectOpToNodeDef(
                          inst, name, /*ignore_unregistered_attrs=*/false));
UseOriginalFunctionNames(*node_def);
TF_ASSIGN_OR_RETURN(Node * node, graph_->AddNode(std::move(*node_def)));
DCHECK(node != nullptr);
nodes_[inst] = node;
return absl::OkStatus();
}
bool IsEntryFunctionArg(BlockArgument arg) {
return arg.getParentRegion()->getParentOfType<FuncOp>().getName() == "main";
}
Status Exporter::AddArgumentNode(BlockArgument arg, unsigned index,
llvm::StringRef name) {
TF_ASSIGN_OR_RETURN(auto node_def, GetArgumentNode(arg, index, name));
TF_ASSIGN_OR_RETURN(Node * node, graph_->AddNode(std::move(*node_def)));
args_[arg] = node;
return absl::OkStatus();
}
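// Creates one _Retval node per data operand of the fetch op, using the
// provided output names when present.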
Status Exporter::AddFetchNode(FuncOp function, mlir::tf_executor::FetchOp fetch,
llvm::ArrayRef<llvm::StringRef> names) {
auto& return_nodes = returns_[fetch];
for (auto operand_and_idx : llvm::enumerate(fetch.getOperands())) {
if (mlir::isa<mlir::tf_executor::ControlType>(
operand_and_idx.value().getType()))
break;
TF_ASSIGN_OR_RETURN(
auto node_def,
GetReturnNode(function, operand_and_idx.value(),
operand_and_idx.index(),
names.empty() ? "" : names[operand_and_idx.index()]));
TF_ASSIGN_OR_RETURN(Node * node, graph_->AddNode(std::move(*node_def)));
return_nodes.push_back(node);
}
return absl::OkStatus();
}
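// Collects the graph nodes corresponding to the control operands of the fetch
// op.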
Status Exporter::GetControlRetNodes(
mlir::tf_executor::FetchOp fetch,
absl::flat_hash_set<Node*>* control_ret_nodes) {
for (Value fetch_operand : fetch.getOperands()) {
if (mlir::isa<mlir::tf_executor::ControlType>(fetch_operand.getType())) {
Operation* defining_op =
GetIslandInnerOpOrSelf(fetch_operand.getDefiningOp());
auto node_it = nodes_.find(defining_op);
TF_RET_CHECK(node_it != nodes_.end());
control_ret_nodes->insert(node_it->second);
}
}
return absl::OkStatus();
}
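// Rebuilds every NodeDef's input list from the graph's in-edges so the
// recorded input names match the final node names.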
void FixupInputNamesFromEdges(Graph* graph) {
for (Node* n : graph->nodes()) {
if (n->IsOp()) {
NodeDef* node_def = n->mutable_def();
node_def->clear_input();
for (const Edge* e : n->in_edges()) {
Node* src = e->src();
if (src->IsOp()) {
Graph::AddInput(node_def, src->name(), e->src_output());
}
}
}
}
}
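// Converts one function body (a tf_executor.graph) into a tensorflow::Graph,
// exporting called library functions along the way.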
absl::StatusOr<std::unique_ptr<Graph>> Exporter::Convert(
const GraphExportConfig& configs, const Dialect* tf_dialect,
const SymbolTable& symbol_table, FuncOp function,
FunctionLibraryDefinition* flib_def,
llvm::SmallDenseSet<FuncOp>& visited_functions,
absl::flat_hash_set<Node*>* control_ret_nodes) {
mlir::Block& block = function.front();
llvm::SmallVector<llvm::StringRef, 2> input_names;
llvm::SmallVector<llvm::StringRef, 2> output_names;
llvm::SmallVector<llvm::StringRef, 2> unique_output_names;
auto dict_attr =
function->getAttrOfType<mlir::DictionaryAttr>(kEntryFuncAttr);
if (dict_attr) {
TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("inputs")))
<< "inputs missing in entry function attribute";
TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("outputs")))
<< "outputs missing in entry function attribute";
mlir::cast<mlir::StringAttr>(dict_attr.get("inputs"))
.getValue()
.split(input_names, ',', -1, false);
mlir::cast<mlir::StringAttr>(dict_attr.get("outputs"))
.getValue()
.split(output_names, ',', -1, false);
}
auto graph = std::make_unique<Graph>(OpRegistry::Global());
VersionDef versions;
auto module = function->getParentOfType<mlir::ModuleOp>();
if (mlir::succeeded(ExtractTfVersions(module, &versions))) {
graph->set_versions(versions);
}
Exporter exporter(&configs, graph.get(), tf_dialect, &symbol_table);
auto graph_op = llvm::cast<mlir::tf_executor::GraphOp>(block.front());
if (!output_names.empty()) {
const int num_data_results = graph_op.getNumResults();
const int64_t output_names_size = output_names.size();
TF_RET_CHECK(output_names_size == num_data_results)
<< "output names (" << output_names.size()
<< ") != terminator operands (" << num_data_results << ")";
llvm::DenseMap<Operation*, llvm::StringRef> output_op_to_name;
llvm::StringMap<Operation*> name_to_op;
for (const auto& it : llvm::enumerate(graph_op.GetFetch().getOperands())) {
const int64_t index = it.index();
if (index >= num_data_results) break;
std::string name(output_names[index]);
auto tensor_id = ParseTensorName(name);
std::string tensor_id_node(tensor_id.node());
assert(!tensor_id_node.empty() && "expected non-empty name");
mlir::LegalizeNodeName(tensor_id_node);
unique_output_names.push_back(
exporter.op_to_name_.GetUniqueName(tensor_id_node));
}
}
if (!input_names.empty()) {
TF_RET_CHECK(input_names.size() == block.getNumArguments());
for (const auto& it : llvm::enumerate(function.getArguments())) {
std::string name(input_names[it.index()]);
assert(!name.empty() && "expected non-empty name");
mlir::LegalizeNodeName(name);
auto tensor_id = ParseTensorName(name);
TF_RET_CHECK(tensor_id.index() == 0)
<< "input port designation not supported";
(void)exporter.op_to_name_.GetUniqueName(name);
}
}
for (auto it : llvm::enumerate(block.getArguments())) {
int index = it.index();
auto arg = it.value();
mlir::Type type = arg.getType();
if (!mlir::isa<mlir::TensorType>(type)) {
return errors::InvalidArgument(
"FuncOps arguments must have tensor types. Found ",
mlir::debugString(type), " in function ", function.getName().str());
}
TF_RETURN_IF_ERROR(exporter.AddArgumentNode(
arg, index, !input_names.empty() ? input_names[index] : ""));
}
auto convert_called_function = [&](llvm::StringRef name) {
auto func = symbol_table.lookup<FuncOp>(name);
if (func != nullptr) {
TF_RETURN_IF_ERROR(ConvertLibFunction(configs, tf_dialect, symbol_table,
func, flib_def, visited_functions));
TF_RETURN_IF_ERROR(graph->mutable_flib_def()->AddLibrary(*flib_def));
}
return absl::OkStatus();
};
for (Operation& inst : graph_op.GetBody()) {
for (auto type : inst.getResultTypes())
if (!mlir::isa<mlir::TensorType, mlir::tf_executor::ControlType,
mlir::tf_executor::TokenType>(type))
return errors::InvalidArgument(
"Values must be of tensor type, TensorFlow control type, or "
"TensorFlow token type. Found ",
mlir::debugString(type));
if (llvm::isa<mlir::tf_executor::NextIterationSourceOp>(inst)) {
continue;
} else if (auto fetch = llvm::dyn_cast<mlir::tf_executor::FetchOp>(inst)) {
TF_RETURN_IF_ERROR(
exporter.AddFetchNode(function, fetch, unique_output_names));
} else if (auto island =
llvm::dyn_cast<mlir::tf_executor::IslandOp>(inst)) {
Operation& inner_op = island.GetBody().front();
auto op_name = GetTensorFlowOpName(inner_op.getName().getStringRef());
if (llvm::isa<FuncOp>(inner_op) && op_name.ok()) {
TF_RETURN_IF_ERROR(convert_called_function(op_name.value().str()));
}
if (IsLegacyCallInstruction(&inner_op)) {
TF_RETURN_IF_ERROR(convert_called_function(
inner_op.getAttrOfType<mlir::SymbolRefAttr>("f")
.getLeafReference()
.getValue()));
}
TF_RETURN_IF_ERROR(exporter.AddInstructionNode(&inner_op));
} else {
TF_RETURN_IF_ERROR(exporter.AddInstructionNode(&inst));
}
}
for (Operation& inst : graph_op.GetBody()) {
TF_RETURN_IF_ERROR(exporter.AddEdge(&inst));
}
FixupSourceAndSinkEdges(graph.get());
FixupInputNamesFromEdges(graph.get());
TF_RETURN_IF_ERROR(
exporter.GetControlRetNodes(graph_op.GetFetch(), control_ret_nodes));
return graph;
}
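// Converts a function into a FunctionDef, handles its gradient/stateful
// attributes, and adds it to the function library, recursing into functions it
// references.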
Status Exporter::ConvertLibFunction(
const GraphExportConfig& configs, const Dialect* tf_dialect,
const SymbolTable& symbol_table, FuncOp function,
FunctionLibraryDefinition* flib_def,
llvm::SmallDenseSet<FuncOp>& visited_functions) {
bool is_new_function = visited_functions.insert(function).second;
if (!is_new_function) return absl::OkStatus();
auto function_name = FindFunctionName(configs, function);
absl::flat_hash_set<Node*> control_ret_nodes;
TF_ASSIGN_OR_RETURN(
auto sub_graph,
Exporter::Convert(configs, tf_dialect, symbol_table, function, flib_def,
visited_functions, &control_ret_nodes));
const auto control_ret = [&](const Node* n) -> std::optional<string> {
return control_ret_nodes.contains(n) ? std::make_optional<string>(n->name())
: std::nullopt;
};
FunctionDef func_def;
TF_RETURN_IF_ERROR(
GraphToFunctionDef(*sub_graph, function_name, control_ret, &func_def));
auto grad_string = mlir::TF::TensorFlowDialect::GetGradientAttrName();
if (auto attr =
function->getAttrOfType<mlir::FlatSymbolRefAttr>(grad_string)) {
auto grad_func = symbol_table.lookup<FuncOp>(attr.getValue());
TF_RETURN_IF_ERROR(ConvertLibFunction(configs, tf_dialect, symbol_table,
grad_func, flib_def,
visited_functions));
GradientDef grad;
grad.set_function_name(function_name);
grad.set_gradient_func(grad_func.getName().str());
TF_RETURN_IF_ERROR(flib_def->AddGradientDef(grad));
}
auto stateful_string = mlir::TF::TensorFlowDialect::GetStatefulAttrName();
if (auto attr = function->getAttrOfType<mlir::UnitAttr>(stateful_string)) {
func_def.mutable_signature()->set_is_stateful(true);
}
absl::flat_hash_set<absl::string_view> attrs_to_ignore = {
grad_string.data(), stateful_string.data(), kEntryFuncAttr};
llvm::SmallVector<mlir::NamedAttribute, 8> funcAttrs(
function->getDialectAttrs());
TF_RETURN_IF_ERROR(ConvertAttributes(funcAttrs, attrs_to_ignore,
                                       /*remove_ref_type=*/false,
func_def.mutable_attr()));
for (int i = 0, e = function.getNumArguments(); i < e; ++i) {
if (auto resource_arg_unique_id_attr =
function.getArgAttrOfType<mlir::IntegerAttr>(
i, kResourceArgUniqueIdAttr)) {
(*func_def.mutable_resource_arg_unique_id())[i] =
resource_arg_unique_id_attr.getInt();
}
}
return flib_def->AddFunctionDef(std::move(func_def));
}
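// Converts the whole module: the entry function `main` becomes the Graph
// (unless it is exported to the library) and all other functions become
// library functions.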
Status Exporter::Convert(mlir::ModuleOp module,
const GraphExportConfig& configs,
std::unique_ptr<Graph>* graph,
FunctionLibraryDefinition* flib_def,
absl::flat_hash_set<Node*>* control_ret_nodes) {
mlir::StringAttr entry_func_id =
mlir::StringAttr::get(module.getContext(), "main");
std::optional<FuncOp> entry_func;
FunctionLibraryDefinition temp_flib_def(OpRegistry::Global(),
FunctionDefLibrary());
llvm::SmallDenseSet<FuncOp> visited_functions;
auto tf_dialect = module.getContext()->getLoadedDialect("tf");
SymbolTable symbol_table(module);
for (auto function : module.getOps<FuncOp>()) {
if (function.isExternal())
return errors::FailedPrecondition("External functions not supported");
if (function.getName() == entry_func_id &&
!configs.export_entry_func_to_flib) {
entry_func.emplace(function);
} else {
TF_RETURN_IF_ERROR(ConvertLibFunction(configs, tf_dialect, symbol_table,
function, &temp_flib_def,
visited_functions));
}
}
if (flib_def != nullptr) {
TF_RETURN_IF_ERROR(flib_def->AddLibrary(temp_flib_def));
}
if (!configs.export_entry_func_to_flib) {
if (!entry_func.has_value())
return errors::FailedPrecondition(
"entry function `main` must be present");
TF_ASSIGN_OR_RETURN(
*graph, Exporter::Convert(configs, tf_dialect, symbol_table,
entry_func.value(), &temp_flib_def,
visited_functions, control_ret_nodes));
TF_RETURN_IF_ERROR(
graph->get()->mutable_flib_def()->AddLibrary(temp_flib_def));
} else if (graph != nullptr) {
TF_RETURN_IF_ERROR(
graph->get()->mutable_flib_def()->AddLibrary(std::move(*flib_def)));
}
return absl::OkStatus();
}
}
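// Verifies the module is exportable and runs the exporter, surfacing MLIR
// diagnostics through the returned status.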
Status ConvertTfExecutorToGraph(mlir::ModuleOp module,
const GraphExportConfig& configs,
std::unique_ptr<Graph>* graph,
FunctionLibraryDefinition* flib_def,
absl::flat_hash_set<Node*>* control_ret_nodes) {
mlir::StatusScopedDiagnosticHandler sh(module.getContext());
if (failed(VerifyExportSuitable(module))) return sh.ConsumeStatus();
return sh.Combine(
Exporter::Convert(module, configs, graph, flib_def, control_ret_nodes));
}
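// Exports a single MLIR function into a temporary function library and copies
// the resulting FunctionDef out by name.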
absl::Status ConvertMlirFunctionToFunctionLibraryDef(
FuncOp func, const GraphExportConfig& configs, FunctionDef* function_def) {
Dialect* tf_dialect = func.getContext()->getLoadedDialect("tf");
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
llvm::SmallDenseSet<FuncOp> visited_functions;
SymbolTable symbol_table(func->getParentOfType<mlir::ModuleOp>());
TF_RETURN_IF_ERROR(Exporter::ConvertLibFunction(
configs, tf_dialect, symbol_table, func, &flib_def, visited_functions));
auto name = FindFunctionName(configs, func);
const FunctionDef* func_def = flib_def.Find(name);
if (func_def != nullptr) {
*function_def = *func_def;
return absl::OkStatus();
}
return absl::InvalidArgumentError(
absl::StrCat("Function '", name,
"' couldn't be found in the FunctionDefLibrary after "
"converting from MLIR"));
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include <stdlib.h>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tf2xla/api/v2/testdata/");
}
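// Test fixture that parses MLIR modules and reads expected GraphDefs from the
// testdata directory.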
class TfExecutorToGraphTest : public ::testing::Test {
public:
TfExecutorToGraphTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::StatusOr<OwningOpRef<mlir::ModuleOp>> CreateMlirModule(
std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
return mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
}
GraphDef CreateGraphDef(std::string graphdef_filename) {
std::string file_path = TestDataPath() + graphdef_filename;
std::string contents;
GraphDef graph_def;
auto status = riegeli::ReadAll(riegeli::FdReader(file_path), contents);
if (!status.ok()) {
return graph_def;
}
tsl::protobuf::TextFormat::ParseFromString(contents, &graph_def);
return graph_def;
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TfExecutorToGraphTest, ConvertMlirToGraphSucceeds) {
auto valid_executor_module = CreateMlirModule("valid_executor.mlir");
GraphExportConfig confs;
absl::flat_hash_set<Node*> control_ret_nodes;
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
auto result_graph = std::make_unique<Graph>(flib_def);
TF_ASSERT_OK(ConvertTfExecutorToGraph(valid_executor_module.value().get(),
confs, &result_graph, &flib_def,
&control_ret_nodes));
GraphDef result_graphdef;
result_graph->ToGraphDef(&result_graphdef);
GraphDef expected_graphdef = CreateGraphDef("valid_graph.txt");
EXPECT_EQ(result_graphdef.DebugString(), expected_graphdef.DebugString());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f4b95792-084d-430f-879c-146a3b0e9a8b | cpp | tensorflow/tensorflow | convert_tf_quant_types | tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc | tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types_test.cc | #include <memory>
#include <string>
#include <utility>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Value.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/lib/monitoring/counter.h"
namespace mlir::quant::stablehlo {
namespace {
using quant::tensorflow::GetDenseAttrFromTensorProtoAttr;
using quant::tensorflow::GetIntTypeFromTFQint;
using quant::tensorflow::IsTFQintType;
using quant::tensorflow::IsTFUniformQuantizedOp;
#define GEN_PASS_DEF_CONVERTTFQUANTTYPES
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h.inc"
auto *mlir_tf_quant_op_count = ::tensorflow::monitoring::Counter<1>::New(
    "/tensorflow/core/tf2xla/tf_quant_op_count",
    "Counts the number of ops that have qint types",
    "op_name");
bool IsIllegalType(Type type) {
return IsTFQintType(getElementTypeOrSelf(type));
}
Type ToLegalType(Type type) {
if (IsTFQintType(type)) return GetIntTypeFromTFQint(type);
if (auto shaped = mlir::dyn_cast<ShapedType>(type)) {
Type elem = shaped.getElementType();
if (IsTFQintType(elem)) return shaped.clone(ToLegalType(elem));
}
return type;
}
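// Matchers for tf.Cast ops converting between a qint type and its
// corresponding standard integer type.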
bool IsQintToIntCast(Operation *op) {
auto cast_op = llvm::dyn_cast<TF::CastOp>(op);
return cast_op && IsIllegalType(cast_op.getX().getType()) &&
ToLegalType(cast_op.getX().getType()) == cast_op.getY().getType();
}
bool IsIntToQintCast(Operation *op) {
auto cast_op = llvm::dyn_cast<TF::CastOp>(op);
return cast_op && IsIllegalType(cast_op.getY().getType()) &&
ToLegalType(cast_op.getY().getType()) == cast_op.getX().getType();
}
bool IsQintValueQintToIntCast(Value v) {
if (!IsIllegalType(v.getType())) {
return true;
}
if (v.getUsers().empty()) {
return false;
}
return llvm::all_of(v.getUsers(), [&](OpOperand operand) {
return IsQintToIntCast(operand.getOwner());
});
}
bool IsQintValueDefinedByIntToQintCast(Value v) {
if (!IsIllegalType(v.getType())) {
return true;
}
if (!v.getDefiningOp() || !llvm::isa<TF::CastOp>(v.getDefiningOp())) {
return false;
}
return IsIntToQintCast(v.getDefiningOp());
}
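// A TF uniform quantized op stays legal only when every qint operand is
// produced by an int->qint cast and every qint result is consumed solely by
// qint->int casts.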
bool IsTFUniformQuantizedOpLegal(Operation *op) {
return op && llvm::all_of(op->getResults(), IsQintValueQintToIntCast) &&
llvm::all_of(op->getOperands(), IsQintValueDefinedByIntToQintCast);
}
bool IsCastOpLegal(TF::CastOp cast_op) {
if (IsIllegalType(cast_op.getSrcT()) && IsIllegalType(cast_op.getDstT())) {
return false;
}
if (IsIllegalType(cast_op.getSrcT()) &&
!(cast_op.getX().getDefiningOp() &&
IsTFUniformQuantizedOp(cast_op.getX().getDefiningOp()))) {
return false;
}
if (IsIllegalType(cast_op.getDstT()) &&
!IsTFUniformQuantizedOp(*cast_op.getY().getUsers().begin())) {
return false;
}
return true;
}
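// Type converter that rewrites TF qint types to standard integer types and
// leaves all other types unchanged.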
class TFQuantTypeConverter : public TypeConverter {
public:
TFQuantTypeConverter() {
addConversion([](Type type) -> Type {
return IsIllegalType(type) ? ToLegalType(type) : type;
});
}
};
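// Conversion target: uniform quantized ops, casts and constants get dedicated
// legality checks; any other op is legal once its types are converted.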
class TFQuantTypeConversionTarget : public ConversionTarget {
public:
explicit TFQuantTypeConversionTarget(MLIRContext &ctx,
TFQuantTypeConverter &converter)
: ConversionTarget(ctx), converter_(converter) {
markUnknownOpDynamicallyLegal([this](Operation *op) {
if (IsTFUniformQuantizedOp(op)) {
return IsTFUniformQuantizedOpLegal(op);
} else if (auto cast_op = llvm::dyn_cast<TF::CastOp>(op)) {
return IsCastOpLegal(cast_op);
} else if (auto const_op = llvm::dyn_cast<TF::ConstOp>(op)) {
return !IsIllegalType(const_op.getOutput().getType());
}
if (auto func = dyn_cast<func::FuncOp>(op)) {
if (!converter_.isSignatureLegal(func.getFunctionType())) return false;
}
return converter_.isLegal(op);
});
}
private:
TFQuantTypeConverter &converter_;
};
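// Generic pattern that recreates an op (other than uniform quantized ops and
// constants) with converted result and region types, and records the op in the
// qint op count metric.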
class TFQuantTypePattern : public ConversionPattern {
public:
TFQuantTypePattern(MLIRContext *ctx, TypeConverter &converter)
: ConversionPattern(converter, MatchAnyOpTypeTag(), 1, ctx) {}
LogicalResult matchAndRewrite(
Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
if (IsTFUniformQuantizedOp(op) || llvm::isa<TF::ConstOp>(op)) {
return failure();
}
llvm::SmallVector<Type, 4> new_results;
if (failed(getTypeConverter()->convertTypes(op->getResultTypes(),
new_results)))
return failure();
OperationState state(op->getLoc(), op->getName().getStringRef(), operands,
new_results, op->getAttrs(), op->getSuccessors());
for (Region ®ion : op->getRegions()) {
auto new_region = std::make_unique<Region>(op);
rewriter.inlineRegionBefore(region, *new_region, new_region->begin());
if (failed(rewriter.convertRegionTypes(new_region.get(),
*getTypeConverter()))) {
return failure();
}
state.addRegion(std::move(new_region));
}
rewriter.replaceOp(op, rewriter.create(state)->getResults());
mlir_tf_quant_op_count->GetCell(std::string(op->getName().getStringRef()))
->IncrementBy(1);
return success();
}
};
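// Pattern that keeps uniform quantized ops on qint types by inserting
// int->qint casts on their operands and qint->int casts on their results.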
class TFUniformQuantizedOpsPattern : public ConversionPattern {
public:
explicit TFUniformQuantizedOpsPattern(MLIRContext *ctx)
: ConversionPattern(MatchAnyOpTypeTag(), 1, ctx) {}
LogicalResult matchAndRewrite(
Operation *op, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
if (!IsTFUniformQuantizedOp(op)) {
return failure();
}
llvm::SmallVector<Value, 4> new_operands;
for (int i = 0; i < operands.size(); ++i) {
Type orig_op_type = op->getOperandTypes()[i];
if (IsIllegalType(orig_op_type) &&
!IsQintValueDefinedByIntToQintCast(op->getOperand(i))) {
new_operands.push_back(rewriter.create<TF::CastOp>(
op->getLoc(), orig_op_type, operands[i]));
} else {
new_operands.push_back(operands[i]);
}
}
OperationState state(op->getLoc(), op->getName().getStringRef(),
new_operands, op->getResultTypes(), op->getAttrs(),
op->getSuccessors());
Operation *new_op = rewriter.create(state);
llvm::SmallVector<Value, 4> new_results = new_op->getResults();
for (int i = 0; i < new_results.size(); ++i) {
Value &result = new_results[i];
if (IsIllegalType(result.getType()) &&
!IsQintValueQintToIntCast(op->getResult(i))) {
result = rewriter.create<TF::CastOp>(
op->getLoc(), ToLegalType(result.getType()), result);
}
op->getResult(i).replaceUsesWithIf(
new_op->getResult(i), [](OpOperand &operand) {
return IsQintToIntCast(operand.getOwner());
});
}
rewriter.replaceOp(op, new_results);
return success();
}
};
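// Pattern that rewrites qint tf.Const ops into integer constants by decoding
// the mangled TensorProto attribute.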
class TFConstOpQuantToIntPattern : public OpConversionPattern<TF::ConstOp> {
public:
using OpConversionPattern::OpConversionPattern;
LogicalResult matchAndRewrite(
TF::ConstOp op, TF::ConstOpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
if (!IsIllegalType(op.getOutput().getType())) return failure();
TF::TensorProtoAttr tensor_proto_attr;
if (!matchPattern(op.getOperation(), m_Constant(&tensor_proto_attr))) {
return rewriter.notifyMatchFailure(op, "operand must be constant.");
}
auto dense_attr_or = GetDenseAttrFromTensorProtoAttr(
tensor_proto_attr.getValue(),
mlir::dyn_cast<TensorType>(ToLegalType(op.getOutput().getType())));
if (failed(dense_attr_or)) {
op->emitError("failed to get DenseElementAttr.");
return failure();
}
rewriter.replaceOpWithNewOp<TF::ConstOp>(
op, ToLegalType(op.getOutput().getType()), *dense_attr_or);
return success();
}
};
struct ConvertTFQuantTypes
: public impl::ConvertTFQuantTypesBase<ConvertTFQuantTypes> {
void runOnOperation() override;
};
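// Applies a full conversion so that no TF qint types remain in the function.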
void ConvertTFQuantTypes::runOnOperation() {
TFQuantTypeConverter converter;
RewritePatternSet patterns(&getContext());
patterns.add<TFQuantTypePattern>(&getContext(), converter);
patterns.add<TFConstOpQuantToIntPattern, TFUniformQuantizedOpsPattern>(
&getContext());
populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
converter);
TFQuantTypeConversionTarget target(getContext(), converter);
if (failed(applyFullConversion(getOperation(), target, std::move(patterns))))
return signalPassFailure();
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertTFQuantTypesPass() {
return std::make_unique<ConvertTFQuantTypes>();
}
} | #include <cstdint>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/passes.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::DialectRegistry;
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::tensorflow::monitoring::testing::CellReader;
using ::testing::Test;
static constexpr char kMetricsName[] =
"/tensorflow/core/tf2xla/tf_quant_op_count";
class LegalizeTfTypesTest : public Test {
protected:
void CreateModule(const char* module_string) {
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context_.appendDialectRegistry(mlir_registry);
TF_ASSERT_OK(
tensorflow::DeserializeMlirModule(module_string, &context_, &module_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<mlir::func::FuncOp>(
quant::stablehlo::CreateConvertTFQuantTypesPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(LegalizeTfTypesTest, RecordsStreamzQuantOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<3x3x!tf_type.qint8>, %arg1: tensor<3x3x!tf_type.qint8>) -> tensor<6x3x!tf_type.qint8> {
%axis = "tf.Const"() { value = dense<0> : tensor<i64> } : () -> tensor<i64>
%1 = "tf.ConcatV2"(%arg0, %arg1, %axis) : (tensor<3x3x!tf_type.qint8>, tensor<3x3x!tf_type.qint8>, tensor<i64>) -> tensor<6x3x!tf_type.qint8>
func.return %1 : tensor<6x3x!tf_type.qint8>
}
})";
CreateModule(kMlirModuleStr);
CellReader<int64_t> reader(kMetricsName);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(reader.Delta("tf.ConcatV2"), 1);
EXPECT_EQ(reader.Delta("func.return"), 1);
EXPECT_EQ(reader.Delta("func.func"), 0);
}
TEST_F(LegalizeTfTypesTest, RecordsStreamzNoQuantOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main(%arg0: tensor<3x3xf32>, %arg1: tensor<3x3xf32>) -> tensor<6x3xf32> {
%axis = "tf.Const"() { value = dense<0> : tensor<i64> } : () -> tensor<i64>
%1 = "tf.ConcatV2"(%arg0, %arg1, %axis) : (tensor<3x3xf32>, tensor<3x3xf32>, tensor<i64>) -> tensor<6x3xf32>
func.return %1 : tensor<6x3xf32>
}
})";
CreateModule(kMlirModuleStr);
CellReader<int64_t> reader(kMetricsName);
auto result = Run();
EXPECT_TRUE(result.succeeded());
EXPECT_EQ(reader.Delta("tf.ConcatV2"), 0);
EXPECT_EQ(reader.Delta("func.return"), 0);
EXPECT_EQ(reader.Delta("func.func"), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e49465f7-3466-44da-bff8-071558a62945 | cpp | tensorflow/tensorflow | tf_type_utils | tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.cc | tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include "absl/status/status.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
namespace mlir::quant::tensorflow {
bool IsTFQintType(const Type type) {
return mlir::isa<TF::Qint8Type, TF::Qint16Type, TF::Qint32Type,
TF::Quint8Type, TF::Quint16Type>(type);
}
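// Maps each TF qint/quint type to the signless or unsigned integer type of the
// same width; other types are returned unchanged.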
Type GetIntTypeFromTFQint(const Type type) {
return TypeSwitch<Type, Type>(type)
.Case<TF::Qint8Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 8); })
.Case<TF::Qint16Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 16); })
.Case<TF::Qint32Type>(
[&type](Type) { return IntegerType::get(type.getContext(), 32); })
.Case<TF::Quint8Type>([&type](Type) {
return IntegerType::get(type.getContext(), 8,
IntegerType::SignednessSemantics::Unsigned);
})
.Case<TF::Quint16Type>([&type](Type) {
return IntegerType::get(type.getContext(), 16,
IntegerType::SignednessSemantics::Unsigned);
})
.Default([&type](Type) { return type; });
}
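// Demangles a serialized TensorProto and converts DT_QINT8/DT_QINT32 contents
// into a DenseElementsAttr with i8/i32 element type; other dtypes fail.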
FailureOr<mlir::DenseElementsAttr> GetDenseAttrFromTensorProtoAttr(
const llvm::StringRef mangled_tensor_proto, TensorType tensor_type) {
::tensorflow::TensorProto tensor_proto;
absl::Status status = ::tensorflow::mangling_util::DemangleTensor(
mangled_tensor_proto, &tensor_proto);
if (!status.ok()) {
return failure();
}
::tensorflow::Tensor t;
if (!t.FromProto(tensor_proto)) {
return failure();
}
if (t.dtype() == ::tensorflow::DT_QINT8) {
const auto arr = t.flat<::tensorflow::qint8>();
return mlir::DenseElementsAttr::get(
tensor_type.clone(IntegerType::get(tensor_type.getContext(), 8)),
llvm::ArrayRef(arr.data(), arr.size()));
} else if (t.dtype() == ::tensorflow::DT_QINT32) {
const auto arr = t.flat<::tensorflow::qint32>();
return mlir::DenseElementsAttr::get(
tensor_type.clone(IntegerType::get(tensor_type.getContext(), 32)),
llvm::ArrayRef(arr.data(), arr.size()));
} else {
return failure();
}
}
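// Returns true for the TF UniformQuantize/Dequantize/Requantize and
// UniformQuantized* compute ops.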
bool IsTFUniformQuantizedOp(Operation *op) {
return llvm::isa<
TF::UniformDequantizeOp,
TF::UniformQuantizeOp,
TF::UniformQuantizedAddOp,
TF::UniformQuantizedClipByValueOp,
TF::UniformQuantizedConvolutionHybridOp,
TF::UniformQuantizedConvolutionOp,
TF::UniformQuantizedDotHybridOp,
TF::UniformQuantizedDotOp,
TF::UniformRequantizeOp
>(op);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.h"
#include <cstdint>
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/tsl/framework/numeric_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir::quant::tensorflow {
namespace {
std::string GetQint8Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT8, {2, 2});
tensor.matrix<tsl::qint8>()(0, 0) = tsl::qint8(1);
tensor.matrix<tsl::qint8>()(0, 1) = tsl::qint8(2);
tensor.matrix<tsl::qint8>()(1, 0) = tsl::qint8(3);
tensor.matrix<tsl::qint8>()(1, 1) = tsl::qint8(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::string GetQint16Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT16, {2, 2});
tensor.matrix<tsl::qint16>()(0, 0) = tsl::qint16(1);
tensor.matrix<tsl::qint16>()(0, 1) = tsl::qint16(2);
tensor.matrix<tsl::qint16>()(1, 0) = tsl::qint16(3);
tensor.matrix<tsl::qint16>()(1, 1) = tsl::qint16(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::string GetQint32Tensor() {
::tensorflow::Tensor tensor(::tensorflow::DT_QINT32, {2, 2});
tensor.matrix<tsl::qint32>()(0, 0) = tsl::qint32(1);
tensor.matrix<tsl::qint32>()(0, 1) = tsl::qint32(2);
tensor.matrix<tsl::qint32>()(1, 0) = tsl::qint32(3);
tensor.matrix<tsl::qint32>()(1, 1) = tsl::qint32(4);
::tensorflow::TensorProto tensor_proto;
tensor.AsProtoTensorContent(&tensor_proto);
return ::tensorflow::mangling_util::MangleTensor(tensor_proto);
}
std::unique_ptr<MLIRContext> CreateContext() {
auto context = std::make_unique<MLIRContext>();
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
context->getOrLoadDialect<tf_type::TFTypeDialect>();
context->getOrLoadDialect<quant::QuantDialect>();
context->getOrLoadDialect<mlir::mhlo::MhloDialect>();
context->getOrLoadDialect<sparse_tensor::SparseTensorDialect>();
return context;
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint8ToUQ8Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type = RankedTensorType::get(
{2, 2}, quant::UniformQuantizedType::get(
quant::QuantizationFlags::FlagValue::Signed,
IntegerType::get(context.get(), 8),
FloatType::getF32(context.get()), 3.0, 2, -128, 127));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int8_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int8_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int8_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int8_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int8_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint8ToInt8Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint8Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int8_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int8_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int8_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int8_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int8_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint32ToUQ32Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type = RankedTensorType::get(
{2, 2},
quant::UniformQuantizedType::get(
quant::QuantizationFlags::FlagValue::Signed,
IntegerType::get(context.get(), 32), FloatType::getF32(context.get()),
3.0, 2, -2147483648, 2147483647));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint32Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int32_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int32_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int32_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int32_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int32_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, Qint32ToInt32Succeeds) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32));
auto dense_attr =
GetDenseAttrFromTensorProtoAttr(GetQint32Tensor(), result_tensor_type);
ASSERT_TRUE(succeeded(dense_attr));
EXPECT_THAT(dense_attr->getValues<int32_t>(), testing::SizeIs(4));
EXPECT_EQ(dense_attr->getValues<int32_t>()[0], 1);
EXPECT_EQ(dense_attr->getValues<int32_t>()[1], 2);
EXPECT_EQ(dense_attr->getValues<int32_t>()[2], 3);
EXPECT_EQ(dense_attr->getValues<int32_t>()[3], 4);
}
TEST(GetDenseAttrFromTensorProtoAttrTest, UnsupportedQint16Fails) {
auto context = CreateContext();
TensorType result_tensor_type =
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16));
EXPECT_TRUE(failed(
GetDenseAttrFromTensorProtoAttr(GetQint16Tensor(), result_tensor_type)));
}
TEST(IsTFQintTypeTest, ValidTFQintTypeSucceeds) {
auto context = CreateContext();
EXPECT_TRUE(IsTFQintType(TF::Qint8Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Qint16Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Qint32Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Quint8Type::get(context.get())));
EXPECT_TRUE(IsTFQintType(TF::Quint16Type::get(context.get())));
EXPECT_FALSE(IsTFQintType(TF::Int8RefType::get(context.get())));
EXPECT_FALSE(IsTFQintType(TF::Float8E5M2RefType::get(context.get())));
}
TEST(GetIntTypeFromTFQintTest, ChecksIntTypesFromTFQint) {
auto context = CreateContext();
auto type = GetIntTypeFromTFQint(TF::Qint8Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 8);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Qint16Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 16);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Qint32Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 32);
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isSigned());
EXPECT_FALSE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Quint8Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 8);
EXPECT_TRUE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
type = GetIntTypeFromTFQint(TF::Quint16Type::get(context.get()));
EXPECT_TRUE(llvm::isa<IntegerType>(type));
EXPECT_EQ(mlir::dyn_cast<IntegerType>(type).getWidth(), 16);
EXPECT_TRUE(mlir::dyn_cast<IntegerType>(type).isUnsigned());
EXPECT_EQ(GetIntTypeFromTFQint(IntegerType::get(type.getContext(), 32)),
IntegerType::get(type.getContext(), 32));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/tf_type_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
541c6011-78dc-41c7-a579-b06864d5f305 | cpp | tensorflow/tensorflow | bfloat16_type | tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.cc | tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
namespace mlir::quant::stablehlo {
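// Returns true for float element types wider than 16 bits (e.g. f32, f64,
// f80).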
bool IsLargeFloatType(Type type) {
type = getElementTypeOrSelf(type);
return isa<FloatType>(type) && type.getIntOrFloatBitWidth() > 16;
}
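// Rewrites large float scalar and shaped types to bfloat16; all other types
// pass through unchanged.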
Type ToBfloat16Type(Type type) {
if (auto shaped = mlir::dyn_cast<ShapedType>(type)) {
const Type elem = shaped.getElementType();
if (IsLargeFloatType(elem)) {
return shaped.clone(BFloat16Type::get(type.getContext()));
}
} else if (IsLargeFloatType(type)) {
return BFloat16Type::get(type.getContext());
}
return type;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.h"
#include <memory>
#include <gtest/gtest.h>
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
namespace mlir::quant::stablehlo {
namespace {
std::unique_ptr<MLIRContext> CreateContext() {
auto context = std::make_unique<MLIRContext>();
DialectRegistry mlir_registry;
RegisterCommonToolingDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
return context;
}
TEST(IsLargeFloatTypeTest, scalars) {
auto context = CreateContext();
EXPECT_FALSE(IsLargeFloatType(Float8E4M3FNType::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(Float16Type::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(BFloat16Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float32Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float64Type::get(context.get())));
EXPECT_TRUE(IsLargeFloatType(Float80Type::get(context.get())));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 8)));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 16)));
EXPECT_FALSE(IsLargeFloatType(IntegerType::get(context.get(), 32)));
}
TEST(IsLargeFloatTypeTest, tensors) {
auto context = CreateContext();
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float16Type::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float32Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float64Type::get(context.get()))));
EXPECT_TRUE(IsLargeFloatType(
RankedTensorType::get({2, 2}, Float80Type::get(context.get()))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16))));
EXPECT_FALSE(IsLargeFloatType(
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32))));
}
TEST(ToBfloat16TypeTest, scalars) {
auto context = CreateContext();
EXPECT_EQ(ToBfloat16Type(Float8E4M3FNType::get(context.get())),
Float8E4M3FNType::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float16Type::get(context.get())),
Float16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(BFloat16Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float32Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float64Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(Float80Type::get(context.get())),
BFloat16Type::get(context.get()));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 8)),
IntegerType::get(context.get(), 8));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 16)),
IntegerType::get(context.get(), 16));
EXPECT_EQ(ToBfloat16Type(IntegerType::get(context.get(), 32)),
IntegerType::get(context.get(), 32));
}
TEST(ToBfloat16TypeTest, tensors) {
auto context = CreateContext();
EXPECT_EQ(
ToBfloat16Type(
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get()))),
RankedTensorType::get({2, 2}, Float8E4M3FNType::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float16Type::get(context.get()))),
RankedTensorType::get({2, 2}, Float16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, BFloat16Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float32Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float64Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(
RankedTensorType::get({2, 2}, Float80Type::get(context.get()))),
RankedTensorType::get({2, 2}, BFloat16Type::get(context.get())));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 8))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 8)));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 16))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 16)));
EXPECT_EQ(ToBfloat16Type(RankedTensorType::get(
{2, 2}, IntegerType::get(context.get(), 32))),
RankedTensorType::get({2, 2}, IntegerType::get(context.get(), 32)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/bfloat16_type_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8a22cdd-a9f2-4a8b-9332-c4770305a759 | cpp | tensorflow/tensorflow | fill_quantization_options | tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.cc | tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc | #include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::CustomQuantizationMethod;
using ::stablehlo::quantization::PresetQuantizationMethod;
using ::stablehlo::quantization::QuantizationComponentSpec;
using ::stablehlo::quantization::QuantizationOptions;
using QuantizationComponent =
::stablehlo::quantization::QuantizationComponentSpec_QuantizationComponent;
using BitType = ::stablehlo::quantization::QuantizationComponentSpec_BitType;
using BitWidth = ::stablehlo::quantization::QuantizationComponentSpec_BitWidth;
void SetQuantizationComponentSpec(QuantizationComponentSpec* spec,
const QuantizationComponent& component,
const BitType bit_type,
const BitWidth bit_width) {
spec->set_quantization_component(component);
spec->set_bit_type(bit_type);
spec->set_bit_width(bit_width);
}
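// Expands a preset quantization method (FLOAT16, WEIGHT_ONLY, static-range
// PTQ) into explicit per-component specs on the custom quantization method.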
::stablehlo::quantization::QuantizationOptions FillPresetQuantizationOptions(
::stablehlo::quantization::QuantizationOptions quantization_options_) {
CustomQuantizationMethod custom_method =
quantization_options_.quantization_method().custom_quantization_method();
QuantizationComponentSpec *activation_component, *weight_component,
*bias_component;
const auto preset_method = quantization_options_.quantization_method()
.preset_quantization_method()
.preset_method();
if (!preset_method) return quantization_options_;
switch (preset_method) {
case PresetQuantizationMethod::FLOAT16:
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_FLOAT,
QuantizationComponentSpec::BIT_WIDTH_16);
bias_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(bias_component,
QuantizationComponentSpec::COMPONENT_BIAS,
QuantizationComponentSpec::BIT_TYPE_FLOAT,
QuantizationComponentSpec::BIT_WIDTH_16);
break;
case PresetQuantizationMethod::WEIGHT_ONLY:
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
break;
case PresetQuantizationMethod::POST_TRAINING_QUANTIZATION_STATIC_RANGE:
activation_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(
activation_component, QuantizationComponentSpec::COMPONENT_ACTIVATION,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
weight_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(weight_component,
QuantizationComponentSpec::COMPONENT_WEIGHT,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_8);
bias_component = custom_method.add_quantization_component_spec();
SetQuantizationComponentSpec(bias_component,
QuantizationComponentSpec::COMPONENT_BIAS,
QuantizationComponentSpec::BIT_TYPE_INT,
QuantizationComponentSpec::BIT_WIDTH_32);
break;
default:
break;
}
*quantization_options_.mutable_quantization_method()
->mutable_custom_quantization_method() = custom_method;
return quantization_options_;
}
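// Extracts the activation component's bit width (4, 8, 16 or 32) from the
// custom quantization method; returns failure when no activation spec is
// found.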
LogicalResult GetActivationBitWidth(QuantizationOptions quantization_options,
int* bit_width) {
CustomQuantizationMethod custom_method =
quantization_options.quantization_method().custom_quantization_method();
for (const auto& component : custom_method.quantization_component_spec()) {
if (component.quantization_component() ==
QuantizationComponentSpec::COMPONENT_ACTIVATION) {
switch (component.bit_width()) {
case QuantizationComponentSpec::BIT_WIDTH_4:
*bit_width = 4;
return success();
break;
case QuantizationComponentSpec::BIT_WIDTH_8:
*bit_width = 8;
return success();
break;
case QuantizationComponentSpec::BIT_WIDTH_16:
*bit_width = 16;
return success();
break;
case QuantizationComponentSpec::BIT_WIDTH_32:
*bit_width = 32;
return success();
break;
default:
break;
}
}
}
return failure();
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.h"
#include <ostream>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_options.pb.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::PresetQuantizationMethod;
using ::stablehlo::quantization::QuantizationComponentSpec;
using ::stablehlo::quantization::QuantizationOptions;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
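// Fills QuantizationOptions from the given preset method and checks that each
// produced component spec equals the corresponding expected spec.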
void FillPresetQuantizationOptionsTestHelper(
const PresetQuantizationMethod::PresetMethod preset_quantization_options,
const QuantizationComponentSpec expected_activation_component,
const QuantizationComponentSpec expected_weight_component,
const QuantizationComponentSpec expected_bias_component) {
QuantizationOptions quantization_options;
quantization_options.mutable_quantization_method()
->mutable_preset_quantization_method()
->set_preset_method(preset_quantization_options);
QuantizationOptions filled_quantization_options =
quant::stablehlo::FillPresetQuantizationOptions(quantization_options);
for (QuantizationComponentSpec component :
filled_quantization_options.quantization_method()
.custom_quantization_method()
.quantization_component_spec()) {
switch (component.quantization_component()) {
case (QuantizationComponentSpec::COMPONENT_ACTIVATION):
EXPECT_THAT(component, EqualsProto(expected_activation_component));
break;
case (QuantizationComponentSpec::COMPONENT_WEIGHT):
EXPECT_THAT(component, EqualsProto(expected_weight_component));
break;
case (QuantizationComponentSpec::COMPONENT_BIAS):
EXPECT_THAT(component, EqualsProto(expected_bias_component));
break;
default:
break;
}
}
}
TEST(FillQuantizationOptionsTest, PresetFloat16) {
QuantizationComponentSpec activation_component, weight_component,
bias_component;
weight_component.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
weight_component.set_bit_width(QuantizationComponentSpec::BIT_WIDTH_16);
weight_component.set_bit_type(QuantizationComponentSpec::BIT_TYPE_FLOAT);
bias_component.set_quantization_component(
QuantizationComponentSpec::COMPONENT_BIAS);
bias_component.set_bit_width(QuantizationComponentSpec::BIT_WIDTH_16);
bias_component.set_bit_type(QuantizationComponentSpec::BIT_TYPE_FLOAT);
  FillPresetQuantizationOptionsTestHelper(
      /*preset_quantization_options=*/PresetQuantizationMethod::FLOAT16,
      /*expected_activation_component=*/activation_component,
      /*expected_weight_component=*/weight_component,
      /*expected_bias_component=*/bias_component);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/utils/fill_quantization_options.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/tests/fill_quantization_options_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02979f14-d915-4bdb-9b1d-3ff75a0e9139 | cpp | tensorflow/tensorflow | stablehlo_op_quant_spec | tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc | tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h"
#include <memory>
#include "absl/status/statusor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
#define DEBUG_TYPE "stablehlo_opt_quant_spec"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::stablehlo::DotGeneralOp;
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::StaticRangePtq;
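// Returns true if the op is a lifted function (tf.XlaCallModule) whose
// quantization method is explicitly `no_quantization`, i.e. denylisted.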
bool IsDenylistedLiftedFunction(Operation* op) {
if (auto xla_call_module_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op);
xla_call_module_op != nullptr) {
absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op);
if (method.ok() && method->has_no_quantization()) {
return true;
}
}
return false;
}
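// Populates `spec.coeff_op_quant_dim` from the per-operand quantized types of
// the `static_range_ptq` method that carry dimension specs (per-channel).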
void PopulateCoeffOpQuantDimIfPerChannelQuantized(
TF::XlaCallModuleOp xla_call_module_op, OpQuantSpec& spec) {
absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op);
if (method.ok() && method->has_static_range_ptq()) {
const StaticRangePtq& static_range_ptq_spec = method->static_range_ptq();
for (const auto& [operand_idx, quantized_type] :
static_range_ptq_spec.input_quantized_types()) {
if (quantized_type.has_dimension_specs()) {
spec.coeff_op_quant_dim[operand_idx] =
quantized_type.dimension_specs().dimension();
}
}
}
}
}
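// Returns the quantization spec for a lifted composite function call. For
// convolution and dot_general composites this sets the weight quantization
// dimension and, when present, the bias parameters.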
std::unique_ptr<OpQuantSpec> GetStableHloOpQuantSpec(Operation* op) {
auto spec = std::make_unique<OpQuantSpec>();
if (auto call_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op)) {
auto entry_function =
call_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
StringRef function_name = entry_function.getValue();
if (!function_name.starts_with("composite_")) {
return spec;
}
if (function_name.contains("conv")) {
PopulateCoeffOpQuantDimIfPerChannelQuantized(call_op, *spec);
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("dot_general")) {
const auto module_op = call_op->getParentOfType<ModuleOp>();
const SymbolTable symbol_table(module_op);
auto entry_func_op =
dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(function_name));
auto dot_general_op = *entry_func_op.getOps<DotGeneralOp>().begin();
if (auto optional_dim = GetDotGeneralQuantizationDim(dot_general_op);
optional_dim) {
spec->coeff_op_quant_dim[1] = optional_dim.value();
} else {
spec->coeff_op_quant_dim[1] = -1;
}
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
}
for (const auto [operand_idx, per_channel_dim] : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(operand_idx);
}
}
return spec;
}
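// Returns scale constraints: shape-preserving ops require the same scale, and
// ops such as dynamic_slice, gather, pad, and slice additionally require the
// same operand and result types.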
std::unique_ptr<OpQuantScaleSpec> GetStableHloQuantConstraints(Operation* op) {
auto scale_spec = std::make_unique<OpQuantScaleSpec>();
if (llvm::isa<mlir::stablehlo::BroadcastInDimOp,
mlir::stablehlo::ConcatenateOp,
mlir::stablehlo::DynamicReshapeOp,
mlir::stablehlo::DynamicSliceOp, mlir::stablehlo::GatherOp,
mlir::stablehlo::PadOp, mlir::stablehlo::ReduceWindowOp,
mlir::stablehlo::ReshapeOp, mlir::stablehlo::SelectOp,
mlir::stablehlo::SliceOp, mlir::stablehlo::TransposeOp>(op)) {
scale_spec->has_same_scale_requirement = true;
}
if (llvm::isa<mlir::stablehlo::DynamicSliceOp, mlir::stablehlo::GatherOp,
mlir::stablehlo::PadOp, mlir::stablehlo::SliceOp>(op)) {
scale_spec->has_same_operand_and_result_type_requirement = true;
}
return scale_spec;
}
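// An op is quantizable if it is a constant, has a same-scale requirement, or
// is marked fully quantizable via the quantization trait attribute;
// terminators, quantize/dequantize casts, and denylisted lifted functions are
// not.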
bool IsOpQuantizableStableHlo(Operation* op) {
if (isa<func::ConstantOp, mlir::stablehlo::ConstantOp>(op)) {
return true;
} else if (op->hasTrait<OpTrait::IsTerminator>() ||
isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(op)) {
return false;
}
if (IsDenylistedLiftedFunction(op)) {
LLVM_DEBUG(llvm::errs() << "Denylisted quantizable unit: \n" << op << "\n");
return false;
}
if (GetStableHloQuantConstraints(op)->has_same_scale_requirement) {
return true;
}
const bool attr_enforced_quantizable =
op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) &&
op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() ==
QuantTraitValues[QuantizationTrait::FullyQuantizable];
return attr_enforced_quantizable;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::stablehlo::GatherOp;
using ::testing::IsEmpty;
using ::testing::IsTrue;
using ::testing::NotNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using IsOpQuantizableStableHloTest = ::mlir::quant::QuantizationTestBase;
constexpr absl::string_view kModuleConstantAdd = R"mlir(
module {
func.func @constant_add() -> (tensor<3x2xf32>) {
%cst1 = stablehlo.constant dense<2.4> : tensor<3x2xf32>
%cst2 = stablehlo.constant dense<5.7> : tensor<3x2xf32>
%add = stablehlo.add %cst1, %cst2 : (tensor<3x2xf32>, tensor<3x2xf32>) -> tensor<3x2xf32>
func.return %add : tensor<3x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleCompositeSameScale = R"mlir(
module {
func.func @same_scale_after_composite() -> tensor<3x1xf32> {
%0 = "tf.XlaCallModule"() {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : () -> tensor<1x3xf32>
%1 = "quantfork.qcast"(%0) {volatile} : (tensor<1x3xf32>) -> tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
%2 = "quantfork.dcast"(%1) : (tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<1x3xf32>
%3 = stablehlo.reshape %2 : (tensor<1x3xf32>) -> tensor<3x1xf32>
%4 = "quantfork.qcast"(%3) {volatile} : (tensor<3x1xf32>) -> tensor<3x1x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
%5 = "quantfork.dcast"(%4) : (tensor<3x1x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<3x1xf32>
return %5 : tensor<3x1xf32>
}
}
)mlir";
constexpr absl::string_view kModuleCompositeNoAttr = R"mlir(
module {
func.func @composite_without_attr() -> tensor<1x3xf32> {
%0 = "tf.XlaCallModule"() {Sout = [#tf_type.shape<1x3>], _entry_function = @non_quantizable_composite, _original_entry_function = "non_quantizable_composite", _stablehlo_module_attrs = {}, device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : () -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(IsOpQuantizableStableHloTest, ConstantOpQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto constant_op =
FindOperationOfType<mlir::stablehlo::ConstantOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(constant_op));
}
TEST_F(IsOpQuantizableStableHloTest, TerminatorOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto return_op = FindOperationOfType<func::ReturnOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(return_op));
}
TEST_F(IsOpQuantizableStableHloTest, SameScaleOpQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto reshape_op = FindOperationOfType<mlir::stablehlo::ReshapeOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(reshape_op));
}
TEST_F(IsOpQuantizableStableHloTest, NonSameScaleOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto add_op = FindOperationOfType<mlir::stablehlo::AddOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(add_op));
}
TEST_F(IsOpQuantizableStableHloTest, ValidXlaCallModuleOpQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, InvalidXlaCallModuleOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleCompositeNoAttr);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("composite_without_attr");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, QuantizeDequantizeOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto quantize_op = FindOperationOfType<quantfork::QuantizeCastOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(quantize_op));
auto dequantize_op =
FindOperationOfType<quantfork::DequantizeCastOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(dequantize_op));
}
TEST_F(IsOpQuantizableStableHloTest,
XlaCallModuleOpQuantizableWhenNotDenylisted) {
constexpr absl::string_view
kModuleXlaCallModuleOpWithDefaultQuantizationMethod = R"mlir(
func.func @xla_call_module_default_quantization_method(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleOpWithDefaultQuantizationMethod);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>(
"xla_call_module_default_quantization_method");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, DenylistedXlaCallModuleOpNotQuantizable) {
constexpr absl::string_view kModuleDenylistedXlaCallModuleOp = R"mlir(
func.func @xla_call_module_denylisted(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "no_quantization {}", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDenylistedXlaCallModuleOp);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("xla_call_module_denylisted");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(xla_call_module_op));
}
using GetStableHloOpQuantSpecTest = ::mlir::quant::QuantizationTestBase;
TEST_F(GetStableHloOpQuantSpecTest,
EmptyCoeffOpQuantDimForPerTensorQuantizedConvolution) {
constexpr absl::string_view
kXlaCallModuleOpWithPerTensorQuantizedConvolution = R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_conv_fn_1,
_original_entry_function = "composite_conv_fn_1",
_quantization_method = "static_range_ptq {}",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true},
_tfl_quant_trait = "fully_quantizable"
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithPerTensorQuantizedConvolution);
ASSERT_TRUE(module_op);
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const std::unique_ptr<OpQuantSpec> op_quant_spec =
GetStableHloOpQuantSpec(*xla_call_module_op);
ASSERT_THAT(op_quant_spec, NotNull());
EXPECT_THAT(op_quant_spec->coeff_op_quant_dim, IsEmpty());
}
TEST_F(GetStableHloOpQuantSpecTest,
EmptyCoeffOpQuantDimForPerChannelQuantizedConvolution) {
constexpr absl::string_view
kXlaCallModuleOpWithPerChannelQuantizedConvolution = R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_conv_fn_1,
_original_entry_function = "composite_conv_fn_1",
_quantization_method = "static_range_ptq {input_quantized_types {key: 1, value {dimension_specs {dimension: 3}}}}",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true},
_tfl_quant_trait = "fully_quantizable"
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithPerChannelQuantizedConvolution);
ASSERT_TRUE(module_op);
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const std::unique_ptr<OpQuantSpec> op_quant_spec =
GetStableHloOpQuantSpec(*xla_call_module_op);
ASSERT_THAT(op_quant_spec, NotNull());
EXPECT_THAT(op_quant_spec->coeff_op_quant_dim,
UnorderedElementsAre(Pair(1, 3)));
}
using GetStableHloQuantConstraintsTest = ::mlir::quant::QuantizationTestBase;
TEST_F(GetStableHloQuantConstraintsTest,
HasSameOperandAndResultTypeRequirementSucceeds) {
constexpr absl::string_view kModuleGather = R"mlir(
module {
func.func @main() -> (tensor<2x3x2x2xf32>) {
%0 = stablehlo.constant dense<1.0> : tensor<3x4x2xf32>
%1 = stablehlo.constant dense<2> : tensor<2x3x2xi64>
%2 = "stablehlo.gather"(%0, %1) {
dimension_numbers = #stablehlo.gather<
offset_dims = [2, 3],
collapsed_slice_dims = [0],
start_index_map = [1, 0],
index_vector_dim = 2>,
slice_sizes = array<i64: 1, 2, 2>,
indices_are_sorted = false
} : (tensor<3x4x2xf32>, tensor<2x3x2xi64>) -> tensor<2x3x2x2xf32>
func.return %2 : tensor<2x3x2x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleGather);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* gather_op = FindOperationOfType<GatherOp>(main_fn);
const auto spec = GetStableHloQuantConstraints(gather_op);
EXPECT_THAT(spec, NotNull());
EXPECT_THAT(spec->has_same_operand_and_result_type_requirement, IsTrue());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
da2b505e-fc9f-4953-95f5-53652fefab01 | cpp | tensorflow/tensorflow | save_report | tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc | tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <optional>
#include <string>
#include "absl/base/nullability.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
namespace mlir::quant::stablehlo {
namespace {
std::optional<std::string> OptionalStringViewToOptionalString(
std::optional<absl::string_view> view) {
if (view == std::nullopt) return std::nullopt;
return std::make_optional<std::string>(*view);
}
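// Returns true when `pass` is the stablehlo-quantize-composite-functions pass
// running on a ModuleOp.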
bool IsQuantizeCompositeFunctionPass(absl::Nullable<Pass*> pass,
absl::Nullable<Operation*> op) {
return pass != nullptr &&
pass->getArgument() == "stablehlo-quantize-composite-functions" &&
isa_and_nonnull<ModuleOp>(op);
}
bool ShouldSaveReport(absl::Nullable<Pass*> pass, absl::Nullable<Operation*> op,
const std::optional<std::string>& file_path) {
return file_path != std::nullopt && IsQuantizeCompositeFunctionPass(pass, op);
}
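// Saves `report` to `file_path`, logging whether the write succeeded.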
void SaveReport(const QuantizationReport& report,
const absl::string_view file_path) {
if (const absl::Status save_status = report.Save(file_path);
save_status.ok()) {
LOG(INFO) << "Successfully saved quantization report to: " << file_path;
} else {
LOG(ERROR) << "Failed to save quantization report to: " << file_path
<< " with status: " << save_status;
}
}
}
SaveQuantizationReportInstrumentation::SaveQuantizationReportInstrumentation(
std::optional<absl::string_view> file_path)
: file_path_(OptionalStringViewToOptionalString(file_path)) {}
void SaveQuantizationReportInstrumentation::runAfterPass(Pass* pass,
Operation* op) {
if (!IsQuantizeCompositeFunctionPass(pass, op)) return;
auto module_op = cast<ModuleOp>(op);
const QuantizationReport report(module_op);
report.Print();
if (!ShouldSaveReport(pass, op, file_path_)) return;
SaveReport(report, *file_path_);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using SaveQuantizationReportInstrumentationTest = QuantizationTestBase;
TEST_F(SaveQuantizationReportInstrumentationTest, SaveReport) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "tf.Const"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
const std::string report_file_path =
absl::StrCat(testing::TempDir(), "/save_report.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
const absl::StatusOr<std::string> file_data =
ReadFileToString(report_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenNoQuantizeCompositeFunctionsPass) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
pm.addPass(createPrepareQuantizePass());
const std::string report_file_path = absl::StrCat(
testing::TempDir(),
"/report_not_saved_no_quantize_composite_functions_pass.txtpb");
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
report_file_path));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
EXPECT_THAT(ReadFileToString(report_file_path),
StatusIs(absl::StatusCode::kNotFound));
}
TEST_F(SaveQuantizationReportInstrumentationTest,
ReportNotSavedWhenReportFilePathIsNullopt) {
constexpr absl::string_view kModuleWithCompositeDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%cst = "stablehlo.constant"() {value = dense<3.00000000e-1> : tensor<2x3xf32>} : () -> tensor<2x3xf32>
%0 = "quantfork.stats"(%arg0) {layerStats = dense<[6.00000000e-6, 9.00000000e-1]> : tensor<2xf32>} : (tensor<1x2xf32>) -> tensor<1x2xf32>
%1 = "tf.XlaCallModule"(%0, %cst) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _quantization_method = "static_range_ptq { }", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = "quantfork.stats"(%1) {layerStats = dense<[5.00000000e-6, 7.00000000e-1]> : tensor<2xf32>} : (tensor<1x3xf32>) -> tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleWithCompositeDotGeneral);
ASSERT_TRUE(module_op);
PassManager pm(ctx_.get());
QuantizeCompositeFunctionsPassOptions options;
pm.addPass(createQuantizeCompositeFunctionsPass(options));
pm.addInstrumentation(std::make_unique<SaveQuantizationReportInstrumentation>(
std::nullopt));
const LogicalResult run_result = pm.run(*module_op);
ASSERT_TRUE(succeeded(run_result));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/instrumentations/save_report_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
eacfe416-636a-4216-9513-71b894e575ba | cpp | tensorflow/tensorflow | report | tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
#include <optional>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizationResult;
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::WriteStringToFile;
using ::tsl::protobuf::TextFormat;
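// Maps a quantized function name (prefixed with `quantized_`) back to its
// composite function name (prefixed with `composite_`).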
std::string GetCompositeFunctionName(const StringRef quantized_func_name) {
return Twine(kCompositeFuncPrefix)
.concat(quantized_func_name.rsplit(kQuantizedFuncPrefix).second)
.str();
}
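// Builds a QuantizationResult for a call to a quantized function. Returns
// std::nullopt if the callee is not a quantized function or its quantization
// method cannot be retrieved.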
std::optional<QuantizationResult> GetQuantizationResult(func::CallOp call_op) {
const StringRef callee_name = call_op.getCalleeAttr().getValue();
if (!callee_name.starts_with(kQuantizedFuncPrefix)) {
return std::nullopt;
}
absl::StatusOr<Method> method = GetQuantizationMethod(call_op);
if (!method.ok()) {
call_op->emitError() << "Failed to get quantization method: "
<< method.status().ToString();
return std::nullopt;
}
QuantizationResult result{};
result.mutable_quantizable_unit()->set_name(
GetCompositeFunctionName(callee_name));
*result.mutable_method() = std::move(*method);
return result;
}
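// Builds a `no_quantization` QuantizationResult for a composite function that
// remains as a tf.XlaCallModule op, i.e. one that was not quantized.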
std::optional<QuantizationResult> GetQuantizationResult(
TF::XlaCallModuleOp xla_call_module_op) {
const StringAttr callee_name_attr =
mlir::dyn_cast_or_null<StringAttr>(xla_call_module_op->getDiscardableAttr(
kOriginalStablehloEntryFunctionAttrName));
if (callee_name_attr == nullptr) return std::nullopt;
if (callee_name_attr.getValue().starts_with(kCompositeFuncPrefix)) {
QuantizationResult result{};
result.mutable_quantizable_unit()->set_name(
callee_name_attr.getValue().str());
result.mutable_method()->mutable_no_quantization();
return result;
} else {
return std::nullopt;
}
}
void PopulateQuantizedResults(ModuleOp module_op,
QuantizationResults& results) {
module_op.walk([&results](func::CallOp call_op) {
std::optional<QuantizationResult> result = GetQuantizationResult(call_op);
if (result == std::nullopt) return WalkResult::skip();
*results.add_results() = std::move(*result);
return WalkResult::advance();
});
}
void PopulateNonQuantizedResults(ModuleOp module_op,
QuantizationResults& results) {
module_op.walk([&results](TF::XlaCallModuleOp xla_call_module_op) {
std::optional<QuantizationResult> result =
GetQuantizationResult(xla_call_module_op);
if (result == std::nullopt) return WalkResult::skip();
*results.add_results() = std::move(*result);
return WalkResult::advance();
});
}
}
QuantizationReport::QuantizationReport(ModuleOp module_op)
: quantization_results_(CollectResultsFromModuleOp(module_op)) {}
QuantizationResults QuantizationReport::CollectResultsFromModuleOp(
ModuleOp module_op) const {
QuantizationResults results{};
PopulateQuantizedResults(module_op, results);
PopulateNonQuantizedResults(module_op, results);
return results;
}
void QuantizationReport::AddQuantizationResult(QuantizationResult&& result) {
*quantization_results_.add_results() = std::move(result);
}
std::string QuantizationReport::ToString() const {
std::string results_str{};
TextFormat::PrintToString(quantization_results_, &results_str);
return absl::StrCat("===== Quantization Report =====\n\n", results_str,
"\n===== Quantization Report End =====\n\n");
}
void QuantizationReport::Print() const {
llvm::outs() << ToString();
llvm::outs().flush();
}
absl::Status QuantizationReport::Save(const StringRef file_path) const {
std::string results_str{};
TextFormat::PrintToString(GetQuantizationResults(), &results_str);
return WriteStringToFile(file_path, results_str);
}
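// A minimal usage sketch (hypothetical; the file path is illustrative only):
//   QuantizationReport report(module_op);
//   report.Print();
//   if (const absl::Status status = report.Save("/tmp/report.txtpb");
//       !status.ok()) {
//     LOG(ERROR) << "Failed to save report: " << status;
//   }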
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/report.h"
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::QuantizableUnit;
using ::stablehlo::quantization::QuantizationResult;
using ::stablehlo::quantization::QuantizationResults;
using ::stablehlo::quantization::io::ReadFileToString;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::testing::TempDir;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using QuantizationReportTest = ::mlir::quant::QuantizationTestBase;
TEST_F(QuantizationReportTest, GetQuantizationResultsReturnsEmptyResults) {
QuantizationReport report{};
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, AddQuantizationResult) {
QuantizationResult result{};
QuantizableUnit& quantizable_unit = *result.mutable_quantizable_unit();
quantizable_unit.set_name("quantized_my_function");
Method& method = *result.mutable_method();
method.mutable_no_quantization();
QuantizationReport report{};
report.AddQuantizationResult(std::move(result));
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& first_result = results.results(0);
EXPECT_THAT(first_result.quantizable_unit().name(),
StrEq("quantized_my_function"));
EXPECT_TRUE(first_result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOp) {
constexpr absl::string_view kQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& result = results.results(0);
EXPECT_THAT(result.quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(result.method().has_static_range_ptq());
}
TEST_F(QuantizationReportTest,
InitializeWithModuleOpWithoutQuantizationMethodAttribute) {
constexpr absl::string_view
kQuantizedDotGeneralMissingQuantizationMethodAttr = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralMissingQuantizationMethodAttr);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOpWithInvalidCalleeName) {
constexpr absl::string_view kQuantizedDotGeneralWithInvalidCalleeName =
R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @invalid_quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @invalid_quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralWithInvalidCalleeName);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), IsEmpty());
}
TEST_F(QuantizationReportTest, InitializeWithModuleOpWithNonQuantizedOp) {
constexpr absl::string_view kNonQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant dense<3.000000e+0> : tensor<2x3xf32>
%1 = "tf.XlaCallModule"(%arg0, %0) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn, _original_entry_function = "composite_dot_general_fn", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %1 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kNonQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(1));
const QuantizationResult& result = results.results(0);
EXPECT_THAT(result.quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest,
InitializeWithModuleOpWithQuantizedAndNonQuantizedOps) {
constexpr absl::string_view kQuantizedDotGeneralAndNonQuantizedDotGeneral =
R"mlir(
func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant dense<3.000000e+0> : tensor<2x3xf32>
%1 = "tf.XlaCallModule"(%arg0, %0) {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
%2 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%3 = stablehlo.uniform_quantize %arg1 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%4 = call @quantized_dot_general_fn_2(%3, %2) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%5 = stablehlo.uniform_dequantize %4 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
%6 = stablehlo.add %1, %5 : tensor<1x3xf32>
return %6 : tensor<1x3xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn_2(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneralAndNonQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const QuantizationResults& results = report.GetQuantizationResults();
ASSERT_THAT(results.results(), SizeIs(2));
const QuantizationResult& quantized_result = results.results(0);
EXPECT_THAT(quantized_result.quantizable_unit().name(),
StrEq("composite_dot_general_fn_2"));
EXPECT_TRUE(quantized_result.method().has_static_range_ptq());
const QuantizationResult& non_quantized_result = results.results(1);
EXPECT_THAT(non_quantized_result.quantizable_unit().name(),
StrEq("composite_dot_general_fn_1"));
EXPECT_TRUE(non_quantized_result.method().has_no_quantization());
}
TEST_F(QuantizationReportTest, ToString) {
QuantizationResult result{};
QuantizableUnit& quantizable_unit = *result.mutable_quantizable_unit();
quantizable_unit.set_name("quantized_my_function");
Method& method = *result.mutable_method();
method.mutable_no_quantization();
QuantizationReport report{};
report.AddQuantizationResult(std::move(result));
std::string result_str{};
TextFormat::PrintToString(report.GetQuantizationResults(), &result_str);
EXPECT_THAT(report.ToString(), HasSubstr("Quantization Report"));
EXPECT_THAT(report.ToString(), HasSubstr(result_str));
EXPECT_THAT(report.ToString(), HasSubstr("Quantization Report End"));
}
TEST_F(QuantizationReportTest, Save) {
constexpr absl::string_view kQuantizedDotGeneral = R"mlir(
func.func @main(%arg0: tensor<1x2xf32>) -> tensor<1x3xf32> {
%0 = stablehlo.constant() {value = dense<127> : tensor<2x3xi8>} : () -> tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>
%1 = stablehlo.uniform_quantize %arg0 : (tensor<1x2xf32>) -> tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>
%2 = call @quantized_dot_general_fn(%1, %0) {_quantization_method = "static_range_ptq { }"} : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
%3 = stablehlo.uniform_dequantize %2 : (tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>) -> tensor<1x3xf32>
return %3 : tensor<1x3xf32>
}
func.func private @quantized_dot_general_fn(%arg0: tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, %arg1: tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>> {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2x!quant.uniform<i8:f32, 4.000000e+0>>, tensor<2x3x!quant.uniform<i8<-127:127>:f32:1, {1.000000e+0,2.000000e+0,3.000000e+0}>>) -> tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>
%1 = stablehlo.uniform_quantize %0 : (tensor<1x3x!quant.uniform<i32:f32:1, {6.000000e+0,7.000000e+0,8.000000e+0}>>) -> tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
return %1 : tensor<1x3x!quant.uniform<i8:f32, 5.000000e+0>>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kQuantizedDotGeneral);
ASSERT_TRUE(module_op);
const QuantizationReport report(*module_op);
const std::string dst_file_path =
absl::StrCat(TempDir(), "/quantization_report.txtpb");
const absl::Status save_status = report.Save(dst_file_path);
ASSERT_THAT(save_status, IsOk());
const absl::StatusOr<std::string> file_data = ReadFileToString(dst_file_path);
ASSERT_THAT(file_data, IsOk());
QuantizationResults results{};
ASSERT_TRUE(TextFormat::ParseFromString(*file_data, &results));
ASSERT_THAT(results.results(), SizeIs(1));
EXPECT_THAT(results.results(0).quantizable_unit().name(),
StrEq("composite_dot_general_fn"));
EXPECT_TRUE(results.results(0).method().has_static_range_ptq());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/report.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/report_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
76ef3c68-b50c-4783-b4da-7df0f63675ba | cpp | tensorflow/tensorflow | saved_model_import | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/reader.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::MLIRImportOptions;
using ::tensorflow::SavedModelBundle;
using ::tensorflow::SavedModelSignatureDefsToMlirImport;
using ::tensorflow::quantization::PreprocessAndFreezeGraph;
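// Imports the SavedModel at `saved_model_path` into an MLIR module without
// lifting variables and returns the module together with the loaded bundle.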
absl::StatusOr<ImportedMlirModuleOp> SavedModelToMlirModuleOp(
const absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags,
const std::vector<std::string>& signature_keys,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND) {
MLIRImportOptions import_options;
import_options.upgrade_legacy = true;
import_options.lift_variables = false;
import_options.include_variables_in_initializers = true;
auto bundle = std::make_unique<SavedModelBundle>();
std::vector<std::string> exported_names = signature_keys;
absl::StatusOr<OwningOpRef<ModuleOp>> module_op =
SavedModelSignatureDefsToMlirImport(saved_model_path, tags,
absl::MakeSpan(exported_names), &ctx,
import_options, &bundle);
if (!module_op.status().ok()) {
return absl::InternalError(absl::StrCat("Failed to import SavedModel: ",
module_op.status().ToString()));
}
return std::make_pair(std::move(*module_op), std::move(bundle));
}
absl::StatusOr<absl::flat_hash_map<FunctionName, FunctionAlias>>
GetFunctionAliases(absl::string_view saved_model_path,
const std::unordered_set<std::string>& tags) {
tensorflow::MetaGraphDef meta_graph;
TF_RETURN_IF_ERROR(tensorflow::ReadMetaGraphDefFromSavedModel(
saved_model_path, tags, &meta_graph));
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases(
meta_graph.meta_info_def().function_aliases().begin(),
meta_graph.meta_info_def().function_aliases().end());
return function_aliases;
}
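// Rewrites the keys of `function_aliases` from the original TF function names
// to the corresponding MLIR function names and drops aliases whose functions
// are not present in `module_op`.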
void UpdateFunctionAliases(
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
ModuleOp module_op) {
absl::flat_hash_set<FunctionName> existing_func_names;
module_op->walk([&](func::FuncOp func_op) {
FunctionName func_name = func_op.getSymName().str();
existing_func_names.insert(func_name);
auto original_func_name =
func_op->getAttrOfType<StringAttr>("tf._original_func_name");
if (original_func_name) {
if (auto alias_itr = function_aliases.find(original_func_name.str());
alias_itr != function_aliases.end()) {
const FunctionAlias alias = alias_itr->second;
function_aliases[func_name] = alias;
}
}
});
absl::erase_if(function_aliases, [&existing_func_names](const auto& item) {
return !existing_func_names.contains(item.first);
});
}
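// Imports the SavedModel, updates the function aliases to MLIR function
// names, and runs the preprocess-and-freeze pipeline to prepare the module
// for quantization.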
absl::StatusOr<OwningOpRef<ModuleOp>> ImportSavedModel(
const absl::string_view saved_model_path,
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationConfig& quantization_config,
const absl::string_view mlir_dump_file_prefix,
absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND) {
TF_ASSIGN_OR_RETURN(
ImportedMlirModuleOp imported_module,
SavedModelToMlirModuleOp(saved_model_path, tags, signature_keys, ctx));
auto [module_op, saved_model_bundle] = std::move(imported_module);
UpdateFunctionAliases(function_aliases, *module_op);
absl::flat_hash_set<std::string> aliased_function_names;
absl::c_for_each(function_aliases, [&](const auto& aliases) {
return aliased_function_names.insert(aliases.first);
});
TF_RETURN_IF_ERROR(PreprocessAndFreezeGraph(
mlir_dump_file_prefix, true,
aliased_function_names, *module_op, &ctx,
saved_model_bundle == nullptr ? nullptr
: saved_model_bundle->GetSession(),
true, false));
return std::move(module_op);
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
namespace mlir::quant::stablehlo {
namespace {
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using UpdateFunctionAliasesTest = ::mlir::quant::QuantizationTestBase;
TEST_F(UpdateFunctionAliasesTest, NoAliasesReturnsEmptyMap) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases;
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest, AliasUpdatedByMlirFunctionName) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases,
UnorderedElementsAre(Pair("main", "main_alias")));
}
TEST_F(UpdateFunctionAliasesTest, IgnoresUnmatchedFunctions) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) attributes {tf._original_func_name = "main_original"} {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"not_main", "not_main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest,
SkipsFunctionsWithNoOriginalFuncNameAttribute) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases, IsEmpty());
}
TEST_F(UpdateFunctionAliasesTest, FunctionNameNotChanged) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
func.func private @main_original(%arg: tensor<1x2xf32>) -> (tensor<1x2xf32>) {
return %arg : tensor<1x2xf32>
}
)mlir");
ASSERT_TRUE(module_op);
absl::flat_hash_map<FunctionName, FunctionAlias> function_aliases{
{"main_original", "main_alias"}};
UpdateFunctionAliases(function_aliases, *module_op);
EXPECT_THAT(function_aliases,
UnorderedElementsAre(Pair("main_original", "main_alias")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_import_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fbecbb4b-9f8b-4297-8267-a0914c777604 | cpp | tensorflow/tensorflow | pre_calibration | tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.h"
#include "absl/base/nullability.h"
#include "absl/log/die_if_null.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tsl/platform/errors.h"
namespace mlir::quant::stablehlo {
using ::stablehlo::quantization::QuantizationConfig;
using ::tensorflow::quantization::RunPasses;
PreCalibrationComponent::PreCalibrationComponent(
absl::Nonnull<MLIRContext*> ctx)
: ctx_(ABSL_DIE_IF_NULL(ctx)) {}
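// Runs the pre-calibration pass pipeline over `module_op`, configured from the
// calibration options, quantization specs, and debugger config in `config`.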
absl::StatusOr<ModuleOp> PreCalibrationComponent::Run(
ModuleOp module_op, const QuantizationConfig& config) {
TF_RETURN_IF_ERROR(RunPasses(
kName,
[&config](PassManager& pm) {
AddPreCalibrationPasses(pm, config.calibration_options(),
config.specs(), config.debugger_config());
},
*ctx_, module_op));
return module_op;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.h"
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/config.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/tf_quant_ops.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::ExpandPresets;
using ::stablehlo::quantization::PopulateDefaults;
using ::stablehlo::quantization::QuantizationConfig;
using ::testing::Contains;
using ::testing::SizeIs;
using ::testing::StartsWith;
using ::testing::StrEq;
using ::tsl::testing::IsOk;
MATCHER_P(HasSymName, name, "") {
auto non_const_arg = const_cast<std::remove_const_t<decltype(arg)>>(arg);
*result_listener << "where the name is " << non_const_arg.getSymName().str();
return non_const_arg.getSymName() == name;
}
MATCHER_P2(HasStringAttr, name, value_matcher,
           absl::StrCat(negation ? "doesn't have" : "has",
                        " string attribute: ", name, ", with desirable value")) {
auto non_const_arg = const_cast<std::remove_const_t<decltype(arg)>>(arg);
return non_const_arg->template hasAttrOfType<StringAttr>(name) &&
ExplainMatchResult(
value_matcher,
non_const_arg->template getAttrOfType<StringAttr>(name).str(),
result_listener);
}
MATCHER_P2(HasSymNameAttr, name, value_matcher,
           absl::StrCat(negation ? "doesn't have" : "has",
                        " string attribute: ", name, ", with desirable value")) {
auto non_const_arg = const_cast<std::remove_const_t<decltype(arg)>>(arg);
return non_const_arg->template hasAttrOfType<FlatSymbolRefAttr>(name) &&
ExplainMatchResult(
value_matcher,
non_const_arg->template getAttrOfType<FlatSymbolRefAttr>(name)
.getValue()
.str(),
result_listener);
}
using PreCalibrationComponentTest = ::mlir::quant::QuantizationTestBase;
TEST_F(PreCalibrationComponentTest,
HasCustomAggregatorOpAndQuantizableFuncForSimpleDotGeneral) {
PreCalibrationComponent component(ctx_.get());
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {} {
func.func @main(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> attributes {} {
%0 = stablehlo.constant dense<1.0> : tensor<4x3xf32>
%1 = stablehlo.dot_general %arg0, %0, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x4xf32>, tensor<4x3xf32>) -> tensor<1x3xf32>
return %1 : tensor<1x3xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
QuantizationConfig quantization_config{};
quantization_config.mutable_static_range_ptq_preset();
quantization_config = ExpandPresets(PopulateDefaults(quantization_config));
absl::StatusOr<ModuleOp> pre_calibration_result =
component.Run(*module_op, quantization_config);
EXPECT_THAT(pre_calibration_result, IsOk());
SmallVector<func::FuncOp> func_ops;
for (auto func_op : pre_calibration_result->getOps<func::FuncOp>()) {
func_ops.push_back(func_op);
}
ASSERT_THAT(func_ops, SizeIs(2));
EXPECT_THAT(func_ops, Contains(HasSymName("main")));
EXPECT_THAT(func_ops, Contains(HasSymName("composite_dot_general_fn_1")));
SmallVector<TF::XlaCallModuleOp> xla_call_module_ops;
for (auto xla_call_module_op : func_ops[0].getOps<TF::XlaCallModuleOp>()) {
xla_call_module_ops.push_back(xla_call_module_op);
}
ASSERT_THAT(xla_call_module_ops, SizeIs(1));
auto xla_call_module_op = xla_call_module_ops[0];
EXPECT_THAT(xla_call_module_op,
HasStringAttr("_tfl_quant_trait", StrEq("fully_quantizable")));
EXPECT_THAT(xla_call_module_op,
HasSymNameAttr("_entry_function",
StartsWith("composite_dot_general_fn")));
EXPECT_THAT(xla_call_module_op,
HasStringAttr("_original_entry_function",
StartsWith("composite_dot_general_fn")));
SmallVector<TF::CustomAggregatorOp> custom_aggregator_ops;
for (auto custom_aggregator_op :
func_ops[0].getOps<TF::CustomAggregatorOp>()) {
custom_aggregator_ops.push_back(custom_aggregator_op);
}
EXPECT_THAT(custom_aggregator_ops, SizeIs(2));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/pre_calibration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cf805915-470c-4ec5-9114-22bba8f23da3 | cpp | tensorflow/tensorflow | io | tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace stablehlo::quantization::io {
absl::StatusOr<std::string> GetLocalTmpFileName(tsl::Env* const env) {
std::string tmp_fname{};
if (!env->LocalTempFilename(&tmp_fname)) {
return absl::InternalError("Failed to create tmp file name.");
}
return tmp_fname;
}
absl::StatusOr<std::string> GetLocalTmpFileName() {
return GetLocalTmpFileName(tsl::Env::Default());
}
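// Creates a unique temporary directory under `env` (derived from a local tmp
// file name) and returns its path.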
absl::StatusOr<std::string> CreateTmpDir(tsl::Env* const env) {
TF_ASSIGN_OR_RETURN(std::string tmp_dir, GetLocalTmpFileName(env));
if (!env->RecursivelyCreateDir(tmp_dir).ok()) {
return absl::InternalError(
absl::StrFormat("Failed to create tmp dir: '%s'", tmp_dir));
}
return tmp_dir;
}
absl::StatusOr<std::string> CreateTmpDir() {
return CreateTmpDir(tsl::Env::Default());
}
absl::Status WriteStringToFile(const absl::string_view file_path,
const absl::string_view data) {
auto* env = tsl::Env::Default();
return WriteStringToFile(env, std::string(file_path), data);
}
absl::StatusOr<std::string> ReadFileToString(
const absl::string_view file_path) {
auto* env = tsl::Env::Default();
std::string data{};
absl::Status read_status =
ReadFileToString(env, std::string(file_path), &data);
if (read_status.ok()) {
return data;
} else {
return read_status;
}
}
absl::StatusOr<std::vector<std::string>> ListDirectory(
absl::string_view directory) {
std::vector<std::string> children;
TF_RETURN_IF_ERROR(
tsl::Env::Default()->GetChildren(std::string(directory), &children));
return children;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/functional/any_invocable.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tsl/platform/env.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/types.h"
namespace stablehlo::quantization::io {
namespace {
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
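// Test-only tsl::Env whose GetFileSystemForFile always fails; used to exercise
// the error paths of the io helpers.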
class TestEnvBrokenFileSystem : public tsl::Env {
public:
TestEnvBrokenFileSystem() = default;
bool MatchPath(const tsl::string& path, const tsl::string& pattern) override {
return false;
}
void SleepForMicroseconds(int64_t micros) override {}
tsl::string GetRunfilesDir() override { return tsl::string("dummy_path"); }
int32_t GetCurrentThreadId() override { return 0; }
tsl::Thread* StartThread(const tsl::ThreadOptions& thread_options,
const tsl::string& name,
absl::AnyInvocable<void()> fn) override {
return nullptr;
}
bool GetCurrentThreadName(tsl::string* name) override { return false; }
void SchedClosure(absl::AnyInvocable<void()> closure) override {}
void SchedClosureAfter(int64_t micros,
absl::AnyInvocable<void()> closure) override {}
absl::Status LoadDynamicLibrary(const char* library_filename,
void** handle) override {
return absl::OkStatus();
}
absl::Status GetSymbolFromLibrary(void* handle, const char* symbol_name,
void** symbol) override {
return absl::OkStatus();
}
tsl::string FormatLibraryFileName(const tsl::string& name,
const tsl::string& version) override {
return tsl::string("dummy_path");
}
absl::Status GetFileSystemForFile(const std::string& fname,
tsl::FileSystem** result) override {
return absl::InternalError("Broken file system");
}
private:
void GetLocalTempDirectories(std::vector<tsl::string>* list) override {
list->push_back("/tmp");
}
};
class TestEnvBrokenFileSystemAndNoLocalTempDirs
: public TestEnvBrokenFileSystem {
private:
void GetLocalTempDirectories(std::vector<tsl::string>* list) override {}
};
TEST(IoTest, GetLocalTmpFileNameGivesValidFileName) {
absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName();
ASSERT_THAT(tmp_file_name, IsOk());
EXPECT_THAT(*tmp_file_name, Not(IsEmpty()));
}
TEST(IoTest, GetLocalTmpFileNameWhenNoTempDirsReturnsInternalError) {
TestEnvBrokenFileSystemAndNoLocalTempDirs broken_env;
absl::StatusOr<std::string> tmp_file_name = GetLocalTmpFileName(&broken_env);
EXPECT_THAT(tmp_file_name,
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to create tmp file name")));
}
TEST(IoTest, CreateTmpDirReturnsValidTmpPath) {
absl::StatusOr<std::string> tmp_dir = CreateTmpDir();
ASSERT_THAT(tmp_dir, IsOk());
auto* const env = tsl::Env::Default();
EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());
}
TEST(IoTest, CreateTmpDirWhenInvalidPathReturnsInternalError) {
TestEnvBrokenFileSystem test_env{};
absl::StatusOr<std::string> tmp_dir = CreateTmpDir(&test_env);
EXPECT_THAT(tmp_dir, StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to create tmp dir")));
}
TEST(IoTest, WriteStringToFile) {
const std::string dst_file_path =
absl::StrCat(testing::TempDir(), "/tmp_file");
const absl::Status write_status =
WriteStringToFile(dst_file_path, "test_string");
ASSERT_THAT(write_status, IsOk());
auto* const env = tsl::Env::Default();
ASSERT_THAT(env->FileExists(dst_file_path), IsOk());
std::string data{};
ASSERT_THAT(tsl::ReadFileToString(env, dst_file_path, &data), IsOk());
EXPECT_THAT(data, Eq("test_string"));
}
TEST(IoTest, ReadFileToString) {
const std::string src_file_path =
absl::StrCat(testing::TempDir(), "/tmp_file");
{
std::ofstream ofs(src_file_path);
ofs << "test_string";
}
const absl::StatusOr<std::string> read_status =
ReadFileToString(src_file_path);
ASSERT_THAT(read_status, IsOk());
EXPECT_THAT(*read_status, Eq("test_string"));
}
TEST(IoTest, ListChildrenInDirectory) {
absl::StatusOr<std::string> tmp_dir = CreateTmpDir();
ASSERT_THAT(tmp_dir, IsOk());
auto* const env = tsl::Env::Default();
EXPECT_THAT(env->FileExists(*tmp_dir), IsOk());
ASSERT_THAT(
WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file1"), "test_string"),
IsOk());
ASSERT_THAT(
WriteStringToFile(absl::StrCat(*tmp_dir, "/tmp_file2"), "test_string"),
IsOk());
ASSERT_THAT(env->RecursivelyCreateDir(absl::StrCat(*tmp_dir, "/subdir")),
IsOk());
absl::StatusOr<std::vector<std::string>> children = ListDirectory(*tmp_dir);
EXPECT_THAT(children, IsOk());
EXPECT_THAT(children.value(), SizeIs(3));
EXPECT_THAT(children.value(),
UnorderedElementsAre("subdir", "tmp_file1", "tmp_file2"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/io_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
be6c354f-375c-4008-a3fc-46f4eaa2f4f4 | cpp | tensorflow/tensorflow | saved_model_export | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include <memory>
#include <optional>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/io.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/pass_pipeline.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/cc/types.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/run_passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/constants.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/python/unfreeze_constants.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::kTfSavedModelInitializerInitType;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
using ::stablehlo::quantization::QuantizationConfig;
using ::stablehlo::quantization::io::GetLocalTmpFileName;
using ::tensorflow::AssetFileDef;
using ::tensorflow::FunctionDefLibrary;
using ::tensorflow::FunctionLibraryDefinition;
using ::tensorflow::Graph;
using ::tensorflow::GraphDef;
using ::tensorflow::Node;
using ::tensorflow::NodeDef;
using ::tensorflow::OpRegistry;
using ::tensorflow::SaverDef;
using ::tensorflow::quantization::ExportedModel;
using ::tensorflow::quantization::RunPasses;
using ::tensorflow::quantization::UnfreezeConstantsAndSaveVariables;
std::string GetNodeName(const std::vector<std::string>& control_ret_node_names,
const absl::string_view contains) {
for (const std::string& node_name : control_ret_node_names) {
if (absl::StrContains(node_name, contains)) {
VLOG(1) << "Node found: " << node_name << ", contains: " << contains;
return node_name;
}
}
VLOG(1) << "Could not find node whose name conatins: " << contains;
return "";
}
std::string FindFilePrefixTensorName(const GraphDef& graph_def) {
for (const NodeDef& node_def : graph_def.node()) {
if (node_def.op() == FunctionLibraryDefinition::kArgOp) {
const auto index_path_attr_itr =
node_def.attr().find(kTfSavedModelIndexPathAttr.str());
if (index_path_attr_itr != node_def.attr().end()) {
const auto& index_paths = index_path_attr_itr->second.list().s();
if (absl::c_find(index_paths, kTfFilePrefix.str()) !=
index_paths.end()) {
return absl::StrCat(node_def.name(), ":0");
}
}
}
}
return "";
}
}
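// Creates a temporary checkpoint directory name, runs the export passes on
// `module_op`, and converts the result into an ExportedModel proto carrying
// the function aliases and any lifted asset file defs.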
absl::StatusOr<ExportedModel> CreateExportedModel(
const std::vector<std::string>& signature_keys,
const std::unordered_set<std::string>& tags,
const QuantizationConfig& quantization_config,
absl::string_view debug_name_prefix,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
MLIRContext& ctx ABSL_ATTRIBUTE_LIFETIME_BOUND, ModuleOp module_op) {
TF_ASSIGN_OR_RETURN(const std::string checkpoint_dir, GetLocalTmpFileName());
const ExportOptions export_opts = {
      /*duplicate_shape_determining_constants=*/true,
      /*unfreeze_constants=*/false, checkpoint_dir,
      /*debug_name=*/absl::StrCat(debug_name_prefix, kExportStepSuffix)};
TF_ASSIGN_OR_RETURN(const SmallVector<AssetFileDef> asset_file_defs,
RunExportPasses(export_opts, ctx, module_op));
return ConvertMlirModuleToExportedModel(
module_op, checkpoint_dir, function_aliases,
{asset_file_defs.begin(), asset_file_defs.end()});
}
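// Assembles an ExportedModel proto from an already-exported GraphDef plus the
// init node name, checkpoint directory, optional SaverDef, function aliases,
// and asset file defs.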
ExportedModel CreateExportedModelFromGraphDef(
GraphDef&& graph_def, const absl::string_view init_node_name,
const absl::string_view checkpoint_dir,
const std::optional<SaverDef> saver_def,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
const std::vector<AssetFileDef>& asset_file_defs) {
ExportedModel exported_model{};
*exported_model.mutable_graph_def() = graph_def;
exported_model.set_init_node_name(std::string(init_node_name));
exported_model.set_checkpoint_dir(std::string(checkpoint_dir));
exported_model.mutable_function_aliases()->insert(function_aliases.begin(),
function_aliases.end());
for (const AssetFileDef& asset_file_def : asset_file_defs) {
*exported_model.mutable_asset_file_defs()->Add() = asset_file_def;
}
if (saver_def != std::nullopt) {
*exported_model.mutable_saver_def() = *std::move(saver_def);
}
return exported_model;
}
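// Adds passes that serialize XlaCallModule bodies, re-create a `main`
// function, lower to the tf_executor dialect, and merge initializer/save
// functions into `main` so the module can be exported as a GraphDef.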
void AddExportPasses(mlir::PassManager& pm,
const bool duplicate_shape_determining_constants) {
AddCallModuleSerializationPasses(pm);
if (duplicate_shape_determining_constants) {
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::CreateDuplicateShapeDeterminingConstantsPass());
}
pm.addPass(mlir::quant::CreateInsertMainFunctionPass());
pm.addPass(mlir::quant::CreateLiftHashTableOpsAsArgsPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::CreateFunctionalToExecutorDialectConversionPass());
pm.addPass(mlir::CreateBreakUpIslandsPass());
pm.addPass(mlir::quant::CreateMergeInitializerFunctionOpsToMainPass());
pm.addPass(mlir::quant::CreateMergeSaveFunctionOpsToMainPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::quant::CreateMergeDuplicateResourceOpsPass());
pm.addPass(mlir::TF::CreateStripNoinlineAttributePass());
}
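// Builds a SaverDef from the file-prefix tensor, restore op, and save op found
// in the exported graph. Returns std::nullopt when none of them exist and an
// internal error when only some of them do.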
absl::StatusOr<std::optional<SaverDef>> CreateSaverDef(
const std::vector<std::string>& control_ret_node_names,
const GraphDef& graph_def) {
const std::string filename_tensor_name = FindFilePrefixTensorName(graph_def);
const std::string restore_op_name =
GetNodeName(control_ret_node_names, kTfSavedModelInitializerRestoreType);
const std::string save_node_name =
GetNodeName(control_ret_node_names, kTfQuantSaveOpName);
const std::vector<absl::string_view> fields = {
filename_tensor_name, restore_op_name, save_node_name};
const auto is_empty_predicate = [](const absl::string_view s) {
return s.empty();
};
if (absl::c_all_of(fields, is_empty_predicate)) {
return std::nullopt;
} else if (absl::c_none_of(fields, is_empty_predicate)) {
SaverDef saver_def{};
saver_def.set_version(SaverDef::V2);
saver_def.set_filename_tensor_name(filename_tensor_name);
saver_def.set_restore_op_name(restore_op_name);
saver_def.set_save_tensor_name(absl::StrCat(save_node_name, ":0"));
return saver_def;
} else {
return absl::InternalError(
absl::StrCat("Failed to create SaverDef. Fields should be either all "
"empty strings or all non-empty strings. Got fields: ",
absl::StrJoin(fields, ",")));
}
}
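// Converts a tf_executor-dialect module into a GraphDef and wraps it, together
// with the derived init node name and SaverDef, into an ExportedModel.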
absl::StatusOr<ExportedModel> ConvertMlirModuleToExportedModel(
const mlir::ModuleOp module_op, const absl::string_view checkpoint_dir,
const absl::flat_hash_map<FunctionName, FunctionAlias>& function_aliases,
const std::vector<AssetFileDef>& asset_file_defs) {
const tensorflow::GraphExportConfig config{};
FunctionLibraryDefinition flib_def{OpRegistry::Global(),
FunctionDefLibrary()};
std::unique_ptr<Graph> graph;
absl::flat_hash_set<Node*> control_ret_nodes{};
TF_RETURN_IF_ERROR(tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module_op, config, &graph, &flib_def, &control_ret_nodes));
GraphDef graph_def{};
graph->ToGraphDef(&graph_def);
std::vector<std::string> control_ret_node_names{};
for (Node* node : control_ret_nodes) {
control_ret_node_names.push_back(node->name());
}
const std::string init_node_name =
GetNodeName(control_ret_node_names, kTfSavedModelInitializerInitType);
TF_ASSIGN_OR_RETURN(const std::optional<SaverDef> saver_def,
CreateSaverDef(control_ret_node_names, graph_def));
return CreateExportedModelFromGraphDef(std::move(graph_def), init_node_name,
checkpoint_dir, std::move(saver_def),
function_aliases, asset_file_defs);
}
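// Optionally unfreezes constants into `export_opts.checkpoint_dir`, runs the
// export pass pipeline, and returns the AssetFileDefs produced from lifted
// asset arguments.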
absl::StatusOr<SmallVector<AssetFileDef>> RunExportPasses(
const ExportOptions& export_opts, MLIRContext& ctx, ModuleOp module_op) {
if (export_opts.unfreeze_constants) {
TF_RETURN_IF_ERROR(UnfreezeConstantsAndSaveVariables(
export_opts.checkpoint_dir, ctx, module_op));
LOG(INFO) << "Unfrozen constants and saved variables to checkpoint file: "
<< export_opts.checkpoint_dir;
}
TF_RETURN_IF_ERROR(RunPasses(
export_opts.debug_name,
[dup_constants = export_opts.duplicate_shape_determining_constants](
PassManager& pm) { AddExportPasses(pm, dup_constants); },
ctx, module_op));
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
quant::ConvertAssetArgs(module_op);
if (failed(asset_file_defs)) {
return absl::InternalError("Failed to convert asset args.");
}
return *asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.h"
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/exported_model.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace mlir::quant::stablehlo {
namespace {
using ::tensorflow::AssetFileDef;
using ::tensorflow::GraphDef;
using ::tensorflow::NodeDef;
using ::tensorflow::SaverDef;
using ::tensorflow::quantization::ExportedModel;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::TextFormat;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(CreateExportedModelTest, CreateExportedModelBasicFieldsSet) {
GraphDef graph_def{};
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(node { name: "foo" })pb", &graph_def));
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
std::move(graph_def), "init_node_name", "checkpoint_dir",
std::nullopt,
{}, {});
ASSERT_THAT(exported_model.graph_def().node(), SizeIs(1));
EXPECT_THAT(exported_model.graph_def().node()[0].name(), StrEq("foo"));
EXPECT_THAT(exported_model.init_node_name(), StrEq("init_node_name"));
EXPECT_THAT(exported_model.checkpoint_dir(), StrEq("checkpoint_dir"));
EXPECT_FALSE(exported_model.has_saver_def());
EXPECT_THAT(exported_model.function_aliases(), IsEmpty());
EXPECT_THAT(exported_model.asset_file_defs(), IsEmpty());
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedFunctionAliases) {
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
GraphDef(), "", "",
std::nullopt,
{{"func1", "alias1"}, {"func2", "alias2"}},
{});
ASSERT_THAT(exported_model.function_aliases(), SizeIs(2));
EXPECT_TRUE(exported_model.function_aliases().contains("func1"));
EXPECT_THAT(exported_model.function_aliases().at("func1"), StrEq("alias1"));
EXPECT_TRUE(exported_model.function_aliases().contains("func2"));
EXPECT_THAT(exported_model.function_aliases().at("func2"), StrEq("alias2"));
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedAssetFileDefs) {
AssetFileDef asset1;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "fname1")pb", &asset1));
AssetFileDef asset2;
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "fname2")pb", &asset2));
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
GraphDef(), "", "",
std::nullopt, {},
{asset1, asset2});
ASSERT_THAT(exported_model.asset_file_defs(), SizeIs(2));
EXPECT_THAT(exported_model.asset_file_defs()[0].filename(), StrEq("fname1"));
EXPECT_THAT(exported_model.asset_file_defs()[1].filename(), StrEq("fname2"));
}
TEST(CreateExportedModelTest, CreateExportedModelWithAddedSaverDef) {
SaverDef saver_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(filename_tensor_name: "my_file")pb", &saver_def));
const ExportedModel exported_model = CreateExportedModelFromGraphDef(
GraphDef(), "", "", saver_def,
{}, {});
EXPECT_THAT(exported_model.saver_def().filename_tensor_name(), "my_file");
}
TEST(CreateSaverDefTest, CreateValidSaverDef) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(node {
name: "foo",
op: "_Arg",
attr {
key: "tf_saved_model.index_path",
value { list { s: "__tf_file_prefix" } }
}
})pb",
&graph_def));
const std::vector<std::string> control_ret_node_names = {
"restore_op_0", "tf_quant__save_op_0"};
TF_ASSERT_OK_AND_ASSIGN(const std::optional<SaverDef> saver_def,
CreateSaverDef(control_ret_node_names, graph_def));
ASSERT_NE(saver_def, std::nullopt);
EXPECT_THAT(saver_def->version(), SaverDef::V2);
EXPECT_THAT(saver_def->restore_op_name(), "restore_op_0");
EXPECT_THAT(saver_def->filename_tensor_name(), "foo:0");
EXPECT_THAT(saver_def->save_tensor_name(), "tf_quant__save_op_0:0");
}
TEST(CreateSaverDefTest, ReturnsNulloptIfNoSaverDefRelatedNodesExist) {
TF_ASSERT_OK_AND_ASSIGN(
const std::optional<SaverDef> saver_def,
CreateSaverDef({}, GraphDef()));
EXPECT_EQ(saver_def, std::nullopt);
}
TEST(CreateSaverDefTest, ReturnsErrorStatusIfSaverDefNodesPartiallyExist) {
GraphDef graph_def;
ASSERT_TRUE(TextFormat::ParseFromString(
R"pb(node { name: "foo", op: "_Arg" })pb", &graph_def));
const std::vector<std::string> control_ret_node_names = {
"restore_op_0", "tf_quant__save_op_0"};
const absl::StatusOr<std::optional<SaverDef>> saver_def =
CreateSaverDef(control_ret_node_names, graph_def);
EXPECT_THAT(
saver_def,
StatusIs(
absl::StatusCode::kInternal,
HasSubstr(
"should be either all empty strings or all non-empty strings")));
}
using ConvertMlirModuleToExportedModelTest =
::mlir::quant::QuantizationTestBase;
TEST_F(ConvertMlirModuleToExportedModelTest, SimpleGraphDefSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main(%arg: tensor<1x2xf32> {tf_saved_model.index_path = ["input_tensor:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output_tensor:0"]}) attributes {tf.entry_function = {inputs = "input_tensor:0", outputs = "output_tensor:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
tf_executor.fetch %arg : tensor<1x2xf32>
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
{});
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->graph_def().node(), SizeIs(2));
const auto arg_node_itr =
llvm::find_if(exported_model->graph_def().node(),
[](const NodeDef& node) { return node.op() == "_Arg"; });
ASSERT_NE(arg_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(arg_node_itr->name(), StrEq("input_tensor"));
ASSERT_TRUE(arg_node_itr->attr().contains("tf_saved_model.index_path"));
ASSERT_THAT(arg_node_itr->attr().at("tf_saved_model.index_path").list().s(),
SizeIs(1));
EXPECT_THAT(
arg_node_itr->attr().at("tf_saved_model.index_path").list().s()[0],
StrEq("input_tensor:0"));
const auto retval_node_itr =
llvm::find_if(exported_model->graph_def().node(),
[](const NodeDef& node) { return node.op() == "_Retval"; });
ASSERT_NE(retval_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(retval_node_itr->name(), StrEq("output_tensor"));
ASSERT_TRUE(retval_node_itr->attr().contains("tf_saved_model.index_path"));
ASSERT_THAT(
retval_node_itr->attr().at("tf_saved_model.index_path").list().s(),
SizeIs(1));
EXPECT_THAT(
retval_node_itr->attr().at("tf_saved_model.index_path").list().s()[0],
StrEq("output_tensor:0"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, CheckpointDirSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "my_checkpoint_dir",
{},
{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->checkpoint_dir(), StrEq("my_checkpoint_dir"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, FunctionAliasesSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func private @function_1() -> () attributes {tf._original_func_name = "__func_1"} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.NoOp"() : () -> ()
}
return
}
func.func private @function_2() -> () attributes {tf._original_func_name = "__func_2"} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.NoOp"() : () -> ()
}
return
}
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
%control_0 = tf_executor.island wraps "tf.PartitionedCall"() <{config = "", config_proto = "", executor_type = "", f = @function_1}> : () -> ()
%control_1 = tf_executor.island wraps "tf.PartitionedCall"() <{config = "", config_proto = "", executor_type = "", f = @function_2}> : () -> ()
tf_executor.fetch %control_0, %control_1 : !tf_executor.control, !tf_executor.control
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(
*module_op, "",
{{"alias_1", "function_1"}, {"alias_2", "function_2"}},
{});
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->function_aliases(), SizeIs(2));
EXPECT_THAT(exported_model->function_aliases().at("alias_1"),
StrEq("function_1"));
EXPECT_THAT(exported_model->function_aliases().at("alias_2"),
StrEq("function_2"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, AssetFileDefSet) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @main() -> () attributes {tf_saved_model.exported_names = ["main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
AssetFileDef asset_file_def{};
ASSERT_TRUE(
TextFormat::ParseFromString(R"pb(filename: "vocab_file.txt",
tensor_info { name: "arg_0:0" })pb",
&asset_file_def));
const std::vector<AssetFileDef> asset_file_defs = {asset_file_def};
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
asset_file_defs);
ASSERT_THAT(exported_model, IsOk());
ASSERT_THAT(exported_model->asset_file_defs(), SizeIs(1));
EXPECT_THAT(exported_model->asset_file_defs()[0].filename(),
StrEq("vocab_file.txt"));
EXPECT_THAT(exported_model->asset_file_defs()[0].tensor_info().name(),
StrEq("arg_0:0"));
}
TEST_F(ConvertMlirModuleToExportedModelTest,
InitNodeNameSetToLocOfControlOutput) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() <{initializers = []}> : () -> ()
"tf_saved_model.asset"() <{filename = "assets/vocab_file.txt", sym_name = "__tf_saved_model_asset0_vocab_file.txt"}> : () -> ()
func.func @main(%arg1: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output:0"]}) attributes {tf.entry_function = {inputs = "arg_0:0", outputs = "output:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
%o_0, %c_0 = tf_executor.island wraps "tf.Const"() <{value = dense<1.0> : tensor<1x2xf32>}> : () -> tensor<1x2xf32>
%o, %c = tf_executor.island wraps "tf.HashTableV2"() <{container = "", key_dtype = !tf_type.string, shared_name = "vocab_file.txt", use_node_name_sharing = false, value_dtype = i64}> {device = ""} : () -> tensor<!tf_type.resource>
%c_9 = tf_executor.island wraps "tf.InitializeTableFromTextFileV2"(%o, %arg1) <{delimiter = "\09", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64}> {_has_manual_control_dependencies = true, device = ""} : (tensor<!tf_type.resource>, tensor<!tf_type.string>) -> ()
%c_10 = tf_executor.island(%c_9) wraps "tf.NoOp"() : () -> () loc("init_op_init_all_tables")
tf_executor.fetch %o_0, %c_10 : tensor<1x2xf32>, !tf_executor.control
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->init_node_name(),
StrEq("init_op_init_all_tables"));
const auto init_node_itr = llvm::find_if(
exported_model->graph_def().node(), [](const NodeDef& node) {
return node.name() == "init_op_init_all_tables";
});
ASSERT_NE(init_node_itr, exported_model->graph_def().node().end());
EXPECT_THAT(init_node_itr->op(), StrEq("NoOp"));
ASSERT_THAT(init_node_itr->input(), SizeIs(1));
EXPECT_THAT(init_node_itr->input()[0],
StrEq("^tf.InitializeTableFromTextFileV2"));
}
TEST_F(ConvertMlirModuleToExportedModelTest, InitNodeNotSetIfLocNameMismatch) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() <{initializers = []}> : () -> ()
"tf_saved_model.asset"() <{filename = "assets/vocab_file.txt", sym_name = "__tf_saved_model_asset0_vocab_file.txt"}> : () -> ()
func.func @main(%arg1: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> (tensor<1x2xf32> {tf_saved_model.index_path = ["output:0"]}) attributes {tf.entry_function = {inputs = "arg_0:0", outputs = "output:0"}, tf_saved_model.exported_names = ["main"]} {
%0 = tf_executor.graph {
%output_0, %control_0 = tf_executor.island wraps "tf.Const"() <{value = dense<1.0> : tensor<1x2xf32>}> : () -> tensor<1x2xf32>
%output_1, %control_1 = tf_executor.island wraps "tf.HashTableV2"() <{container = "", key_dtype = !tf_type.string, shared_name = "vocab_file.txt", use_node_name_sharing = false, value_dtype = i64}> {device = ""} : () -> tensor<!tf_type.resource>
%control_2 = tf_executor.island wraps "tf.InitializeTableFromTextFileV2"(%output_1, %arg1) <{delimiter = "\09", key_index = -2 : i64, value_index = -1 : i64, vocab_size = -1 : i64}> {_has_manual_control_dependencies = true, device = ""} : (tensor<!tf_type.resource>, tensor<!tf_type.string>) -> ()
%control_3 = tf_executor.island(%control_2) wraps "tf.NoOp"() : () -> () loc("init_ok")
tf_executor.fetch %output_0, %control_3 : tensor<1x2xf32>, !tf_executor.control
}
return %0 : tensor<1x2xf32>
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "",
{},
{});
ASSERT_THAT(exported_model, IsOk());
EXPECT_THAT(exported_model->init_node_name(), IsEmpty());
}
TEST_F(ConvertMlirModuleToExportedModelTest,
ConversionFailureWhenNoMainFunction) {
mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
module attributes {tf_saved_model.semantics} {
func.func @not_main() -> () attributes {tf_saved_model.exported_names = ["not_main"]} {
tf_executor.graph {
tf_executor.fetch
}
return
}
}
)mlir");
ASSERT_TRUE(module_op);
const absl::StatusOr<ExportedModel> exported_model =
ConvertMlirModuleToExportedModel(*module_op, "my_checkpoint_dir",
{},
{});
EXPECT_THAT(exported_model,
StatusIs(absl::StatusCode::kFailedPrecondition,
HasSubstr("entry function `main` must be present")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0a8a1a8-d85a-4b67-a2ca-d22ba5e3021d | cpp | tensorflow/tensorflow | representative_dataset | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.cc | tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace stablehlo::quantization {
using ::tensorflow::quantization::RepresentativeDatasetFile;
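// Maps each RepresentativeDatasetConfig to a RepresentativeDatasetFile keyed
// by signature key (defaulting to "serving_default"); duplicate signature keys
// are rejected with an InvalidArgumentError.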
absl::StatusOr<absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
CreateRepresentativeDatasetFileMap(absl::Span<const RepresentativeDatasetConfig>
representative_dataset_configs) {
absl::flat_hash_map<std::string, RepresentativeDatasetFile>
repr_dataset_file_map{};
for (const RepresentativeDatasetConfig& dataset_config :
representative_dataset_configs) {
RepresentativeDatasetFile repr_dataset_file;
repr_dataset_file.set_tfrecord_file_path(dataset_config.tf_record().path());
const std::string signature_key = dataset_config.has_signature_key()
? dataset_config.signature_key()
: "serving_default";
if (repr_dataset_file_map.contains(signature_key)) {
return absl::InvalidArgumentError(
absl::StrCat("RepresentativeDatasetConfig should not contain "
"duplicate signature key: ",
signature_key));
}
repr_dataset_file_map[signature_key] = std::move(repr_dataset_file);
}
return repr_dataset_file_map;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.h"
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant::stablehlo {
namespace {
using ::stablehlo::quantization::RepresentativeDatasetConfig;
using ::tensorflow::quantization::RepresentativeDatasetFile;
using ::testing::Contains;
using ::testing::HasSubstr;
using ::testing::Key;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
TEST(CreateRepresentativeDatasetFileMapTest,
ConfigWithoutExplicitSignatureKeyMappedToServingDefault) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config{};
*(config.mutable_tf_record()->mutable_path()) = "test_path";
representative_dataset_configs.push_back(config);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
ASSERT_THAT(representative_dataset_file_map, IsOk());
ASSERT_THAT(*representative_dataset_file_map, SizeIs(1));
EXPECT_THAT(*representative_dataset_file_map,
Contains(Key("serving_default")));
EXPECT_THAT(representative_dataset_file_map->at("serving_default")
.tfrecord_file_path(),
StrEq("test_path"));
}
TEST(CreateRepresentativeDatasetFileMapTest, ConfigWithExplicitSignatureKey) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config{};
config.set_signature_key("test_signature_key");
*(config.mutable_tf_record()->mutable_path()) = "test_path";
representative_dataset_configs.push_back(config);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
ASSERT_THAT(representative_dataset_file_map, IsOk());
ASSERT_THAT(*representative_dataset_file_map, SizeIs(1));
EXPECT_THAT(*representative_dataset_file_map,
Contains(Key(StrEq("test_signature_key"))));
EXPECT_THAT(representative_dataset_file_map->at("test_signature_key")
.tfrecord_file_path(),
StrEq("test_path"));
}
TEST(CreateRepresentativeDatasetFileMapTest,
ConfigWithDuplicateSignatureKeyReturnsInvalidArgumentError) {
std::vector<RepresentativeDatasetConfig> representative_dataset_configs;
RepresentativeDatasetConfig config_1{};
config_1.set_signature_key("serving_default");
*(config_1.mutable_tf_record()->mutable_path()) = "test_path_1";
representative_dataset_configs.push_back(config_1);
RepresentativeDatasetConfig config_2{};
*(config_2.mutable_tf_record()->mutable_path()) = "test_path_2";
representative_dataset_configs.push_back(config_2);
const absl::StatusOr<
absl::flat_hash_map<std::string, RepresentativeDatasetFile>>
representative_dataset_file_map =
CreateRepresentativeDatasetFileMap(representative_dataset_configs);
EXPECT_THAT(representative_dataset_file_map,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("duplicate signature key: serving_default")));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/cc/calibration/representative_dataset_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b3ef8084-b9ff-4365-81fc-8e5630925d83 | cpp | tensorflow/tensorflow | attrs_and_constraints | tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc | tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc | #include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include <cstdint>
#include <optional>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h"
namespace mlir::quant {
using ::mlir::stablehlo::DotGeneralOp;
bool HasStaticShape(Value value) {
auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType());
if (!shaped_type) return false;
return shaped_type.hasStaticShape();
}
bool HasStaticShapeAtDims(Value value, const ArrayRef<int> dims) {
auto shaped_type = mlir::dyn_cast<ShapedType>(value.getType());
if (!shaped_type || !shaped_type.hasRank()) return false;
for (auto dim : dims) {
if (shaped_type.isDynamicDim(dim)) return false;
}
return true;
}
Type CloneTypeWithNewElementType(Type old_type, Type element_type) {
if (!mlir::isa<ShapedType>(old_type)) return {};
return mlir::cast<ShapedType>(old_type).clone(element_type);
}
SmallVector<Value> CloneOpWithReplacedOperands(
OpBuilder& builder, Operation* op, const ArrayRef<Value> new_operands) {
IRMapping mapping;
for (const auto& arg : enumerate(new_operands)) {
mapping.map(op->getOperand(arg.index()), arg.value());
}
return builder.clone(*op, mapping)->getResults();
}
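// Casts an int64_t value to int32_t, returning failure (with a debug log) when
// the value does not fit in 32 bits.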
FailureOr<int32_t> CastI64ToI32(const int64_t value) {
if (!llvm::isInt<32>(value)) {
DEBUG_WITH_TYPE(
"mlir-quant-attrs-and-constraints",
llvm::dbgs()
<< "Tried to cast " << value
<< "from int64 to int32, but lies out of range of int32.\n");
return failure();
}
return static_cast<int32_t>(value);
}
FailureOr<SmallVector<int32_t>> CastI64ArrayToI32(
const ArrayRef<int64_t> int64_array) {
SmallVector<int32_t> int32_array{};
int32_array.reserve(int64_array.size());
for (const int64_t i64 : int64_array) {
FailureOr<int32_t> cast_i32 = CastI64ToI32(i64);
if (failed(cast_i32)) return failure();
int32_array.push_back(*cast_i32);
}
return int32_array;
}
StringRef GetEntryFunctionName(TF::XlaCallModuleOp op) {
if (!op->hasAttrOfType<FlatSymbolRefAttr>(
TF::kStablehloEntryFunctionAttrName)) {
return StringRef();
}
return op
->getAttrOfType<FlatSymbolRefAttr>(TF::kStablehloEntryFunctionAttrName)
.getValue();
}
bool IsHybridQuantizedOp(Operation* op) {
if ((op->getNumOperands() != 2 && op->getNumOperands() != 3) ||
op->getResultTypes().size() != 1) {
return false;
}
Type lhs_type = op->getOperand(0).getType();
Type rhs_type = op->getOperand(1).getType();
Type result_type = op->getResult(0).getType();
return !IsQuantizedTensorType(lhs_type) && IsQuantizedTensorType(rhs_type) &&
!IsQuantizedTensorType(result_type);
}
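// Returns true when `dot_general_op` behaves like a fully connected layer:
// rank-1 or rank-2 input, rank-2 filter, a single contracting dimension on the
// last input axis, and no batching dimensions.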
absl::StatusOr<bool> IsDotGeneralFullyConnected(DotGeneralOp dot_general_op) {
if (dot_general_op == nullptr)
return absl::InvalidArgumentError(
"Given dot_general op cannot be null when checking "
"`IsDotGeneralBatchMatmul`.");
const ::mlir::stablehlo::DotDimensionNumbersAttr dot_dimension_numbers =
dot_general_op.getDotDimensionNumbers();
const ArrayRef<int64_t> lhs_contracting_dims =
dot_dimension_numbers.getLhsContractingDimensions();
const ArrayRef<int64_t> rhs_contracting_dims =
dot_dimension_numbers.getRhsContractingDimensions();
const int64_t input_rank =
mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(0).getType())
.getRank();
const int64_t filter_rank =
mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType())
.getRank();
const bool has_proper_rank =
(input_rank == 1 || input_rank == 2) && filter_rank == 2;
const bool has_proper_contracting_dim =
lhs_contracting_dims.size() == 1 && rhs_contracting_dims.size() == 1 &&
lhs_contracting_dims[0] == input_rank - 1;
const bool is_not_batch_op =
dot_dimension_numbers.getLhsBatchingDimensions().empty();
const bool has_proper_quantization_dimension =
absl::c_find(rhs_contracting_dims, filter_rank) ==
rhs_contracting_dims.end();
return has_proper_rank && has_proper_contracting_dim && is_not_batch_op &&
has_proper_quantization_dimension;
}
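// Returns the filter dimension to use for per-channel quantization (the last
// filter axis) when the dot_general op is fully connected; std::nullopt
// otherwise.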
std::optional<int64_t> GetDotGeneralQuantizationDim(
DotGeneralOp dot_general_op) {
if (dot_general_op == nullptr) return std::nullopt;
const int64_t filter_rank =
mlir::dyn_cast<ShapedType>(dot_general_op.getOperand(1).getType())
.getRank();
const bool is_per_axis_quantizable =
IsDotGeneralFullyConnected(dot_general_op).value();
if (!is_per_axis_quantizable) return std::nullopt;
return filter_rank - 1;
}
bool ContainsConvOrDot(StringRef str) {
return str.contains("_conv") || str.contains("_dot_general");
}
} | #include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include <cstdint>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant {
namespace {
using ::mlir::stablehlo::AddOp;
using ::mlir::stablehlo::ConstantOp;
using ::mlir::stablehlo::ConvolutionOp;
using ::mlir::stablehlo::DotGeneralOp;
using ::mlir::stablehlo::SubtractOp;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::Optional;
using ::tsl::testing::StatusIs;
using AttrsAndConstraintsTest = ::mlir::quant::QuantizationTestBase;
constexpr absl::string_view kModuleStatic = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleDynamic = R"mlir(
module {
func.func @main(%arg0: tensor<?x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<?x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<?x1024xf32>, tensor<1024x3xf32>) -> tensor<?x3xf32>
return %0 : tensor<?x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleMultipleUses = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%cst = stablehlo.constant dense<1.0> : tensor<1x3xf32>
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
%1 = stablehlo.subtract %cst, %0 : tensor<1x3xf32>
%2 = stablehlo.add %0, %cst : tensor<1x3xf32>
return %2 : tensor<1x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleDotWeightOnlyPtq = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleXlaCallModuleNoEntryNoQuantTrait = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_original_entry_function = "composite_fn_1"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
constexpr absl::string_view kModulePartitionedCall = R"mlir(
module {
func.func @main(%arg0: tensor<2x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<2x2xf32>) {
%cst = "tf.Const"() {device = "", value = dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>} : () -> tensor<2x2xf32>
%0 = "tf.PartitionedCall"(%arg0, %cst) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_fn_1} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32> loc(callsite("test@main"("MatMul") at "QuantizationUnit(\12\06MatMul\1a\07main)"))
return %0 : tensor<2x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} {
%0 = "tf.MatMul"(%arg0, %arg1) {attr_map = "0:transpose_a,1:transpose_b", device = "", transpose_a = false, transpose_b = false} : (tensor<2x2xf32>, tensor<2x2xf32>) -> tensor<2x2xf32>
return %0 : tensor<2x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleHybridQuantized = R"mlir(
module {
func.func @main(%arg0: tensor<1x2xf32>, %arg1: tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03:0>> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<1x3xf32>) {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<1x2xf32>, tensor<2x3x!quant.uniform<i8:f32, 6.000000e-03:0>>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(AttrsAndConstraintsTest, HasStaticShapeSucceedsWithStaticShapes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Value dot_general_result =
FindOperationOfType<DotGeneralOp>(main_fn)->getResult(0);
EXPECT_TRUE(HasStaticShape(dot_general_result));
EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {0}));
EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {1}));
}
TEST_F(AttrsAndConstraintsTest, HasStaticShapeFailsWithDynamicShapes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleDynamic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Value dot_general_result =
FindOperationOfType<DotGeneralOp>(main_fn)->getResult(0);
EXPECT_FALSE(HasStaticShape(dot_general_result));
EXPECT_FALSE(HasStaticShapeAtDims(dot_general_result, {0}));
EXPECT_TRUE(HasStaticShapeAtDims(dot_general_result, {1}));
}
TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsTrueForMatchingRank) {
constexpr absl::string_view kConstantOpWithRankFour =
R"mlir(%0 = stablehlo.constant dense<0> : tensor<1x1x1x1xi8>)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kConstantOpWithRankFour);
ASSERT_TRUE(module_op);
ASSERT_FALSE(module_op->getBodyRegion().empty());
ASSERT_FALSE(module_op->getBodyRegion().front().empty());
auto constant_op = dyn_cast_or_null<mlir::stablehlo::ConstantOp>(
module_op->getBodyRegion().front().front());
ASSERT_THAT(constant_op, NotNull());
EXPECT_TRUE(HasRankOf(constant_op, 4));
}
TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsFalseForNonMatchingRank) {
constexpr absl::string_view kConstantOpWithRankFour =
R"mlir(%0 = stablehlo.constant dense<0> : tensor<1x1x1x1xi8>)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kConstantOpWithRankFour);
ASSERT_TRUE(module_op);
ASSERT_FALSE(module_op->getBodyRegion().empty());
ASSERT_FALSE(module_op->getBodyRegion().front().empty());
auto constant_op = dyn_cast_or_null<mlir::stablehlo::ConstantOp>(
module_op->getBodyRegion().front().front());
ASSERT_THAT(constant_op, NotNull());
EXPECT_FALSE(HasRankOf(constant_op, 3));
}
TEST_F(AttrsAndConstraintsTest,
HasRankOfReturnsTrueForMatchingRankWithUnknownDimensions) {
constexpr absl::string_view kArgumentWithUnknownDims = R"mlir(
func.func @unknown_dims_arg(%arg: tensor<?x?xi8>) -> tensor<?x?xi8> {
return %arg : tensor<?x?xi8>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kArgumentWithUnknownDims);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("unknown_dims_arg");
ASSERT_THAT(func_op, NotNull());
ASSERT_THAT(func_op.getNumArguments(), Eq(1));
EXPECT_TRUE(HasRankOf(func_op.getArgument(0), 2));
}
TEST_F(AttrsAndConstraintsTest, HasRankOfReturnsFalseForUnknownRank) {
constexpr absl::string_view kArgumentWithUnknownRank = R"mlir(
func.func @unknown_rank_arg(%arg: tensor<*xi8>) -> tensor<*xi8> {
return %arg : tensor<*xi8>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kArgumentWithUnknownRank);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("unknown_rank_arg");
ASSERT_THAT(func_op, NotNull());
ASSERT_THAT(func_op.getNumArguments(), Eq(1));
EXPECT_FALSE(HasRankOf(func_op.getArgument(0), 1));
}
TEST_F(AttrsAndConstraintsTest, TryCastSucceeds) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn);
ASSERT_THAT(dot_general_op, NotNull());
EXPECT_TRUE(succeeded(
TryCast<DotGeneralOp>(dot_general_op, "dot_general_op")));
}
TEST_F(AttrsAndConstraintsTest, TryCastFailsOnWrongType) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn);
ASSERT_THAT(dot_general_op, NotNull());
EXPECT_TRUE(
failed(TryCast<AddOp>(dot_general_op, "dot_general_op")));
}
TEST_F(AttrsAndConstraintsTest, TryCastFailsOnNullPtr) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStatic);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto op_nullptr =
FindOperationOfType<DotGeneralOp>(main_fn)->getNextNode()->getNextNode();
EXPECT_THAT(op_nullptr, IsNull());
EXPECT_TRUE(failed(TryCast<DotGeneralOp>(op_nullptr, "op_nullptr")));
EXPECT_TRUE(failed(TryCast<DotGeneralOp>(nullptr, "nullptr")));
}
TEST_F(AttrsAndConstraintsTest, I64ValueInI32RangeAreCastedCorrectly) {
EXPECT_TRUE(succeeded(CastI64ToI32(llvm::minIntN(32))));
EXPECT_TRUE(succeeded(CastI64ToI32(llvm::maxIntN(32))));
}
TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ValueOutOfI32Range) {
EXPECT_TRUE(failed(CastI64ToI32(llvm::minIntN(32) - 10)));
EXPECT_TRUE(failed(CastI64ToI32(llvm::maxIntN(32) + 10)));
}
TEST_F(AttrsAndConstraintsTest, I64ArrayInI32RangeAreCastedCorrectly) {
const SmallVector<int64_t> array_i64 = {llvm::minIntN(32), -2, -1, 0, 1, 2,
llvm::maxIntN(32)};
FailureOr<SmallVector<int32_t>> array_i32 = CastI64ArrayToI32(array_i64);
EXPECT_TRUE(succeeded(array_i32));
EXPECT_THAT(
*array_i32,
ElementsAreArray({static_cast<int32_t>(llvm::minIntN(32)), -2, -1, 0, 1,
2, static_cast<int32_t>(llvm::maxIntN(32))}));
}
TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ArrayUnderI32Range) {
const int64_t under_min_i32 = -2147483658;
ArrayRef<int64_t> array_i64{under_min_i32};
EXPECT_EQ(under_min_i32, llvm::minIntN(32) - 10);
EXPECT_TRUE(failed(CastI64ArrayToI32(array_i64)));
}
TEST_F(AttrsAndConstraintsTest, CastingFailsForI64ArrayAboveI32Range) {
const int64_t below_max_i32 = 2147483657;
ArrayRef<int64_t> array_i64{below_max_i32};
EXPECT_EQ(below_max_i32, llvm::maxIntN(32) + 10);
EXPECT_TRUE(failed(CastI64ArrayToI32(array_i64)));
}
TEST_F(AttrsAndConstraintsTest, FindUserOfDifferentTypes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleMultipleUses);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = FindOperationOfType<DotGeneralOp>(main_fn);
ASSERT_THAT(dot_general_op, NotNull());
EXPECT_THAT(FindUserOfType<AddOp>(dot_general_op), NotNull());
EXPECT_THAT(FindUserOfType<SubtractOp>(dot_general_op), NotNull());
EXPECT_THAT(FindUserOfType<>(dot_general_op), NotNull());
EXPECT_THAT(FindUserOfType<ConvolutionOp>(dot_general_op), IsNull());
}
TEST_F(AttrsAndConstraintsTest, FindOperandOfDifferentTypes) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleMultipleUses);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto subtract_op = FindOperationOfType<SubtractOp>(main_fn);
ASSERT_THAT(subtract_op, NotNull());
EXPECT_THAT(FindOperandOfType<DotGeneralOp>(subtract_op), NotNull());
EXPECT_THAT(FindOperandOfType<ConstantOp>(subtract_op), NotNull());
EXPECT_THAT(FindOperandOfType<>(subtract_op), NotNull());
EXPECT_THAT(FindOperandOfType<AddOp>(subtract_op), IsNull());
}
TEST_F(AttrsAndConstraintsTest, XlaCallModuleOpGetFuncAttr) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
FlatSymbolRefAttr xla_call_op_attr = GetFuncAttr(xla_call_module_op);
EXPECT_EQ(xla_call_op_attr.getValue(), "composite_fn_1");
}
TEST_F(AttrsAndConstraintsTest, PartitionedCallGetFuncAttr) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModulePartitionedCall);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto partitioned_call_op =
FindOperationOfType<TF::PartitionedCallOp>(main_fn);
ASSERT_THAT(partitioned_call_op, NotNull());
FlatSymbolRefAttr partitioned_call_op_attr = GetFuncAttr(partitioned_call_op);
EXPECT_EQ(partitioned_call_op_attr.getValue(), "composite_fn_1");
}
TEST_F(AttrsAndConstraintsTest, GetEntryFunctionNameCorrectly) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_EQ(GetEntryFunctionName(xla_call_module_op),
StringRef("composite_fn_1"));
}
TEST_F(AttrsAndConstraintsTest, GetEntryFunctionNameWhenNotSet) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_THAT(GetEntryFunctionName(xla_call_module_op), IsEmpty());
}
TEST_F(AttrsAndConstraintsTest, HasQuantizableTraitTrue) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_TRUE(HasQuantizableTrait(xla_call_module_op));
}
TEST_F(AttrsAndConstraintsTest, HasQuantizableTraitFalse) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
ASSERT_THAT(xla_call_module_op, NotNull());
EXPECT_FALSE(HasQuantizableTrait(xla_call_module_op));
}
TEST_F(AttrsAndConstraintsTest, IsHybridQuantizedOpTrue) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleHybridQuantized);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* dot_general = FindOperationOfType<DotGeneralOp>(main_fn);
EXPECT_TRUE(IsHybridQuantizedOp(dot_general));
}
TEST_F(AttrsAndConstraintsTest, IsHybridQuantizedOpFalse) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* call_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
EXPECT_FALSE(IsHybridQuantizedOp(call_op));
}
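// The two modules below drive the IsDotGeneralFullyConnected and
// GetDotGeneralQuantizationDim tests: the first dot_general only has a
// contracting dimension (fully-connected style), while the second also has a
// batching dimension (batch matmul style).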
constexpr absl::string_view kModuleDotGeneralFullyConnected = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
constexpr absl::string_view kModuleDotGeneralBatchMatmul = R"mlir(
module {
func.func @main(%arg0: tensor<2x2x2xf32>, %arg1: tensor<2x2x2xf32>) -> tensor<2x2x2xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1,
batching_dims = [0] x [0],
contracting_dims = [2] x [1],
precision = [DEFAULT, DEFAULT]
: (tensor<2x2x2xf32>, tensor<2x2x2xf32>) -> tensor<2x2x2xf32>
return %0 : tensor<2x2x2xf32>
}
}
)mlir";
TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsError) {
  DotGeneralOp dot_general_op = nullptr;
  EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "Given dot_general op cannot be null when checking "
                       "`IsDotGeneralBatchMatmul`"));
}
TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsTrue) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralFullyConnected);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op), true);
}
TEST_F(AttrsAndConstraintsTest, IsDotGeneralFullyConnectedReturnsFalse) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralBatchMatmul);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(IsDotGeneralFullyConnected(dot_general_op), false);
}
TEST_F(AttrsAndConstraintsTest, DotGeneralFullyConnectedReturnsQuantDim) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralFullyConnected);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(GetDotGeneralQuantizationDim(dot_general_op), Optional(1));
}
TEST_F(AttrsAndConstraintsTest, DotGeneralBatchMatmulReturnsNullQuantDim) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotGeneralBatchMatmul);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op = *main_fn.getOps<DotGeneralOp>().begin();
EXPECT_THAT(GetDotGeneralQuantizationDim(dot_general_op), Eq(std::nullopt));
}
TEST_F(AttrsAndConstraintsTest, ContainsConvOrDotTrue) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotWeightOnlyPtq);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
const StringRef function_name = GetEntryFunctionName(call_op);
EXPECT_TRUE(ContainsConvOrDot(function_name));
}
TEST_F(AttrsAndConstraintsTest, ContainsConvOrDotFalse) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleNoEntryNoQuantTrait);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
const StringRef function_name = GetEntryFunctionName(call_op);
EXPECT_FALSE(ContainsConvOrDot(function_name));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
50eb565e-1a53-429c-ba86-b7ff26d47687 | cpp | tensorflow/tensorflow | lift_as_function_call | tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc | tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc | #include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <queue>
#include <stack>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/utils/stablehlo_type_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/quantization_unit_loc.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_call_module_attrs.h"
#include "tensorflow/core/ir/types/dialect.h"
#include "tensorflow/core/platform/mutex.h"
#include "tsl/platform/protobuf.h"
namespace mlir::quant {
using ::stablehlo::quantization::Method;
using ::tsl::protobuf::TextFormat;
constexpr int64_t kDefaultVersion = 9;
constexpr StringRef kPlatformCpu = "CPU";
constexpr StringRef kStablehloModuleAttrsAttrName = "_stablehlo_module_attrs";
constexpr StringRef kUsesShapePolymorphismAttr = "jax.uses_shape_polymorphism";
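// Returns true if `op` is nested inside a lifted (composite) function, i.e. a
// func.func carrying the fused-function attribute that `LiftAsFunctionCall`
// below sets on the outlined function.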
bool IsInLiftedFunc(Operation* op) {
if (op == nullptr) return false;
return op->getParentOfType<func::FuncOp>()->hasAttr(kFusedFunctionAttr);
}
bool IsInStableHloOpRegion(Operation* op) {
if (op == nullptr) return false;
auto parent_op = op->getParentOp();
return parent_op != nullptr && stablehlo::IsStablehloOp(parent_op);
}
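// Inserts `function` into `module`'s symbol table under `func_name`, appending
// a numeric suffix ("_1", "_2", ...) until the name is unique. A process-wide
// mutex guards the lookup/insert so concurrent lifting does not race on the
// symbol table. Returns the final symbol name.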
StringAttr InsertToSymbolTable(Operation& module, Operation& function,
const StringRef func_name) {
static tensorflow::mutex* mtx = new tensorflow::mutex();
tensorflow::mutex_lock lock(*mtx);
SymbolTable symbol_table(&module);
std::string unique_name = func_name.str();
int32_t uniquing_counter = 0;
while (symbol_table.lookup(unique_name) != nullptr) {
++uniquing_counter;
unique_name = absl::StrCat(func_name.str(), "_", uniquing_counter);
}
function.setAttr("sym_name",
StringAttr::get(module.getContext(), unique_name));
return symbol_table.insert(&function);
}
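// Creates a `tf.PartitionedCall` op that calls `func_name` with `args` and
// marks it as fully quantizable via the quantization trait attribute.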
ValueRange CreateTFPartitionedCallOp(OpBuilder& builder,
const Location location,
const StringRef func_name,
const TypeRange output_types,
const ValueRange args) {
TF::PartitionedCallOp call_op = builder.create<TF::PartitionedCallOp>(
location, output_types, args,
FlatSymbolRefAttr::get(builder.getStringAttr(func_name)),
"", "", "");
call_op->setAttr(
kQuantTraitAttrName,
builder.getStringAttr(StringRef(
std::string(QuantTraitValues[QuantizationTrait::FullyQuantizable]))));
return call_op.getOutput();
}
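// Creates a `tf.XlaCallModule` op that calls `func_name` with `args`. The op
// is populated with the result shapes, a CPU platform, the (original) entry
// function attributes, the fully-quantizable trait, and a module attribute
// enabling shape polymorphism.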
ValueRange CreateTFXlaCallModuleOp(OpBuilder& builder, const Location location,
const StringRef func_name,
const TypeRange output_types,
const ValueRange args) {
MLIRContext* ctx = builder.getContext();
SmallVector<Attribute> shape_attrs;
for (const Type result_type : output_types) {
shape_attrs.push_back(
tf_type::ShapeAttr::get(ctx, mlir::cast<ShapedType>(result_type)));
}
auto empty_array_attr = ArrayAttr::get(ctx, {});
auto platforms = ArrayAttr::get(ctx, {StringAttr::get(ctx, kPlatformCpu)});
auto call_op = builder.create<TF::XlaCallModuleOp>(
location,
output_types,
args,
kDefaultVersion, "",
ArrayAttr::get(ctx, shape_attrs),
empty_array_attr,
platforms,
empty_array_attr,
false,
empty_array_attr);
call_op->setAttr(TF::kStablehloEntryFunctionAttrName,
FlatSymbolRefAttr::get(builder.getStringAttr(func_name)));
call_op->setAttr(kOriginalStablehloEntryFunctionAttrName,
builder.getStringAttr(func_name));
call_op->setAttr(
kQuantTraitAttrName,
builder.getStringAttr(StringRef(
std::string(QuantTraitValues[QuantizationTrait::FullyQuantizable]))));
call_op->setAttr(kStablehloModuleAttrsAttrName,
builder.getDictionaryAttr(builder.getNamedAttr(
kUsesShapePolymorphismAttr, builder.getBoolAttr(true))));
return call_op.getOutput();
}
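// Dispatches to one of the two call-op builders above based on
// `call_op_type`.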
ValueRange CreateFunctionCallOp(OpBuilder& builder, const Location location,
const FunctionCallOpType call_op_type,
const StringRef func_name,
const TypeRange output_types,
const ValueRange args) {
switch (call_op_type) {
case FunctionCallOpType::TFXlaCallModuleOp:
return CreateTFXlaCallModuleOp(builder, location, func_name, output_types,
args);
case FunctionCallOpType::TFPartitionedCallOp:
return CreateTFPartitionedCallOp(builder, location, func_name,
output_types, args);
}
}
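// Collects the ops on the paths from `arguments` to `results` by walking
// operand edges backwards from the results and stopping at the arguments. The
// returned list is deduplicated and ordered so that earlier ops do not depend
// on later ones, which makes it safe to clone them in order.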
SmallVector<Operation*> FindOpsFromArgumentsToResults(
const ArrayRef<Value> arguments, const ArrayRef<Value> results) {
std::queue<Value> value_queue;
for (Value result : results) {
value_queue.push(result);
}
absl::flat_hash_set<mlir::detail::ValueImpl*> argument_set;
for (Value argument : arguments) {
argument_set.insert(argument.getImpl());
}
std::stack<Operation*> op_stack;
while (!value_queue.empty()) {
Value current_value = value_queue.front();
value_queue.pop();
Operation* defining_node = current_value.getDefiningOp();
if (defining_node == nullptr) continue;
op_stack.push(defining_node);
for (Value arg : defining_node->getOperands()) {
if (!argument_set.contains(arg.getImpl())) {
value_queue.push(arg);
}
}
}
SmallVector<Operation*> sorted_ops;
absl::flat_hash_set<Operation*> unique_ops;
while (!op_stack.empty()) {
Operation* current_op = op_stack.top();
op_stack.pop();
if (unique_ops.contains(current_op)) continue;
sorted_ops.push_back(current_op);
unique_ops.insert(current_op);
}
return sorted_ops;
}
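// Attaches `attributes` to the ops in `ops` that already carry an attribute of
// the same name. StableHLO ops receive the attribute directly; TF ops instead
// get an "<index>:<attr name>" entry appended to their `attr_map` string
// attribute. Attributes whose value is the null placeholder string are
// skipped, and an attribute name that matches no op is an error.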
LogicalResult SetAttributeMap(MLIRContext& context,
const ArrayRef<NamedAttribute> attributes,
const ArrayRef<Operation*> ops) {
llvm::SmallDenseMap<NamedAttribute, Operation*> attr_to_op_map;
for (Operation* op : ops) {
for (const NamedAttribute named_attr : op->getAttrs()) {
attr_to_op_map.insert({named_attr, op});
}
}
for (int idx : llvm::seq<int>(0, attributes.size())) {
const NamedAttribute& attribute = attributes[idx];
if (const auto string_attr =
mlir::dyn_cast_or_null<StringAttr>(attribute.getValue());
string_attr != nullptr &&
string_attr.getValue() == kNullAttributeValue) {
continue;
}
if (std::find_if(
attr_to_op_map.begin(), attr_to_op_map.end(), [&](auto attr_op) {
return std::get<0>(attr_op).getName() == attribute.getName();
}) == attr_to_op_map.end()) {
emitError(UnknownLoc::get(&context),
"Could not find attribute: " + attribute.getName().str());
return failure();
}
Operation* owner_op;
for (const auto& [attr, val] : attr_to_op_map) {
if (attr.getName() == attribute.getName()) owner_op = val;
}
if (stablehlo::IsStablehloOp(owner_op)) {
owner_op->setAttr(StringRef(attribute.getName()), attribute.getValue());
} else {
owner_op = attr_to_op_map[attribute];
std::string new_attr_map_str{};
if (owner_op->hasAttr(kAttrMapAttribute)) {
new_attr_map_str =
owner_op->getAttrOfType<StringAttr>(kAttrMapAttribute).str();
absl::StrAppend(&new_attr_map_str, ",");
}
const std::string identifier = std::to_string(idx);
const StringAttr attribute_name = attribute.getName();
absl::StrAppend(&new_attr_map_str, identifier, ":", attribute_name.str());
owner_op->setAttr(kAttrMapAttribute,
StringAttr::get(&context, new_attr_map_str));
}
}
return success();
}
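// Outlines the ops between `arguments` and `results` into a new private
// function, inserts that function into the module's symbol table, and replaces
// the original computation with a call op (tf.XlaCallModule or
// tf.PartitionedCall, per `call_op_type`). Returns the call op's results, or
// an empty vector if `results` is empty.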
SmallVector<Value, 4> LiftAsFunctionCall(
OpBuilder& builder, const Location location,
const FunctionCallOpType call_op_type, const StringRef func_name,
const ArrayRef<Value> arguments, const ArrayRef<Value> results,
const ArrayRef<NamedAttribute> attributes) {
MLIRContext* context = builder.getContext();
if (results.empty()) {
emitError(UnknownLoc::get(context), "No result values specified");
return {};
}
Operation* result_op = results[0].getDefiningOp();
auto module = result_op->getParentOfType<ModuleOp>();
auto current_func = result_op->getParentOfType<func::FuncOp>();
auto guard = OpBuilder::InsertionGuard(builder);
builder.setInsertionPointAfter(current_func);
TypeRange arg_types{ValueRange{arguments}};
TypeRange result_types{ValueRange{results}};
auto func_type = FunctionType::get(context, arg_types, result_types);
SmallVector<Location> arg_locs;
for (Value arg : arguments) {
arg_locs.push_back(arg.getLoc());
}
auto wrap_func = builder.create<func::FuncOp>(location, func_name, func_type);
wrap_func.setVisibility(SymbolTable::Visibility::Private);
if (call_op_type == FunctionCallOpType::TFXlaCallModuleOp) {
wrap_func->setAttr(TF::kFromXlaCallModuleAttrName, builder.getUnitAttr());
}
wrap_func->setAttr(kFusedFunctionAttr, builder.getUnitAttr());
builder.createBlock(&wrap_func.getBody(), wrap_func.begin(), arg_types,
arg_locs);
IRMapping mapping;
for (int32_t i : llvm::seq<int32_t>(0, arguments.size())) {
mapping.map(arguments[i], wrap_func.getArgument(i));
}
auto cloning_ops = FindOpsFromArgumentsToResults(arguments, results);
Location call_op_loc = location;
for (Operation* op : cloning_ops) {
std::optional<QuantizationUnitLoc::QuantizationUnit> unit =
FindQuantizationUnitFromLoc(op->getLoc());
if (unit.has_value()) {
call_op_loc = QuantizationUnitLoc(builder.getContext(), unit.value());
}
}
if (failed(SetAttributeMap(*context, attributes, cloning_ops))) {
current_func.emitError() << "Some attributes couldn't be found.";
}
for (Operation* op : cloning_ops) {
builder.clone(*op, mapping);
}
SmallVector<Value> return_values;
for (Value result : results) {
return_values.push_back(mapping.lookupOrNull(result));
}
builder.create<func::ReturnOp>(location, return_values);
StringAttr new_func_name =
InsertToSymbolTable(*module, *wrap_func, func_name);
builder.setInsertionPointAfter(result_op);
ValueRange new_results =
CreateFunctionCallOp(builder, call_op_loc, call_op_type,
new_func_name.getValue(), result_types, arguments);
return SmallVector<Value, 4>(new_results.begin(), new_results.end());
}
SmallVector<Value, 4> LiftAsFunctionCall(OpBuilder& builder,
const Location location,
const FunctionCallOpType call_op_type,
const StringRef func_name,
const ArrayRef<Value> arguments,
const ArrayRef<Value> results) {
SmallVector<NamedAttribute> attributes;
return LiftAsFunctionCall(builder, location, call_op_type, func_name,
arguments, results, attributes);
}
SmallVector<Value> AppendToVector(const ArrayRef<Value> arguments,
Value append) {
SmallVector<Value> ret(arguments);
ret.push_back(append);
return ret;
}
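// Returns true if the einsum `equation_attr` can be lowered to XlaDotV2. The
// equation must have exactly two operands, no ellipsis ('.'), batch dimension
// counts that match unless one side has none, and output labels in which the
// LHS's non-contracting dimensions precede the RHS's. For example
// "ijk,ikm->ijm" is supported, while "ijk->ikj" (single operand) and
// "...gse->...gs" (ellipsis) are not.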
bool IsEinsumSupportedByXlaDotV2(StringAttr equation_attr) {
StringRef equation = equation_attr.getValue();
if (!absl::StrContains(equation, "->") || !absl::StrContains(equation, ",") ||
absl::StrContains(equation, ".")) {
return false;
}
int idx_arrow = equation.find("->");
StringRef calc_eq = equation.substr(0, idx_arrow);
StringRef out_eq = equation.substr(idx_arrow + 2);
int idx_comma = calc_eq.find(',');
StringRef lhs_eq = calc_eq.substr(0, idx_comma);
StringRef rhs_eq = calc_eq.substr(idx_comma + 1);
if (absl::StrContains(rhs_eq, ",")) return false;
int lhs_out_idx_start = out_eq.size();
int lhs_out_idx_end = -1;
int rhs_out_idx_start = out_eq.size();
int rhs_out_idx_end = -1;
int lhs_batch_dim_size = 0;
int rhs_batch_dim_size = 0;
for (const char c : lhs_eq) {
if (absl::StrContains(out_eq, c) && absl::StrContains(rhs_eq, c)) {
lhs_batch_dim_size++;
} else if (absl::StrContains(out_eq, c)) {
const int out_idx = out_eq.find(c);
if (out_idx < lhs_out_idx_end) {
return false;
}
lhs_out_idx_start = std::min(lhs_out_idx_start, out_idx);
lhs_out_idx_end = std::max(lhs_out_idx_end, out_idx);
}
}
for (const char c : rhs_eq) {
if (absl::StrContains(out_eq, c) && absl::StrContains(lhs_eq, c)) {
rhs_batch_dim_size++;
} else if (absl::StrContains(out_eq, c)) {
int out_idx = out_eq.find(c);
if (out_idx < rhs_out_idx_end) {
return false;
}
if (out_idx < rhs_out_idx_start) rhs_out_idx_start = out_idx;
if (out_idx > rhs_out_idx_end) rhs_out_idx_end = out_idx;
}
}
if (lhs_batch_dim_size != rhs_batch_dim_size && lhs_batch_dim_size != 0 &&
rhs_batch_dim_size != 0) {
return false;
}
if (lhs_out_idx_end > rhs_out_idx_start) return false;
int batch_dim_size = std::max(rhs_batch_dim_size, lhs_batch_dim_size);
return lhs_out_idx_start >= batch_dim_size &&
rhs_out_idx_start >= batch_dim_size;
}
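// Parses the `_quantization_method` string attribute of `op` into a `Method`
// proto. Returns InvalidArgumentError if the attribute is absent and
// InternalError if it cannot be parsed as a textproto.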
absl::StatusOr<Method> GetQuantizationMethod(absl::Nonnull<Operation*> op) {
const auto quantization_method_attr =
op->getAttrOfType<StringAttr>(kQuantizationMethodAttr);
if (!quantization_method_attr) {
return absl::InvalidArgumentError(absl::StrCat(
"Attribute ", kQuantizationMethodAttr.str(), " is not found."));
}
Method quantization_method;
const std::string method_txtpb = quantization_method_attr.getValue().str();
if (!TextFormat::ParseFromString(method_txtpb, &quantization_method)) {
return absl::InternalError(
absl::StrCat("Failed to parse Method from textproto: ", method_txtpb));
}
return quantization_method;
}
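// Like `GetQuantizationMethod`, but returns the default `Method` when the
// attribute is missing or malformed; a parse failure additionally emits an
// error on `op`.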
Method GetQuantizationMethodOrDefault(absl::Nonnull<Operation*> op) {
absl::StatusOr<Method> method = GetQuantizationMethod(op);
if (method.status().code() == absl::StatusCode::kInternal) {
op->emitError(absl::StrCat("Failed to get quantization method: ",
method.status().ToString()));
}
return method.ok() ? *method : Method::default_instance();
}
bool HasWeightOnlyPtqMethod(TF::XlaCallModuleOp xla_call_module_op) {
Method method = GetQuantizationMethodOrDefault(xla_call_module_op);
return method.has_weight_only_ptq();
}
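// Returns true if `op` is a `tf.XlaCallModule` whose entry function name
// contains a conv or dot op and whose quantization method is
// `weight_only_ptq`.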
bool IsWeightOnlyQuantizableOp(const Operation& op) {
if (auto call_op = dyn_cast<TF::XlaCallModuleOp>(op)) {
StringRef entry_function_name = GetEntryFunctionName(call_op);
absl::StatusOr<Method> quantization_method = GetQuantizationMethod(call_op);
return ContainsConvOrDot(entry_function_name) && quantization_method.ok() &&
quantization_method->has_weight_only_ptq();
}
return false;
}
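// Returns all func.func ops in `module_op`, sorted by symbol name.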
SmallVector<func::FuncOp> GetSortedFunctions(ModuleOp module_op) {
auto iterator_range = module_op.getOps<func::FuncOp>();
SmallVector<func::FuncOp> func_ops(iterator_range.begin(),
iterator_range.end());
absl::c_sort(func_ops, [](func::FuncOp op1, func::FuncOp op2) {
return op1.getName() < op2.getName();
});
return func_ops;
}
} | #include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status_matchers.h"
namespace mlir::quant {
namespace {
using ::stablehlo::quantization::Method;
using ::testing::HasSubstr;
using ::testing::NotNull;
using ::testing::SizeIs;
using ::testing::StrEq;
using ::tsl::protobuf::util::MessageDifferencer;
using ::tsl::testing::IsOk;
using ::tsl::testing::StatusIs;
using LiftAsFunctionCallTest = QuantizationTestBase;
constexpr absl::string_view kModuleLifted = R"mlir(
module {
func.func private @composite_dot_general_fn_1(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [DEFAULT, DEFAULT] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(LiftAsFunctionCallTest, LiftedFunctionSucceeds) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleLifted);
ASSERT_TRUE(module_op);
auto composite_dot_general_fn =
module_op->lookupSymbol<func::FuncOp>("composite_dot_general_fn_1");
ASSERT_THAT(composite_dot_general_fn, NotNull());
auto dot_general_op = FindOperationOfType<mlir::stablehlo::DotGeneralOp>(
composite_dot_general_fn);
EXPECT_TRUE(IsInLiftedFunc(dot_general_op));
}
constexpr absl::string_view kModuleStableHlo = R"mlir(
module {
func.func @main(%arg0: tensor<1x1024xf32>, %arg1: tensor<1024x3xf32>) -> tensor<1x3xf32> attributes {_from_xla_call_module} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0], precision = [] : (tensor<1x1024xf32>, tensor<1024x3xf32>) -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(LiftAsFunctionCallTest, FunctionLiftedAsXlaCallModuleOp) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStableHlo);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op =
FindOperationOfType<mlir::stablehlo::DotGeneralOp>(main_fn);
const SmallVector<NamedAttribute>& attributes = {
builder_.getNamedAttr(
"precision_config",
builder_.getArrayAttr(SmallVector<Attribute>(
1, mlir::stablehlo::PrecisionAttr::get(
ctx_.get(), mlir::stablehlo::Precision::DEFAULT)))),
};
const SmallVector<Value> operands(dot_general_op->getOperands());
const SmallVector<Value> results(dot_general_op->getResults());
Operation* lifted_op =
LiftAsFunctionCall(builder_, dot_general_op->getLoc(),
FunctionCallOpType::TFXlaCallModuleOp,
"composite_dot_general_fn", operands, results,
attributes)[0]
.getDefiningOp();
const auto entry_function_symbol_ref =
lifted_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
SymbolTable symbol_table(*module_op);
auto entry_func = dyn_cast_or_null<func::FuncOp>(
symbol_table.lookup(entry_function_symbol_ref.getValue()));
auto lifted_dot_general_op =
FindOperationOfType<mlir::stablehlo::DotGeneralOp>(entry_func);
EXPECT_TRUE(isa<TF::XlaCallModuleOp>(lifted_op));
EXPECT_EQ(
mlir::cast<StringAttr>(lifted_op->getAttr("_original_entry_function")),
"composite_dot_general_fn_1");
EXPECT_EQ(
mlir::cast<ArrayAttr>(lifted_dot_general_op->getAttr("precision_config")),
builder_.getArrayAttr(SmallVector<Attribute>(
1, mlir::stablehlo::PrecisionAttr::get(
ctx_.get(), mlir::stablehlo::Precision::DEFAULT))));
}
TEST_F(LiftAsFunctionCallTest, FunctionNoAttrLiftedAsXlaCallModuleOp) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleStableHlo);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto dot_general_op =
FindOperationOfType<mlir::stablehlo::DotGeneralOp>(main_fn);
const SmallVector<Value> operands(dot_general_op->getOperands());
const SmallVector<Value> results(dot_general_op->getResults());
Operation* lifted_op =
LiftAsFunctionCall(builder_, dot_general_op->getLoc(),
FunctionCallOpType::TFXlaCallModuleOp,
"composite_dot_general_fn", operands, results)[0]
.getDefiningOp();
EXPECT_TRUE(isa<TF::XlaCallModuleOp>(lifted_op));
EXPECT_EQ(
mlir::cast<StringAttr>(lifted_op->getAttr("_original_entry_function")),
"composite_dot_general_fn_1");
}
TEST_F(LiftAsFunctionCallTest, EinsumSupportedForXlaDotV2Succeeds) {
StringAttr einsum_supported_by_xla_dot_v2_attr =
builder_.getStringAttr("ijk,ikm->ijm");
StringAttr einsum_one_operand = builder_.getStringAttr("ijk->ikj");
StringAttr einsum_ellipsis = builder_.getStringAttr("...gse->...gs");
EXPECT_TRUE(IsEinsumSupportedByXlaDotV2(einsum_supported_by_xla_dot_v2_attr));
EXPECT_FALSE(IsEinsumSupportedByXlaDotV2(einsum_one_operand));
EXPECT_FALSE(IsEinsumSupportedByXlaDotV2(einsum_ellipsis));
}
TEST_F(LiftAsFunctionCallTest, GetQuantizationMethodSucceeds) {
constexpr absl::string_view kXlaCallModuleOpWithQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _quantization_method = "no_quantization {}", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
ASSERT_THAT(method, IsOk());
EXPECT_TRUE(method->has_no_quantization());
}
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodFailsWhenNoQuantizationMethodAttr) {
constexpr absl::string_view kXlaCallModuleOpWithNoQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithNoQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
EXPECT_THAT(
method,
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Attribute _quantization_method is not found")));
}
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodFailsWhenMalformedQuantizationMethodAttr) {
constexpr absl::string_view kXlaCallModuleOpWithNoQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _quantization_method = "invalid_field: 123", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithNoQuantizationMethodAttr);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto xla_call_module_ops = main_fn.getOps<TF::XlaCallModuleOp>();
ASSERT_FALSE(xla_call_module_ops.empty());
const absl::StatusOr<Method> method =
GetQuantizationMethod(*xla_call_module_ops.begin());
EXPECT_THAT(method,
StatusIs(absl::StatusCode::kInternal,
HasSubstr("Failed to parse Method from textproto")));
}
constexpr absl::string_view kFunctionWithRegion =
R"mlir(
func.func @main(%arg0: tensor<i1>, %arg1: tensor<f32>, %arg2: tensor<f32>) -> tensor<f32> {
%if = "stablehlo.if"(%arg0) ({
%0 = stablehlo.add %arg1, %arg1 : tensor<f32>
stablehlo.return %0 : tensor<f32>
}, {
%1 = stablehlo.add %arg2, %arg2 : tensor<f32>
stablehlo.return %1 : tensor<f32>
}) : (tensor<i1>) -> (tensor<f32>)
%subtract = stablehlo.subtract %if, %if : tensor<f32>
return %subtract : tensor<f32>
}
)mlir";
TEST_F(LiftAsFunctionCallTest, IsInRegionSucceedsWhenOpInsideRegion) {
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kFunctionWithRegion);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto if_op = FindOperationOfType<mlir::stablehlo::IfOp>(main_fn);
Block& block = if_op->getRegion(0).front();
Operation& add_op = *absl::c_find_if(block, [](Operation& entry) {
return dyn_cast_or_null<::mlir::stablehlo::AddOp>(&entry);
});
EXPECT_TRUE(IsInStableHloOpRegion(&add_op));
}
TEST_F(LiftAsFunctionCallTest, IsInRegionFailsWhenOpNotInsideRegion) {
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kFunctionWithRegion);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto subtract_op = FindOperationOfType<mlir::stablehlo::SubtractOp>(main_fn);
EXPECT_FALSE(IsInStableHloOpRegion(subtract_op));
}
TEST_F(LiftAsFunctionCallTest,
GetQuantizationMethodOrDefaultReturnsCorrectMethod) {
constexpr absl::string_view kXlaCallModuleOpWithQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_dot_general_fn_1,
_quantization_method = "no_quantization { }",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithQuantizationMethodAttr);
ASSERT_TRUE(module_op);
FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const Method method = GetQuantizationMethodOrDefault(*xla_call_module_op);
EXPECT_TRUE(method.has_no_quantization());
}
TEST_F(
LiftAsFunctionCallTest,
GetQuantizationMethodOrDefaultReturnsDefaultWhenNoQuantizationMethodAttr) {
constexpr absl::string_view kXlaCallModuleOpWithoutQuantizationMethodAttr =
R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_dot_general_fn_1,
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithoutQuantizationMethodAttr);
ASSERT_TRUE(module_op);
FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const Method method = GetQuantizationMethodOrDefault(*xla_call_module_op);
EXPECT_TRUE(MessageDifferencer::Equals(method, Method::default_instance()));
}
constexpr absl::string_view kModuleDotWeightOnlyPtq = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodExists) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotWeightOnlyPtq);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_TRUE(HasWeightOnlyPtqMethod(call_op));
}
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodDifferentMethod) {
const absl::string_view kModuleDotNoQuantization = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "no_quantization { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotNoQuantization);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(HasWeightOnlyPtqMethod(call_op));
}
TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodNoMethod) {
const absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(HasWeightOnlyPtqMethod(call_op));
}
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpDot) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDotWeightOnlyPtq);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_TRUE(IsWeightOnlyQuantizableOp(*call_op));
}
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpNotTfXlaCallModuleOp) {
const absl::string_view kModulePartitionedCallDot = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.PartitionedCall"(%arg0, %1, %0) {_tfl_quant_trait = "fully_quantizable", config = "", config_proto = "", executor_type = "", f = @composite_dot_general_fn_1, _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_dot_general_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
%0 = stablehlo.dot_general %arg0, %arg1, contracting_dims = [1] x [0] : (tensor<?x2xf32>, tensor<2x2xf32>) -> tensor<?x2xf32>
return %0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModulePartitionedCallDot);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::PartitionedCallOp>().begin();
EXPECT_FALSE(IsWeightOnlyQuantizableOp(*call_op));
}
TEST_F(LiftAsFunctionCallTest, IsWeightOnlyQuantizableOpNoConvNoDot) {
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @main(%arg0: tensor<?x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> (tensor<?x2xf32>) {
%0 = stablehlo.constant dense<[-0.211145893, -0.708605706]> : tensor<2xf32>
%1 = stablehlo.constant dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>
%2 = "tf.XlaCallModule"(%arg0, %1, %0) <{Sout = [#tf_type.shape<?x2>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable", _quantization_method = "weight_only_ptq { }"} : (tensor<?x2xf32>, tensor<2x2xf32>, tensor<2xf32>) -> tensor<?x2xf32>
return %2 : tensor<?x2xf32>
}
func.func private @composite_fn_1(%arg0: tensor<?x2xf32>, %arg1: tensor<2x2xf32>, %arg2: tensor<2xf32>) -> tensor<?x2xf32> attributes {_from_xla_call_module, tf_quant.composite_function} {
return %arg0 : tensor<?x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
EXPECT_FALSE(IsWeightOnlyQuantizableOp(*call_op));
}
TEST_F(LiftAsFunctionCallTest, GetSortedFunctions) {
constexpr absl::string_view kModuleXlaCallModule = R"mlir(
module {
func.func @conv_3_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
func.func @conv_1_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
func.func @conv_2_fn(%arg0: tensor<1x3x3x4xf32>) -> tensor<1x3x3x4xf32> {
%0 = stablehlo.constant dense<2.000000e+00> : tensor<3x3x4x4xf32>
%1 = stablehlo.convolution(%arg0, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
%2 = stablehlo.convolution(%1, %0) dim_numbers = [b, 0, 1, f]x[0, 1, i, o]->[b, 0, 1, f], window = {pad = [[1, 1], [1, 1]]} {batch_group_count = 1 : i64, feature_group_count = 1 : i64} : (tensor<1x3x3x4xf32>, tensor<3x3x4x4xf32>) -> tensor<1x3x3x4xf32>
func.return %2: tensor<1x3x3x4xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleXlaCallModule);
ASSERT_TRUE(module_op);
SmallVector<func::FuncOp> funcs = GetSortedFunctions(*module_op);
ASSERT_THAT(funcs, SizeIs(3));
EXPECT_THAT(funcs[0].getSymName(), StrEq("conv_1_fn"));
EXPECT_THAT(funcs[1].getSymName(), StrEq("conv_2_fn"));
EXPECT_THAT(funcs[2].getSymName(), StrEq("conv_3_fn"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71217517-bd22-4e54-9891-4344e5585a1c | cpp | tensorflow/tensorflow | uniform_quantized_types | tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc | tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc | #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#define DEBUG_TYPE "uniform-quantized-types"
namespace mlir {
namespace quant {
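// Creates a per-tensor uniform quantized type with i8 storage and f32
// expressed type. `narrow_range` raises the storage minimum from -128 to -127.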
UniformQuantizedType CreateI8F32UniformQuantizedType(const Location loc,
MLIRContext& context,
const double scale,
const int64_t zero_point,
const bool narrow_range) {
return UniformQuantizedType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 8),
FloatType::getF32(&context), scale, zero_point,
llvm::minIntN(8) + (narrow_range ? 1 : 0),
llvm::maxIntN(8));
}
UniformQuantizedType CreateI32F32UniformQuantizedType(
const Location loc, MLIRContext& context, const double scale,
const int64_t zero_point) {
return UniformQuantizedType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 32),
FloatType::getF32(&context), scale, zero_point,
llvm::minIntN(32),
llvm::maxIntN(32));
}
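// Per-axis variants of the factories above: one scale / zero-point pair per
// slice along `quantization_dimension`, with i8 and i32 storage respectively.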
UniformQuantizedPerAxisType CreateI8F32UniformQuantizedPerAxisType(
const Location loc, MLIRContext& context, const ArrayRef<double> scales,
const ArrayRef<int64_t> zero_points, const int quantization_dimension,
const bool narrow_range) {
return UniformQuantizedPerAxisType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 8),
FloatType::getF32(&context),
SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
quantization_dimension,
llvm::minIntN(8) + (narrow_range ? 1 : 0),
llvm::maxIntN(8));
}
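// Per-axis i32 counterpart with storage bounds spanning the full i32 range.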
UniformQuantizedPerAxisType CreateI32F32UniformQuantizedPerAxisType(
const Location loc, MLIRContext& context, const ArrayRef<double> scales,
const ArrayRef<int64_t> zero_points, const int quantization_dimension) {
return UniformQuantizedPerAxisType::getChecked(
loc, QuantizationFlags::Signed,
IntegerType::get(&context, 32),
FloatType::getF32(&context),
SmallVector<double>(scales), SmallVector<int64_t>(zero_points),
quantization_dimension, llvm::minIntN(32),
llvm::maxIntN(32));
}
bool IsStorageTypeI8(const QuantizedType quantized_type) {
const Type storage_type = quantized_type.getStorageType();
return storage_type.isInteger(8);
}
bool IsStorageTypeI32(const QuantizedType quantized_type) {
const Type storage_type = quantized_type.getStorageType();
return storage_type.isInteger(32);
}
bool IsExpressedTypeF32(const QuantizedType quantized_type) {
const Type expressed_type = quantized_type.getExpressedType();
return mlir::isa<Float32Type>(expressed_type);
}
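// The Is*QuantizedType predicates below return true only when `type` is the
// expected uniform quantized (per-tensor or per-axis) type with the stated
// storage and expressed types; mismatches are reported via LLVM_DEBUG.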
bool IsI8F32UniformQuantizedType(const Type type) {
const UniformQuantizedType quantized_type =
mlir::dyn_cast_or_null<UniformQuantizedType>(type);
if (!quantized_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI8(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: "
<< quantized_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_type << ".\n");
return false;
}
return true;
}
bool IsI8F32UniformQuantizedPerAxisType(const Type type) {
const UniformQuantizedPerAxisType quantized_per_axis_type =
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);
if (!quantized_per_axis_type) {
    LLVM_DEBUG(llvm::dbgs() << "Expected a uniform quantized per-axis type. Got: "
                            << type << ".\n");
return false;
}
if (!IsStorageTypeI8(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i8 storage type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
return true;
}
bool IsI32F32UniformQuantizedType(const Type type) {
const UniformQuantizedType quantized_type =
mlir::dyn_cast_or_null<UniformQuantizedType>(type);
if (!quantized_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: "
<< quantized_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_type << ".\n");
return false;
}
return true;
}
bool IsI32F32UniformQuantizedPerAxisType(const Type type) {
const UniformQuantizedPerAxisType quantized_per_axis_type =
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(type);
if (!quantized_per_axis_type) {
LLVM_DEBUG(llvm::dbgs()
<< "Expected a uniform quantized type. Got: " << type << ".\n");
return false;
}
if (!IsStorageTypeI32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an i32 storage type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
if (!IsExpressedTypeF32(quantized_per_axis_type)) {
LLVM_DEBUG(llvm::dbgs() << "Expected an f32 expressed type. Got: "
<< quantized_per_axis_type << ".\n");
return false;
}
return true;
}
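// TFLite quantize/dequantize kernels accept only 8-bit (signed or unsigned)
// or signed 16-bit storage types.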
bool IsSupportedByTfliteQuantizeOrDequantizeOps(IntegerType storage_type) {
if (storage_type.getWidth() == 8 ||
(storage_type.isSigned() && storage_type.getWidth() == 16)) {
return true;
}
LLVM_DEBUG(llvm::dbgs()
<< "Uniform quantize / dequantize op only supports ui8, i8 or "
"i16 for the storage type of uniform quantized type. Got: "
<< storage_type << ".\n");
return false;
}
bool IsQuantizedTensorType(Type type) {
if (!mlir::isa<TensorType>(type)) {
return false;
}
Type element_type = mlir::cast<TensorType>(type).getElementType();
return mlir::isa<QuantizedType>(element_type);
}
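// An op is considered fully quantized when every operand and result is a
// tensor with a quantized element type; IsOpNotQuantized is the converse.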
bool IsOpFullyQuantized(Operation* op) {
return llvm::all_of(op->getOperandTypes(), IsQuantizedTensorType) &&
llvm::all_of(op->getResultTypes(), IsQuantizedTensorType);
}
bool IsOpNotQuantized(Operation* op) {
return !llvm::any_of(op->getOperandTypes(), IsQuantizedTensorType) &&
!llvm::any_of(op->getResultTypes(), IsQuantizedTensorType);
}
}
} | #include "tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::ElementsAreArray;
using ::testing::IsNull;
using ::testing::Ne;
using ::testing::NotNull;
using ::testing::Test;
class CreateI8F32UniformQuantizedTypeTest : public Test {
protected:
CreateI8F32UniformQuantizedTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI8F32UniformQuantizedTypeTest, I8StorageTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxEqualToI8MinMax) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, StorageTypeMinMaxNarrowRange) {
const UniformQuantizedType quantized_type = CreateI8F32UniformQuantizedType(
UnknownLoc::get(&ctx_), ctx_,
1.0, 0, true);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {
const UniformQuantizedType quantized_type =
CreateI8F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
8.0, 99);
EXPECT_EQ(quantized_type.getScale(), 8.0);
EXPECT_EQ(quantized_type.getZeroPoint(), 99);
}
class CreateI32F32UniformQuantizedTypeTest : public Test {
protected:
CreateI32F32UniformQuantizedTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI32F32UniformQuantizedTypeTest, I32StorageTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, SignedQuantizedTypeSucceeds) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest,
StorageTypeMinMaxEqualToI32MinMax) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
1.0, 0);
EXPECT_EQ(quantized_type.getStorageTypeMin(),
std::numeric_limits<int32_t>::min());
EXPECT_EQ(quantized_type.getStorageTypeMax(),
std::numeric_limits<int32_t>::max());
}
TEST_F(CreateI32F32UniformQuantizedTypeTest, HasScaleAndZeroPointProperlySet) {
const UniformQuantizedType quantized_type =
CreateI32F32UniformQuantizedType(UnknownLoc::get(&ctx_), ctx_,
8.0, 1111);
EXPECT_EQ(quantized_type.getScale(), 8.0);
EXPECT_EQ(quantized_type.getZeroPoint(), 1111);
}
class CreateI8F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
CreateI8F32UniformQuantizedPerAxisTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, I8StorageTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(8));
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
SignedQuantizedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.isSigned());
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxEqualToI8MinMax) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -128);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxNarrowRange) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0, true);
EXPECT_EQ(quantized_type.getStorageTypeMin(), -127);
EXPECT_EQ(quantized_type.getStorageTypeMax(), 127);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
HasQuantizationDimensionProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
3);
EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);
}
TEST_F(CreateI8F32UniformQuantizedPerAxisTypeTest,
HasScaleAndZeroPointProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI8F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{8.0, 9.0},
SmallVector<int64_t, 2>{98, 99},
0);
EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));
EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));
}
class CreateI32F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
CreateI32F32UniformQuantizedPerAxisTypeTest() : ctx_() {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
};
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, I32StorageTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getStorageType().isSignlessInteger(32));
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest, F32ExpressedTypeSucceeds) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_TRUE(quantized_type.getExpressedType().isF32());
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
StorageTypeMinMaxEqualToI32MinMax) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
0);
EXPECT_EQ(quantized_type.getStorageTypeMin(),
std::numeric_limits<int32_t>::min());
EXPECT_EQ(quantized_type.getStorageTypeMax(),
std::numeric_limits<int32_t>::max());
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
HasQuantizationDimensionProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{1.0, 1.0},
SmallVector<int64_t, 2>{0, 0},
3);
EXPECT_EQ(quantized_type.getQuantizedDimension(), 3);
}
TEST_F(CreateI32F32UniformQuantizedPerAxisTypeTest,
HasScaleAndZeroPointProperlySet) {
const UniformQuantizedPerAxisType quantized_type =
CreateI32F32UniformQuantizedPerAxisType(
UnknownLoc::get(&ctx_), ctx_,
SmallVector<double, 2>{8.0, 9.0},
SmallVector<int64_t, 2>{98, 99},
0);
EXPECT_THAT(quantized_type.getScales(), ElementsAreArray({8.0, 9.0}));
EXPECT_THAT(quantized_type.getZeroPoints(), ElementsAreArray({98, 99}));
}
class IsI8F32UniformQuantizedTypeTest : public Test {
protected:
IsI8F32UniformQuantizedTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI8F32UniformQuantizedTypeTest, I8F32UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsI8F32UniformQuantizedType(qi8_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi8_type),
NotNull());
}
TEST_F(IsI8F32UniformQuantizedTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsStorageTypeI8(qi8_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(), 1.0,
0, -128, 127);
EXPECT_TRUE(IsExpressedTypeF32(qi8_type));
}
class IsI8F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
IsI8F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest,
I8F32UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsI8F32UniformQuantizedPerAxisType(qi8_per_axis_type));
EXPECT_FALSE(IsI8F32UniformQuantizedType(qi8_per_axis_type));
}
TEST_F(IsI8F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_THAT(
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_per_axis_type),
NotNull());
}
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, StorageTypeI8Succeeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsStorageTypeI8(qi8_per_axis_type));
}
TEST_F(IsI8F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedPerAxisType qi8_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
{1.0},
{0}, 0, -128,
127);
EXPECT_TRUE(IsExpressedTypeF32(qi8_per_axis_type));
}
class IsI32F32UniformQuantizedTypeTest : public Test {
protected:
IsI32F32UniformQuantizedTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI32F32UniformQuantizedTypeTest, I32F32UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
}
TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedTypeSucceeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedType>(qi32_type),
NotNull());
}
TEST_F(IsI32F32UniformQuantizedTypeTest, StorageTypeI32Succeeds) {
const UniformQuantizedType qi32_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedType(qi32_type));
EXPECT_TRUE(IsStorageTypeI32(qi32_type));
}
TEST_F(IsI32F32UniformQuantizedTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedType qi32_per_axis_type =
quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
1.0,
0, -2147483647,
2147483646);
EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));
}
class IsI32F32UniformQuantizedPerAxisTypeTest : public Test {
protected:
IsI32F32UniformQuantizedPerAxisTypeTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,
I32F32UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsI32F32UniformQuantizedPerAxisType(qi32_per_axis_type));
EXPECT_FALSE(IsI32F32UniformQuantizedType(qi32_per_axis_type));
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest,
I8F32UniformQuantizedTypeFails) {
const UniformQuantizedType qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0, 0, -128,
127);
EXPECT_FALSE(IsI32F32UniformQuantizedPerAxisType(qi8_type));
EXPECT_FALSE(IsStorageTypeI32(qi8_type));
EXPECT_THAT(mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi8_type),
IsNull());
}
TEST_F(IsI32F32UniformQuantizedTypeTest, UniformQuantizedPerAxisTypeSucceeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_THAT(
mlir::dyn_cast_or_null<UniformQuantizedPerAxisType>(qi32_per_axis_type),
NotNull());
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, StorageTypeI32Succeeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsStorageTypeI32(qi32_per_axis_type));
}
TEST_F(IsI32F32UniformQuantizedPerAxisTypeTest, ExpressedTypeF32Succeeds) {
const UniformQuantizedPerAxisType qi32_per_axis_type =
quant::UniformQuantizedPerAxisType::get(
QuantizationFlags::Signed, builder_.getI32Type(),
builder_.getF32Type(),
{1.0},
{0}, 0,
-2147483647, 2147483646);
EXPECT_TRUE(IsExpressedTypeF32(qi32_per_axis_type));
}
class IsSupportedByTfliteQuantizeOrDequantizeOpsTest : public Test {
protected:
IsSupportedByTfliteQuantizeOrDequantizeOpsTest() : builder_(&ctx_) {
ctx_.loadDialect<quant::QuantDialect>();
}
MLIRContext ctx_;
OpBuilder builder_;
};
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI8Succeeds) {
auto qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0,
0, -128, 127);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi8_type.getStorageType())));
}
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeI16Succeeds) {
auto qi16_type = quant::UniformQuantizedType::get(
      QuantizationFlags::Signed, builder_.getI16Type(),
      builder_.getF32Type(),
      1.0,
      0, -32768, 32767);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi16_type.getStorageType())));
}
TEST_F(IsSupportedByTfliteQuantizeOrDequantizeOpsTest, StorageTypeUI8Succeeds) {
auto qi8_type = quant::UniformQuantizedType::get(
QuantizationFlags::Signed, builder_.getI8Type(),
builder_.getF32Type(),
1.0,
0, -128, 127);
EXPECT_TRUE(IsSupportedByTfliteQuantizeOrDequantizeOps(
dyn_cast_or_null<IntegerType>(qi8_type.getStorageType())));
}
using IsOpFullyQuantizedTest = QuantizationTestBase;
TEST_F(IsOpFullyQuantizedTest, TrueIfOpFullyQuantized) {
constexpr absl::string_view kFullyQuantizedAdd = R"mlir(
func.func @fully_quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kFullyQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("fully_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_TRUE(IsOpFullyQuantized(*add_op_itr));
}
TEST_F(IsOpFullyQuantizedTest, FalseIfOpNotQuantized) {
constexpr absl::string_view kNotQuantizedAdd = R"mlir(
func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
return %0 : tensor<2xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_FALSE(IsOpFullyQuantized(*add_op_itr));
}
TEST_F(IsOpFullyQuantizedTest, FalseIfOpPartiallyQuantized) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op_itr =
func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
ASSERT_THAT(
uniform_quantize_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
EXPECT_FALSE(IsOpFullyQuantized(*uniform_quantize_op_itr));
}
using IsOpNotQuantizedTest = QuantizationTestBase;
TEST_F(IsOpNotQuantizedTest, TrueIfOpNotQuantized) {
constexpr absl::string_view kNotQuantizedAdd = R"mlir(
func.func @not_quantized_add(%arg0: tensor<2xf32>) -> tensor<2xf32> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2xf32>
return %0 : tensor<2xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kNotQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("not_quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_TRUE(IsOpNotQuantized(*add_op_itr));
}
TEST_F(IsOpNotQuantizedTest, FalseIfOpQuantized) {
constexpr absl::string_view kQuantizedAdd = R"mlir(
func.func @quantized_add(%arg0: tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.add %arg0, %arg0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizedAdd);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantized_add");
ASSERT_THAT(func_op, NotNull());
auto add_op_itr = func_op.getBody().op_begin<mlir::stablehlo::AddOp>();
ASSERT_THAT(add_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::AddOp>()));
EXPECT_FALSE(IsOpNotQuantized(*add_op_itr));
}
TEST_F(IsOpNotQuantizedTest, FalseIfOpPartiallyQuantized) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op_itr =
func_op.getBody().op_begin<mlir::stablehlo::UniformQuantizeOp>();
ASSERT_THAT(
uniform_quantize_op_itr,
Ne(func_op.getBody().op_end<mlir::stablehlo::UniformQuantizeOp>()));
EXPECT_FALSE(IsOpNotQuantized(*uniform_quantize_op_itr));
}
using UniformQuantizedTypeTest = QuantizationTestBase;
TEST_F(UniformQuantizedTypeTest, GetElementTypeSucceeds) {
constexpr absl::string_view kQuantizeOp = R"mlir(
func.func @quantize(%arg0: tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>> {
%0 = stablehlo.uniform_quantize %arg0 : (tensor<2xf32>) -> tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
return %0 : tensor<2x!quant.uniform<i8:f32, 1.000000e+00:0>>
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kQuantizeOp);
ASSERT_TRUE(module_op);
auto func_op = module_op->lookupSymbol<func::FuncOp>("quantize");
ASSERT_THAT(func_op, NotNull());
auto uniform_quantize_op =
*func_op.getOps<::mlir::stablehlo::UniformQuantizeOp>().begin();
Value result = uniform_quantize_op.getResult();
EXPECT_THAT(GetElementType(result), NotNull());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/uniform_quantized_types_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a941d6c3-ad8c-4445-baba-9b31938820a4 | cpp | tensorflow/tensorflow | quantization_driver | tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc | tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc | #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h"
#include <cmath>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
namespace mlir {
namespace quant {
namespace {
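// Half of the i32 range; quantized bias magnitudes are kept below this bound
// (scales are widened in SetBiasParamsWithAdjustments otherwise).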
constexpr int32_t kBiasMax = std::numeric_limits<int32_t>::max() / 2;
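// Creates (or reuses) the QuantState associated with `value` and records it
// under the (op, index) key in either the operand or result state map.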
void InitializeStateForValue(
Operation* op, const int index, const Value value, const bool as_result,
std::vector<QuantState>& states,
DenseMap<Value, QuantizationDriver::QuantStateIndex>& value_to_state,
DenseMap<QuantizationDriver::OpWithOperandIndex,
QuantizationDriver::QuantStateIndex>& operand_states,
DenseMap<QuantizationDriver::OpWithResultIndex,
QuantizationDriver::QuantStateIndex>& result_states) {
const auto [cached, inserted] = value_to_state.try_emplace(value, 0);
if (!inserted) {
if (as_result) {
result_states[{op, index}] = cached->second;
} else {
operand_states[{op, index}] = cached->second;
}
return;
}
const QuantizedType quantized_type =
QuantizedType::getQuantizedElementType(value.getType());
const bool immutable = quantized_type != nullptr;
const QuantizationDriver::QuantStateIndex next_state_index = states.size();
states.push_back({quantized_type, immutable});
if (as_result) {
result_states[{op, index}] = next_state_index;
} else {
operand_states[{op, index}] = next_state_index;
}
cached->second = next_state_index;
}
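// Returns true if any operand of `op` is produced by a DequantizeCastOp whose
// source element type is per-axis quantized.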
bool HasPerAxisQuantizedOperand(Operation* op) {
for (int i = 0; i < op->getNumOperands(); ++i) {
if (auto dq_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
op->getOperand(i).getDefiningOp())) {
auto type =
mlir::cast<TensorType>(dq_op.getArg().getType()).getElementType();
if (auto per_axis_qtype =
mlir::dyn_cast_or_null<quant::UniformQuantizedPerAxisType>(
QuantizedType::getQuantizedElementType(type))) {
return true;
}
}
}
return false;
}
}
void QuantizationDriver::InitializeArgState(const BlockArgument arg,
const Value arg_value) {
const auto [cached, inserted] = value_to_state_.try_emplace(arg_value, 0);
if (!inserted) {
arg_states_[arg] = cached->second;
return;
}
const QuantizedType quantized_type =
QuantizedType::getQuantizedElementType(arg_value.getType());
const bool immutable = quantized_type != nullptr;
const QuantizationDriver::QuantStateIndex next_state_index = states_.size();
states_.push_back({quantized_type, immutable});
arg_states_[arg] = next_state_index;
cached->second = next_state_index;
}
void QuantizationDriver::InitializeOperandState(Operation* op, const int index,
const Value value) {
InitializeStateForValue(op, index, value, false, states_,
value_to_state_, operand_states_, result_states_);
}
void QuantizationDriver::InitializeResultState(Operation* op, const int index,
const Value value) {
InitializeStateForValue(op, index, value, true, states_,
value_to_state_, operand_states_, result_states_);
}
std::unique_ptr<OpQuantSpec> QuantizationDriver::GetQuantSpec(Operation* op) {
return op_quant_spec_getter_(op);
}
std::unique_ptr<OpQuantScaleSpec> QuantizationDriver::GetQuantScaleSpec(
Operation* op) {
return op_quant_scale_spec_getter_(op);
}
bool QuantizationDriver::IsQuantized(Operation* op) {
for (int i = 0; i < op->getNumResults(); ++i) {
if (GetResultQuantState(op, i).IsEmpty()) return false;
}
return true;
}
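// Infers quantization parameters for a constant's float payload: a per-axis
// type when the constant is a registered weight with a valid channel
// dimension (and per-channel quantization is enabled), a per-tensor type
// otherwise.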
bool QuantizationDriver::SetConstantResultParams(Operation* op) {
DenseFPElementsAttr attr;
const Value result = op->getResult(0);
if (!matchPattern(result, m_Constant(&attr))) {
return false;
}
Type final_type;
const auto it = optimized_weights_.find(op);
const bool is_weight = it != optimized_weights_.end();
const bool is_weight_with_per_channel_support =
is_weight && it->second != -1 && is_signed_;
if (is_weight_with_per_channel_support && !disable_per_channel_) {
final_type = GetUniformQuantizedPerAxisTypeForWeight(
attr, it->second, true, 8, is_signed_,
true, legacy_float_scale_);
} else {
final_type = GetUniformQuantizedTypeForWeight(
attr, is_weight && is_signed_,
8, is_signed_,
is_weight, legacy_float_scale_);
}
if (const auto quant_type = mlir::dyn_cast_or_null<QuantizedType>(final_type);
quant_type != nullptr) {
return SetResultParams(op, 0, quant_type);
}
return false;
}
bool QuantizationDriver::SetResultParams(Operation* op, const int result_index,
const QuantizedType quantized_type) {
QuantState& state = GetResultQuantState(op, result_index);
if (state.params == quantized_type) {
return false;
}
if (!state.IsEmpty()) {
RequantizeStates& rescales = GetResultRequantizeStates(op, result_index);
RequantizeState& rescale = rescales.emplace_back();
rescale.pos = RequantizeState::ON_INPUT;
rescale.params = quantized_type;
return true;
}
state.params = quantized_type;
AddUserToList(op, result_index);
return true;
}
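// Derives the bias operand's quantized type from the parameters of the
// non-bias operands via the accumulator-scale function `func`, unless the
// bias already carries parameters.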
QuantizedType QuantizationDriver::GetBiasParams(
Operation* op, const int bias_index,
const ArrayRef<int> non_bias_operand_indices,
const AccumulatorScaleFunc func) {
QuantState& bias_state = GetOperandQuantState(op, bias_index);
if (!bias_state.IsEmpty()) {
return bias_state.params;
}
std::vector<QuantizedType> op_types{};
op_types.reserve(non_bias_operand_indices.size());
int adjusted_quant_dim = -1;
if (op->getNumOperands() > bias_index) {
Operation* bias_op = op->getOperand(bias_index).getDefiningOp();
if (bias_op != nullptr) {
Type bias_type = bias_op->getResult(0).getType();
if (bias_type != builder_.getNoneType()) {
const int bias_rank = mlir::dyn_cast<ShapedType>(bias_type).getRank();
adjusted_quant_dim = bias_rank > 1 ? bias_rank - 1 : 0;
}
}
}
for (const int non_bias_operand_index : non_bias_operand_indices) {
const QuantState& non_bias_state =
GetOperandQuantState(op, non_bias_operand_index);
op_types.push_back(non_bias_state.params);
}
return func(op_types, adjusted_quant_dim, legacy_float_scale_);
}
bool QuantizationDriver::SetOperandParams(Operation* op,
const int operand_index,
const QuantizedType quantized_type,
const bool override) {
QuantState& state = GetOperandQuantState(op, operand_index);
if (state.params == quantized_type) {
return false;
}
if (!state.IsEmpty() && !override) {
RequantizeStates& rescales = GetOperandRequantizeStates(op, operand_index);
for (RequantizeState& rescale : rescales) {
if (rescale.params == quantized_type) {
rescale.users.emplace_back(op, operand_index);
return true;
}
}
RequantizeState& rescale = rescales.emplace_back();
rescale.pos = RequantizeState::ON_OUTPUT;
rescale.params = quantized_type;
rescale.users.emplace_back(op, operand_index);
return true;
}
state.params = quantized_type;
AddOperandToList(op, operand_index);
return true;
}
void QuantizationDriver::QuantizeOpResult(Operation* op, const int result_index,
const QuantizedType quantized_type) {
builder_.setInsertionPointAfter(op);
const Value original_result = op->getResult(result_index);
QuantizeValue(original_result, quantized_type, op->getLoc());
}
void QuantizationDriver::QuantizeArg(BlockArgument arg,
const QuantizedType quantized_type) {
builder_.setInsertionPointToStart(arg.getOwner());
QuantizeValue(arg, quantized_type, builder_.getUnknownLoc());
}
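// Inserts a QuantizeCast/DequantizeCast pair after `value`: existing users are
// rewired to the dequantized result while the (volatile) quantize op keeps
// reading the original value.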
void QuantizationDriver::QuantizeValue(Value value,
QuantizedType quantized_type,
const Location loc) {
const Type expressed_type = value.getType();
const Type new_value_type =
quantized_type.castFromExpressedType(expressed_type);
if (new_value_type == nullptr) return;
auto quantize =
builder_.create<quantfork::QuantizeCastOp>(loc, new_value_type, value);
auto dequantize = builder_.create<quantfork::DequantizeCastOp>(
loc, expressed_type, quantize.getResult());
quantize->setAttr(kVolatileOpAttrName, builder_.getUnitAttr());
value.replaceAllUsesWith(dequantize);
quantize.getOperation()->replaceUsesOfWith(dequantize, value);
}
void QuantizationDriver::RequantizeOpResult(Operation* op,
const int result_index,
RequantizeStates& states) {
if (states.empty()) return;
builder_.setInsertionPointAfter(op);
Value value = op->getResult(result_index);
RequantizeState::RequantizePosition pos = states.front().pos;
if (pos == RequantizeState::NO_REQUANTIZE) {
return;
}
for (const RequantizeState& state : states) {
if (state.pos != pos) {
return;
}
}
if (pos == RequantizeState::ON_OUTPUT) {
Operation* user = value.getUses().begin().getUser();
if (isa<quantfork::QuantizeCastOp>(user)) {
value = user->getResult(0);
builder_.setInsertionPointAfter(user);
}
}
RequantizeValue(value, states, op->getLoc());
}
void QuantizationDriver::RequantizeArg(const BlockArgument arg,
RequantizeStates& states) {
Value value = arg;
builder_.setInsertionPointToStart(arg.getOwner());
if (value.hasOneUse()) {
Operation* user = value.use_begin().getUser();
if (auto q = dyn_cast<quantfork::QuantizeCastOp>(user)) {
value = q.getResult();
builder_.setInsertionPoint(arg.getOwner(), ++Block::iterator(user));
}
}
RequantizeValue(value, states, builder_.getUnknownLoc());
}
void QuantizationDriver::RequantizeValue(Value value, RequantizeStates& states,
const Location loc) {
if (states.empty() || states.front().pos == RequantizeState::NO_REQUANTIZE) {
return;
}
if (states.front().pos == RequantizeState::ON_INPUT) {
RequantizeState& state = states.front();
const Type expressed_type = value.getType();
const Type new_type = state.params.castFromExpressedType(expressed_type);
if (!new_type) return;
auto requantize_op =
builder_.create<quantfork::QuantizeCastOp>(loc, new_type, value);
value.replaceAllUsesWith(requantize_op);
requantize_op.getOperation()->replaceUsesOfWith(requantize_op, value);
return;
}
if (!value.hasOneUse()) {
return;
}
auto dequant_op = dyn_cast_or_null<quantfork::DequantizeCastOp>(
value.use_begin().getUser());
if (!dequant_op) {
return;
}
const int num_uses = std::distance(dequant_op.getResult().use_begin(),
dequant_op.getResult().use_end());
bool clobber_first = num_uses <= states.size();
for (RequantizeState& state : states) {
Type expressed_type = QuantizedType::castToExpressedType(value.getType());
if (!expressed_type) continue;
const Type new_type = state.params.castFromExpressedType(expressed_type);
if (!new_type) continue;
auto requantize_op =
builder_.create<quantfork::QuantizeCastOp>(loc, new_type, value);
if (clobber_first) {
dequant_op.setOperand(requantize_op.getResult());
clobber_first = false;
} else {
auto new_dequant_op = builder_.create<quantfork::DequantizeCastOp>(
loc, dequant_op.getResult().getType(), requantize_op.getResult());
for (auto [op, operand_idx] : state.users) {
op->setOperand(operand_idx, new_dequant_op.getResult());
}
}
}
}
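// For ops that must keep the same scale across inputs and outputs, selects a
// single parameter set, preferring immutable (already fixed) states over
// mutable ones.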
QuantizedType QuantizationDriver::GetQuantParamsForSameScaleConstraint(
Operation* op) {
std::vector<QuantState*> mutable_states, immutable_states;
for (int i = 0; i < op->getNumOperands(); ++i) {
QuantState& state = GetOperandQuantState(op, i);
if (state.immutable) {
immutable_states.push_back(&state);
} else if (!state.IsEmpty()) {
mutable_states.push_back(&state);
}
}
const int immutable_operands_num = immutable_states.size();
const int mutable_operands_num = mutable_states.size();
if (op->getNumOperands() == 1 && immutable_operands_num == 1) {
return immutable_states.front()->params;
}
for (int i = 0; i < op->getNumResults(); ++i) {
QuantState& state = GetResultQuantState(op, i);
if (state.immutable) {
immutable_states.push_back(&state);
} else if (!state.IsEmpty()) {
mutable_states.push_back(&state);
}
}
const int immutable_results_num =
immutable_states.size() - immutable_operands_num;
const int mutable_results_num = mutable_states.size() - mutable_operands_num;
if (op->getNumResults() == 1 && immutable_results_num == 1) {
return immutable_states.back()->params;
}
if (!immutable_states.empty()) return immutable_states.front()->params;
if (op->getNumOperands() == 1 && mutable_operands_num == 1) {
return mutable_states.front()->params;
}
if (op->getNumResults() == 1 && mutable_results_num == 1) {
return mutable_states.back()->params;
}
if (!mutable_states.empty()) return mutable_states.front()->params;
return {};
}
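// Walks float constants: registers those consumed as quantizable weights
// (recording their per-channel dimension when known) and clones constants
// shared with bias, same-scale, or quantize-cast users so each such use gets
// its own copy.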
void QuantizationDriver::PreprocessConstantOps() {
fn_.walk([&](arith::ConstantOp cst) {
const auto type = mlir::dyn_cast<ShapedType>(cst.getType());
if (!type || !mlir::isa<FloatType>(type.getElementType())) return;
auto float_attr = mlir::dyn_cast<DenseFPElementsAttr>(cst.getValueAttr());
if (float_attr && (float_attr.getValues<APFloat>().empty() ||
!float_attr.getValues<APFloat>()[0].isFinite())) {
return;
}
const Value value = cst.getResult();
builder_.setInsertionPoint(cst);
SmallVector<std::pair<Operation*, int>> uses;
for (OpOperand& use : value.getUses()) {
uses.push_back({use.getOwner(), use.getOperandNumber()});
}
for (const auto [user, operand_num] : uses) {
const std::unique_ptr<OpQuantSpec> spec = GetQuantSpec(user);
const std::unique_ptr<OpQuantScaleSpec> scale_spec =
GetQuantScaleSpec(user);
const BiasParamsMap biases = spec->biases_params;
if (!biases.contains(operand_num) &&
!scale_spec->has_same_scale_requirement &&
!dyn_cast<quantfork::QuantizeCastOp>(user)) {
weights_.insert(cst);
if (spec->coeff_op_quant_dim.find(operand_num) !=
spec->coeff_op_quant_dim.end()) {
optimized_weights_.insert(
{cst, spec->coeff_op_quant_dim[operand_num]});
}
} else {
if (uses.size() > 1) {
auto new_constant_op =
builder_.create<arith::ConstantOp>(cst.getLoc(), cst.getValue());
user->setOperand(operand_num, new_constant_op);
}
}
}
});
}
void QuantizationDriver::SetupAllStates() {
for (BlockArgument arg : fn_.getArguments()) {
args_.push_back(arg);
Value value = arg;
if (arg.hasOneUse()) {
Operation* user = value.use_begin().getUser();
if (auto q = dyn_cast<quantfork::QuantizeCastOp>(user)) {
value = q.getResult();
}
}
InitializeArgState(arg, value);
}
fn_.walk([&](Operation* op) {
std::unique_ptr<OpQuantScaleSpec> scale_spec = GetQuantScaleSpec(op);
if (!IsOpQuantizable(op) && !scale_spec->has_same_scale_requirement) {
return;
}
work_list_.push_back(op);
for (int i = 0; i < op->getNumOperands(); ++i) {
Value operand = op->getOperand(i);
if (Operation* inst = operand.getDefiningOp()) {
if (auto dq = dyn_cast<quantfork::DequantizeCastOp>(inst)) {
operand = dq.getArg();
}
}
InitializeOperandState(op, i, operand);
}
for (int i = 0; i < op->getNumResults(); ++i) {
Value result = op->getResult(i);
if (result.hasOneUse()) {
Operation* user = result.use_begin().getUser();
if (auto q = dyn_cast<quantfork::QuantizeCastOp>(user)) {
result = q.getResult();
}
}
InitializeResultState(op, i, result);
}
});
}
arith::ConstantOp QuantizationDriver::DuplicateConstantOpIfNeeded(
arith::ConstantOp op, Operation* target_op, const int operand_index) {
if (op.getResult().hasOneUse()) {
return op;
}
OpBuilder builder(op->getContext());
builder.setInsertionPointAfter(op);
arith::ConstantOp new_op = cast<arith::ConstantOp>(builder.clone(*op));
target_op->getOpOperand(operand_index).set(new_op.getResult());
InitializeOperandState(target_op, operand_index, new_op.getResult());
InitializeResultState(new_op, 0, new_op.getResult());
return new_op;
}
bool QuantizationDriver::ShouldCheckBiasScale(
Operation* op, const int bias_index, ArrayRef<int> input_indices,
const QuantizedType quantized_type, int& input_index, int& filter_index) {
auto affine_op = dyn_cast<AffineQuantizedOpInterface>(op);
auto bias_op = op->getOperand(bias_index).getDefiningOp<arith::ConstantOp>();
if (!affine_op || !bias_op || input_indices.size() != 2) return false;
if (!mlir::isa<DenseFPElementsAttr>(bias_op.getValue())) return false;
filter_index = affine_op.GetAffineOperandIndex();
if (!op->getOperand(filter_index).getDefiningOp<arith::ConstantOp>()) {
return false;
}
if (filter_index == input_indices[0]) {
input_index = input_indices[1];
} else if (filter_index == input_indices[1]) {
input_index = input_indices[0];
} else {
return false;
}
const QuantState& input_state = GetOperandQuantState(op, input_index);
const QuantState& filter_state = GetOperandQuantState(op, filter_index);
return input_state.params.getStorageTypeIntegralWidth() == 8 &&
filter_state.params.getStorageTypeIntegralWidth() == 8 &&
quantized_type.getStorageTypeIntegralWidth() == 32;
}
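// Assigns parameters to the bias operand; if the quantized bias would exceed
// kBiasMax, the bias scale (and, correspondingly, the filter scale) is
// widened, duplicating the filter constant when it is shared.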
bool QuantizationDriver::SetBiasParamsWithAdjustments(
Operation* op, const int bias_index, ArrayRef<int> input_indices,
const QuantizedType params) {
bool changed = false;
int input_index;
int filter_index;
if (!ShouldCheckBiasScale(op, bias_index, input_indices, params, input_index,
filter_index)) {
return SetOperandParams(op, bias_index, params);
}
QuantState input_state = GetOperandQuantState(op, input_index);
QuantState filter_state = GetOperandQuantState(op, filter_index);
auto bias_op = op->getOperand(bias_index).getDefiningOp<arith::ConstantOp>();
const double input_scale =
mlir::cast<UniformQuantizedType>(input_state.params).getScale();
auto bias_values = mlir::cast<DenseFPElementsAttr>(bias_op.getValue());
if (auto bias_quantized_type = mlir::dyn_cast<UniformQuantizedType>(params);
bias_quantized_type != nullptr) {
double bias_half_range = 0.0f;
for (auto bias : bias_values.getValues<APFloat>()) {
if (bias_half_range < std::abs(bias.convertToFloat())) {
bias_half_range = std::abs(bias.convertToFloat());
}
}
if (bias_half_range / bias_quantized_type.getScale() < kBiasMax) {
return SetOperandParams(op, bias_index, params);
}
const double new_bias_scale =
static_cast<double>(bias_half_range) / kBiasMax;
changed |= SetOperandParams(
op, bias_index,
UniformQuantizedType::getChecked(
bias_op->getLoc(), params.getFlags(), params.getStorageType(),
params.getExpressedType(), new_bias_scale, 0,
params.getStorageTypeMin(), params.getStorageTypeMax()));
arith::ConstantOp filter_op = DuplicateConstantOpIfNeeded(
op->getOperand(filter_index).getDefiningOp<arith::ConstantOp>(), op,
filter_index);
if (!filter_op) {
return SetOperandParams(op, bias_index, params);
}
const auto filter_quantized_type =
mlir::cast<UniformQuantizedType>(filter_state.params);
changed |= SetOperandParams(
op, filter_index,
UniformQuantizedType::getChecked(
filter_op->getLoc(), filter_quantized_type.getFlags(),
filter_quantized_type.getStorageType(),
filter_quantized_type.getExpressedType(),
new_bias_scale / input_scale, 0,
filter_quantized_type.getStorageTypeMin(),
filter_quantized_type.getStorageTypeMax()),
true);
} else if (auto bias_quantized_type =
mlir::dyn_cast<quant::UniformQuantizedPerAxisType>(params);
bias_quantized_type != nullptr) {
const auto filter_quantized_type =
mlir::cast<quant::UniformQuantizedPerAxisType>(filter_state.params);
std::vector<double> new_bias_scales = bias_quantized_type.getScales().vec();
std::vector<double> new_filter_scales =
filter_quantized_type.getScales().vec();
bool needs_adjustment = false;
for (int i = 0; i < bias_quantized_type.getScales().size(); ++i) {
const float abs_bias = std::abs(bias_values.getValues<float>()[i]);
if (abs_bias / new_bias_scales[i] > kBiasMax) {
new_bias_scales[i] = static_cast<double>(abs_bias) / kBiasMax;
new_filter_scales[i] = new_bias_scales[i] / input_scale;
needs_adjustment = true;
}
}
if (!needs_adjustment) {
return SetOperandParams(op, bias_index, params);
}
changed |= SetOperandParams(
op, bias_index,
quant::UniformQuantizedPerAxisType::getChecked(
bias_op->getLoc(), params.getFlags(), params.getStorageType(),
params.getExpressedType(), new_bias_scales,
bias_quantized_type.getZeroPoints(),
bias_quantized_type.getQuantizedDimension(),
params.getStorageTypeMin(), params.getStorageTypeMax()));
arith::ConstantOp filter_op = DuplicateConstantOpIfNeeded(
op->getOperand(filter_index).getDefiningOp<arith::ConstantOp>(), op,
filter_index);
changed |= SetOperandParams(
op, filter_index,
quant::UniformQuantizedPerAxisType::getChecked(
filter_op->getLoc(), filter_quantized_type.getFlags(),
filter_quantized_type.getStorageType(),
filter_quantized_type.getExpressedType(), new_filter_scales,
filter_quantized_type.getZeroPoints(),
filter_quantized_type.getQuantizedDimension(),
filter_quantized_type.getStorageTypeMin(),
filter_quantized_type.getStorageTypeMax()),
true);
}
return changed;
}
void QuantizationDriver::Initialize() {
PreprocessConstantOps();
SetupAllStates();
}
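// Work-list driven propagation: infers parameters for weight constants,
// enforces same-scale and fixed-output-range constraints, and derives bias
// parameters. Returns true if any quantization state changed.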
bool QuantizationDriver::PropagateParamsAndReturnIfChanged() {
bool changed = false;
while (!work_list_.empty()) {
Operation* op = work_list_.back();
work_list_.pop_back();
if (quantized_.contains(op)) continue;
quantized_.insert(op);
if (auto constant_op = dyn_cast<arith::ConstantOp>(op); constant_op) {
if (infer_tensor_range_ && IsWeight(constant_op) && !IsQuantized(op)) {
changed |= SetConstantResultParams(op);
}
continue;
}
std::unique_ptr<OpQuantScaleSpec> scale_spec = GetQuantScaleSpec(op);
if (scale_spec->has_same_scale_requirement) {
const QuantizedType params = GetQuantParamsForSameScaleConstraint(op);
if (!params) {
quantized_.erase(op);
continue;
}
if (is_qdq_conversion_ &&
!scale_spec->required_same_quantized_axes_func()) {
if (HasPerAxisQuantizedOperand(op)) continue;
}
for (int i = 0; i < op->getNumOperands(); ++i) {
if (auto type =
mlir::dyn_cast<ShapedType>(op->getOperand(i).getType())) {
if (mlir::isa<FloatType>(type.getElementType()))
changed |= SetOperandParams(op, i, params);
}
}
for (int i = 0; i < op->getNumResults(); ++i)
if (auto type = mlir::dyn_cast<ShapedType>(op->getResult(i).getType());
type != nullptr) {
if (mlir::isa<FloatType>(type.getElementType()))
changed |= SetResultParams(op, i, params);
}
}
if (scale_spec->has_fixed_output_range && infer_tensor_range_ &&
!is_qdq_conversion_) {
const QuantizedType params =
scale_spec->fixed_output_range_func(is_signed_, bit_width_);
for (auto i = 0; i < op->getNumResults(); ++i) {
if (params) {
changed |= SetResultParams(op, i, params);
}
}
}
const std::unique_ptr<OpQuantSpec> spec = GetQuantSpec(op);
for (const auto& [bias_operand_idx, non_bias_params] :
spec->biases_params) {
const auto& [non_bias_operand_indices, accumulator_scale_func] =
non_bias_params;
const QuantizedType params =
GetBiasParams(op, bias_operand_idx, non_bias_operand_indices,
accumulator_scale_func);
if (!params) {
quantized_.erase(op);
continue;
}
changed |= SetBiasParamsWithAdjustments(op, bias_operand_idx,
non_bias_operand_indices, params);
}
}
return changed;
}
void QuantizationDriver::Finalize() {
for (BlockArgument arg : args_) {
const QuantState& state = GetArgQuantState(arg);
RequantizeStates& requantizes = GetArgRequantizeStates(arg);
if (state.IsEmpty() || (state.immutable && requantizes.empty())) {
continue;
}
if (!state.immutable) {
QuantizeArg(arg, state.params);
}
if (!requantizes.empty()) {
RequantizeArg(arg, requantizes);
}
}
for (const auto& [op_with_result_idx, quant_state_idx] : result_states_) {
const auto [op, result_idx] = op_with_result_idx;
const QuantState& state = GetResultQuantState(op, result_idx);
RequantizeStates& requantizes = GetResultRequantizeStates(op, result_idx);
if (state.IsEmpty() || (state.immutable && requantizes.empty())) {
continue;
}
if (!state.immutable) {
QuantizeOpResult(op, result_idx, state.params);
}
if (!requantizes.empty()) {
RequantizeOpResult(op, result_idx, requantizes);
}
}
}
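// Entry point: set up states, propagate parameters, and, if anything changed,
// materialize the quantize/dequantize casts.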
void QuantizationDriver::Run() {
Initialize();
if (PropagateParamsAndReturnIfChanged()) {
Finalize();
}
}
void ApplyQuantizationParamsPropagation(
const func::FuncOp func, const bool is_signed, const int bit_width,
const bool disable_per_channel,
const OpQuantSpecGetter op_quant_spec_getter,
const bool infer_tensor_ranges, const bool legacy_float_scale,
const bool is_qdq_conversion) {
ApplyQuantizationParamsPropagation(
func, is_signed, bit_width, disable_per_channel, op_quant_spec_getter,
GetDefaultQuantScaleSpec, infer_tensor_ranges, legacy_float_scale,
is_qdq_conversion);
}
void ApplyQuantizationParamsPropagation(
const func::FuncOp func, const bool is_signed, const int bit_width,
const bool disable_per_channel,
const OpQuantSpecGetter op_quant_spec_getter,
const OpQuantScaleSpecGetter op_quant_scale_spec_getter,
const bool infer_tensor_ranges, const bool legacy_float_scale,
const bool is_qdq_conversion) {
QuantizationDriver(func, is_signed, bit_width, disable_per_channel,
op_quant_spec_getter, op_quant_scale_spec_getter,
infer_tensor_ranges, legacy_float_scale, is_qdq_conversion)
.Run();
}
}
} | #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using ApplyQuantizationParamsPropagationTest = QuantizationTestBase;
using ::testing::IsEmpty;
using ::testing::Not;
constexpr absl::string_view kModuleTFLite = R"mlir(
module {
func.func @main(%arg0: tensor<1x4x4x3xf32>) -> tensor<1x4x4x3xf32> attributes {_from_xla_call_module} {
%cst_0 = arith.constant dense<1.0> : tensor<3x1x1x3xf32>
%cst_1 = arith.constant dense<2.0> : tensor<3xf32>
%0 = "tf.XlaCallModule"(%arg0, %cst_0, %cst_1) <{Sout = [#tf_type.shape<1x4x4x3>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_1, _original_entry_function = "composite_fn_1", _tfl_quant_trait = "fully_quantizable"} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
%1 = "tf.XlaCallModule"(%0, %cst_0, %cst_1) <{Sout = [#tf_type.shape<1x4x4x3>], module = "", version = 9 : i64}> {_entry_function = @composite_fn_2, _original_entry_function = "composite_fn_2", _tfl_quant_trait = "fully_quantizable"} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
return %1 : tensor<1x4x4x3xf32>
}
func.func private @composite_fn_1(%arg0: tensor<1x4x4x3xf32>, %arg1: tensor<3x1x1x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x4x4x3xf32> attributes {tf_quant.composite_function} {
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
return %0 : tensor<1x4x4x3xf32>
}
func.func private @composite_fn_2(%arg0: tensor<1x4x4x3xf32>, %arg1: tensor<3x1x1x3xf32>, %arg2: tensor<3xf32>) -> tensor<1x4x4x3xf32> attributes {tf_quant.composite_function} {
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
return %0 : tensor<1x4x4x3xf32>
}
}
)mlir";
std::unique_ptr<quant::OpQuantSpec> GetOpQuantSpec(
const mlir::Operation* op,
bool disable_per_channel_for_dense_layers = false) {
auto spec = std::make_unique<quant::OpQuantSpec>();
spec->coeff_op_quant_dim[1] = 3;
spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias};
for (const auto& [key, value] : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(key);
}
return spec;
}
TEST_F(ApplyQuantizationParamsPropagationTest,
ConstsUsedMultipleTimesAreDuplicated) {
const OwningOpRef<ModuleOp> module_op_ref =
ParseModuleOpString(kModuleTFLite);
func::FuncOp main_fn = FindMainFuncOp(*module_op_ref);
auto op_quant_spec_getter = [&](Operation* op) {
return GetOpQuantSpec(op, false);
};
QuantizationDriver quantization_driver(
main_fn, true, 8,
false, op_quant_spec_getter,
GetDefaultQuantScaleSpec,
true, false,
false);
quantization_driver.Initialize();
int64_t num_constant_op = 0;
main_fn.walk([&](arith::ConstantOp cst) { ++num_constant_op; });
EXPECT_EQ(num_constant_op, 4);
}
TEST_F(ApplyQuantizationParamsPropagationTest,
PropagateParamsCreatesQuantState) {
const OwningOpRef<ModuleOp> module_op_ref =
ParseModuleOpString(kModuleTFLite);
func::FuncOp main_fn = FindMainFuncOp(*module_op_ref);
auto op_quant_spec_getter = [&](Operation* op) {
return GetOpQuantSpec(op, false);
};
QuantizationDriver quantization_driver(
main_fn, true, 8,
false, op_quant_spec_getter,
GetDefaultQuantScaleSpec,
true, false,
false);
quantization_driver.Initialize();
ASSERT_TRUE(quantization_driver.PropagateParamsAndReturnIfChanged());
EXPECT_THAT(quantization_driver.GetArgs(), Not(IsEmpty()));
for (const auto& arg : quantization_driver.GetArgs()) {
const QuantState& state = quantization_driver.GetArgQuantState(arg);
EXPECT_TRUE(isa<quant::QuantizedType>(state.params));
}
for (const auto& result : quantization_driver.GetResultStates()) {
Operation* op = result.first.first;
const int res_index = result.first.second;
const QuantState state =
quantization_driver.GetResultQuantState(op, res_index);
EXPECT_TRUE(isa<quant::QuantizedType>(state.params));
}
}
TEST_F(ApplyQuantizationParamsPropagationTest, FinalizeInsertsQDQOps) {
const OwningOpRef<ModuleOp> module_op_ref =
ParseModuleOpString(kModuleTFLite);
func::FuncOp main_fn = FindMainFuncOp(*module_op_ref);
auto op_quant_spec_getter = [&](Operation* op) {
return GetOpQuantSpec(op, false);
};
ApplyQuantizationParamsPropagation(
main_fn, true, 8,
false, op_quant_spec_getter,
true, false,
false);
Operation* xla_call_module_op =
FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
Operation* filter_dcast_op =
xla_call_module_op->getOperand(1).getDefiningOp();
Operation* filter_qcast_op = filter_dcast_op->getOperand(0).getDefiningOp();
ASSERT_NE(filter_qcast_op, nullptr);
EXPECT_TRUE(isa<quantfork::QuantizeCastOp>(filter_qcast_op));
EXPECT_TRUE(isa<quantfork::DequantizeCastOp>(filter_dcast_op));
EXPECT_TRUE(isa<UniformQuantizedPerAxisType>(
mlir::cast<TensorType>(filter_qcast_op->getResult(0).getType())
.getElementType()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
889988ce-fdc3-4f01-8bf7-b5a597041eae | cpp | tensorflow/tensorflow | mlir_dump | tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.cc | tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
namespace quantization {
namespace {
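// Returns the MLIR dump directory from the TF_QUANT_MLIR_DUMP_PREFIX
// environment variable. The special value "sponge" redirects dumps to the
// test undeclared outputs directory; an unset variable is an error.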
absl::StatusOr<std::string> GetMlirDumpDir() {
auto dump_dir = std::string(
absl::NullSafeStringView(std::getenv("TF_QUANT_MLIR_DUMP_PREFIX")));
if (dump_dir.empty()) {
return absl::FailedPreconditionError(
"Environment variable not set: TF_QUANT_MLIR_DUMP_PREFIX, "
"IR dump file for TF quantization is not created.");
}
if (absl::EqualsIgnoreCase(dump_dir, "sponge")) {
if (!tsl::io::GetTestUndeclaredOutputsDir(&dump_dir)) {
return absl::FailedPreconditionError(
"Environment variable TF_QUANT_MLIR_DUMP_PREFIX=sponge but "
"TEST_UNDECLARED_OUTPUT_DIRS not set.");
}
}
return dump_dir;
}
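// Adapts a tsl::WritableFile to the llvm::raw_ostream interface so MLIR pass
// printing can write directly into a TensorFlow file object.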
class WritableFileWrapper : public llvm::raw_ostream {
public:
~WritableFileWrapper() override { flush(); }
static absl::StatusOr<std::unique_ptr<WritableFileWrapper>> Create(
const std::string& filepath) {
std::unique_ptr<tsl::WritableFile> file;
TF_RETURN_IF_ERROR(tsl::Env::Default()->NewWritableFile(filepath, &file));
return absl::WrapUnique(new WritableFileWrapper(std::move(file)));
}
private:
explicit WritableFileWrapper(std::unique_ptr<tsl::WritableFile> file)
: file_(std::move(file)) {
SetBuffered();
}
uint64_t current_pos() const override {
int64_t position;
if (file_->Tell(&position).ok()) {
return position;
} else {
return -1;
}
}
void write_impl(const char* ptr, size_t size) override {
if (file_ && !file_->Append(absl::string_view(ptr, size)).ok()) {
file_ = nullptr;
}
}
std::unique_ptr<tsl::WritableFile> file_;
};
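// Creates `dump_file_name` inside the dump directory (creating the directory
// tree if needed) and returns a writable stream to it.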
absl::StatusOr<std::unique_ptr<llvm::raw_ostream>> CreateMlirDumpFile(
const absl::string_view dump_file_name) {
const absl::StatusOr<std::string> dump_dir = GetMlirDumpDir();
if (!dump_dir.ok()) {
return dump_dir.status();
}
auto* env = tsl::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(*dump_dir));
const std::string dump_file_path =
tsl::io::JoinPath(*dump_dir, dump_file_name);
TF_ASSIGN_OR_RETURN(std::unique_ptr<llvm::raw_ostream> file,
WritableFileWrapper::Create(dump_file_path));
LOG(INFO) << "IR dump file created: " << dump_file_path;
return file;
}
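// IRPrinterConfig that dumps the IR before and after every pass into separate
// files named "<prefix>_<4-digit pass number>_<pass name>_<before|after>.mlir".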
class PrinterConfig : public mlir::PassManager::IRPrinterConfig {
public:
explicit PrinterConfig(
absl::string_view dump_file_prefix, bool print_module_scope = false,
bool print_after_only_on_change = true,
mlir::OpPrintingFlags op_printing_flags = mlir::OpPrintingFlags())
: mlir::PassManager::IRPrinterConfig(
print_module_scope, print_after_only_on_change,
false, op_printing_flags),
mlir_pass_count_(1),
dump_file_prefix_(dump_file_prefix) {}
void printBeforeIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override {
Dump(pass, print_callback, true);
}
void printAfterIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override {
Dump(pass, print_callback, false);
}
private:
int64_t mlir_pass_count_;
absl::string_view dump_file_prefix_;
llvm::DenseMap<mlir::Pass*, std::unique_ptr<llvm::raw_ostream>>
pass_to_dump_file_before_map_;
llvm::DenseMap<mlir::Pass*, std::unique_ptr<llvm::raw_ostream>>
pass_to_dump_file_after_map_;
llvm::DenseMap<mlir::Pass*, int64_t> pass_to_number_map_;
int64_t GetPassNumber(mlir::Pass* pass) {
if (!pass_to_number_map_.contains(pass)) {
pass_to_number_map_[pass] = mlir_pass_count_++;
}
return pass_to_number_map_[pass];
}
void Dump(mlir::Pass* pass, PrintCallbackFn print_callback, bool is_before) {
auto& pass_to_dump_file_map = is_before ? pass_to_dump_file_before_map_
: pass_to_dump_file_after_map_;
if (!pass_to_dump_file_map.contains(pass)) {
std::string filename = llvm::formatv(
"{0}_{1,0+4}_{2}_{3}.mlir", dump_file_prefix_, GetPassNumber(pass),
pass->getName().str(), is_before ? "before" : "after");
absl::StatusOr<std::unique_ptr<llvm::raw_ostream>> dump_file =
CreateMlirDumpFile(filename);
if (!dump_file.ok()) {
LOG(WARNING) << "Failed to dump MLIR module to " << filename;
return;
}
pass_to_dump_file_map[pass] = std::move(*dump_file);
}
return print_callback(*(pass_to_dump_file_map[pass]));
}
};
}
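// Instruments `pm` to dump the IR around every pass using PrinterConfig.
// Multithreading on the MLIR context is disabled first.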
void EnableIrPrinting(mlir::PassManager& pm,
absl::string_view file_name_prefix) {
mlir::OpPrintingFlags flag{};
flag.useLocalScope().elideLargeElementsAttrs().enableDebugInfo();
if (pm.getContext()->isMultithreadingEnabled()) {
pm.getContext()->disableMultithreading();
}
pm.enableIRPrinting(std::make_unique<PrinterConfig>(
file_name_prefix, false,
true, flag));
}
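// Enables IR dumping only when VLOG level is at least 1; otherwise returns OK
// without modifying the pass manager.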
absl::Status MaybeEnableIrPrinting(mlir::PassManager& pm,
absl::string_view file_name_prefix) {
if (!VLOG_IS_ON(1)) {
LOG(INFO) << "Verbosity level too low to enable IR printing.";
return absl::OkStatus();
}
EnableIrPrinting(pm, file_name_prefix);
LOG(INFO) << "IR dump for TensorFlow quantization pipeline enabled.";
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace quantization {
namespace mlir_dump_test {
class NoOpPass
: public mlir::PassWrapper<NoOpPass, mlir::OperationPass<mlir::ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(NoOpPass)
NoOpPass() = default;
llvm::StringRef getArgument() const final { return "no-op-pass"; }
void runOnOperation() override {
}
};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateNoOpPass() {
return std::make_unique<NoOpPass>();
}
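// A pass that runs a nested pass manager (with its own "dump2" file prefix)
// over the module, exercising IR dumping from dynamically created pass
// managers.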
class ParentPass
: public mlir::PassWrapper<ParentPass,
mlir::OperationPass<mlir::ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ParentPass)
ParentPass() = default;
llvm::StringRef getArgument() const final { return "parent-pass"; }
void runOnOperation() override {
mlir::MLIRContext* ctx = &getContext();
mlir::ModuleOp module_op = getOperation();
mlir::PassManager pm(ctx);
pm.addPass(CreateNoOpPass());
EnableIrPrinting(pm, "dump2");
if (failed(pm.run(module_op))) {
signalPassFailure();
}
}
};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateParentPass() {
return std::make_unique<ParentPass>();
}
}
namespace {
using namespace tensorflow::quantization::mlir_dump_test;
class EnableIrPrintingTest : public ::testing::Test {
protected:
EnableIrPrintingTest() : env_(tsl::Env::Default()) {
if (!tsl::io::GetTestUndeclaredOutputsDir(&test_dir_)) {
test_dir_ = tsl::testing::TmpDir();
}
}
void SetUp() override {
tsl::setenv("TF_QUANT_MLIR_DUMP_PREFIX", test_dir_.c_str(), 1);
mlir::DialectRegistry dialects;
dialects.insert<mlir::BuiltinDialect, mlir::func::FuncDialect,
mlir::stablehlo::StablehloDialect>();
ctx_ = std::make_unique<mlir::MLIRContext>(dialects);
ctx_->loadAllAvailableDialects();
}
void TearDown() override {
std::vector<std::string> files;
TF_ASSERT_OK(
env_->GetMatchingPaths(tsl::io::JoinPath(test_dir_, "*"), &files));
for (const std::string& file : files) {
TF_ASSERT_OK(env_->DeleteFile(file));
}
}
tsl::Env* env_;
std::string test_dir_;
std::unique_ptr<mlir::MLIRContext> ctx_;
};
TEST_F(EnableIrPrintingTest, PassSuccessfullyRuns) {
mlir::PassManager pm = {ctx_.get()};
pm.addPass(CreateNoOpPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
EnableIrPrinting(pm, "dump");
constexpr absl::string_view program = R"mlir(
module{
func.func @main(%arg0: tensor<10xf32>) -> tensor<10xf32> {
return %arg0 : tensor<10xf32>
}
func.func @func1(%arg0: tensor<10xf32>, %arg1: tensor<10xf32>) -> tensor<10xf32> {
%0 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
%1 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
return %0 : tensor<10xf32>
}
})mlir";
auto module_op = mlir::parseSourceString<mlir::ModuleOp>(program, ctx_.get());
const mlir::LogicalResult result = pm.run(module_op.get());
EXPECT_FALSE(failed(result));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump_0001_tensorflow::quantization::mlir_dump_test"
"::NoOpPass_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0002_Canonicalizer_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0002_Canonicalizer_after.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0003_Canonicalizer_before.mlir")));
}
TEST_F(EnableIrPrintingTest, NestedPassSuccessfullyRuns) {
mlir::MLIRContext ctx{};
mlir::PassManager pm = {&ctx};
pm.addPass(CreateParentPass());
EnableIrPrinting(pm, "dump");
mlir::OpBuilder builder(&ctx);
auto module_op = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
const absl::Cleanup module_op_cleanup = [module_op] { module_op->destroy(); };
const mlir::LogicalResult result = pm.run(module_op);
EXPECT_FALSE(failed(result));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump_0001_tensorflow::quantization::mlir_dump_test"
"::ParentPass_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump2_0001_tensorflow::quantization::mlir_dump_test"
"::NoOpPass_before.mlir")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
83b1774b-22a8-4980-b94c-a9ffd854051b | cpp | tensorflow/tensorflow | tf_to_uniform_attribute_utils | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.h"
#include <array>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/ops/uniform_op_quant_spec.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/core/util/quantization/uniform_quant_ops_attr.pb.h"
namespace mlir::quant {
using QuantMethod = tensorflow::quantization::QuantizationMethod::PresetMethod;
enum class OpType {
kDynamicRangeOp,
kUnaryOp,
kBinaryOp,
kQuantizationOp,
};
constexpr std::array<absl::string_view, 3> kQuantizationAxisAttrs = {
"input_quantization_axis", "quantization_axis", "rhs_quantization_axis"};
constexpr std::array<absl::string_view, 2> kSuffixes = {"_min_val", "_max_val"};
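// Extracts the spatial (height, width) strides from the NHWC-ordered TF
// "strides" attribute.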
Attribute GetWindowStridesValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr stride = mlir::dyn_cast<ArrayAttr>(identifier_to_attr["strides"]);
const int stride_h = mlir::cast<IntegerAttr>(stride[1]).getInt();
const int stride_w = mlir::cast<IntegerAttr>(stride[2]).getInt();
return rewriter.getI64ArrayAttr({stride_h, stride_w});
}
Attribute GetLhsDilationValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
return rewriter.getI64ArrayAttr({1, 1});
}
Attribute GetRhsDilationValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr dilations =
mlir::dyn_cast<ArrayAttr>(identifier_to_attr["dilations"]);
const int dilation_h = mlir::cast<IntegerAttr>(dilations[1]).getInt();
const int dilation_w = mlir::cast<IntegerAttr>(dilations[2]).getInt();
return rewriter.getI64ArrayAttr({dilation_h, dilation_w});
}
Attribute GetPaddingValue(PatternRewriter& rewriter,
llvm::StringMap<Attribute>& identifier_to_attr) {
llvm::StringRef padding =
mlir::dyn_cast<StringAttr>(identifier_to_attr["padding"]).getValue();
return rewriter.getStringAttr(padding);
}
Attribute GetExplicitPaddingValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
ArrayAttr explicit_padding =
mlir::dyn_cast<ArrayAttr>(identifier_to_attr["explicit_paddings"]);
return explicit_padding;
}
Attribute GetDimensionNumbersValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
tensorflow::UniformQuantizedConvolutionDimensionNumbersAttr dimension_numbers;
if (!tensorflow::protobuf::TextFormat::ParseFromString(
R"pb(
input_batch_dimension: 0
input_feature_dimension: 3
input_spatial_dimensions: 1
input_spatial_dimensions: 2
kernel_output_feature_dimension: 3
kernel_input_feature_dimension: 2
kernel_spatial_dimensions: 0
kernel_spatial_dimensions: 1
output_batch_dimension: 0
output_feature_dimension: 3
output_spatial_dimensions: 1
output_spatial_dimensions: 2
)pb",
&dimension_numbers)) {
return rewriter.getStringAttr("");
}
return rewriter.getStringAttr(dimension_numbers.SerializeAsString());
}
Attribute GetBatchGroupCountValue(
PatternRewriter& rewriter, llvm::StringMap<Attribute>& identifier_to_attr) {
return rewriter.getI64IntegerAttr(1);
}
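// Returns the quantization axis attribute found on the op defining the given
// operand, or -1 (per-tensor) if none is present.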
Attribute GetQuantizationAxis(PatternRewriter& rewriter, Operation* op,
const int operand_index) {
auto* defining_op = op->getOperand(operand_index).getDefiningOp();
for (auto attr : kQuantizationAxisAttrs) {
if (defining_op->hasAttr(attr)) {
return defining_op->getAttr(attr);
}
}
return rewriter.getI64IntegerAttr(-1);
}
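// Sets `is_8_bit` to whether the operand or result selected by the attribute
// prefix (lhs/rhs/input/output/plain "quantization") has a qint8 element
// type; fails if the op lacks that operand or result.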
LogicalResult CheckIfAttrIs8Bit(const std::string& attr, Operation* op,
bool& is_8_bit) {
Type element_type;
if (attr == "lhs_quantization" || attr == "input_quantization" ||
attr == "quantization") {
if (op->getNumOperands() < 1) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOperand(0).getType());
}
if (attr == "rhs_quantization") {
if (op->getNumOperands() < 2) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOperand(1).getType());
}
if (attr == "output_quantization") {
if (op->getNumResults() < 1) {
return failure();
}
element_type = getElementTypeOrSelf(op->getOpResult(0).getType());
}
if (element_type) {
is_8_bit = mlir::isa<TF::Qint8Type>(element_type);
return success();
}
return failure();
}
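// Appends "<attr>_min_val"/"<attr>_max_val" integer attributes for each
// quantization attribute group implied by `op_type`, using the int8 range
// when the corresponding operand/result is qint8 and the int32 range
// otherwise.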
LogicalResult FillQuantizationAttributes(
PatternRewriter& rewriter, Operation* op, NamedAttrList& attrs,
llvm::StringMap<Attribute>& identifier_to_attr, OpType op_type) {
absl::flat_hash_map<std::string, int> min_max_scheme_for_8bit = {
{"min", -128}, {"max", 127}};
absl::flat_hash_map<std::string, int> min_max_schema_for_32bit = {
{"min", -2147483648}, {"max", 2147483647}};
std::vector<std::string> quantization_attributes;
switch (op_type) {
case OpType::kDynamicRangeOp:
quantization_attributes = {"rhs_quantization"};
break;
case OpType::kUnaryOp:
quantization_attributes = {"quantization"};
break;
case OpType::kBinaryOp:
quantization_attributes = {"lhs_quantization", "rhs_quantization",
"output_quantization"};
break;
case OpType::kQuantizationOp:
quantization_attributes = {"input_quantization", "output_quantization"};
break;
default:
quantization_attributes = {};
break;
}
for (const auto& attr : quantization_attributes) {
bool attr_is_8_bit;
if (failed(CheckIfAttrIs8Bit(attr, op, attr_is_8_bit))) {
return failure();
}
for (int i = 0; i < kSuffixes.size(); i++) {
int64_t quant_val;
if (attr_is_8_bit) {
quant_val = i == 0 ? min_max_scheme_for_8bit["min"]
: min_max_scheme_for_8bit["max"];
} else {
quant_val = i == 0 ? min_max_schema_for_32bit["min"]
: min_max_schema_for_32bit["max"];
}
std::string attr_minmax = absl::StrCat(attr, kSuffixes[i]);
attrs.push_back(rewriter.getNamedAttr(
attr_minmax, rewriter.getI64IntegerAttr(quant_val)));
}
}
return success();
}
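// Fills attributes for tf.UniformQuantizedDot / DotHybrid: the quantization
// ranges, plus rhs/output quantization axes (and an lhs axis of -1 on the
// static-range path). A per-channel axis is used only when the op spec has a
// single quantizable operand.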
LogicalResult FillAttributesForUniformQuantizedDotOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (quantization_method ==
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
if (failed(FillQuantizationAttributes(rewriter, op, attrs,
identifier_to_attr,
OpType::kDynamicRangeOp))) {
return failure();
}
} else {
if (failed(FillQuantizationAttributes(
rewriter, op, attrs, identifier_to_attr, OpType::kBinaryOp))) {
return failure();
}
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
rewriter.getI64IntegerAttr(-1)));
}
std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
absl::flat_hash_set<int> operands = spec->quantizable_operands;
int quant_dim = -1;
if (enable_per_channel_quantization && operands.size() == 1) {
quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
}
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
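// Fills attributes for tf.UniformQuantizedConvolution / ConvolutionHybrid:
// converts the TF convolution attributes (strides, dilations, padding,
// dimension numbers) to the uniform-quantized format, sets
// feature_group_count (the input channel count for depthwise functions), and
// appends the quantization ranges and axes.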
LogicalResult FillAttributesForUniformQuantizedConvolutionOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
absl::flat_hash_map<std::string, Attribute (*)(PatternRewriter&,
llvm::StringMap<Attribute>&)>
attribute_getter_map;
attribute_getter_map = {{"window_strides", GetWindowStridesValue},
{"lhs_dilation", GetLhsDilationValue},
{"rhs_dilation", GetRhsDilationValue},
{"padding", GetPaddingValue},
{"explicit_padding", GetExplicitPaddingValue},
{"dimension_numbers", GetDimensionNumbersValue},
{"batch_group_count", GetBatchGroupCountValue}};
for (auto& attr : op->getAttrs()) {
llvm::StringRef attr_name = attr.getName().getValue();
if (attribute_getter_map.find(attr_name.str()) !=
attribute_getter_map.end()) {
auto attr_val =
(attribute_getter_map[attr_name.str()])(rewriter, identifier_to_attr);
attrs.push_back(rewriter.getNamedAttr(attr_name, attr_val));
}
}
auto feature_group_cnt_attr = llvm::StringRef("feature_group_count");
int feature_group_cnt = 1;
ShapedType input_shape =
mlir::dyn_cast<ShapedType>(op->getOperand(0).getType());
if (!input_shape) {
return op->emitError(
"Only input with known shape is supported for Uniform Quantized "
"opset.");
}
if (op->getParentOfType<func::FuncOp>().getName().contains("depthwise_")) {
feature_group_cnt = input_shape.getDimSize(3);
}
attrs.push_back(rewriter.getNamedAttr(
feature_group_cnt_attr, rewriter.getI64IntegerAttr(feature_group_cnt)));
if (quantization_method ==
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
if (failed(FillQuantizationAttributes(rewriter, op, attrs,
identifier_to_attr,
OpType::kDynamicRangeOp))) {
return failure();
}
} else {
if (failed(FillQuantizationAttributes(
rewriter, op, attrs, identifier_to_attr, OpType::kBinaryOp))) {
return failure();
}
}
if (quantization_method !=
tensorflow::quantization::QuantizationMethod::METHOD_DYNAMIC_RANGE_INT8) {
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
rewriter.getI64IntegerAttr(-1)));
}
std::unique_ptr<OpQuantSpec> spec = GetUniformOpQuantSpec(op);
absl::flat_hash_set<int> operands = spec->quantizable_operands;
int quant_dim = -1;
if (enable_per_channel_quantization && operands.size() == 1) {
quant_dim = spec->coeff_op_quant_dim[*(operands.begin())];
}
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
rewriter.getI64IntegerAttr(quant_dim)));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
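// Fills attributes for tf.UniformQuantizedAdd. With per-channel quantization
// enabled, the axis is taken from whichever input already carries a
// quantization axis attribute; otherwise -1 is used for lhs, rhs, and output.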
LogicalResult FillAttributesForUniformQuantizedAddOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
const QuantMethod quantization_method,
const bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kBinaryOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
activation_quantization_axis =
GetQuantizationAxis(rewriter, op, 0);
if (activation_quantization_axis == rewriter.getI64IntegerAttr(-1)) {
activation_quantization_axis =
GetQuantizationAxis(rewriter, op, 1);
}
}
attrs.push_back(rewriter.getNamedAttr("lhs_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("rhs_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
activation_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
LogicalResult FillAttributesForUniformQuantizedClipByValueOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kUnaryOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
activation_quantization_axis =
GetQuantizationAxis(rewriter, op, 0);
}
attrs.push_back(
rewriter.getNamedAttr("quantization_axis", activation_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
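// Fills attributes for tf.UniformRequantize. The output axis mirrors the
// input axis only when the output scale operand is a non-scalar
// (per-channel) tensor.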
LogicalResult FillAttributesForUniformRequantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kQuantizationOp))) {
return failure();
}
Attribute activation_quantization_axis = rewriter.getI64IntegerAttr(-1);
Attribute output_quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
activation_quantization_axis =
GetQuantizationAxis(rewriter, op, 0);
auto output_scale_type =
mlir::dyn_cast<ShapedType>(op->getOperand(3).getType());
if (!output_scale_type) {
return failure();
}
if (output_scale_type.hasRank() && 0 < output_scale_type.getRank()) {
output_quantization_axis = activation_quantization_axis;
}
}
attrs.push_back(rewriter.getNamedAttr("input_quantization_axis",
activation_quantization_axis));
attrs.push_back(rewriter.getNamedAttr("output_quantization_axis",
output_quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
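// Fills attributes for tf.UniformQuantize, using quantization axis 3 when
// per-channel quantization is enabled and -1 otherwise.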
LogicalResult FillAttributesForUniformQuantizeOp(
PatternRewriter& rewriter, Operation* op,
llvm::StringMap<Attribute>& identifier_to_attr,
QuantMethod quantization_method, bool enable_per_channel_quantization) {
NamedAttrList attrs;
if (failed(FillQuantizationAttributes(rewriter, op, attrs, identifier_to_attr,
OpType::kUnaryOp))) {
return failure();
}
Attribute quantization_axis = rewriter.getI64IntegerAttr(-1);
if (enable_per_channel_quantization) {
quantization_axis = rewriter.getI64IntegerAttr(3);
}
attrs.push_back(
rewriter.getNamedAttr("quantization_axis", quantization_axis));
op->setAttrs(rewriter.getDictionaryAttr(attrs));
return success();
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.h"
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringMap.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using QuantMethod = tensorflow::quantization::QuantizationMethod::PresetMethod;
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& op_builder)
: mlir::PatternRewriter(op_builder) {}
~EmptyPatternRewriter() override = default;
};
class TfToUniformAttributeUtilsTestPeer {
public:
explicit TfToUniformAttributeUtilsTestPeer() = delete;
explicit TfToUniformAttributeUtilsTestPeer(MLIRContext* ctx)
: rewriter_(OpBuilder(ctx)) {}
EmptyPatternRewriter rewriter_;
};
class TfToUniformAttributeUtilsTest : public ::testing::Test {
protected:
TfToUniformAttributeUtilsTest() : ctx_() {
ctx_.loadDialect<TF::TensorFlowDialect>();
}
MLIRContext ctx_;
};
TF::UniformQuantizedAddOp ParseUniformQuantizedAddOp(
const absl::string_view add_op_str, Block& block, MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(add_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto uq_add_op = dyn_cast_or_null<TF::UniformQuantizedAddOp>(block.back());
EXPECT_TRUE(uq_add_op);
return uq_add_op;
}
TF::UniformRequantizeOp ParseUniformRequantizedOp(
const absl::string_view requant_op_str, Block& block, MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(requant_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto uq_requant_op = dyn_cast_or_null<TF::UniformRequantizeOp>(block.back());
EXPECT_TRUE(uq_requant_op);
return uq_requant_op;
}
TEST_F(TfToUniformAttributeUtilsTest, UniformQuantizedAddOpAttributes) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kAddOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<2x!tf_type.qint32>} : () -> tensor<2x!tf_type.qint32>
%2 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%3 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%4 = "tf.UniformQuantizedAdd"(%0, %1, %2, %3, %2, %3, %2, %3) {device = "", lhs_quantization_axis = -1 : i64, lhs_quantization_max_val = 127 : i64, lhs_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64, rhs_quantization_axis = -1 : i64, rhs_quantization_max_val = 127 : i64, rhs_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2x!tf_type.qint32>, tensor<f32>, tensor<i32>, tensor<f32>, tensor<i32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint32>
)mlir";
Block block{};
TF::UniformQuantizedAddOp op =
ParseUniformQuantizedAddOp(kAddOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformQuantizedAddOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
false);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getLhsQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getLhsQuantizationMinValAttr().getInt());
ASSERT_EQ(2147483647, op.getRhsQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getRhsQuantizationMinValAttr().getInt());
ASSERT_EQ(2147483647, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(-1, op.getLhsQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getRhsQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest, UniformQuantizedRequantizeOpAttributes) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%4 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
true);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(3, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest,
UniformQuantizedRequantizeOpAttributes_OutputPerChannel) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%4 = "tf.Const"() {value = dense<0> : tensor<2xi32>} : () -> tensor<2xi32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = 1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<2xf32>, tensor<2xi32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
true);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(3, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(3, op.getOutputQuantizationAxisAttr().getInt());
}
TEST_F(TfToUniformAttributeUtilsTest,
UniformQuantizedRequantizeOpAttributes_DisablePerChannelQuantization) {
TfToUniformAttributeUtilsTestPeer test_peer(&ctx_);
constexpr absl::string_view kRequantOpExpr =
R"mlir(
%0 = "tf.Const"() {value = #tf_type<tensor_proto : "0x746674656"> : tensor<1x3x2x2x!tf_type.qint32>, quantization_axis = 3} : () -> tensor<1x3x2x2x!tf_type.qint32>
%1 = "tf.Const"() {value = dense<1.0> : tensor<2xf32>} : () -> tensor<2xf32>
%2 = "tf.Const"() {value = dense<2> : tensor<2xi32>} : () -> tensor<2xi32>
%3 = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
%4 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
%5 = "tf.UniformRequantize"(%0, %1, %2, %3, %4) {device = "", input_quantization_axis = 3 : i64, input_quantization_max_val = 127 : i64, input_quantization_min_val = -127 : i64, output_quantization_axis = -1 : i64, output_quantization_max_val = 127 : i64, output_quantization_min_val = -127 : i64} : (tensor<1x3x2x2x!tf_type.qint32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<1x3x2x2x!tf_type.qint8>
)mlir";
Block block{};
TF::UniformRequantizeOp op =
ParseUniformRequantizedOp(kRequantOpExpr, block, ctx_);
llvm::StringMap<Attribute> identifier_to_attr;
QuantMethod quantization_method =
tensorflow::quantization::QuantizationMethod::METHOD_STATIC_RANGE_INT8;
auto res = FillAttributesForUniformRequantizeOp(
test_peer.rewriter_, op, identifier_to_attr, quantization_method,
false);
ASSERT_TRUE(succeeded(res));
ASSERT_EQ(2147483647, op.getInputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-2147483648, op.getInputQuantizationMinValAttr().getInt());
ASSERT_EQ(127, op.getOutputQuantizationMaxValAttr().getInt());
ASSERT_EQ(-128, op.getOutputQuantizationMinValAttr().getInt());
ASSERT_EQ(-1, op.getInputQuantizationAxisAttr().getInt());
ASSERT_EQ(-1, op.getOutputQuantizationAxisAttr().getInt());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
66f0c126-a09c-451b-83a5-da25cfacf60d | cpp | tensorflow/tensorflow | tf_to_xla_attribute_utils | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc | tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc | #include <algorithm>
#include <numeric>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/str_format.h"
#include "llvm/ADT/ArrayRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/lite/core/c/builtin_op_data.h"
#include "tensorflow/compiler/mlir/lite/kernels/padding.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.h"
#include "xla/xla_data.pb.h"
namespace mlir::quant {
namespace {
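// Emits a tf.StridedSlice that extracts dimension `dim` of a shape tensor as
// a scalar.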
Value GetDimValue(OpBuilder &builder, Location loc, Value shape_value,
int32_t dim) {
Type attribute_type = builder.getI64Type();
return builder.create<TF::StridedSliceOp>(
loc,
RankedTensorType::get(
{}, mlir::cast<ShapedType>(shape_value.getType()).getElementType()),
shape_value,
Create1DConstValue<int32_t>(builder, loc, {dim}),
Create1DConstValue<int32_t>(builder, loc, {dim + 1}),
Create1DConstValue<int32_t>(builder, loc, {1}),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 0),
builder.getIntegerAttr(attribute_type, 1));
}
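// Emits the scalar arithmetic that computes SAME-padding low/high amounts for
// one spatial dimension whose input size is only known at runtime.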
void GetSamePaddingValues(OpBuilder &builder, Location loc, Value input_size,
int64_t filter_sz, int64_t dilation_rate,
int64_t stride, Value &padding_low,
Value &padding_high) {
Value zero = CreateScalarConstValue<int32_t>(builder, loc, 0);
Value one = CreateScalarConstValue<int32_t>(builder, loc, 1);
Value two = CreateScalarConstValue<int32_t>(builder, loc, 2);
Value filter_size = CreateScalarConstValue<int32_t>(builder, loc, filter_sz);
Type int32_scalar_type = zero.getType();
auto scalar_add = [&](Value lhs, Value rhs) {
return builder.create<TF::AddOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_mul = [&](Value lhs, Value rhs) {
return builder.create<TF::MulOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_sub = [&](Value lhs, Value rhs) {
return builder.create<TF::SubOp>(loc, int32_scalar_type, lhs, rhs);
};
auto scalar_div = [&](Value lhs, Value rhs) {
return builder.create<TF::DivOp>(loc, int32_scalar_type, lhs, rhs);
};
Value stride_value = CreateScalarConstValue<int32_t>(builder, loc, stride);
Value dilation_rate_value =
CreateScalarConstValue<int32_t>(builder, loc, dilation_rate);
Value effective_filter_size_op = scalar_add(
scalar_mul(dilation_rate_value, scalar_sub(filter_size, one)), one);
Value output_size = scalar_div(
scalar_add(input_size, scalar_sub(stride_value, one)), stride_value);
Value padding_needed = scalar_sub(
scalar_add(effective_filter_size_op,
scalar_mul(stride_value, scalar_sub(output_size, one))),
input_size);
padding_needed = builder.create<TF::MaximumOp>(loc, padding_needed, zero);
padding_low = scalar_div(padding_needed, two);
padding_high = scalar_sub(padding_needed, padding_low);
}
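// Handles SAME padding for inputs with dynamic spatial dimensions: computes
// the padding at runtime, applies it with tf.PadV2 filled with the input zero
// point, and sets the XLA-side `padding` output to all zeros.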
Value PadForDynamicShapedInputSamePadding(
OpBuilder &builder, Location loc, Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides, ArrayAttr dilations,
StringAttr conv_padding, Value &padding, int num_dims) {
Value zero_rank1 = CreateConstValue<int32_t>(builder, loc, {1}, {0});
SmallVector<Value> temp_padding_values{zero_rank1, zero_rank1};
auto reshape_op = [&](Value value, const SmallVector<int64_t> &shape) {
const int64_t rank = shape.size();
return builder.create<TF::ReshapeOp>(
loc, RankedTensorType::get(shape, builder.getI32Type()), value,
CreateConstValue<int64_t>(builder, loc, {rank}, shape));
};
ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType());
Value input_shape_value = builder.create<TF::ShapeOp>(
loc, RankedTensorType::get({num_dims}, builder.getI32Type()), input);
auto scalar_to_rank1 = [&](Value value) { return reshape_op(value, {1}); };
for (int i : llvm::seq<int>(1, num_dims - 1)) {
Value input_size_i = GetDimValue(builder, loc, input_shape_value, i);
const int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt();
const int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
const int filter_i = filter_shape.getDimSize(i - 1);
Value pad_i_low, pad_i_high;
GetSamePaddingValues(builder, loc, input_size_i, filter_i, dilation_i,
stride_i, pad_i_low, pad_i_high);
temp_padding_values.push_back(scalar_to_rank1(pad_i_low));
temp_padding_values.push_back(scalar_to_rank1(pad_i_high));
}
temp_padding_values.push_back(zero_rank1);
temp_padding_values.push_back(zero_rank1);
padding = CreateConstValue<int32_t>(
builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(2 * (num_dims - 2), 0));
Value zero = CreateScalarConstValue(builder, loc, 0);
Value temp_padding_rank1 = builder.create<TF::ConcatOp>(
loc, RankedTensorType::get({2 * num_dims}, builder.getI32Type()), zero,
temp_padding_values);
Value temp_padding = reshape_op(temp_padding_rank1, {num_dims, 2});
return builder.create<TF::PadV2Op>(
loc, input.getType(), input, temp_padding,
CreateScalarConstValue<int8_t>(builder, loc, input_zp_value));
}
}
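// Computes the convolution padding for the XLA op. When the input zero point
// is non-zero and padding is required, the padding is applied explicitly with
// tf.PadV2 (filled with the zero point) and `padding` is set to zeros;
// otherwise the input is returned unchanged alongside the computed `padding`.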
Value CalculatePaddingAndPadIfNeeded(OpBuilder &builder, Location loc,
Value input, Value filter,
int8_t input_zp_value, ArrayAttr strides,
ArrayAttr dilations,
StringAttr conv_padding,
ArrayAttr explicit_paddings,
Value &padding, int num_dims) {
ShapedType input_shape = mlir::cast<ShapedType>(input.getType());
SmallVector<int64_t> spatial_dims(num_dims - 2);
absl::c_iota(spatial_dims, 1);
bool has_dynamic_spatial_dim = absl::c_any_of(
spatial_dims,
[&input_shape](int64_t dim) { return input_shape.isDynamicDim(dim); });
if (conv_padding.strref() == "SAME" && has_dynamic_spatial_dim) {
return PadForDynamicShapedInputSamePadding(
builder, loc, input, filter, input_zp_value, strides, dilations,
conv_padding, padding, num_dims);
}
ShapedType filter_shape = mlir::cast<ShapedType>(filter.getType());
SmallVector<int32_t> padding_values(2 * num_dims, 0);
if (conv_padding.strref() == "EXPLICIT") {
if (explicit_paddings.size() != 2 * num_dims) {
emitError(loc,
absl::StrFormat(
"explicit_paddings are expected to be %d-element arrays",
2 * num_dims));
return {};
}
for (int i : spatial_dims) {
padding_values[2 * i] =
mlir::cast<IntegerAttr>(explicit_paddings[2 * i]).getInt();
padding_values[2 * i + 1] =
mlir::cast<IntegerAttr>(explicit_paddings[2 * i + 1]).getInt();
}
} else if (conv_padding.strref() == "SAME") {
for (int i : spatial_dims) {
int input_size = input_shape.getDimSize(i);
int filter_size = filter_shape.getDimSize(i - 1);
int stride_i = mlir::cast<IntegerAttr>(strides[i]).getInt();
int dilation_i = mlir::cast<IntegerAttr>(dilations[i]).getInt();
int out_size = tflite_migration::ComputeOutSize(
kTfLitePaddingSame, input_size, filter_size, stride_i, dilation_i);
int offset = 0;
int padding_before = tflite_migration::ComputePaddingWithOffset(
stride_i, dilation_i, input_size, filter_size, out_size, &offset);
int padding_after = padding_before + offset;
padding_values[2 * i] = padding_before;
padding_values[2 * i + 1] = padding_after;
}
}
if (input_zp_value == 0 ||
absl::c_all_of(padding_values, [](int v) { return v == 0; })) {
padding = CreateConstValue<int32_t>(
builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(padding_values.begin() + 2,
padding_values.end() - 2));
return input;
}
padding =
CreateConstValue<int32_t>(builder, loc, {num_dims - 2, 2},
SmallVector<int32_t>(2 * (num_dims - 2), 0));
Value temp_padding =
CreateConstValue<int32_t>(builder, loc, {num_dims, 2}, padding_values);
SmallVector<int64_t> output_shape(input_shape.getShape().begin(),
input_shape.getShape().end());
for (int i : spatial_dims) {
output_shape[i] += padding_values[2 * i] + padding_values[2 * i + 1];
}
return builder.create<TF::PadV2Op>(
loc, RankedTensorType::get(output_shape, builder.getI8Type()), input,
temp_padding,
CreateScalarConstValue<int8_t>(builder, loc, input_zp_value));
}
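// Packs the operand along `pack_dim` so that two 4-bit values share one int8
// element: the first half of the dimension fills the low nibble and the
// second half the high nibble. The dimension is padded to an even size if
// necessary and the result is constant-folded when possible.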
Value PackOperand(OpBuilder &builder, Location loc, Value value, int pack_dim) {
ShapedType value_type = mlir::cast<ShapedType>(value.getType());
const int rank = value_type.getRank();
SmallVector<int64_t> packed_shape(value_type.getShape().begin(),
value_type.getShape().end());
RankedTensorType shape_type =
RankedTensorType::get({rank}, builder.getI64Type());
Value shape_value = builder.create<TF::ShapeOp>(loc, shape_type, value);
if (packed_shape[pack_dim] % 2 != 0) {
packed_shape[pack_dim] += 1;
SmallVector<int32_t> padding(rank * 2, 0);
padding[pack_dim * 2 + 1] = 1;
Value padding_value =
CreateConstValue<int32_t>(builder, loc, {rank, 2}, padding);
value = builder.create<TF::PadV2Op>(
loc, RankedTensorType::get(packed_shape, builder.getI8Type()), value,
padding_value, CreateScalarConstValue<int8_t>(builder, loc, 0));
SmallVector<int64_t> shape_add(rank, 0);
shape_add[pack_dim] = 1;
shape_value = builder.create<TF::AddOp>(
loc, shape_type, shape_value,
CreateConstValue<int64_t>(builder, loc, {rank}, shape_add));
}
packed_shape[pack_dim] /= 2;
SmallVector<int64_t> divisor(rank, 1);
divisor[pack_dim] = 2;
RankedTensorType packed_output_type =
RankedTensorType::get(packed_shape, builder.getI8Type());
Value packed_shape_value = builder.create<TF::DivOp>(
loc, shape_type, shape_value,
CreateConstValue<int64_t>(builder, loc, {rank}, divisor));
Value packed_low_begin_value = CreateConstValue<int64_t>(
builder, loc, {rank}, SmallVector<int64_t>(rank, 0));
Value packed_low_value =
builder.create<TF::SliceOp>(loc, packed_output_type, value,
packed_low_begin_value, packed_shape_value);
packed_low_value = builder.create<TF::BitwiseAndOp>(
loc, packed_output_type, packed_low_value,
CreateScalarConstValue<int8_t>(builder, loc, 0x0F));
SmallVector<int64_t> packed_high_begin(rank, 0);
packed_high_begin[pack_dim] = packed_shape[pack_dim];
Value packed_high_begin_value =
CreateConstValue<int64_t>(builder, loc, {rank}, packed_high_begin);
Value packed_high_value =
builder.create<TF::SliceOp>(loc, packed_output_type, value,
packed_high_begin_value, packed_shape_value);
packed_high_value = builder.create<TF::LeftShiftOp>(
loc, packed_output_type, packed_high_value,
CreateScalarConstValue<int8_t>(builder, loc, 4));
Operation *packed = builder.create<TF::BitwiseOrOp>(
loc, packed_output_type, packed_low_value, packed_high_value);
return ConstantFoldOpIfPossible(packed).front();
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.h"
#include <cstdint>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
namespace mlir::quant {
namespace {
void PackOperandTestHelper(
const llvm::SmallVector<int64_t>& unpacked_shape,
const llvm::SmallVector<int8_t>& unpacked_values, int pack_dim,
const llvm::SmallVector<int64_t>& expected_packed_shape,
const llvm::SmallVector<int8_t>& expected_packed_values) {
MLIRContext context;
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
OpBuilder builder(&module->getBodyRegion());
context.loadDialect<TF::TensorFlowDialect>();
Value value = CreateConstValue<int8_t>(builder, module->getLoc(),
unpacked_shape, unpacked_values);
Value packed_value = PackOperand(builder, module->getLoc(), value, pack_dim);
DenseIntElementsAttr packed_value_attr;
ASSERT_TRUE(matchPattern(packed_value, m_Constant(&packed_value_attr)));
ShapedType packed_shape_type =
mlir::dyn_cast<ShapedType>(packed_value.getType());
llvm::SmallVector<int64_t> packed_shape(packed_shape_type.getShape().begin(),
packed_shape_type.getShape().end());
EXPECT_THAT(packed_shape, testing::ElementsAreArray(expected_packed_shape));
llvm::SmallVector<int8_t> packed_value_vector(
packed_value_attr.getValues<int8_t>());
EXPECT_THAT(packed_value_vector,
testing::ElementsAreArray(expected_packed_values));
}
TEST(TfToXlaAttributeUtilsTest, PackOperandPackDimSizeEven) {
PackOperandTestHelper({2, 2},
{0x01, 0x02, 0x03, 0x04},
0,
{1, 2},
{0x31, 0x42});
}
TEST(TfToXlaAttributeUtilsTest, PackOperandPackDimSizeOdd) {
PackOperandTestHelper(
{2, 3},
{0x01, 0x02, 0x03, 0x04, 0x05, 0x06},
1,
{2, 2},
{0x31, 0x02, 0x64, 0x05});
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_xla_attribute_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dd964e63-6f3d-4030-987b-827743b1f564 | cpp | tensorflow/tensorflow | tf_quantize_op | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include <functional>
#include <optional>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_quantize_op_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
namespace {
constexpr StringRef kDequantizeFunctionName = "composite_dequantize";
constexpr StringRef kUniformQuantizationFunctionName = "uniform";
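// Creates a new function named `func_name` whose input/output are unranked
// tensors with the element types of `input_val` and `result_type`, adds its
// entry block, and returns the block argument through `func_input_arg`.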
func::FuncOp PrepareFunctionRegister(PatternRewriter& rewriter, Value input_val,
ShapedType result_type,
StringRef func_name,
Value& func_input_arg) {
Operation* input_op = input_val.getDefiningOp();
Operation* insertion_point = input_op->getParentOfType<func::FuncOp>();
if (!insertion_point) insertion_point = input_op->getParentOfType<ModuleOp>();
rewriter.setInsertionPointAfter(insertion_point);
UnrankedTensorType create_unknown_input_shape =
CreateUnknownShapeFromElementType(input_val.getType());
UnrankedTensorType create_unknown_output_shape =
CreateUnknownShapeFromElementType(result_type);
FunctionType func_type =
FunctionType::get(rewriter.getContext(), {create_unknown_input_shape},
{create_unknown_output_shape});
func::FuncOp quantization_func =
rewriter.create<func::FuncOp>(input_op->getLoc(), func_name, func_type);
OpBuilder::InsertionGuard guard = OpBuilder::InsertionGuard(rewriter);
ArrayRef<Type> inputs = quantization_func.getFunctionType().getInputs();
Block* block = rewriter.createBlock(
&quantization_func.getBody(), quantization_func.begin(), inputs,
SmallVector<Location>(inputs.size(), quantization_func.getLoc()));
func_input_arg = block->getArgument(0);
return quantization_func;
}
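// Terminates the outlined quantization function with a return, registers it
// as a private symbol in the module, and emits a tf.PartitionedCall to it at
// the original insertion point.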
TF::PartitionedCallOp FinalizeFunctionRegister(
PatternRewriter& rewriter, Value input, Value output,
func::FuncOp& quantization_func, Operation* quantized_op,
StringRef func_name, IRRewriter::InsertPoint original_point,
Type quantize_result_type) {
rewriter.create<func::ReturnOp>(input.getLoc(), ArrayRef<Value>({output}));
quantization_func.setVisibility(func::FuncOp::Visibility::Private);
SymbolTable symbol_table(quantized_op->getParentOfType<ModuleOp>());
symbol_table.insert(quantization_func);
FlatSymbolRefAttr func_name_attr =
FlatSymbolRefAttr::get(rewriter.getStringAttr(func_name));
rewriter.restoreInsertionPoint(original_point);
auto quantize_call = rewriter.create<TF::PartitionedCallOp>(
quantized_op->getLoc(), quantize_result_type, input, func_name_attr,
"", "", "");
return quantize_call;
}
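// Outlines the ops built by `quantization_operations_func` into a uniquely
// named function and returns the tf.PartitionedCall that invokes it.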
std::optional<TF::PartitionedCallOp> RegisterOperationsInFuncOp(
StringRef func_name, PatternRewriter& rewriter, QuantizedType quant_type,
Value input_val, ShapedType result_type,
std::function<Operation*(PatternRewriter&, Operation*, Value, ShapedType,
QuantizedType)>
quantization_operations_func) {
Operation* input_op = input_val.getDefiningOp();
auto original_point = rewriter.saveInsertionPoint();
auto unique_func_name = func_name.str();
SymbolTable symbol_table(input_op->getParentOfType<ModuleOp>());
while (symbol_table.lookup(unique_func_name)) {
absl::StrAppend(&unique_func_name, "_");
}
Value func_input_arg;
func::FuncOp func_op = PrepareFunctionRegister(
rewriter, input_val, result_type, unique_func_name, func_input_arg);
Operation* last_op_in_func =
quantization_operations_func(rewriter, func_op.getOperation(),
func_input_arg, result_type, quant_type);
auto end_call_op = FinalizeFunctionRegister(
rewriter, input_val, last_op_in_func->getResult(0), func_op, input_op,
unique_func_name, original_point, result_type);
return end_call_op;
}
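// Derives signed, narrow-range 8-bit uniform quantization parameters from the
// constant weight value.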
QuantizedType CalculateUniformQuantParams(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
const bool kIsNarrowRange = true;
const bool kIsSigned = true;
const int kBitWidth = 8;
DenseFPElementsAttr attr;
if (!matchPattern(op->getResult(0), m_Constant(&attr))) return nullptr;
QuantizedType quant_type = mlir::dyn_cast<quant::QuantizedType>(
quant::GetUniformQuantizedTypeForWeight(
attr, kIsNarrowRange && kIsSigned, kBitWidth, kIsSigned,
kIsNarrowRange, false));
return quant_type;
}
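// Quantizes the float constant to the given quantized type and materializes
// it as an integer-typed tf.Const followed by a tf.Identity, returning the
// identity's result.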
std::optional<Value> AddUniformQuantizeOps(PatternRewriter& rewriter,
TF::ConstOp op,
QuantizedType quant_type) {
DenseFPElementsAttr attr;
if (!matchPattern(op->getResult(0), m_Constant(&attr))) {
return nullptr;
}
Type expressed_type = op.getResult().getType();
Type quantized_type = quant_type.castFromExpressedType(expressed_type);
ShapedType shaped_quantized_type = mlir::cast<ShapedType>(quantized_type);
DenseElementsAttr tensor_proto_attr =
mlir::dyn_cast<DenseElementsAttr>(Quantize(attr, shaped_quantized_type));
if (!tensor_proto_attr) {
return nullptr;
}
Type storage_type =
mlir::cast<QuantizedType>(shaped_quantized_type.getElementType())
.getStorageType();
ShapedType new_type = shaped_quantized_type.clone(storage_type);
rewriter.setInsertionPointAfter(op);
auto const_op =
rewriter.create<TF::ConstOp>(op.getLoc(), new_type, tensor_proto_attr);
auto new_identity_op = rewriter.create<TF::IdentityOp>(
op->getLoc(), const_op.getType(), const_op);
return new_identity_op.getResult();
}
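// Builds the dequantization body inside `func_op`: casts the quantized input
// back to the original element type and multiplies it by the quantization
// scale (the scale is cast to bf16 when the original tensor is bf16).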
Operation* LogicsForUniformDequanization(PatternRewriter& rewriter,
Operation* func_op, Value input_val,
ShapedType original_input_tensor_type,
QuantizedType quant_type) {
auto loc = input_val.getLoc();
rewriter.setInsertionPointToStart(
&(cast<func::FuncOp>(func_op)).getBody().front());
UnrankedTensorType create_unknown_input_shape =
CreateUnknownShapeFromElementType(original_input_tensor_type);
auto new_cast_op =
rewriter.create<TF::CastOp>(loc, create_unknown_input_shape, input_val);
auto qtype = mlir::dyn_cast<UniformQuantizedType>(quant_type);
TensorType scale_type = RankedTensorType::get({}, rewriter.getF32Type());
Value scale_op = rewriter.create<TF::ConstOp>(
loc, scale_type,
DenseFPElementsAttr::get(scale_type,
{static_cast<float>(qtype.getScale())}));
if (original_input_tensor_type.getElementType().isBF16()) {
scale_op = rewriter.create<TF::CastOp>(
loc, UnrankedTensorType::get(rewriter.getBF16Type()), scale_op);
}
auto mul_op = rewriter.create<TF::MulOp>(loc, new_cast_op.getType(), scale_op,
new_cast_op);
return mul_op;
}
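// Wraps the dequantization logic in a composite dequantize function (name
// joined from kDequantizeFunctionName and kUniformQuantizationFunctionName)
// and returns the PartitionedCallOp that invokes it.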
std::optional<TF::PartitionedCallOp> AddUniformDequantizeOps(
PatternRewriter& rewriter, QuantizedType quant_type,
Value val_to_dequantize, ShapedType result_type) {
auto func_name = absl::StrJoin(
{kDequantizeFunctionName, kUniformQuantizationFunctionName}, "_");
std::optional<TF::PartitionedCallOp> dequant_op = RegisterOperationsInFuncOp(
func_name, rewriter, quant_type, val_to_dequantize, result_type,
LogicsForUniformDequanization);
return dequant_op;
}
}
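// Entry point: replaces a float constant with a quantize/dequantize pair and
// returns the call to the generated dequantization function when quantization
// succeeds.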
std::optional<TF::PartitionedCallOp> ApplyUniformQuantization(
PatternRewriter& rewriter, TF::ConstOp op,
tensorflow::quantization::QuantizationComponentSpec& weight_spec) {
QuantizedType quant_type =
CalculateUniformQuantParams(rewriter, op, weight_spec);
if (!quant_type) return nullptr;
std::optional<Value> quantized_val =
AddUniformQuantizeOps(rewriter, op, quant_type);
if (!quantized_val.has_value()) return std::nullopt;
std::optional<TF::PartitionedCallOp> dequantized_val =
AddUniformDequantizeOps(rewriter, quant_type, quantized_val.value(),
mlir::cast<ShapedType>(op.getType()));
return dequantized_val;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.h"
#include <optional>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir::quant {
namespace {
using QuantizationComponentSpec =
tensorflow::quantization::QuantizationComponentSpec;
class EmptyPatternRewriter : public mlir::PatternRewriter {
public:
explicit EmptyPatternRewriter(const OpBuilder& other_builder)
: mlir::PatternRewriter(other_builder) {}
~EmptyPatternRewriter() override = default;
};
TEST(TfQuantOpTest, applyUniformQuantization) {
MLIRContext context;
OwningOpRef<ModuleOp> module(ModuleOp::create(UnknownLoc::get(&context)));
OpBuilder builder(&module->getBodyRegion());
context.loadDialect<TF::TensorFlowDialect, quant::QuantDialect,
func::FuncDialect>();
EmptyPatternRewriter pattern_rewriter(builder);
Value value = CreateConstValue<float>(builder, module->getLoc(), {1024, 2},
SmallVector<float>(2048, 0));
QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
std::optional<TF::PartitionedCallOp> dequantize_op = ApplyUniformQuantization(
pattern_rewriter, cast<TF::ConstOp>(value.getDefiningOp()), quant_spec);
EXPECT_TRUE(dequantize_op.has_value());
EXPECT_EQ(dequantize_op.value().func().getName().str(),
"composite_dequantize_uniform");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_quantize_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
03955f20-a5af-4082-9ddf-ddb88325bfea | cpp | tensorflow/tensorflow | tf_op_quant_spec | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc | tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
namespace mlir {
namespace quant {
bool IsOpWithDataMovementTrait(Operation* op) {
return isa<TF::IdentityOp, TF::CastOp, TF::ReshapeOp, TF::XlaShardingOp,
TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp, TF::ExpandDimsOp,
TF::SqueezeOp, TF::TransposeOp>(op);
}
bool IsOpWithQuantizableTrait(Operation* op) {
return isa<TF::XlaConvV2Op, TF::XlaDotV2Op, TF::MatMulOp, TF::Conv2DOp,
TF::GatherOp, TF::GatherV2Op, TF::XlaGatherOp,
TF::ResourceGatherOp, TF::DepthwiseConv2dNativeOp, TF::Conv3DOp,
TF::BatchMatMulV2Op, TF::EinsumOp>(op);
}
bool IsOpWithInt8TypeOperand(Operation* op) {
return (isa<TF::XlaConvV2Op, TF::XlaDotV2Op, TF::XlaGatherOp, TF::GatherOp,
TF::GatherV2Op>(op));
}
bool IsValueWithQuantizablePrecision(Value val) {
auto type = mlir::dyn_cast<ShapedType>(val.getType());
if (!type) return false;
if (type.getElementType().isF32() || type.getElementType().isBF16())
return true;
return false;
}
std::optional<tensorflow::quantization::QuantizationComponentSpec>
GetWeightComponentSpec(
const tensorflow::quantization::QuantizationOptions& quantization_options) {
for (auto& cur_spec : quantization_options.quantization_method()
.quantization_component_specs()) {
if (cur_spec.quantization_component() ==
tensorflow::quantization::QuantizationComponentSpec::COMPONENT_WEIGHT)
return cur_spec;
}
return std::nullopt;
}
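// Builds the operand quantization spec for a quantized composite function
// call. The quantization dimension and bias parameters are selected from the
// composite function's name (depthwise_conv2d, conv2d, matmul, einsum,
// conv3d, batch_matmul, gather).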
std::unique_ptr<OpQuantSpec> GetTFOpQuantSpec(Operation* op) {
auto spec = std::make_unique<OpQuantSpec>();
if (auto call_op = dyn_cast<TF::PartitionedCallOp>(op)) {
StringRef function_name =
mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue();
if (!function_name.starts_with("composite_")) {
return spec;
}
if (function_name.contains("depthwise_conv2d")) {
spec->coeff_op_quant_dim[1] = 3;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("conv2d")) {
spec->coeff_op_quant_dim[1] = 3;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("matmul")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias") ||
function_name.contains("and_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("einsum")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("conv3d")) {
spec->coeff_op_quant_dim[1] = 4;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("batch_matmul")) {
spec->coeff_op_quant_dim[1] = -1;
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("gather")) {
spec->coeff_op_quant_dim[0] = -1;
}
for (auto quantizable_operand : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(quantizable_operand.first);
}
}
return spec;
}
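// Ops listed below must keep the same scale/zero point across their inputs
// and outputs (same-scale requirement).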
std::unique_ptr<OpQuantScaleSpec> GetTfQuantScaleSpec(Operation* op) {
auto scale_spec = std::make_unique<OpQuantScaleSpec>();
if (llvm::isa<
TF::AvgPoolOp,
TF::ConcatOp,
TF::ConcatV2Op,
TF::ExpandDimsOp,
TF::IdentityNOp,
TF::IdentityOp,
TF::MaxPoolOp,
TF::PadV2Op,
TF::RankOp,
TF::ReshapeOp,
TF::SelectOp,
TF::SelectV2Op,
TF::ShapeNOp,
TF::ShapeOp,
TF::SizeOp,
TF::SqueezeOp,
TF::TransposeOp
>(op)) {
scale_spec->has_same_scale_requirement = true;
}
return scale_spec;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
namespace mlir::quant {
namespace {
using QuantizationOptions = tensorflow::quantization::QuantizationOptions;
using QuantizationComponentSpec =
tensorflow::quantization::QuantizationComponentSpec;
TEST(TfOpQuantSpecTest, WeightComponentSpecExist) {
QuantizationOptions quant_options;
QuantizationComponentSpec quant_spec;
quant_spec.set_quantization_component(
QuantizationComponentSpec::COMPONENT_WEIGHT);
quant_spec.set_tensor_type(QuantizationComponentSpec::TENSORTYPE_INT_8);
auto mutable_quant_method = quant_options.mutable_quantization_method();
*mutable_quant_method->add_quantization_component_specs() = quant_spec;
auto output = GetWeightComponentSpec(quant_options);
EXPECT_TRUE(output.has_value());
}
TEST(TfOpQuantSpecTest, WeightComponentSpecDoNotExist) {
QuantizationOptions quant_options;
auto output = GetWeightComponentSpec(quant_options);
EXPECT_FALSE(output.has_value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
95025948-2ae7-4372-8da2-9564390b0027 | cpp | tensorflow/tensorflow | save_variables | tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/ir/importexport/convert_tensor.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::mlir::func::FuncOp;
using ::mlir::tf_saved_model::GetInitializerFunction;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
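// Saves the constant assigned by `assign_var_op` into `bundle_writer`, keyed
// by the variable's shared_name. Returns the shared_name on success, or an
// empty string when the VarHandleOp/ConstOp operand pattern does not match.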
absl::StatusOr<std::string> AddTensorToBundleWriter(
mlir::TF::AssignVariableOp assign_var_op, BundleWriter& bundle_writer) {
auto resource_operand = assign_var_op.getOperand(0);
auto var_handle_op =
llvm::dyn_cast<mlir::TF::VarHandleOp>(resource_operand.getDefiningOp());
if (!var_handle_op) {
assign_var_op->emitRemark(
"Operand idx 0 is not a tf.VarHandleOp. The initializing tensor is not "
"saved to checkpoint.");
return "";
}
auto assigned_value_operand = assign_var_op.getOperand(1);
auto const_op =
llvm::dyn_cast<mlir::TF::ConstOp>(assigned_value_operand.getDefiningOp());
if (!const_op) {
assign_var_op->emitRemark(
"Operand idx 1 is not a tf.ConstOp. The initializing tensor is not "
"saved to checkpoint.");
return "";
}
Tensor const_tensor{};
if (const absl::Status status = mlir::tfg::ConvertToTensor(
const_op.getValue(), &const_tensor);
!status.ok()) {
return status;
}
if (!bundle_writer.Add(var_handle_op.getSharedName(), const_tensor)
.ok()) {
return bundle_writer.status();
}
return var_handle_op.getSharedName().str();
}
}
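// Saves all variables initialized from constants in the session initializer
// function of type "restore_op" into a checkpoint at `prefix`. Returns the
// shared names of the saved variables.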
absl::StatusOr<std::vector<std::string>> SaveVariablesToCheckpoint(
const absl::string_view prefix, mlir::ModuleOp module_op) {
FuncOp session_init_func_type_restore_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerRestoreType);
if (!session_init_func_type_restore_op) {
LOG(INFO) << "No session initializer function with type 'restore_op'. No "
"variables are saved to checkpoint.";
return std::vector<std::string>{};
}
BundleWriter bundle_writer(Env::Default(), prefix);
if (!bundle_writer.status().ok()) {
return bundle_writer.status();
}
std::vector<std::string> saved_variable_shared_names;
for (auto assign_variable_op :
session_init_func_type_restore_op.getOps<mlir::TF::AssignVariableOp>()) {
if (const absl::StatusOr<std::string> variable_shared_name =
AddTensorToBundleWriter(assign_variable_op, bundle_writer);
!variable_shared_name.ok()) {
return variable_shared_name.status();
} else if (!variable_shared_name->empty()) {
saved_variable_shared_names.emplace_back(
std::move(*variable_shared_name));
VLOG(1) << "Saved a variable with shared_name: " << *variable_shared_name;
}
}
if (saved_variable_shared_names.empty()) {
LOG(INFO) << "No variables are saved to checkpoint";
return saved_variable_shared_names;
}
if (!bundle_writer.Finish().ok()) {
return bundle_writer.status();
}
return saved_variable_shared_names;
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.h"
#include <string>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/tensor_bundle/tensor_bundle.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace quantization {
namespace {
using ::tensorflow::test::AsTensor;
using ::tensorflow::test::ExpectEqual;
using ::testing::IsEmpty;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
using ::tsl::testing::IsOk;
class SaveVariablesToCheckpointTest : public ::testing::Test {
protected:
SaveVariablesToCheckpointTest() : env_(Env::Default()) {
ctx_.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect,
mlir::tf_saved_model::TensorFlowSavedModelDialect>();
}
absl::StatusOr<std::string> MakeTempDir() {
std::string tmp_dir{};
if (!env_->LocalTempFilename(&tmp_dir)) {
return absl::InternalError("Failed to create temp file.");
}
TF_CHECK_OK(env_->CreateDir(tmp_dir));
return tmp_dir;
}
mlir::OwningOpRef<mlir::ModuleOp> ParseModuleOpString(
const absl::string_view module_op_str) {
auto module_op_ref =
mlir::parseSourceString<mlir::ModuleOp>(module_op_str, &ctx_);
EXPECT_TRUE(module_op_ref);
return module_op_ref;
}
Env* env_{};
mlir::MLIRContext ctx_{};
};
TEST_F(SaveVariablesToCheckpointTest, VariableSavedToCheckpoint) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, UnorderedElementsAre("var_0"));
BundleReader bundle_reader(env_, *checkpoint_prefix);
Tensor loaded_tensor{};
EXPECT_TRUE(bundle_reader.Lookup("var_0", &loaded_tensor).ok());
ExpectEqual(loaded_tensor, AsTensor<float>({1.0, 2.0}));
}
TEST_F(SaveVariablesToCheckpointTest, MultipleVariablesSavedToCheckpoint) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
%cst_0 = "tf.Const"() {device = "", value = dense<[3, 4, 5, 6]> : tensor<4xi32>} : () -> tensor<4xi32>
%1 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_1"} : () -> tensor<!tf_type.resource<tensor<4xi32>>>
"tf.AssignVariableOp"(%1, %cst_0) : (tensor<!tf_type.resource<tensor<4xi32>>>, tensor<4xi32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, UnorderedElementsAre("var_0", "var_1"));
BundleReader bundle_reader(env_, *checkpoint_prefix);
Tensor loaded_var_0{};
EXPECT_TRUE(bundle_reader.Lookup("var_0", &loaded_var_0).ok());
ExpectEqual(loaded_var_0, AsTensor<float>({1.0, 2.0}));
Tensor loaded_var_1{};
EXPECT_TRUE(bundle_reader.Lookup("var_1", &loaded_var_1).ok());
ExpectEqual(loaded_var_1, AsTensor<int>({3, 4, 5, 6}));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoInitializerFunction) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoSessionInitializerOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @my_func() -> () {
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
EXPECT_TRUE(
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref).ok());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
NoVariablesSavedWhenNoSessionInitializerOpTypeRestoreOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_init_op]} : () -> ()
func.func @init_func_init_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "init_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest, MutableVariablesNotSaved) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%add = "tf.AddV2"(%cst, %cst) : (tensor<2xf32>, tensor<2xf32>) -> tensor<2xf32>
%var_handle = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%var_handle, %add) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest,
VariableNotSavedWhenNonVarHandleOpOperandForAssignVariableOp) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["init"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%var_handle = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
%var_handle_cast = "tf.Cast"(%var_handle) : (tensor<!tf_type.resource<tensor<2xf32>>>) -> tensor<!tf_type.resource>
"tf.AssignVariableOp"(%var_handle_cast, %cst) : (tensor<!tf_type.resource>, tensor<2xf32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
const absl::StatusOr<std::vector<std::string>> variable_shared_names =
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref);
EXPECT_TRUE(variable_shared_names.ok());
EXPECT_THAT(*variable_shared_names, IsEmpty());
BundleReader bundle_reader(env_, *checkpoint_prefix);
EXPECT_THAT(bundle_reader.status(), Not(IsOk()));
}
TEST_F(SaveVariablesToCheckpointTest, FailsWhenDuplicateSharedName) {
constexpr absl::string_view kModuleCode = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func_restore_op]} : () -> ()
func.func @init_func_restore_op() -> () attributes {tf_saved_model.exported_names = ["restore"], tf_saved_model.initializer_type = "restore_op"} {
%cst = "tf.Const"() {device = "", value = dense<[1.0, 2.0]> : tensor<2xf32>} : () -> tensor<2xf32>
%0 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<2xf32>>>
"tf.AssignVariableOp"(%0, %cst) : (tensor<!tf_type.resource<tensor<2xf32>>>, tensor<2xf32>) -> ()
%cst_0 = "tf.Const"() {device = "", value = dense<[3, 4, 5, 6]> : tensor<4xi32>} : () -> tensor<4xi32>
%1 = "tf.VarHandleOp"() {container = "", device = "/device:CPU:0", shared_name = "var_0"} : () -> tensor<!tf_type.resource<tensor<4xi32>>>
"tf.AssignVariableOp"(%1, %cst_0) : (tensor<!tf_type.resource<tensor<4xi32>>>, tensor<4xi32>) -> ()
return
}
}
)mlir";
mlir::OwningOpRef<mlir::ModuleOp> module_op_ref =
ParseModuleOpString(kModuleCode);
const absl::StatusOr<std::string> checkpoint_prefix = MakeTempDir();
EXPECT_TRUE(checkpoint_prefix.ok());
const absl::Cleanup checkpoint_prefix_cleanup = [this, &checkpoint_prefix]() {
int64_t undeleted_files, undeleted_dirs;
TF_CHECK_OK(env_->DeleteRecursively(*checkpoint_prefix, &undeleted_files,
&undeleted_dirs));
};
EXPECT_FALSE(
SaveVariablesToCheckpoint(*checkpoint_prefix, *module_op_ref).ok());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/save_variables_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f0e563bc-2bb4-482f-856e-379601a30544 | cpp | tensorflow/tensorflow | const_op_size | tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include <climits>
#include "absl/algorithm/container.h"  // for absl::c_accumulate used below
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/Types.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace quant {
namespace {
constexpr int64_t kAssumedNumBytesPerElem = 4;
int64_t GetSizeOfIntOrFloatConst(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
const ElementsAttr const_value = const_op.getValue();
const auto bytes_per_elem =
static_cast<int64_t>(dtype.getIntOrFloatBitWidth() / CHAR_BIT);
return bytes_per_elem * const_value.getNumElements();
}
int64_t GetSizeOfStringConst(TF::ConstOp const_op) {
const ElementsAttr const_value = const_op.getValue();
const auto str_attr = cast<DenseStringElementsAttr>(const_value);
return absl::c_accumulate(
str_attr.getRawStringData(), 0,
[](int64_t acc, const StringRef str_value) -> int64_t {
return acc + str_value.size();
});
}
int64_t GetSizeOfUnsupportedTypeConst(TF::ConstOp const_op) {
return kAssumedNumBytesPerElem * const_op.getValue().getNumElements();
}
}
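// Returns the size of the constant's payload in bytes: exact for int/float
// and string element types, otherwise estimated at kAssumedNumBytesPerElem
// bytes per element.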
int64_t GetSizeInBytes(TF::ConstOp const_op) {
const Type dtype = const_op.getDtype();
if (dtype.isIntOrFloat()) {
return GetSizeOfIntOrFloatConst(const_op);
} else if (isa<TF::StringType>(dtype)) {
return GetSizeOfStringConst(const_op);
} else {
return GetSizeOfUnsupportedTypeConst(const_op);
}
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.h"
#include "absl/strings/string_view.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::Eq;
class GetSizeInBytesTest : public ::testing::Test {
protected:
GetSizeInBytesTest() : ctx_() { ctx_.loadDialect<TF::TensorFlowDialect>(); }
MLIRContext ctx_;
};
TF::ConstOp ParseConstOp(const absl::string_view const_op_str, Block& block,
MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(const_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
auto const_op = dyn_cast_or_null<TF::ConstOp>(block.front());
EXPECT_TRUE(const_op);
return const_op;
}
TEST_F(GetSizeInBytesTest, Int32ScalarConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
TEST_F(GetSizeInBytesTest, Int32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<1> : tensor<2xi32>} : () -> tensor<2xi32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(8));
}
TEST_F(GetSizeInBytesTest, Int8ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<2> : tensor<3xi8>} : () -> tensor<3xi8>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(3));
}
TEST_F(GetSizeInBytesTest, Float32ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<4xf32>} : () -> tensor<4xf32>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Float64ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr =
R"mlir(%cst = "tf.Const"() {value = dense<3.0> : tensor<2xf64>} : () -> tensor<2xf64>)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(16));
}
TEST_F(GetSizeInBytesTest, Bfloat16ConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<1.0> : tensor<7xbf16>} : () -> tensor<7xbf16>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(14));
}
TEST_F(GetSizeInBytesTest, TfStringConstOpSizeInBytes) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = dense<["Hello World", "Quantization"]> : tensor<2x!tf_type.string>} : () -> tensor<2x!tf_type.string>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(23));
}
TEST_F(GetSizeInBytesTest, ConstOpWithUnknownSizeAssumes4BytesPerElement) {
constexpr absl::string_view kConstOpExpr = R"mlir(
%cst = "tf.Const"() {value = #tf_type<tensor_proto : "0xDEADBAAD"> : tensor<!tf_type.variant>} : () -> tensor<!tf_type.variant>
)mlir";
Block block{};
TF::ConstOp int_tensor_const_op = ParseConstOp(kConstOpExpr, block, ctx_);
const int64_t num_bytes = GetSizeInBytes(int_tensor_const_op);
EXPECT_THAT(num_bytes, Eq(4));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/const_op_size_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5b1e8462-a99b-4326-a185-6585971938a1 | cpp | tensorflow/tensorflow | constant_fold | tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc | #include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.h"
#include <algorithm>
#include <cstdint>
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/constant_fold_utils.h"
namespace mlir {
namespace TF {
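// Default folding policy: skip folding when it would materialize results that
// are large both in absolute terms and relative (more than 2x) to the operand
// sizes.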
static bool IsFoldedByDefaultPolicy(Operation* inst) {
auto get_size = [&](TypeRange types) {
int64_t size = 0;
for (auto t : types) {
auto tensor_type = mlir::cast<TensorType>(t);
if (!tensor_type.getElementType().isIntOrFloat()) continue;
if (!tensor_type.hasStaticShape()) continue;
size += tensor_type.getNumElements() *
tensor_type.getElementType().getIntOrFloatBitWidth();
}
return size;
};
int64_t results_size = get_size(inst->getResultTypes());
int64_t operands_size = get_size(inst->getOperandTypes());
constexpr int kSizeFactor = 2;
constexpr int64_t kResultsSizeThreshold = (1 << 16);
constexpr int64_t kOperandsSizeThreshold = (1 << 30);
return (operands_size <= kOperandsSizeThreshold) &&
((results_size <= kResultsSizeThreshold) ||
(results_size <= kSizeFactor * operands_size));
}
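// Fallback constant-folding hook registered below with the TensorFlow
// dialect. Ops with statically-known empty numerical results are folded to
// empty DenseElementsAttr directly; otherwise all operands must already be
// ElementsAttr constants and the op is evaluated via EvaluateOperation().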
LogicalResult ConstantFoldFallbackHook(
Operation* inst, ArrayRef<Attribute> operands,
SmallVectorImpl<OpFoldResult>& results) {
if (!CanBeFolded(inst)) return failure();
if (!IsFoldedByDefaultPolicy(inst)) return failure();
bool has_empty_numerical_results =
llvm::all_of(inst->getResultTypes(), [](Type ty) {
ShapedType shaped_ty = mlir::cast<ShapedType>(ty);
Type element_ty = shaped_ty.getElementType();
return shaped_ty.hasStaticShape() && shaped_ty.getNumElements() == 0 &&
element_ty.isIntOrFloat();
});
if (has_empty_numerical_results &&
inst->isRegistered()) {
for (Type ty : inst->getResultTypes()) {
auto shaped_ty = mlir::cast<ShapedType>(ty);
results.push_back(
DenseElementsAttr::get(shaped_ty, llvm::ArrayRef<Attribute>()));
}
return success();
}
if (std::any_of(operands.begin(), operands.end(), [](Attribute attr) {
return !attr || !mlir::isa<ElementsAttr>(attr);
}))
return failure();
SmallVector<ElementsAttr, 4> inputs;
inputs.reserve(operands.size());
for (auto input : operands) {
inputs.push_back(mlir::cast<ElementsAttr>(input));
}
SmallVector<Attribute> constants;
LogicalResult status = EvaluateOperation(inst, inputs, constants);
results.assign(constants.begin(), constants.end());
return status;
}
static bool init_hooks = ([] () {
TensorFlowDialect::RegisterConstantFoldHook(ConstantFoldFallbackHook);
}(), true);
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold.h"
#include <utility>
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace quant {
namespace {
using ::testing::NotNull;
using ::testing::SizeIs;
using ConstantFoldingTest = ::mlir::quant::QuantizationTestBase;
TEST_F(ConstantFoldingTest, FoldLargeConstant) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant() -> (tensor<1024x24x24x3xf32>) {
%zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
%scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
%weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
%input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
%output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
%cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
%mul = "tf.Mul"(%cast, %scale) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
func.return %mul : tensor<1024x24x24x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
Operation* mul_op = FindOperationOfType<TF::MulOp>(test_func);
SmallVector<Value> results = ConstantFoldOpIfPossible(mul_op);
EXPECT_THAT(results, SizeIs(1));
EXPECT_TRUE(isa<TF::ConstOp>(results[0].getDefiningOp()));
}
TEST_F(ConstantFoldingTest, NotFoldingIdentity) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant() -> (tensor<1024x24x24x3xf32>) {
%zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
%scale = "tf.Const"() {value = dense<2.0> : tensor<f32>} : () -> tensor<f32>
%weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
%input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
%output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
%cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
%identity = "tf.Identity"(%scale) : (tensor<f32>) -> tensor<f32>
%mul = "tf.Mul"(%cast, %identity) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
func.return %mul : tensor<1024x24x24x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
EXPECT_THAT(results, SizeIs(1));
auto mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
EXPECT_THAT(mul_op, NotNull());
EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
}
TEST_F(ConstantFoldingTest, NotFoldingArgument) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant(%arg0: tensor<f32>) -> (tensor<1024x24x24x3xf32>) {
%zp = "tf.Const"() {value = dense<2> : tensor<i32>} : () -> tensor<i32>
%weight = "tf.Const"() {value = dense<1> : tensor<1024x24x24x3xi8>} : () -> tensor<1024x24x24x3xi8>
%input_i32 = "tf.Cast"(%weight) : (tensor<1024x24x24x3xi8>) -> tensor<1024x24x24x3xi32>
%output = "tf.Sub"(%input_i32, %zp) : (tensor<1024x24x24x3xi32>, tensor<i32>) -> tensor<1024x24x24x3xi32>
%cast = "tf.Cast"(%output) : (tensor<1024x24x24x3xi32>) -> tensor<1024x24x24x3xf32>
%mul = "tf.Mul"(%cast, %arg0) : (tensor<1024x24x24x3xf32>, tensor<f32>) -> tensor<1024x24x24x3xf32>
func.return %mul : tensor<1024x24x24x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
Operation* op_to_fold = FindOperationOfType<TF::MulOp>(test_func);
SmallVector<Value> results = ConstantFoldOpIfPossible(op_to_fold);
EXPECT_THAT(results, SizeIs(1));
TF::MulOp mul_op = dyn_cast_or_null<TF::MulOp>(results[0].getDefiningOp());
EXPECT_THAT(mul_op, NotNull());
EXPECT_TRUE(isa<TF::CastOp>(mul_op.getX().getDefiningOp()));
}
TEST_F(ConstantFoldingTest, FoldDepthwiseConvWeight) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant(%arg0: tensor<*xf32>) -> (tensor<?x?x?x3xf32>) {
%cst = "tf.Const"() {value = dense<2.000000e+00> : tensor<2x3x3x1xf32>} : () -> tensor<2x3x3x1xf32>
%cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%cst_2 = "tf.Const"() {value = dense<3.0> : tensor<f32>} : () -> tensor<f32>
%w = "tf.Mul"(%cst, %cst_2) : (tensor<2x3x3x1xf32>, tensor<f32>) -> tensor<2x3x3x1xf32>
%0 = "tf.DepthwiseConv2dNative"(%arg0, %w) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
%1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
%2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
func.return %2 : tensor<?x?x?x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
RewritePatternSet patterns(ctx_.get());
patterns.add<ConstantFoldQuantizableOperands>(ctx_.get());
EXPECT_TRUE(
succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns))));
auto depthwise_conv_op =
FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func);
EXPECT_THAT(depthwise_conv_op, NotNull());
EXPECT_TRUE(isa<TF::ConstOp>(depthwise_conv_op.getFilter().getDefiningOp()));
}
TEST_F(ConstantFoldingTest, DepthwiseConvWeightNotFoldable) {
constexpr absl::string_view kModuleCode = R"mlir(
module {
func.func @test_fold_constant(%arg0: tensor<*xf32>, %arg1: tensor<f32>) -> (tensor<?x?x?x3xf32>) {
%cst = "tf.Const"() {value = dense<2.000000e+00> : tensor<2x3x3x1xf32>} : () -> tensor<2x3x3x1xf32>
%cst_0 = "tf.Const"() {value = dense<0.400000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%cst_1 = "tf.Const"() {value = dense<0.500000e+00> : tensor<3xf32>} : () -> tensor<3xf32>
%w = "tf.Mul"(%cst, %arg1) : (tensor<2x3x3x1xf32>, tensor<f32>) -> tensor<2x3x3x1xf32>
%0 = "tf.DepthwiseConv2dNative"(%arg0, %w) {data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 1, 1]} : (tensor<*xf32>, tensor<2x3x3x1xf32>) -> tensor<?x?x?x3xf32>
%1 = "tf.BiasAdd"(%0, %cst_0) {data_format = "NHWC"} : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
%2 = "tf.Mul"(%1, %cst_1) : (tensor<?x?x?x3xf32>, tensor<3xf32>) -> tensor<?x?x?x3xf32>
func.return %2 : tensor<?x?x?x3xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op_ref = ParseModuleOpString(kModuleCode);
const auto test_func =
module_op_ref->lookupSymbol<func::FuncOp>("test_fold_constant");
ASSERT_THAT(test_func, NotNull());
RewritePatternSet patterns(ctx_.get());
patterns.add<ConstantFoldQuantizableOperands>(ctx_.get());
EXPECT_TRUE(
succeeded(applyPatternsAndFoldGreedily(test_func, std::move(patterns))));
auto depthwise_conv_op =
FindOperationOfType<TF::DepthwiseConv2dNativeOp>(test_func);
EXPECT_THAT(depthwise_conv_op, NotNull());
EXPECT_TRUE(isa<TF::MulOp>(depthwise_conv_op.getFilter().getDefiningOp()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/constant_fold.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/constant_fold_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ad4656c8-136d-4a47-88e4-36e8e523ceea | cpp | tensorflow/tensorflow | convert_asset_args | tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.cc | tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include "absl/algorithm/container.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::mlir::tf_saved_model::AssetOp;
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::LookupBoundInputOfType;
using ::tensorflow::AssetFileDef;
SmallVector<NamedAttribute> ReplaceBoundInputAttrWithIndexPathAttr(
const ArrayRef<NamedAttribute> arg_attrs, const StringRef index_path,
Builder& builder) {
SmallVector<NamedAttribute> new_arg_attrs;
for (auto arg_attr : arg_attrs) {
if (arg_attr.getName() == "tf_saved_model.bound_input") continue;
new_arg_attrs.emplace_back(arg_attr);
}
const NamedAttribute index_path_attr(
builder.getStringAttr(kTfSavedModelIndexPathAttr),
builder.getStrArrayAttr({index_path}));
new_arg_attrs.emplace_back(index_path_attr);
return new_arg_attrs;
}
StringRef MaybeStripAssetDirectoryPrefix(const StringRef filename) {
if (filename.find("assets/") == 0) {
return filename.drop_front(7);
} else {
return filename;
}
}
AssetFileDef CreateAssetFileDef(const StringRef filename,
const StringRef tensor_name) {
AssetFileDef asset_file_def{};
asset_file_def.set_filename(MaybeStripAssetDirectoryPrefix(filename).str());
tensorflow::TensorInfo tensor_info{};
tensor_info.set_name(tensor_name.str());
*asset_file_def.mutable_tensor_info() = tensor_info;
return asset_file_def;
}
SmallVector<StringRef> GetEntryFunctionInputs(func::FuncOp func_op) {
auto entry_function_attr =
func_op->getAttrOfType<DictionaryAttr>("tf.entry_function");
SmallVector<StringRef> inputs;
mlir::dyn_cast_or_null<StringAttr>(entry_function_attr.get("inputs"))
.strref()
.split(inputs, ",");
return inputs;
}
void ConvertMainArgAttrs(func::FuncOp main_func_op, const int arg_idx,
const StringRef index_path) {
const ArrayRef<NamedAttribute> arg_attrs =
main_func_op.getArgAttrDict(arg_idx).getValue();
Builder builder(main_func_op.getContext());
SmallVector<NamedAttribute> new_arg_attrs =
ReplaceBoundInputAttrWithIndexPathAttr(arg_attrs, index_path, builder);
main_func_op.setArgAttrs(arg_idx, new_arg_attrs);
}
}
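// Converts "tf_saved_model.bound_input" arguments of the main function that
// refer to AssetOps into plain "tf_saved_model.index_path" arguments, and
// returns the corresponding AssetFileDefs.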
FailureOr<SmallVector<AssetFileDef>> ConvertAssetArgs(ModuleOp module_op) {
func::FuncOp main_func_op = FindMainFuncOp(module_op);
if (!main_func_op) return failure();
SmallVector<StringRef> input_names = GetEntryFunctionInputs(main_func_op);
SymbolTable symbol_table(module_op);
SmallVector<AssetFileDef> asset_file_defs;
for (BlockArgument argument : main_func_op.getArguments()) {
const int arg_idx = argument.getArgNumber();
auto asset_op =
LookupBoundInputOfType<AssetOp>(main_func_op, arg_idx, symbol_table);
if (!asset_op) continue;
const StringRef input_name = input_names[arg_idx];
ConvertMainArgAttrs(main_func_op, arg_idx, input_name);
asset_file_defs.emplace_back(CreateAssetFileDef(
asset_op.getFilenameAttr(), input_name));
}
return asset_file_defs;
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.h"
#include <gmock/gmock.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace mlir::quant {
namespace {
using ::tensorflow::AssetFileDef;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
class ConvertAssetArgsTest : public ::testing::Test {
protected:
ConvertAssetArgsTest() {
ctx_.loadDialect<func::FuncDialect, TF::TensorFlowDialect,
tf_saved_model::TensorFlowSavedModelDialect>();
}
OwningOpRef<ModuleOp> ParseModuleOpString(
const absl::string_view module_op_str) {
auto module_op_ref = parseSourceString<ModuleOp>(module_op_str, &ctx_);
EXPECT_TRUE(module_op_ref);
return module_op_ref;
}
mlir::MLIRContext ctx_{};
};
func::FuncOp GetMainFuncOp(ModuleOp module_op) {
for (auto func_op : module_op.getOps<func::FuncOp>()) {
if (func_op.getSymName() == "main") {
return func_op;
}
}
return {};
}
TEST_F(ConvertAssetArgsTest, ConvertsSingleAssetArg) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
"tf_saved_model.asset"() {filename = "assets/file_0.txt", sym_name = "__tf_saved_model_asset0"} : () -> ()
func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.bound_input = @__tf_saved_model_asset0}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, SizeIs(1));
const AssetFileDef& asset_file_def = *asset_file_defs->begin();
EXPECT_THAT(asset_file_def.filename(), Eq("file_0.txt"));
EXPECT_THAT(asset_file_def.tensor_info().name(), Eq("arg_0:0"));
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
const ArrayRef<Attribute> index_path_attrs =
mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
.getValue();
EXPECT_THAT(index_path_attrs, SizeIs(1));
StringAttr index_path =
mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
EXPECT_THAT(index_path, NotNull());
EXPECT_THAT(index_path, Eq("arg_0:0"));
}
TEST_F(ConvertAssetArgsTest, NonBoundedArgsNotModified) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
func.func @main(%arg_0: tensor<!tf_type.string> {tf_saved_model.index_path = ["arg_0:0"]}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, IsEmpty());
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), IsNull());
const ArrayRef<Attribute> index_path_attrs =
mlir::cast<ArrayAttr>(arg_attrs.get("tf_saved_model.index_path"))
.getValue();
EXPECT_THAT(index_path_attrs, SizeIs(1));
StringAttr index_path =
mlir::dyn_cast_or_null<StringAttr>(index_path_attrs[0]);
EXPECT_THAT(index_path, NotNull());
EXPECT_THAT(index_path, Eq("arg_0:0"));
}
TEST_F(ConvertAssetArgsTest, ArgsBoundedToGlobalTensorNotModified) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(
module {
"tf_saved_model.global_tensor"() {type = tensor<2xi32>, value = dense<2> : tensor<2xi32>, sym_name = "__tf_saved_model_x"} : () -> ()
func.func @main(%arg_0: tensor<!tf_type.resource<tensor<2xi32>>> {tf_saved_model.bound_input = @__tf_saved_model_x}) -> () attributes {tf.entry_function = {inputs = "arg_0:0", outputs = ""}} {
return
}
}
)mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(succeeded(asset_file_defs));
EXPECT_THAT(*asset_file_defs, IsEmpty());
func::FuncOp main_func_op = GetMainFuncOp(*module_op);
DictionaryAttr arg_attrs = main_func_op.getArgAttrDict(0);
EXPECT_THAT(arg_attrs.get("tf_saved_model.bound_input"), NotNull());
}
TEST_F(ConvertAssetArgsTest, FailsWhenNoMain) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(R"mlir(module {})mlir");
FailureOr<SmallVector<AssetFileDef>> asset_file_defs =
ConvertAssetArgs(*module_op);
EXPECT_TRUE(failed(asset_file_defs));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d58050f3-3eca-4d91-8bb5-3561892698dc | cpp | tensorflow/tensorflow | calibration_statistics_saver_op | tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc | tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc | #include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/nullability.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_average_min_max.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_base.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_histogram.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_collector_min_max.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/platform/file_system.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::CalibrationOptions;
using CalibrationMethod =
::stablehlo::quantization::CalibrationOptions_CalibrationMethod;
using ::tensorflow::calibrator::CalibrationStatistics;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorAverageMinMax;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorBase;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorHistogram;
using ::tensorflow::calibrator::CalibrationStatisticsCollectorMinMax;
using ::tensorflow::calibrator::CalibrationStatisticsMap;
}
REGISTER_OP("CalibrationStatisticsSaver")
.Input("args: Tin")
.Attr("Tin: list(type) >= 0")
.Attr("ids: list(string) >= 1")
.Attr("calibration_methods: list(int) >= 1")
.Attr("output_file_path: string")
.SetIsStateful()
.Doc(R"doc(
Aggregates and saves the calibration statistics data.
This op collects the outputs of multiple CustomAggregator ops, which include
`min`, `max` and `histogram`. Then it aggregates them according to the
calibration method and saves the result to the given file path as a binary
proto file.)doc");
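// Kernel that aggregates calibration data for the node ids listed in the
// `ids` attribute. Inputs arrive as (min, max, histogram) triples, one triple
// per id, so for example ids = {"a", "b"} would be fed as
// a_min, a_max, a_hist, b_min, b_max, b_hist (illustrative ordering, derived
// from the 3 * idx indexing in Compute below). Statistics are accumulated
// across Compute calls and serialized to `output_file_path` on destruction.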
class CalibrationStatisticsSaverOp : public OpKernel {
public:
explicit CalibrationStatisticsSaverOp(
absl::Nonnull<OpKernelConstruction*> context)
: OpKernel(context) {
std::string output_file_path;
OP_REQUIRES_OK(context,
context->GetAttr("output_file_path", &output_file_path));
OP_REQUIRES_OK(context, context->env()->NewWritableFile(output_file_path,
&output_file_));
OP_REQUIRES_OK(context, context->GetAttr("ids", &ids_));
OP_REQUIRES_OK(context, context->GetAttr("calibration_methods",
&calibration_methods_));
OP_REQUIRES(
context, ids_.size() == calibration_methods_.size(),
absl::AbortedError(
"The `ids` and `calibration_methods` must have the same size."));
OP_REQUIRES(context, context->num_inputs() == ids_.size() * 3,
absl::AbortedError("The number of inputs must be three times "
"the size of the `ids` list."));
for (int i = 0; i < ids_.size(); ++i) {
OP_REQUIRES(context, context->input_type(i * 3) == DT_FLOAT,
absl::AbortedError("The input `min` must have float type."));
OP_REQUIRES(context, context->input_type(i * 3 + 1) == DT_FLOAT,
absl::AbortedError("The input `max` must have float type."));
OP_REQUIRES(
context, context->input_type(i * 3 + 2) == DT_INT64,
absl::AbortedError("The input `histogram` must have int64 type."));
}
}
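  // Serializes the accumulated statistics to the output file when the kernel
  // is destroyed. Failures are logged rather than returned, since destructors
  // cannot propagate a status.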
~CalibrationStatisticsSaverOp() override {
CalibrationStatisticsMap statistics_map;
for (const auto& [id, collector] : id_to_collector_) {
std::optional<CalibrationStatistics> statistics =
collector->GetStatistics();
if (!statistics.has_value()) continue;
statistics_map.mutable_statistics()->emplace(id, std::move(*statistics));
}
if (auto status = output_file_->Append(statistics_map.SerializeAsString());
!status.ok()) {
LOG(ERROR) << "Failed to write calibration statistics: "
<< status.message();
}
if (auto status = output_file_->Close(); !status.ok()) {
LOG(ERROR) << "Failed to close calibration statistics file: "
<< status.message();
}
}
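  // Consumes one (min, max, histogram) input triple per id and forwards it to
  // the collector registered for that id, creating the collector on first use.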
void Compute(absl::Nonnull<OpKernelContext*> context) override {
for (int idx = 0; idx < ids_.size(); ++idx) {
AssignIfNotExists(
ids_[idx], static_cast<CalibrationMethod>(calibration_methods_[idx]));
const Tensor& min_tensor = context->input(3 * idx);
const Tensor& max_tensor = context->input(3 * idx + 1);
const Tensor& histogram_tensor = context->input(3 * idx + 2);
const float min_value = min_tensor.scalar<float>()();
const float max_value = max_tensor.scalar<float>()();
auto histogram_flat = histogram_tensor.flat<int64_t>();
absl::Span<const int64_t> histogram_data =
absl::MakeSpan(histogram_flat.data(), histogram_flat.size());
id_to_collector_[ids_[idx]]->Collect(min_value, max_value,
histogram_data);
}
}
private:
std::unique_ptr<tsl::WritableFile> output_file_;
std::vector<std::string> ids_;
std::vector<int32_t> calibration_methods_;
absl::flat_hash_map<std::string,
std::unique_ptr<CalibrationStatisticsCollectorBase>>
id_to_collector_;
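  // Lazily creates the statistics collector for `id`, choosing the collector
  // type from the calibration method: average-min-max, one of the
  // histogram-based methods, or plain min/max (also the fallback for unknown
  // methods).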
void AssignIfNotExists(absl::string_view id,
const CalibrationMethod calibration_method) {
std::unique_ptr<CalibrationStatisticsCollectorBase>& collector =
id_to_collector_[id];
if (collector != nullptr) return;
switch (calibration_method) {
case CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX:
collector =
std::make_unique<CalibrationStatisticsCollectorAverageMinMax>();
break;
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_PERCENTILE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC:
case CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY:
collector = std::make_unique<CalibrationStatisticsCollectorHistogram>();
break;
case CalibrationOptions::CALIBRATION_METHOD_MIN_MAX:
default:
collector = std::make_unique<CalibrationStatisticsCollectorMinMax>();
}
}
};
REGISTER_KERNEL_BUILDER(Name("CalibrationStatisticsSaver").Device(DEVICE_CPU),
CalibrationStatisticsSaverOp);
} | #include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/status_matchers.h"
namespace tensorflow {
namespace {
using ::stablehlo::quantization::CalibrationOptions;
using ::tensorflow::calibrator::CalibrationStatistics;
using ::tensorflow::calibrator::CalibrationStatisticsMap;
using ::testing::Contains;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using ::testing::Key;
using ::testing::SizeIs;
using ::tsl::testing::StatusIs;
class CalibrationStatisticsSaverTest : public OpsTestBase {};
TEST_F(CalibrationStatisticsSaverTest, MissingOutputPath) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Finalize(node_def()));
ASSERT_THAT(InitOp(),
StatusIs(tsl::error::INVALID_ARGUMENT,
HasSubstr("NodeDef missing attr 'output_file_path'")));
}
TEST_F(CalibrationStatisticsSaverTest, WrongNumInputs) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", "/tmp/statistics.pbtxt")
.Finalize(node_def()));
ASSERT_THAT(InitOp(),
StatusIs(tsl::error::ABORTED,
HasSubstr("The number of inputs must be three times "
"the size of the `ids` list.")));
}
TEST_F(CalibrationStatisticsSaverTest, WrongInputTypes) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_FLOAT);
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", "/tmp/statistics.pbtxt")
.Finalize(node_def()));
ASSERT_THAT(
InitOp(),
StatusIs(tsl::error::ABORTED,
HasSubstr("The input `histogram` must have int64 type")));
}
TEST_F(CalibrationStatisticsSaverTest, SimpleMinMax) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({0}), {});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(1));
ASSERT_THAT(statistics_map.statistics(), ElementsAre(Key("1")));
const CalibrationStatistics& stats = statistics_map.statistics().at("1");
ASSERT_TRUE(stats.has_min_max_statistics());
EXPECT_FLOAT_EQ(stats.min_max_statistics().global_min(), 1.f);
EXPECT_FLOAT_EQ(stats.min_max_statistics().global_max(), 5.f);
}
TEST_F(CalibrationStatisticsSaverTest, SimpleAverageMinMax) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({0}), {});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(1));
ASSERT_THAT(statistics_map.statistics(), ElementsAre(Key("1")));
const CalibrationStatistics& stats = statistics_map.statistics().at("1");
ASSERT_TRUE(stats.has_average_min_max_statistics());
EXPECT_FLOAT_EQ(stats.average_min_max_statistics().min_sum(), 1.f);
EXPECT_FLOAT_EQ(stats.average_min_max_statistics().max_sum(), 5.f);
EXPECT_EQ(stats.average_min_max_statistics().num_samples(), 1);
}
TEST_F(CalibrationStatisticsSaverTest, SimpleHistogram) {
std::vector<std::string> ids{"1"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({8}), {1, 4, 6, 7, 3, 2, 1, 0});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(1));
ASSERT_THAT(statistics_map.statistics(), ElementsAre(Key("1")));
const CalibrationStatistics& stats = statistics_map.statistics().at("1");
ASSERT_TRUE(stats.has_histogram_statistics());
EXPECT_FLOAT_EQ(stats.histogram_statistics().bin_width(), 0.5f);
EXPECT_FLOAT_EQ(stats.histogram_statistics().lower_bound(), 1.f);
EXPECT_THAT(stats.histogram_statistics().hist_freq(),
ElementsAre(1, 4, 6, 7, 3, 2, 1));
}
TEST_F(CalibrationStatisticsSaverTest, MultipleStats) {
std::vector<std::string> ids{"1", "2"};
std::vector<int32_t> calibration_methods{
CalibrationOptions::CALIBRATION_METHOD_AVERAGE_MIN_MAX,
CalibrationOptions::CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE};
std::vector<NodeDefBuilder::NodeOut> inputs;
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
inputs.emplace_back("min", 0, DT_FLOAT);
inputs.emplace_back("max", 0, DT_FLOAT);
inputs.emplace_back("histogram", 0, DT_INT64);
const std::string dir = testing::TmpDir();
const std::string output_file_path = io::JoinPath(dir, "statistics.pbtxt");
TF_CHECK_OK(NodeDefBuilder("op", "CalibrationStatisticsSaver")
.Input(inputs)
.Attr("ids", ids)
.Attr("calibration_methods", calibration_methods)
.Attr("output_file_path", output_file_path)
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({0}), {});
AddInputFromArray<float>(TensorShape({}), {1.f});
AddInputFromArray<float>(TensorShape({}), {5.f});
AddInputFromArray<int64_t>(TensorShape({8}), {1, 4, 6, 7, 3, 2, 1, 0});
TF_CHECK_OK(RunOpKernel());
kernel_.reset();
CalibrationStatisticsMap statistics_map;
TF_CHECK_OK(
ReadBinaryProto(Env::Default(), output_file_path, &statistics_map));
ASSERT_THAT(statistics_map.statistics(), SizeIs(2));
ASSERT_THAT(statistics_map.statistics(), Contains(Key("1")));
ASSERT_THAT(statistics_map.statistics(), Contains(Key("2")));
const CalibrationStatistics& stats_1 = statistics_map.statistics().at("1");
ASSERT_TRUE(stats_1.has_average_min_max_statistics());
EXPECT_FLOAT_EQ(stats_1.average_min_max_statistics().min_sum(), 1.f);
EXPECT_FLOAT_EQ(stats_1.average_min_max_statistics().max_sum(), 5.f);
EXPECT_EQ(stats_1.average_min_max_statistics().num_samples(), 1);
const CalibrationStatistics& stats_2 = statistics_map.statistics().at("2");
ASSERT_TRUE(stats_2.has_histogram_statistics());
EXPECT_FLOAT_EQ(stats_2.histogram_statistics().bin_width(), 0.5f);
EXPECT_FLOAT_EQ(stats_2.histogram_statistics().lower_bound(), 1.f);
EXPECT_THAT(stats_2.histogram_statistics().hist_freq(),
ElementsAre(1, 4, 6, 7, 3, 2, 1));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_saver_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f7c5503c-3601-4d95-bf3c-810508313e4c | cpp | tensorflow/tensorflow | tfr_decompose_ctx | tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.cc | tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc | #include "tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.h"
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/SourceMgr.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/Extensions/AllExtensions.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_attr.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/mlir/tfr/ir/tfr_ops.h"
#include "tensorflow/compiler/mlir/tfr/passes/passes.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/env_var.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace tfr {
const char* const kTFRLibEnv = "TF_MLIR_TFR_LIB_DIR";
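// Loads the TFR decomposition library: reads every *.mlir file under the
// directory named by TF_MLIR_TFR_LIB_DIR (defaulting to the bundled resources
// directory), concatenates the contents, and parses them into a single TFR
// module owned by the returned context.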
absl::StatusOr<std::unique_ptr<TFRDecomposeContext>> TFRDecomposeContext::Get(
mlir::MLIRContext* mlir_ctx) {
Env* env = Env::Default();
std::string tfr_lib_dir;
TF_RETURN_IF_ERROR(ReadStringFromEnvVar(
kTFRLibEnv, "tensorflow/compiler/mlir/tfr/resources", &tfr_lib_dir));
string composite_mlir_dir = io::JoinPath(env->GetRunfilesDir(), tfr_lib_dir);
std::vector<string> files;
TF_RETURN_IF_ERROR(env->GetChildren(composite_mlir_dir, &files));
if (files.empty()) {
return errors::Internal(absl::StrCat(
"Failed to find the decomposition lib from path ", composite_mlir_dir));
}
std::string tfr_raw_text;
for (const auto& file : files) {
string fullpath = io::JoinPath(composite_mlir_dir, file);
if (env->MatchPath(fullpath, io::JoinPath(composite_mlir_dir, "*.mlir"))) {
std::string text;
TF_RETURN_IF_ERROR(ReadFileToString(env, fullpath, &text));
tfr_raw_text.append(text);
}
}
auto ctx = TFRDecomposeContext::GetFromText(tfr_raw_text, mlir_ctx);
if (!ctx) {
return errors::Internal(absl::StrCat(
"Failed to load the imported decomposition lib: ", tfr_raw_text));
}
return ctx;
}
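// Builds a decompose context directly from TFR textual IR. Registers the
// dialects the library depends on, parses `tfr_raw_text`, and wraps the
// resulting module in a new TFRDecomposeContext.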
std::unique_ptr<TFRDecomposeContext> TFRDecomposeContext::GetFromText(
StringPiece tfr_raw_text, mlir::MLIRContext* mlir_ctx) {
mlir_ctx->allowUnregisteredDialects(true);
mlir::DialectRegistry registry;
registry.insert<mlir::arith::ArithDialect,
mlir::func::FuncDialect,
mlir::scf::SCFDialect,
mlir::shape::ShapeDialect,
mlir::TF::TensorFlowDialect,
mlir::tf_device::TensorFlowDeviceDialect,
mlir::tf_executor::TensorFlowExecutorDialect,
mlir::TFR::TFRDialect>();
mlir::func::registerAllExtensions(registry);
mlir_ctx->appendDialectRegistry(registry);
mlir_ctx->loadAllAvailableDialects();
auto memory_buffer = llvm::MemoryBuffer::getMemBuffer(
llvm::StringRef(tfr_raw_text.data(), tfr_raw_text.size()));
llvm::SourceMgr source_mgr;
source_mgr.AddNewSourceBuffer(std::move(memory_buffer), llvm::SMLoc());
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceFile<mlir::ModuleOp>(source_mgr, mlir_ctx);
auto module_op = module.release();
return std::make_unique<TFRDecomposeContext>(module_op);
}
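// Expands a single NodeDef into a FunctionDef named `func_name`: a temporary
// module containing one function that wraps the TF op is constructed, the
// decomposition pass pipeline is run over it, and the decomposed function is
// exported back to a FunctionDef.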
absl::StatusOr<FunctionDef> TFRDecomposeContext::ExpandNode(
const NodeDef& node_def, StringPiece func_name) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef(node_def.op(), &op_def));
DataTypeVector input_dtys, output_dtys;
TF_RETURN_IF_ERROR(InputTypesForNode(node_def, *op_def, &input_dtys));
TF_RETURN_IF_ERROR(OutputTypesForNode(node_def, *op_def, &output_dtys));
mlir::MLIRContext* context = tfr_module_.getContext();
llvm::SmallVector<mlir::Type, 4> input_tys, output_tys;
mlir::Builder builder(context);
for (auto ty : input_dtys) {
mlir::Type elt_ty;
TF_RETURN_IF_ERROR(ConvertDataType(ty, builder, &elt_ty));
mlir::TensorType mlir_ty = mlir::UnrankedTensorType::get(elt_ty);
input_tys.push_back(mlir_ty);
}
for (auto ty : output_dtys) {
mlir::Type elt_ty;
TF_RETURN_IF_ERROR(ConvertDataType(ty, builder, &elt_ty));
mlir::TensorType mlir_ty = mlir::UnrankedTensorType::get(elt_ty);
output_tys.push_back(mlir_ty);
}
llvm::SmallVector<mlir::NamedAttribute, 4> attrs;
for (const auto& attr : node_def.attr()) {
TF_ASSIGN_OR_RETURN(auto mlir_attr,
ConvertAttributeValue(attr.second, &builder));
attrs.push_back({mlir::StringAttr::get(context, attr.first), mlir_attr});
}
mlir::Location loc = mlir::UnknownLoc::get(context);
mlir::ModuleOp module = mlir::ModuleOp::create(loc);
mlir::FunctionType func_type =
mlir::FunctionType::get(context, input_tys, output_tys);
llvm::StringRef func_name_str(func_name.data(), func_name.size());
auto func = mlir::func::FuncOp::create(loc, func_name_str, func_type, {});
module.push_back(func);
func.addEntryBlock();
mlir::OpBuilder op_builder(func.getBody());
const std::string tf_op_full_name = absl::StrCat("tf.", node_def.op());
mlir::OperationState op_state(loc, tf_op_full_name);
op_state.addOperands(func.getArguments());
op_state.addTypes(output_tys);
op_state.addAttributes(attrs);
mlir::Operation* tf_op = op_builder.create(op_state);
op_builder.create<mlir::func::ReturnOp>(loc, tf_op->getResults());
TF_RETURN_IF_ERROR(DecomposeGraph(module));
FunctionDef func_def;
TF_RETURN_IF_ERROR(
tensorflow::tf2xla::v2::ConvertMlirFunctionToFunctionLibraryDef(
func, export_confs_, &func_def));
module.erase();
return func_def;
}
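// Runs the decomposition pass pipeline over an already-imported user module.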
Status TFRDecomposeContext::DecomposeGraph(mlir::ModuleOp user_module) {
if (failed(pm_.run(user_module))) {
return errors::Internal("Failed to run the decompose passes.");
}
return absl::OkStatus();
}
TFRDecomposeContext::TFRDecomposeContext(mlir::ModuleOp tfr_module)
: tfr_module_(tfr_module), pm_(tfr_module_.getContext()) {
mlir::OpPassManager& func_pm = pm_.nest<mlir::func::FuncOp>();
func_pm.addPass(mlir::CreateExecutorDialectToFunctionalConversionPass());
func_pm.addPass(mlir::TFR::CreateDecomposeTFOpsPass(tfr_module_));
func_pm.addPass(mlir::TFR::CreateRaiseToTFOpsPass(
tfr_module_, true));
func_pm.addPass(mlir::CreateFunctionalToExecutorDialectConversionPass());
pm_.addPass(mlir::CreateBreakUpIslandsPass());
}
void TFRDecomposeContext::Destroy() { tfr_module_.erase(); }
absl::StatusOr<FunctionDef> ExpandNode(const NodeDef& node_def,
StringPiece func_name) {
mlir::MLIRContext mlir_ctx;
TF_ASSIGN_OR_RETURN(auto ctx, TFRDecomposeContext::Get(&mlir_ctx));
return ctx->ExpandNode(node_def, func_name);
}
Status DecomposeGraph(mlir::ModuleOp user_module) {
mlir::MLIRContext* mlir_ctx = user_module.getContext();
TF_ASSIGN_OR_RETURN(auto ctx, TFRDecomposeContext::Get(mlir_ctx));
return ctx->DecomposeGraph(user_module);
}
}
} | #include "tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.h"
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/test.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/test.h"
using testing::ElementsAreArray;
using testing::Test;
using NodeAndType = std::pair<std::string, tensorflow::DataType>;
namespace tensorflow {
namespace {
REGISTER_OP("MyAddN")
.Input("inputs: N * T")
.Output("sum: T")
.Attr("N: int >= 1")
.Attr("T: {numbertype, variant}")
.SetIsCommutative()
.SetIsAggregate()
.SetShapeFn(shape_inference::UnchangedShape);
REGISTER_OP("RiscAddDummy")
.Input("x: T")
.Input("y: T")
.Output("z: T")
.Attr(
"T: {bfloat16, half, float, double, uint8, int8, int16, int32, int64, "
"complex64, complex128, string}")
.SetShapeFn(shape_inference::UnchangedShape);
constexpr char tfr_raw_text[] = R"(
tfr.func @tf__my_add_n(%values: !tfr.tensor_list,
%n: i64 {tfr.name="N"}) -> !tfr.tensor {
%index = arith.constant 0 : index
%cst = arith.constant 1 : i64
%eq = arith.cmpi "eq", %n, %cst : i64
%v1 = tfr.get_element %values[%index] : (!tfr.tensor_list, index) -> !tfr.tensor
%res = scf.if %eq -> !tfr.tensor {
scf.yield %v1 : !tfr.tensor
} else {
%step = arith.index_cast %cst : i64 to index
%end = arith.index_cast %n : i64 to index
%reduce = scf.for %i = %step to %end step %step iter_args(%reduce_iter=%v1) -> !tfr.tensor {
%v = tfr.get_element %values[%i] : (!tfr.tensor_list, index) -> !tfr.tensor
%reduce_next = tfr.call @tf__risc_add_dummy(%reduce_iter, %v) : (!tfr.tensor, !tfr.tensor) -> !tfr.tensor
scf.yield %reduce_next : !tfr.tensor
}
scf.yield %reduce : !tfr.tensor
}
tfr.return %res : !tfr.tensor
}
tfr.func @tf__my_add_n_(!tfr.tensor_list<N,T>, i64 {tfr.name="N"}) -> !tfr.tensor attributes{N,T}
tfr.func @tf__risc_add_dummy_(!tfr.tensor<T>, !tfr.tensor<T>) -> !tfr.tensor<T> attributes{T}
)";
class TFRDecomposeContextTest : public Test {
protected:
void SetUp() override {
test_ctx_ = tfr::TFRDecomposeContext::GetFromText(tfr_raw_text, &ctx_);
}
void TearDown() override { test_ctx_->Destroy(); }
mlir::MLIRContext ctx_;
std::unique_ptr<tfr::TFRDecomposeContext> test_ctx_;
};
std::vector<NodeAndType> NodesSequenceOf(const FunctionDef& graph) {
std::vector<NodeAndType> nodes;
for (auto& node : graph.node_def()) {
nodes.push_back({node.op(), node.attr().at("T").type()});
}
return nodes;
}
TEST_F(TFRDecomposeContextTest, FLOAT_1_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("input", 0, DT_FLOAT);
NodeDef test_node;
auto status = NodeDefBuilder("float_add", "MyAddN")
.Input(src_list)
.Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"Identity", DT_FLOAT}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
TEST_F(TFRDecomposeContextTest, FLOAT_3_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("in0", 0, DT_FLOAT);
src_list.emplace_back("in1", 0, DT_FLOAT);
src_list.emplace_back("in2", 0, DT_FLOAT);
NodeDef test_node;
auto status = NodeDefBuilder("float_add_3", "MyAddN")
.Input(src_list)
.Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"RiscAddDummy", DT_FLOAT},
{"RiscAddDummy", DT_FLOAT}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
TEST_F(TFRDecomposeContextTest, INT32_3_ins) {
std::vector<NodeDefBuilder::NodeOut> src_list;
src_list.emplace_back("in0", 0, DT_INT32);
src_list.emplace_back("in1", 0, DT_INT32);
src_list.emplace_back("in2", 0, DT_INT32);
NodeDef test_node;
auto status =
NodeDefBuilder("int_add", "MyAddN").Input(src_list).Finalize(&test_node);
EXPECT_TRUE(status.ok());
auto decomposed = test_ctx_->ExpandNode(test_node, "test");
EXPECT_TRUE(decomposed.ok());
std::vector<NodeAndType> expected_results{{"RiscAddDummy", DT_INT32},
{"RiscAddDummy", DT_INT32}};
EXPECT_THAT(NodesSequenceOf(decomposed.value()),
ElementsAreArray(expected_results));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4395a6b8-bcbe-457c-bb22-0a6e80a57083 | cpp | tensorflow/tensorflow | import_model | tensorflow/compiler/mlir/tensorflow/translate/import_model.cc | tensorflow/compiler/mlir/tfrt/translate/import_model_test.cc | #include "tensorflow/compiler/mlir/tensorflow/translate/import_model.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/IR/Verifier.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Interfaces/DerivedAttributeOpInterface.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/cc/saved_model/bundle_v2.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/cc/saved_model/loader.h"
#include "tensorflow/cc/saved_model/loader_util.h"
#include "tensorflow/compiler/jit/shape_inference_helpers.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_attributes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/initialize_variables_in_session_init.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/lift_variables.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_import_options.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/node_order.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/upgrade_graph.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_attr.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dynamic_shape_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_runner.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_debug_info.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_debug_info_builder.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/crash_analysis.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/stack_frame.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/threadpool.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tensorflow/core/protobuf/struct.pb.h"
#include "tensorflow/core/protobuf/trackable_object_graph.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
static inline absl::string_view StringRefToView(llvm::StringRef ref) {
return {ref.data(), ref.size()};
}
namespace tensorflow {
constexpr size_t kNumThreadToConvertSignatures = 10;
constexpr absl::string_view kOutputShapesAttrName = "_output_shapes";
using ::mlir::NamedAttrList;
using ::mlir::TensorType;
using ::mlir::tf_saved_model::AssetOp;
using ::mlir::tf_saved_model::GlobalTensorOp;
using ::mlir::tf_saved_model::kTfSavedModelExportedNamesAttr;
using ::mlir::tf_saved_model::kTfSavedModelIndexPathAttr;
using ::mlir::tf_saved_model::kTfSavedModelInitializerInitType;
using ::mlir::tf_saved_model::kTfSavedModelInitializerRestoreType;
using ::mlir::tf_saved_model::kTfSavedModelInitializerTypeAttr;
using ::mlir::tf_saved_model::SessionInitializerOp;
using ::tsl::StatusOr;
namespace {
bool IsOutputShapesAttribute(const AttrValue& attr_value,
llvm::StringRef attr_name) {
return attr_name.compare(kOutputShapesAttrName) == 0 &&
attr_value.value_case() == AttrValue::kList;
}
bool IsResourceOutputShapesAttribute(const AttrValue& attr_value,
llvm::StringRef attr_name) {
if (attr_name == "_handle_dtypes" || attr_name == "_handle_shapes")
return attr_value.value_case() == AttrValue::kList;
return false;
}
void LoadImporterDialects(mlir::MLIRContext& context) {
mlir::DialectRegistry registry;
mlir::RegisterAllTensorFlowDialectsImpl(registry, false);
context.appendDialectRegistry(registry);
for (llvm::StringRef name : registry.getDialectNames())
context.getOrLoadDialect(name);
}
absl::StatusOr<std::string> GetDenseTensorNameFromTensorInfo(
const TensorInfo& tensor_info) {
TF_RET_CHECK(tensor_info.encoding_case() == tensorflow::TensorInfo::kName)
<< "Only dense tensor is supported, but got encoding case "
<< tensor_info.encoding_case();
return tensor_info.name();
}
class NameUniquifier : public OpOrArgNameMapper {
public:
explicit NameUniquifier(const FunctionLibraryDefinition& flib)
: flib_(flib) {}
private:
bool IsUnique(llvm::StringRef name) override {
return !flib_.Contains(std::string(name));
}
std::string GetName(OpOrVal op_or_val) override {
DCHECK(false) << "Unimplemented";
return "";
}
const FunctionLibraryDefinition& flib_;
};
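// Shared machinery for importing a TensorFlow Graph (together with its
// function library and debug info) into an MLIR module in the tf_executor
// dialect. The protected PrepareConvert/Convert interface is driven by the
// concrete importer classes built on top of it.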
class ImporterBase {
protected:
explicit ImporterBase(
const FunctionLibraryDefinition& flib, const GraphDebugInfo& debug_info,
const GraphImportConfig& specs, mlir::ModuleOp module,
std::unordered_map<std::string, std::string>* tf_name_to_mlir_name,
NameUniquifier* function_name_uniquifier,
llvm::StringRef function_name_for_debug_info = "")
: builder_(module.getContext()),
module_(module),
context_(module.getContext()),
tf_name_to_mlir_name_(tf_name_to_mlir_name),
graph_flib_(flib),
specs_(specs),
debug_info_(debug_info),
function_name_for_debug_info_(function_name_for_debug_info),
function_name_uniquifier_(function_name_uniquifier),
error_handler_(module.getContext()) {
if (VLOG_IS_ON(1)) {
LOG(INFO) << "Importing with: " << specs.str();
for (auto& it : *tf_name_to_mlir_name) {
LOG(INFO) << "\t" << it.first << " -> " << it.second;
}
}
stack_traces_ = LoadTracesFromDebugInfo(debug_info_);
}
absl::StatusOr<mlir::FunctionType> InferLibFunctionType(
const FunctionBody& fbody);
void GetArgsAndRetsFromFunctionBody(
const FunctionBody& fbody,
absl::InlinedVector<OutputTensor, 4>* arg_nodes,
absl::InlinedVector<OutputTensor, 4>* ret_nodes,
absl::InlinedVector<Node*, 4>* control_ret_nodes);
Status PrepareConvert(const Graph& graph,
std::unique_ptr<GraphDef> graph_def = nullptr);
Status Convert(llvm::StringRef func_name, mlir::FunctionType func_type,
const absl::InlinedVector<OutputTensor, 4>& arg_nodes,
const absl::InlinedVector<OutputTensor, 4>& ret_nodes,
const absl::InlinedVector<Node*, 4>& control_ret_nodes,
llvm::ArrayRef<mlir::NamedAttribute> attrs);
Status ConvertLibFunction(llvm::StringRef func_name);
llvm::ArrayRef<Node*> GetOrderedNodes() const { return ordered_nodes_; }
absl::StatusOr<mlir::Type> InferInputType(const Node& node, int idx,
mlir::Builder builder);
absl::StatusOr<mlir::Type> InferOutputType(const Node& node, int idx,
mlir::Builder builder);
Status ConvertDeferredFunctions();
private:
using ElementSubtypes = llvm::SmallVector<TensorType, 1>;
struct DeferredConversionMetaData {
DeferredConversionMetaData(
const std::string& function_name,
const std::vector<mlir::NamedAttribute>& attributes)
: function_name(function_name), attributes(attributes) {}
std::string function_name;
std::vector<mlir::NamedAttribute> attributes;
};
Status AddNodesToShapeRefiner(
std::unordered_map<string, Node*>* node_name_map);
Status PruneUnreachableNodes(
std::unordered_map<string, Node*>* node_name_map);
Status ConvertFeedsToPlaceholders(
std::unordered_map<string, Node*>* node_name_map);
absl::StatusOr<TensorType> ConvertDataTypeAndShape(
DataType dtype, const shape_inference::ShapeHandle& handle,
const std::vector<shape_inference::ShapeAndType>* handle_subtypes,
shape_inference::InferenceContext* context, mlir::Builder builder);
absl::StatusOr<TensorType> ConvertElementTypeAndShape(
mlir::Type element_type, const shape_inference::ShapeHandle& handle,
shape_inference::InferenceContext* context, mlir::Builder builder);
absl::StatusOr<ElementSubtypes> ConvertSubtypes(
const std::vector<shape_inference::ShapeAndType>* handle_subtypes,
shape_inference::InferenceContext* context, mlir::Builder builder);
absl::StatusOr<mlir::ElementsAttr> ConvertTensorProto(
const TensorProto& value) {
return ::tensorflow::ConvertTensorProto(value, &builder_);
}
absl::StatusOr<mlir::FlatSymbolRefAttr> ConvertFunctionCallName(
const std::string& func_name);
absl::StatusOr<mlir::Attribute> ConvertAttributeValue(const AttrValue& value);
Status ConvertFunctionCallAttribute(const std::string& base_name,
const AttrValue& value,
NamedAttrList* attributes);
mlir::Operation* CreateOperation(
const Node& node, llvm::StringRef node_type_name,
const mlir::OperationState& result,
const llvm::SmallVectorImpl<mlir::Value>& control_operands);
Status ConvertNode(const Node& node);
using BackEdge = BackEdgeHelper::BackEdge;
Status RemoveBackedges();
Status AddBackedges();
Status AddBackedge(mlir::Operation* sink, mlir::Operation* dst,
int dst_input);
Status ConvertFunctionArgAndRets(
mlir::func::FuncOp func, mlir::tf_executor::GraphOp graph_op,
llvm::ArrayRef<mlir::Type> arg_types,
const absl::InlinedVector<OutputTensor, 4>& arg_nodes,
const absl::InlinedVector<OutputTensor, 4>& ret_nodes,
const absl::InlinedVector<Node*, 4>& control_ret_nodes);
mlir::Location GetLocation(const Node& node);
Status EmitErrorWithLocationStr(const Node& node, const Status& error_status);
absl::StatusOr<std::pair<Node*, bool>> CreatePlaceholderNodeForFeed(
const TensorShapeProto& shape, DataType dtype, Node* node, int index,
const std::unordered_map<string, Node*>& node_name_map);
Status GetInputOutputNodes(
const std::unordered_map<string, Node*>& node_name_map,
std::unordered_set<const Node*>* nodes);
BackEdgeHelper back_edge_helper_;
absl::flat_hash_map<const Node*, int> back_edge_node_output_;
absl::flat_hash_map<const Node*, BackEdge> back_edge_dst_inputs_;
absl::flat_hash_map<mlir::Operation*, mlir::Operation*>
next_iteration_sink_source_;
std::unique_ptr<Graph> graph_;
std::vector<Node*> ordered_nodes_;
using NodeValueMap = absl::flat_hash_map<int, mlir::Operation*>;
mlir::OpBuilder builder_;
mlir::ModuleOp module_;
mlir::MLIRContext* context_;
std::unordered_map<std::string, std::string>* tf_name_to_mlir_name_;
const FunctionLibraryDefinition& graph_flib_;
const GraphImportConfig& specs_;
const GraphDebugInfo& debug_info_;
StackTracesMap stack_traces_;
llvm::StringRef function_name_for_debug_info_;
NodeValueMap node_values_;
std::unique_ptr<ShapeRefiner> shape_refiner_ = nullptr;
NameUniquifier* function_name_uniquifier_;
mlir::StatusScopedDiagnosticHandler error_handler_;
llvm::DenseSet<mlir::StringAttr> unmodelled_op_names_;
protected:
absl::flat_hash_map<TensorId, absl::string_view> remapped_feeds_;
std::queue<DeferredConversionMetaData> deferred_functions_;
};
bool HasNonPrimaryOutputInUse(const GraphDef& graph_def,
const std::string& node) {
for (const auto& node_def : graph_def.node()) {
for (const auto& input : node_def.input()) {
if (absl::StartsWith(input, node + ":") && input != node + ":0") {
return true;
}
}
}
return false;
}
Status UpdateLegacyFedInputNode(const GraphDef& graph_def,
const GraphImportConfig::InputArrays& inputs,
NodeDef* node) {
const std::string& node_name = node->name();
auto it = inputs.find(node_name);
if (it == inputs.end()) return absl::OkStatus();
if (HasNonPrimaryOutputInUse(graph_def, node_name)) {
    return errors::InvalidArgument(
        "LegacyFedInput node ", node->name(),
        " has a non-primary output in use and cannot be replaced with a "
        "Placeholder node");
}
DataType dtype = it->second.imported_dtype;
if (dtype == DT_INVALID) {
dtype = node->attr().at("output_types").list().type(0);
}
*node->mutable_op() = "Placeholder";
node->clear_attr();
node->clear_input();
AddNodeAttr("dtype", dtype, node);
AddNodeAttr("shape", it->second.shape, node);
return absl::OkStatus();
}
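// Preprocesses a GraphDef before import: optionally rewrites LegacyFedInput
// feed nodes into Placeholder nodes and fills in default values for attributes
// declared by the registered op definitions.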
Status PreprocessGraphDef(const GraphImportConfig* specs, GraphDef* graph_def) {
for (auto& node_def : *graph_def->mutable_node()) {
if (specs && specs->convert_legacy_fed_inputs &&
node_def.op() == "LegacyFedInput") {
TF_RETURN_IF_ERROR(
UpdateLegacyFedInputNode(*graph_def, specs->inputs, &node_def));
}
const tensorflow::OpRegistrationData* op_reg_data =
tensorflow::OpRegistry::Global()->LookUp(node_def.op());
if (!op_reg_data) {
continue;
}
::tensorflow::AddDefaultsToNodeDef(op_reg_data->op_def, &node_def);
}
return absl::OkStatus();
}
using FeedsByNode = absl::flat_hash_map<
absl::string_view,
absl::flat_hash_map<int, const std::pair<std::string, ArrayInfo>*>>;
absl::StatusOr<FeedsByNode> GetFeedsByNode(
const GraphImportConfig::InputArrays& inputs) {
FeedsByNode feeds_by_node;
feeds_by_node.reserve(inputs.size());
for (const auto& input : inputs) {
TensorId tensor = ParseTensorName(input.first);
if (tensor.index() < 0)
return errors::FailedPrecondition(
"Feed output tensor must be a data output '", tensor.ToString(), "'");
auto& node = feeds_by_node[tensor.node()];
if (!node.insert({tensor.index(), &input}).second)
return errors::FailedPrecondition(
"Multiple feeds for the same output tensor '", tensor.ToString(),
"'");
}
return feeds_by_node;
}
std::string GetUniqueNodeName(
absl::string_view node_name, int index,
const std::unordered_map<string, Node*>& node_name_map) {
std::string new_node_name_base = absl::StrCat(node_name, "_", index);
int count = 0;
std::string new_node_name = new_node_name_base;
while (node_name_map.find(new_node_name) != node_name_map.end()) {
new_node_name = absl::StrCat(new_node_name_base, "_", count++);
}
return new_node_name;
}
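// Drains the queue of functions whose conversion was deferred during import.
// Each deferred function is converted with its own ImporterBase instance, and
// any functions it defers in turn are pushed back onto the queue.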
Status ImporterBase::ConvertDeferredFunctions() {
while (!deferred_functions_.empty()) {
auto conversion_metadata = deferred_functions_.front();
deferred_functions_.pop();
const FunctionDef* func_def =
graph_flib_.Find(conversion_metadata.function_name);
GraphImportConfig specs;
specs.enable_shape_inference = specs_.enable_shape_inference;
specs.unconditionally_use_set_output_shapes =
specs_.unconditionally_use_set_output_shapes;
for (const auto& name_and_value : func_def->attr()) {
if (name_and_value.first == "_input_shapes") {
auto& list = name_and_value.second.list();
auto& signature = func_def->signature();
if (list.shape_size() > 0 &&
list.shape_size() != signature.input_arg_size()) {
return errors::FailedPrecondition(
"Number of input arguments must be equal to the length of "
"_input_shapes attribute in function '",
StringRefToView(conversion_metadata.function_name), "'.");
}
for (int i = 0, e = signature.input_arg_size(); i < e; i++) {
auto& input_arg = signature.input_arg(i);
auto& array_info = specs.inputs[input_arg.name()];
array_info.imported_dtype = input_arg.type();
if (list.shape_size() > 0)
array_info.shape = list.shape(i);
else
array_info.shape.set_unknown_rank(true);
}
}
}
ImporterBase importer(graph_flib_, debug_info_, specs, module_,
tf_name_to_mlir_name_, function_name_uniquifier_,
conversion_metadata.function_name);
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*func_def, AttrSlice(), &graph_flib_, &fbody));
TF_RETURN_IF_ERROR(importer.PrepareConvert(*fbody->graph));
TF_ASSIGN_OR_RETURN(auto func_type, importer.InferLibFunctionType(*fbody));
absl::InlinedVector<OutputTensor, 4> arg_nodes;
absl::InlinedVector<OutputTensor, 4> ret_nodes;
absl::InlinedVector<Node*, 4> control_ret_nodes;
importer.GetArgsAndRetsFromFunctionBody(*fbody, &arg_nodes, &ret_nodes,
&control_ret_nodes);
const std::string& mlir_func_name =
(*tf_name_to_mlir_name_)[conversion_metadata.function_name];
TF_RETURN_IF_ERROR(importer.Convert(mlir_func_name, func_type, arg_nodes,
ret_nodes, control_ret_nodes,
conversion_metadata.attributes));
while (!importer.deferred_functions_.empty()) {
deferred_functions_.push(importer.deferred_functions_.front());
importer.deferred_functions_.pop();
}
}
return absl::OkStatus();
}
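// Removes back edges from the graph so nodes can be visited in a topological
// order, recording each removed edge so AddBackedges() can re-attach it once
// the corresponding operations have been created.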
Status ImporterBase::RemoveBackedges() {
TF_RETURN_IF_ERROR(back_edge_helper_.Remove(graph_.get()));
VLOG(1) << "Found " << (back_edge_helper_.RemovedEdges().size())
<< " backedges.";
for (const auto& edge : back_edge_helper_.RemovedEdges()) {
if (back_edge_node_output_.find(edge.src) != back_edge_node_output_.end() &&
back_edge_node_output_[edge.src] != edge.src_output) {
return errors::FailedPrecondition(
"More than one of the src node outputs are backedges!");
}
back_edge_node_output_[edge.src] = edge.src_output;
DCHECK(!back_edge_dst_inputs_.contains(edge.dst));
back_edge_dst_inputs_[edge.dst] = edge;
}
ordered_nodes_.clear();
TopologicalOrdering(
*graph_, [&](Node* n) { ordered_nodes_.push_back(n); }, GroupByDevice());
return absl::OkStatus();
}
Status CopyStackTraces(const Graph& from, Graph* to) {
std::unordered_map<string, Node*> node_map = from.BuildNodeNameIndex();
for (Node* node : to->nodes()) {
if (const Node* old_node = node_map[node->name()]) {
if (const std::shared_ptr<AbstractStackTrace>& stack =
old_node->GetStackTrace()) {
DVLOG(2) << "Stack for " << node->name() << " "
<< old_node->GetStackTrace()->ToString(
AbstractStackTrace::TracePrintingOptions());
node->SetStackTrace(stack);
} else {
DVLOG(1) << "No stack for " << node->name() << " (" << node
<< ") in Graph " << &from;
}
} else {
DVLOG(1) << "No stack for " << node->name() << " (" << node
<< ") in Graph " << &from;
}
}
return absl::OkStatus();
}
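// Replaces output `index` of `node` with a freshly created Placeholder of the
// given shape and dtype, rewiring the data edges that consumed that output.
// When the node has a single output the node itself is removed and its control
// edges are transferred; the returned bool reports whether that in-place
// replacement happened.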
absl::StatusOr<std::pair<Node*, bool>>
ImporterBase::CreatePlaceholderNodeForFeed(
const TensorShapeProto& shape, DataType dtype, Node* node, int index,
const std::unordered_map<string, Node*>& node_name_map) {
DCHECK_LT(index, node->num_outputs());
const bool update_inplace = node->num_outputs() == 1 && index == 0;
std::string new_node_name =
update_inplace ? node->name()
: GetUniqueNodeName(node->name(), index, node_name_map);
Node* placeholder_node;
NodeBuilder builder(new_node_name, "Placeholder");
builder.Attr("shape", shape);
builder.Attr("dtype", dtype);
TF_RETURN_IF_ERROR(builder.Finalize(graph_.get(), &placeholder_node));
std::vector<const Edge*> data_edges;
std::vector<const Edge*> control_edges;
for (const tensorflow::Edge* edge : node->out_edges()) {
if (edge->src_output() == index) {
data_edges.push_back(edge);
} else if (update_inplace && edge->IsControlEdge()) {
control_edges.push_back(edge);
}
}
for (const auto* edge : data_edges) {
TF_RETURN_IF_ERROR(graph_->UpdateEdge(placeholder_node, 0, edge->dst(),
edge->dst_input()));
}
for (const auto* edge : control_edges) {
graph_->AddControlEdge(placeholder_node, edge->dst());
graph_->RemoveControlEdge(edge);
}
if (update_inplace) {
graph_->RemoveNode(node);
}
return std::pair<Node*, bool>(placeholder_node, update_inplace);
}
Status ImporterBase::GetInputOutputNodes(
const std::unordered_map<string, Node*>& node_name_map,
std::unordered_set<const Node*>* nodes) {
auto add_node = [&](absl::string_view name) {
auto it = node_name_map.find(std::string(name));
if (it == node_name_map.end()) {
return errors::FailedPrecondition(
absl::StrCat("Graph does not contain node: ", name));
}
nodes->insert(it->second);
return absl::OkStatus();
};
for (const auto& input : specs_.inputs) {
TensorId tensor = ParseTensorName(input.first);
auto remapped_it = remapped_feeds_.find(tensor);
if (remapped_it != remapped_feeds_.end()) {
TF_RETURN_IF_ERROR(add_node(remapped_it->second));
} else {
TF_RETURN_IF_ERROR(add_node(tensor.node()));
}
}
for (const auto& output : specs_.outputs) {
TensorId tensor = ParseTensorName(output);
auto remapped_it = remapped_feeds_.find(tensor);
if (remapped_it != remapped_feeds_.end()) {
TF_RETURN_IF_ERROR(add_node(remapped_it->second));
} else {
TF_RETURN_IF_ERROR(add_node(tensor.node()));
}
}
for (const auto& control_output : specs_.control_outputs)
TF_RETURN_IF_ERROR(add_node(control_output));
return absl::OkStatus();
}
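// Adds the graph's nodes to the shape refiner in topological order: feeds are
// converted to Placeholder nodes, shapes are seeded from `shape` and
// `_output_shapes` attributes, unreachable nodes are optionally pruned, and
// shape inference is then re-run a bounded number of times to approach a
// fixpoint.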
Status ImporterBase::AddNodesToShapeRefiner(
std::unordered_map<string, Node*>* node_name_map) {
shape_refiner_ =
std::make_unique<ShapeRefiner>(graph_->versions(), graph_->op_registry());
shape_refiner_->set_require_shape_inference_fns(false);
shape_refiner_->set_function_library_for_shape_inference(&graph_flib_);
TF_ASSIGN_OR_RETURN(auto feeds_by_node, GetFeedsByNode(specs_.inputs));
for (Node* node : ordered_nodes_) {
bool node_added_to_shape_refiner = false;
auto it = feeds_by_node.find(node->name());
if (it != feeds_by_node.end()) {
auto op_name = node->op_def().name();
if (op_name != "Placeholder" && op_name != "LegacyFedInput" &&
op_name != FunctionLibraryDefinition::kArgOp) {
for (const auto& output_tensor : it->second) {
const int index = output_tensor.first;
const ArrayInfo& array_info = output_tensor.second->second;
DataType dtype = array_info.imported_dtype;
if (dtype == DT_INVALID) {
dtype = node->output_type(index);
}
TF_ASSIGN_OR_RETURN(
auto placeholder_node_and_removed,
CreatePlaceholderNodeForFeed(array_info.shape, dtype, node, index,
*node_name_map));
Node* placeholder_node = placeholder_node_and_removed.first;
if (placeholder_node_and_removed.second) {
node = placeholder_node;
node_added_to_shape_refiner = true;
}
remapped_feeds_[{it->first, index}] = placeholder_node->name();
(*node_name_map)[placeholder_node->name()] = placeholder_node;
Status status = shape_refiner_->AddNode(placeholder_node);
if (!status.ok()) {
return EmitErrorWithLocationStr(*placeholder_node, status);
}
}
} else {
auto index_it = it->second.find(0);
if (index_it == it->second.end()) {
return errors::FailedPrecondition(
"Missing feed output tensor at index 0 for node '", node->name(),
"'");
}
node->AddAttr("shape", index_it->second->second.shape);
DataType dtype = index_it->second->second.imported_dtype;
if (dtype == DT_INVALID) {
dtype = node->output_type(0);
}
node->AddAttr("dtype", dtype);
}
}
if (!node_added_to_shape_refiner) {
Status status = shape_refiner_->AddNode(node);
if (!status.ok()) {
return EmitErrorWithLocationStr(*node, status);
}
}
auto set_shape_from_list_attr = [&](const AttrValue* attr) {
auto& list = attr->list();
if (list.shape_size() != node->num_outputs()) {
LOG(WARNING) << "Node '" << node->name() << "' has "
<< node->num_outputs() << " outputs but the "
<< kOutputShapesAttrName
<< " attribute specifies shapes for " << list.shape_size()
<< " outputs";
return absl::OkStatus();
}
for (const auto& shape : llvm::enumerate(list.shape())) {
auto* node_context = shape_refiner_->GetContext(node);
shape_inference::ShapeHandle handle;
Status status =
node_context->MakeShapeFromShapeProto(shape.value(), &handle);
if (!status.ok()) {
return EmitErrorWithLocationStr(*node, status);
}
node_context->set_output(shape.index(), handle);
}
return absl::OkStatus();
};
if (StringPiece(node->type_string()) == FunctionLibraryDefinition::kArgOp) {
auto* node_context = shape_refiner_->GetContext(node);
DCHECK(node_context != nullptr);
if (const AttrValue* attr = node->attrs().Find("shape")) {
shape_inference::ShapeHandle handle;
Status status =
node_context->MakeShapeFromShapeProto(attr->shape(), &handle);
if (!status.ok()) {
return EmitErrorWithLocationStr(*node, status);
}
node_context->set_output(0, handle);
} else if (const AttrValue* attr =
node->attrs().Find(kOutputShapesAttrName)) {
TF_RETURN_IF_ERROR(set_shape_from_list_attr(attr));
} else {
node_context->set_output(0, node_context->UnknownShape());
}
}
if (specs_.unconditionally_use_set_output_shapes ||
node->op_def().name() == "ReadVariableOp") {
if (const AttrValue* attr = node->attrs().Find(kOutputShapesAttrName))
TF_RETURN_IF_ERROR(set_shape_from_list_attr(attr));
}
}
FixupSourceAndSinkEdges(graph_.get());
if (specs_.prune_unused_nodes) {
std::unordered_set<const Node*> prune_start;
TF_RETURN_IF_ERROR(GetInputOutputNodes(*node_name_map, &prune_start));
if (!prune_start.empty()) {
if (PruneForReverseReachability(graph_.get(), prune_start)) {
VLOG(1) << "Pruned unused nodes in graphdef";
} else {
VLOG(1) << "No unused nodes in graphdef to prune";
}
} else {
VLOG(1) << "No output nodes specified, skipping pruning";
}
} else {
VLOG(1) << "Pruning unused nodes in graphdef is disabled";
}
ordered_nodes_.clear();
TopologicalOrdering(
*graph_, [&](Node* n) { ordered_nodes_.push_back(n); }, GroupByDevice());
VLOG(1) << "Inferring graph shapes to fixpoint";
auto same_inferred_shape = [](shape_inference::InferenceContext* c,
shape_inference::ShapeHandle s0,
shape_inference::ShapeHandle s1) -> bool {
if (s0.SameHandle(s1) || (!c->RankKnown(s0) && !c->RankKnown(s1))) {
return true;
}
if (c->Rank(s0) != c->Rank(s1)) {
return false;
}
for (int i = 0; i < c->Rank(s0); ++i) {
if (!c->Dim(s0, i).SameHandle(c->Dim(s1, i))) {
int64_t val0 = c->Value(c->Dim(s0, i));
int64_t val1 = c->Value(c->Dim(s1, i));
if (val0 >= 0 && val1 >= 0 && val0 != val1) return false;
}
}
return true;
};
bool changed = true;
int i = 0;
const int kMaxIterationCount = 2;
while (changed && i != kMaxIterationCount) {
changed = false;
for (const Node* node : ordered_nodes_) {
auto* shape_context = shape_refiner_->GetContext(node);
DCHECK(shape_context != nullptr);
absl::InlinedVector<shape_inference::ShapeHandle, 4> existing;
existing.reserve(shape_context->num_outputs());
for (int o = 0; o < shape_context->num_outputs(); ++o) {
existing.push_back(shape_context->output(o));
}
bool inferred = false;
shape_inference::ShapeHandle handle;
      Status status =
          shape_refiner_->UpdateNode(node, /*relax=*/false, &inferred);
if (!status.ok()) {
return EmitErrorWithLocationStr(*node, status);
}
for (int o = 0; o < shape_context->num_outputs(); ++o) {
if (!same_inferred_shape(shape_context, shape_context->output(o),
existing[o])) {
changed = true;
break;
}
}
}
++i;
}
if (i >= kMaxIterationCount) {
LOG(WARNING) << "Graph shapes did not converge to a fixpoint within "
<< kMaxIterationCount
<< " iterations. Graph shapes may be conservative.";
}
VLOG(1) << "Graph shapes were inferred with " << (i - 1)
<< " extra rounds of analysis to reach a fixpoint.";
return absl::OkStatus();
}
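// Infers the MLIR type of input `idx` of `node`. With shape inference enabled
// the shape refiner's context is used; otherwise an unranked tensor of the
// node's declared input dtype is returned.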
absl::StatusOr<mlir::Type> ImporterBase::InferInputType(const Node& node,
int idx,
mlir::Builder builder) {
if (specs_.enable_shape_inference) {
auto* context = shape_refiner_->GetContext(&node);
DataType dtype = node.input_type(idx);
return ConvertDataTypeAndShape(dtype, context->input(idx),
context->input_handle_shapes_and_types(idx),
context, builder);
}
DataType dtype = node.properties()->input_types[idx];
mlir::Type element_type;
TF_RETURN_IF_ERROR(ConvertDataType(dtype, builder, &element_type));
return mlir::UnrankedTensorType::get(element_type);
}
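// Infers the MLIR type of output `idx` of `node`. Without the shape refiner,
// falls back to op-specific attributes (e.g. "output_shapes", "shapes",
// "_output_shapes") and, as a last resort, to running the op's registered
// shape inference function.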
absl::StatusOr<mlir::Type> ImporterBase::InferOutputType(
const Node& node, int idx, mlir::Builder builder) {
DataType dtype = node.properties()->output_types[idx];
auto shape_ic =
[&](shape_inference::InferenceContext* c) -> absl::StatusOr<mlir::Type> {
if (specs_.unconditionally_use_set_output_shapes) {
if (const AttrValue* attr = node.attrs().Find(kOutputShapesAttrName)) {
auto& list = attr->list();
if (list.shape_size() > idx) {
const TensorShapeProto& p = list.shape()[idx];
shape_inference::ShapeHandle h;
Status s = c->MakeShapeFromShapeProto(p, &h);
if (!s.ok())
return errors::InvalidArgument(
"Node '", node.name(), " has an invalid ",
kOutputShapesAttrName, " attribute (shape #", idx, " error:'",
s.message(), "')");
c->set_output(idx, h);
}
}
}
return ConvertDataTypeAndShape(dtype, c->output(idx),
c->output_handle_shapes_and_types(idx), c,
builder);
};
if (specs_.enable_shape_inference) {
shape_inference::InferenceContext* shape_context =
shape_refiner_->GetContext(&node);
return shape_ic(shape_context);
}
if (node.type_string() == "TensorListReserve" ||
node.type_string() == "EmptyTensorList") {
mlir::Type etype;
if (auto element_dtype = node.attrs().Find("element_dtype")) {
TF_RETURN_IF_ERROR(
ConvertDataType(element_dtype->type(), builder, &etype));
}
return GetTypeFromTFTensorShape(
{}, mlir::TF::VariantType::get({mlir::UnrankedTensorType::get(etype)},
etype.getContext()));
}
if (node.IsWhileNode()) {
auto* output_shapes = node.attrs().Find("output_shapes");
auto* element_types = node.attrs().Find("T");
if (output_shapes && !output_shapes->list().shape().empty()) {
const auto& output_shape = output_shapes->list().shape(idx);
const auto& element_type = element_types->list().type(idx);
return ConvertToMlirTensorType(output_shape, element_type, &builder);
}
}
auto type_from_array_attr = [&node, &idx, &builder](
absl::string_view output_shape_attr,
absl::string_view element_type_attr) {
auto* output_shapes = node.attrs().Find(output_shape_attr);
auto* element_types = node.attrs().Find(element_type_attr);
const auto& output_shape = output_shapes->list().shape(idx);
const auto& element_type = element_types->list().type(idx);
return ConvertToMlirTensorType(output_shape, element_type, &builder);
};
if (node.type_string() == "IteratorGetNext" ||
node.type_string() == "IteratorGetNextSync" ||
node.type_string() == "MultiDeviceIteratorGetNextFromShard")
return type_from_array_attr("output_shapes", "output_types");
if (node.type_string() == "InfeedDequeueTuple")
return type_from_array_attr("shapes", "dtypes");
if (node.type_string() == "InfeedDequeue") {
assert(idx == 0);
const auto& output_shape = node.attrs().Find("shape")->shape();
const auto& element_type = node.attrs().Find("dtype")->type();
return ConvertToMlirTensorType(output_shape, element_type, &builder);
}
auto default_type = [&]() -> absl::StatusOr<mlir::Type> {
mlir::Type element_type;
TF_RETURN_IF_ERROR(ConvertDataType(dtype, builder, &element_type));
if (specs_.unconditionally_use_set_output_shapes) {
if (const AttrValue* attr = node.attrs().Find(kOutputShapesAttrName)) {
auto& list = attr->list();
if (list.shape_size() > idx) {
llvm::SmallVector<int64_t, 4> shape;
const TensorShapeProto& shape_proto = list.shape()[idx];
if (shape_proto.unknown_rank())
return mlir::UnrankedTensorType::get(element_type);
TF_RETURN_IF_ERROR(ConvertToMlirShape(shape_proto, &shape));
return GetTypeFromTFTensorShape(shape, element_type);
}
}
}
return mlir::UnrankedTensorType::get(element_type);
};
if (node.num_inputs() > 0) return default_type();
if (node.IsArg()) {
if (dtype == DT_RESOURCE) {
const AttrValue* dtype_attr = node.attrs().Find("_handle_dtypes");
const AttrValue* shape_attr = node.attrs().Find("_handle_shapes");
if (dtype_attr && shape_attr) {
if (dtype_attr->list().type().empty()) {
return errors::InvalidArgument(
"Invalid \"_handle_dtypes\" attribute value for _Arg node: ",
            dtype_attr->DebugString());
}
if (shape_attr->list().shape().empty()) {
return errors::InvalidArgument(
"Invalid \"_handle_shapes\" attribute value for _Arg node: ",
shape_attr->DebugString());
}
DataType dtype = dtype_attr->list().type(0);
const TensorShapeProto& shape_proto = shape_attr->list().shape(0);
TF_ASSIGN_OR_RETURN(
auto etype, ConvertToMlirTensorType(shape_proto, dtype, &builder));
return mlir::UnrankedTensorType::get(mlir::TF::ResourceType::get(
{mlir::cast<TensorType>(etype)}, builder.getContext()));
} else {
return mlir::UnrankedTensorType::get(
mlir::TF::ResourceType::get(builder.getContext()));
}
} else if (auto shape = node.attrs().Find("_output_shapes")) {
if (shape->has_list() && shape->list().shape_size() == 1) {
return ConvertToMlirTensorType(shape->list().shape().at(0), dtype,
&builder);
}
}
}
const tensorflow::OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(
graph_->op_registry()->LookUp(node.type_string(), &op_reg_data));
if (!op_reg_data) {
DVLOG(1) << "Skipping inference for unregistered op " << node.type_string();
return default_type();
}
if (op_reg_data->shape_inference_fn == nullptr) {
DVLOG(1) << "Skipping inference for op without shape function "
<< node.type_string();
return default_type();
}
shape_inference::InferenceContext c(graph_->versions().producer(),
node.attrs(), op_reg_data->op_def,
std::vector<PartialTensorShape>{}, {},
{}, {});
TF_RETURN_IF_ERROR(c.Run(op_reg_data->shape_inference_fn));
return shape_ic(&c);
}
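// Converts a TensorFlow dtype together with an inference-context shape handle
// (and any resource/variant handle subtypes) into an MLIR tensor type.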
absl::StatusOr<TensorType> ImporterBase::ConvertDataTypeAndShape(
DataType dtype, const shape_inference::ShapeHandle& handle,
const std::vector<shape_inference::ShapeAndType>* handle_subtypes,
shape_inference::InferenceContext* context, mlir::Builder builder) {
TF_ASSIGN_OR_RETURN(auto subtypes,
ConvertSubtypes(handle_subtypes, context, builder));
mlir::Type element_type;
if (dtype == DT_VARIANT)
element_type = mlir::TF::VariantType::get(subtypes, context_);
else if (dtype == DT_RESOURCE)
element_type = mlir::TF::ResourceType::get(subtypes, context_);
else
TF_RETURN_IF_ERROR(
::tensorflow::ConvertDataType(dtype, builder, &element_type));
return ConvertElementTypeAndShape(element_type, handle, context, builder);
}
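// Builds a ranked tensor type from `handle` when its rank is known, mapping
// unknown dimensions to -1; otherwise returns an unranked tensor type.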
absl::StatusOr<TensorType> ImporterBase::ConvertElementTypeAndShape(
mlir::Type element_type, const shape_inference::ShapeHandle& handle,
shape_inference::InferenceContext* context, mlir::Builder builder) {
if (!context->RankKnown(handle)) {
return mlir::UnrankedTensorType::get(element_type);
}
const int64_t kUnknownDim = -1;
absl::InlinedVector<int64_t, 4> dimensions;
int32_t rank = context->Rank(handle);
dimensions.reserve(rank);
for (int i = 0; i < rank; ++i) {
auto dim_handle = context->Dim(handle, i);
if (!context->ValueKnown(dim_handle))
dimensions.push_back(kUnknownDim);
else
dimensions.push_back(context->Value(dim_handle));
}
return GetTypeFromTFTensorShape(
llvm::ArrayRef(dimensions.begin(), dimensions.end()), element_type);
}
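// Converts the handle subtypes (shape/dtype pairs) carried by a resource or
// variant tensor into MLIR tensor types.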
absl::StatusOr<ImporterBase::ElementSubtypes> ImporterBase::ConvertSubtypes(
const std::vector<shape_inference::ShapeAndType>* handle_subtypes,
shape_inference::InferenceContext* context, mlir::Builder builder) {
ElementSubtypes subtypes;
if (!handle_subtypes) return subtypes;
subtypes.reserve(handle_subtypes->size());
for (const auto& subtype : *handle_subtypes) {
mlir::Type element_type;
TF_RETURN_IF_ERROR(
::tensorflow::ConvertDataType(subtype.dtype, builder, &element_type));
TF_ASSIGN_OR_RETURN(TensorType type,
ConvertElementTypeAndShape(element_type, subtype.shape,
context, builder));
subtypes.push_back(type);
}
return subtypes;
}
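// Converts a function-valued attribute into a symbol reference named
// `base_name`, plus one attribute "<base_name>.<attr>" per nested attribute.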
Status ImporterBase::ConvertFunctionCallAttribute(const std::string& base_name,
const AttrValue& value,
NamedAttrList* attributes) {
TF_ASSIGN_OR_RETURN(auto func_attr,
ConvertFunctionCallName(value.func().name()));
if (!func_attr) return absl::OkStatus();
attributes->push_back(builder_.getNamedAttr(base_name, func_attr));
for (const auto& it : value.func().attr()) {
auto name = absl::StrCat(base_name, ".", it.first);
TF_ASSIGN_OR_RETURN(auto value, ConvertAttributeValue(it.second));
attributes->push_back(builder_.getNamedAttr(name, value));
}
return absl::OkStatus();
}
absl::StatusOr<mlir::FlatSymbolRefAttr> ImporterBase::ConvertFunctionCallName(
const std::string& func_name) {
if (func_name.empty()) return mlir::FlatSymbolRefAttr();
TF_RETURN_IF_ERROR(ConvertLibFunction(func_name));
auto mlir_func_name = (*tf_name_to_mlir_name_)[func_name];
return mlir::SymbolRefAttr::get(builder_.getContext(), mlir_func_name);
}
absl::StatusOr<mlir::Attribute> ImporterBase::ConvertAttributeValue(
const AttrValue& value) {
switch (value.value_case()) {
case AttrValue::kFunc: {
NamedAttrList attrs;
for (const auto& func_attr : value.func().attr()) {
TF_ASSIGN_OR_RETURN(
auto attr, ImporterBase::ConvertAttributeValue(func_attr.second));
attrs.push_back(builder_.getNamedAttr(func_attr.first, attr));
}
auto func_attrs = builder_.getDictionaryAttr(attrs);
return mlir::TF::FuncAttr::get(context_, value.func().name(), func_attrs);
}
case AttrValue::kList: {
if (!value.list().func().empty()) {
absl::InlinedVector<mlir::Attribute, 8> attrs;
for (const auto& item : value.list().func()) {
TF_ASSIGN_OR_RETURN(auto attr, ConvertFunctionCallName(item.name()));
if (item.attr_size() != 0)
return errors::Unimplemented(
"func attributes with non-zero attr.size()");
if (attr) attrs.push_back(attr);
}
return builder_.getArrayAttr(
llvm::ArrayRef(attrs.begin(), attrs.end()));
}
return ConvertNonFuncAttributeValue(value, &builder_);
}
default:
return ConvertNonFuncAttributeValue(value, &builder_);
}
}
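// Collects the argument, return, and control-return nodes of a FunctionBody.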
void ImporterBase::GetArgsAndRetsFromFunctionBody(
const FunctionBody& fbody, absl::InlinedVector<OutputTensor, 4>* arg_nodes,
absl::InlinedVector<OutputTensor, 4>* ret_nodes,
absl::InlinedVector<Node*, 4>* control_ret_nodes) {
arg_nodes->reserve(fbody.arg_nodes.size());
ret_nodes->reserve(fbody.ret_nodes.size());
for (auto arg : fbody.arg_nodes) {
arg_nodes->emplace_back(arg, 0);
}
for (auto ret : fbody.ret_nodes) {
ret_nodes->emplace_back(ret, 0);
}
*control_ret_nodes = fbody.control_ret_nodes;
}
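// Registers library function `func_name` for deferred conversion: assigns it a
// unique MLIR name, converts its attributes (statefulness, gradient), and
// queues the function body for later import.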
Status ImporterBase::ConvertLibFunction(llvm::StringRef func_name) {
if (tf_name_to_mlir_name_->find(std::string(func_name)) !=
tf_name_to_mlir_name_->end())
return absl::OkStatus();
std::string mlir_func_name(
function_name_uniquifier_->GetUniqueName(func_name));
(*tf_name_to_mlir_name_)[std::string(func_name)] = mlir_func_name;
const auto& func_lib = graph_flib_;
const auto* func_def = func_lib.Find(std::string(func_name));
if (func_def == nullptr) {
return errors::FailedPrecondition(
absl::StrCat("Failed to find function '", StringRefToView(func_name),
"'. The imported TensorFlow GraphDef is ill-formed."));
}
std::vector<mlir::NamedAttribute> attributes;
attributes.reserve(func_def->attr_size());
for (const auto& name_and_value : func_def->attr()) {
TF_ASSIGN_OR_RETURN(auto attr,
ConvertAttributeValue(name_and_value.second));
std::string attr_name =
mangling_util::MangleAttributeName(name_and_value.first);
attributes.push_back(builder_.getNamedAttr(attr_name, attr));
}
if (func_def->signature().is_stateful()) {
auto stateful_str = mlir::TF::TensorFlowDialect::GetStatefulAttrName();
attributes.push_back(
builder_.getNamedAttr(stateful_str, builder_.getUnitAttr()));
}
auto grad_func_name = func_lib.FindGradient(std::string(func_name));
if (!grad_func_name.empty()) {
TF_RETURN_IF_ERROR(ConvertLibFunction(grad_func_name));
auto mlir_grad_func_name = (*tf_name_to_mlir_name_)[grad_func_name];
auto gradient_attr =
mlir::SymbolRefAttr::get(builder_.getContext(), mlir_grad_func_name);
auto grad_string = mlir::TF::TensorFlowDialect::GetGradientAttrName();
attributes.push_back(builder_.getNamedAttr(grad_string, gradient_attr));
}
deferred_functions_.emplace(func_name.str(), attributes);
return absl::OkStatus();
}
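// Prunes nodes that are not reverse-reachable from the requested input,
// output, and control-output nodes.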
Status ImporterBase::PruneUnreachableNodes(
std::unordered_map<string, Node*>* node_name_map) {
std::unordered_set<const Node*> prune_start;
TF_RETURN_IF_ERROR(GetInputOutputNodes(*node_name_map, &prune_start));
if (!prune_start.empty()) {
if (PruneForReverseReachability(graph_.get(), prune_start)) {
VLOG(1) << "Pruned unused nodes in graphdef";
} else {
VLOG(1) << "No unused nodes in graphdef to prune";
}
} else {
VLOG(1) << "No output nodes specified, skipping pruning";
}
return absl::OkStatus();
}
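// Replaces feed tensors with Placeholder nodes (unless the feed already
// targets a Placeholder, LegacyFedInput, or _Arg node) and records the
// remapping.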
Status ImporterBase::ConvertFeedsToPlaceholders(
std::unordered_map<string, Node*>* node_name_map) {
TF_ASSIGN_OR_RETURN(auto feeds_by_node, GetFeedsByNode(specs_.inputs));
for (const auto& it : feeds_by_node) {
TensorId tensor = ParseTensorName(it.first);
auto jt = node_name_map->find(std::string(tensor.node()));
if (jt == node_name_map->end()) {
return errors::FailedPrecondition(
absl::StrCat("Graph does not contain node: ", tensor.node()));
}
Node* node = jt->second;
auto op_name = node->op_def().name();
if (op_name != "Placeholder" && op_name != "LegacyFedInput" &&
op_name != FunctionLibraryDefinition::kArgOp) {
for (const auto& output_tensor : it.second) {
const int index = output_tensor.first;
const ArrayInfo& array_info = output_tensor.second->second;
DataType dtype = array_info.imported_dtype;
if (dtype == DT_INVALID) {
dtype = node->output_type(index);
}
TF_ASSIGN_OR_RETURN(
auto placeholder_node_and_removed,
CreatePlaceholderNodeForFeed(array_info.shape, dtype, node, index,
*node_name_map));
Node* placeholder_node = placeholder_node_and_removed.first;
if (placeholder_node->in_edges().empty()) {
          graph_->AddControlEdge(graph_->source_node(), placeholder_node,
                                 /*allow_duplicates=*/true);
}
if (placeholder_node->out_edges().empty()) {
          graph_->AddControlEdge(placeholder_node, graph_->sink_node(),
                                 /*allow_duplicates=*/true);
}
remapped_feeds_[{it.first, index}] = placeholder_node->name();
(*node_name_map)[placeholder_node->name()] = placeholder_node;
}
}
}
return absl::OkStatus();
}
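// Copies the graph, removes back edges, and either runs shape inference or
// rewrites feeds as placeholders, then computes the topological node ordering
// used during conversion.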
Status ImporterBase::PrepareConvert(const Graph& graph,
std::unique_ptr<GraphDef> graph_def) {
if (graph_def == nullptr) {
graph_def = std::make_unique<GraphDef>();
graph.ToGraphDef(graph_def.get());
}
graph_ = std::make_unique<Graph>(graph.flib_def());
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
opts.add_default_attributes = true;
TF_RETURN_IF_ERROR(::tensorflow::ConvertGraphDefToGraph(
opts, std::move(*graph_def), graph_.get()));
TF_RETURN_IF_ERROR(RemoveBackedges());
TF_RETURN_IF_ERROR(CopyStackTraces(graph, graph_.get()));
auto node_name_map = graph_->BuildNodeNameIndex();
if (specs_.enable_shape_inference) {
TF_RETURN_IF_ERROR(AddNodesToShapeRefiner(&node_name_map));
} else {
TF_RETURN_IF_ERROR(ConvertFeedsToPlaceholders(&node_name_map));
}
if (specs_.prune_unused_nodes) {
TF_RETURN_IF_ERROR(PruneUnreachableNodes(&node_name_map));
}
if (!specs_.enable_shape_inference) {
ordered_nodes_.clear();
TopologicalOrdering(
*graph_, [&](Node* n) { ordered_nodes_.push_back(n); },
GroupByDevice());
}
return absl::OkStatus();
}
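// Converts the prepared graph into MLIR function `func_name` containing a
// tf_executor.graph region, then wires up arguments, results, and control
// returns.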
Status ImporterBase::Convert(
llvm::StringRef func_name, mlir::FunctionType func_type,
const absl::InlinedVector<OutputTensor, 4>& arg_nodes,
const absl::InlinedVector<OutputTensor, 4>& ret_nodes,
const absl::InlinedVector<Node*, 4>& control_ret_nodes,
llvm::ArrayRef<mlir::NamedAttribute> attrs) {
auto function = mlir::func::FuncOp::create(mlir::UnknownLoc::get(context_),
func_name, func_type, attrs);
module_.push_back(function);
function.addEntryBlock();
builder_ = mlir::OpBuilder(function.getBody());
auto graph = builder_.create<mlir::tf_executor::GraphOp>(
function.getLoc(), func_type.getResults());
builder_.createBlock(&graph.getBody());
for (const Node* node : ordered_nodes_) {
TF_RETURN_IF_ERROR(ConvertNode(*node));
}
TF_RETURN_IF_ERROR(AddBackedges());
TF_RETURN_IF_ERROR(ConvertFunctionArgAndRets(function, graph,
func_type.getInputs(), arg_nodes,
ret_nodes, control_ret_nodes));
if (!specs_.enable_shape_inference) {
auto fetch = graph.GetFetch();
bool all_equal = true;
for (auto it :
llvm::zip_first(graph.getResults(), fetch.getOperandTypes())) {
auto rt = std::get<1>(it);
if (rt == std::get<0>(it).getType()) continue;
std::get<0>(it).setType(rt);
all_equal = false;
}
if (!all_equal) {
function.setType(mlir::FunctionType::get(function.getContext(),
func_type.getInputs(),
graph.getResultTypes()));
}
}
return absl::OkStatus();
}
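// Lowers argument and return nodes to block arguments and fetch operands,
// propagating "_"-prefixed node attributes and requested devices onto the
// function's argument/result attributes.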
Status ImporterBase::ConvertFunctionArgAndRets(
mlir::func::FuncOp func, mlir::tf_executor::GraphOp graph_op,
llvm::ArrayRef<mlir::Type> arg_types,
const absl::InlinedVector<OutputTensor, 4>& arg_nodes,
const absl::InlinedVector<OutputTensor, 4>& ret_nodes,
const absl::InlinedVector<Node*, 4>& control_ret_nodes) {
llvm::SmallVector<mlir::NamedAttrList, 4> arg_attrs;
arg_attrs.resize(func.getNumArguments());
llvm::SmallVector<mlir::NamedAttrList, 4> ret_attrs;
ret_attrs.resize(func.getNumResults());
auto set_attributes_on_func = [&](Node* node, int64_t index, bool is_arg) {
for (const auto& node_attr : node->attrs()) {
const auto& key = node_attr.first;
if (key.empty() || key[0] != '_') continue;
if (IsOutputShapesAttribute(node_attr.second, key) ||
IsResourceOutputShapesAttribute(node_attr.second, key))
continue;
TF_ASSIGN_OR_RETURN(auto converted_attr,
ConvertAttributeValue(node_attr.second));
std::string dialect_attribute = "tf." + key;
if (is_arg) {
arg_attrs[index].set(dialect_attribute, converted_attr);
} else {
func.setResultAttr(index, dialect_attribute, converted_attr);
ret_attrs[index].set(dialect_attribute, converted_attr);
}
}
return absl::OkStatus();
};
auto* bb = &func.front();
llvm::SmallDenseMap<std::pair<Node*, int>, mlir::Value, 4>
arg_nodes_to_values;
for (int i = 0, e = arg_types.size(); i < e; ++i) {
auto& arg_node = arg_nodes[i];
mlir::Operation* island = node_values_.find(arg_node.node->id())->second;
auto bb_arg = bb->getArgument(i);
mlir::Value arg_def = bb_arg;
if (island->getNumResults() != 2)
return errors::InvalidArgument(
"Only feed output tensors of single output nodes are supported");
arg_nodes_to_values.try_emplace({arg_node.node, arg_node.index}, arg_def);
island->getResult(0).replaceAllUsesWith(arg_def);
auto control_uses = island->getResult(1).getUses();
for (auto& control_use : llvm::make_early_inc_range(control_uses))
control_use.getOwner()->eraseOperand(control_use.getOperandNumber());
if (!arg_node.node->requested_device().empty())
arg_attrs[i].set("tf.device", builder_.getStringAttr(
arg_node.node->requested_device()));
if (arg_node.node->IsArg()) {
      TF_RETURN_IF_ERROR(
          set_attributes_on_func(arg_node.node, i, /*is_arg=*/true));
}
island->dropAllReferences();
island->erase();
}
llvm::SmallVector<mlir::Value, 8> inst_to_return;
for (const auto& ret_and_idx : llvm::enumerate(ret_nodes)) {
const auto& ret = ret_and_idx.value();
auto* inst = node_values_[ret.node->id()];
if (ret.node->IsRetval()) {
if (!ret.node->requested_device().empty())
ret_attrs[ret_and_idx.index()].set(
"tf.device", builder_.getStringAttr(ret.node->requested_device()));
      TF_RETURN_IF_ERROR(set_attributes_on_func(ret.node, ret_and_idx.index(),
                                                /*is_arg=*/false));
auto island_op = llvm::cast<mlir::tf_executor::IslandOp>(inst);
mlir::Operation* inner_op = &island_op.GetBody().front();
if (inner_op->getNumOperands() != 1)
return errors::Unimplemented("Return node with multiple inputs.");
inst_to_return.push_back(inner_op->getOperand(0));
inst->dropAllReferences();
inst->erase();
} else {
auto it = arg_nodes_to_values.find({ret.node, ret.index});
if (it != arg_nodes_to_values.end())
inst_to_return.push_back(it->second);
else
inst_to_return.push_back(inst->getResult(ret.index));
}
}
for (Node* control_ret : control_ret_nodes) {
auto* inst = node_values_[control_ret->id()];
inst_to_return.push_back(*std::prev(inst->result_end()));
}
builder_.setInsertionPointToEnd(&graph_op.getBody().front());
builder_.create<mlir::tf_executor::FetchOp>(graph_op.getLoc(),
inst_to_return);
builder_.setInsertionPointToEnd(bb);
builder_.create<mlir::func::ReturnOp>(mlir::UnknownLoc::get(context_),
graph_op.getResults());
func.setAllArgAttrs(
llvm::to_vector<4>(llvm::map_range(arg_attrs, [&](NamedAttrList& list) {
return list.getDictionary(context_);
})));
func.setAllResultAttrs(
llvm::to_vector<4>(llvm::map_range(ret_attrs, [&](NamedAttrList& list) {
return list.getDictionary(context_);
})));
return absl::OkStatus();
}
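// Builds an MLIR location for `node` from its stack trace or experimental
// debug info, fusing the op type, any original node names, and the node name
// itself.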
mlir::Location ImporterBase::GetLocation(const Node& node) {
DVLOG(1) << "Getting location for " << node.name() << " " << &node;
auto create_location = [&](llvm::StringRef name,
llvm::StringRef function_name) -> mlir::Location {
std::string debug_info_key = (name + "@" + function_name).str();
std::string name_for_name_loc =
function_name.empty() ? name.str() : debug_info_key;
auto name_loc_id = mlir::StringAttr::get(context_, name_for_name_loc);
std::shared_ptr<AbstractStackTrace> stack_trace = node.GetStackTrace();
if (stack_trace != nullptr) {
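      // A stack trace attached directly to the node takes precedence over the
      // lookups below.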
} else if (stack_traces_.contains(name_for_name_loc)) {
stack_trace = stack_traces_.at(name_for_name_loc);
} else if (stack_traces_.contains(debug_info_key)) {
stack_trace = stack_traces_.at(debug_info_key);
} else {
DVLOG(1) << "No stack trace for " << node.name();
}
llvm::SmallVector<mlir::Location, 4> locations;
if (stack_trace != nullptr) {
DVLOG(1) << "Stack available for " << node.name();
for (const StackFrame& frame : stack_trace->ToUncachedFrames()) {
auto file_name = mlir::StringAttr::get(context_, frame.file_name);
auto file_line_loc =
mlir::FileLineColLoc::get(file_name, frame.line_number, 1);
locations.push_back(file_line_loc);
}
}
if (locations.empty()) return mlir::NameLoc::get(name_loc_id);
mlir::Location node_name_loc =
mlir::NameLoc::get(name_loc_id, locations.front());
auto callsite_locs = llvm::ArrayRef(locations).drop_front();
return callsite_locs.empty()
? node_name_loc
: mlir::CallSiteLoc::get(node_name_loc, callsite_locs);
};
auto create_op_type_and_name_locations = [&]() {
return mlir::FusedLoc::get(
context_,
{mlir::NameLoc::get(
mlir::StringAttr::get(context_, node.type_string() + ":")),
create_location(node.name(), function_name_for_debug_info_)});
};
if (node.type_string() == "NextIteration") {
return create_op_type_and_name_locations();
}
const auto& node_def = node.def();
auto original_nodes =
node_def.experimental_debug_info().original_node_names();
auto original_funcs =
node_def.experimental_debug_info().original_func_names();
if (original_nodes.empty()) {
return create_op_type_and_name_locations();
} else {
llvm::SmallVector<mlir::Location, 4> node_locations;
node_locations.reserve(original_nodes.size() + 2);
node_locations.push_back(mlir::NameLoc::get(
mlir::StringAttr::get(context_, node.type_string() + ":")));
for (int i = 0, e = original_nodes.size(); i != e; ++i) {
const auto& node_name = original_nodes[i];
auto func_name = (i < original_funcs.size()) ? original_funcs[i] : "";
node_locations.push_back(create_location(node_name, func_name));
}
node_locations.push_back(
create_location(node.name(), function_name_for_debug_info_));
return mlir::FusedLoc::get(context_, node_locations);
}
}
Status ImporterBase::EmitErrorWithLocationStr(const Node& node,
const Status& error_status) {
const mlir::Location location = GetLocation(node);
mlir::emitError(location);
return error_handler_.Combine(error_status);
}
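// Creates the tf_executor operation for `node`: dedicated executor ops for
// control-flow primitives (Switch, Merge, NextIteration, ...), otherwise an
// island wrapping the converted TF op followed by a yield of its results.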
mlir::Operation* ImporterBase::CreateOperation(
const Node& node, llvm::StringRef node_type_name,
const mlir::OperationState& result,
const llvm::SmallVectorImpl<mlir::Value>& control_operands) {
mlir::SmallVector<mlir::Type, 4> types(result.types);
types.push_back(mlir::tf_executor::ControlType::get(builder_.getContext()));
mlir::SmallVector<mlir::Value, 4> operands(result.operands);
operands.append(control_operands.begin(), control_operands.end());
auto loc = result.location;
if (node.IsSwitch()) {
if (node.op_def().name() == "_SwitchN") {
return builder_.create<mlir::tf_executor::SwitchNOp>(loc, types, operands,
result.attributes);
}
return builder_.create<mlir::tf_executor::SwitchOp>(loc, types, operands,
result.attributes);
}
if (node.IsMerge()) {
return builder_.create<mlir::tf_executor::MergeOp>(loc, types, operands,
result.attributes);
}
if (node.IsNextIteration()) {
mlir::OpBuilder builder_at_begin(builder_.getBlock(),
builder_.getBlock()->begin());
auto source_op =
builder_at_begin.create<mlir::tf_executor::NextIterationSourceOp>(
loc, operands[0].getType(), result.attributes);
return builder_.create<mlir::tf_executor::NextIterationSinkOp>(
loc, source_op.getToken(), operands, result.attributes);
}
if (node.IsLoopCond()) {
return builder_.create<mlir::tf_executor::LoopCondOp>(loc, types, operands,
result.attributes);
}
if (node.IsEnter()) {
return builder_.create<mlir::tf_executor::EnterOp>(loc, types, operands,
result.attributes);
}
if (node.IsExit()) {
return builder_.create<mlir::tf_executor::ExitOp>(loc, types, operands,
result.attributes);
}
if (node.IsControlTrigger()) {
return builder_.create<mlir::tf_executor::ControlTriggerOp>(
loc, mlir::ValueRange(operands), result.attributes);
}
auto island = builder_.create<mlir::tf_executor::IslandOp>(
result.location, types, control_operands,
mlir::ArrayRef<mlir::NamedAttribute>{});
island.getBody().push_back(new mlir::Block);
mlir::OpBuilder island_builder =
mlir::OpBuilder::atBlockEnd(&island.GetBody());
mlir::Operation* inner_op = island_builder.create(result);
const auto set_segment_sizes_attr =
[&](const NameRangeMap& arg_ranges,
const protobuf::RepeatedPtrField<OpDef::ArgDef>& args,
llvm::StringRef attr_name) {
std::vector<int32_t> values;
values.reserve(args.size());
for (const auto& arg : args) {
auto range = arg_ranges.at(arg.name());
values.push_back(range.second - range.first);
}
auto attr_value =
mlir::DenseI32ArrayAttr::get(inner_op->getContext(), values);
inner_op->setAttr(attr_name, attr_value);
};
if (inner_op->hasTrait<mlir::OpTrait::AttrSizedOperandSegments>() ||
inner_op->hasTrait<mlir::OpTrait::AttrSizedResultSegments>()) {
NameRangeMap input_ranges, output_ranges;
TF_CHECK_OK(
NameRangesForNode(node, node.op_def(), &input_ranges, &output_ranges));
if (inner_op->hasTrait<mlir::OpTrait::AttrSizedOperandSegments>()) {
set_segment_sizes_attr(input_ranges, node.op_def().input_arg(),
mlir::OpTrait::AttrSizedOperandSegments<
void>::getOperandSegmentSizeAttr());
}
if (inner_op->hasTrait<mlir::OpTrait::AttrSizedResultSegments>()) {
set_segment_sizes_attr(output_ranges, node.op_def().output_arg(),
mlir::OpTrait::AttrSizedResultSegments<
void>::getResultSegmentSizeAttr());
}
}
if (VLOG_IS_ON(1)) {
mlir::OperationName name = inner_op->getName();
if (!name.isRegistered() &&
(node_type_name != "_Arg" && node_type_name != "_Retval") &&
!unmodelled_op_names_.count(name.getIdentifier())) {
if (node.op_def().is_stateful()) {
VLOG(1) << "[potentially conservative] Op type `" << node.type_string()
<< "` is stateful but effects not modelled";
} else {
bool resource = false;
std::function<bool(mlir::Type)> record_resource;
record_resource = [&](mlir::Type type) {
type.walk([&](mlir::Type t) {
if (resource) return mlir::WalkResult::interrupt();
            if (mlir::isa<mlir::TF::ResourceType>(t)) {
resource = true;
return mlir::WalkResult::interrupt();
}
return mlir::WalkResult::advance();
});
return resource;
};
for (mlir::Type t : inner_op->getResultTypes())
if (record_resource(t)) break;
for (mlir::Type t : inner_op->getOperandTypes())
if (record_resource(t)) break;
if (resource) {
unmodelled_op_names_.insert(name.getIdentifier());
VLOG(1) << "[potentially conservative] Op type `"
<< node.type_string()
<< "` has resource operands/results but effects not modelled";
}
}
}
}
island_builder.create<mlir::tf_executor::YieldOp>(result.location,
inner_op->getResults());
return island.getOperation();
}
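// Converts a single graph node into an MLIR operation: infers result types,
// gathers data and control operands in a deterministic order, converts
// attributes (including function-valued ones), and attaches the device.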
Status ImporterBase::ConvertNode(const Node& node) {
if (!node.IsOp()) {
return absl::OkStatus();
}
std::string node_type_name = node.type_string();
const auto* func_def = graph_flib_.Find(node_type_name);
bool convert_to_legacy_call = false;
if (func_def) {
TF_RETURN_IF_ERROR(ConvertLibFunction(node_type_name));
node_type_name = (*tf_name_to_mlir_name_)[node_type_name];
convert_to_legacy_call = true;
}
auto get_full_op_name = [&](const std::string& op_name) {
const char* kTfPrefix = "tf.";
return kTfPrefix + op_name;
};
std::string op_name = get_full_op_name(node_type_name);
if (back_edge_node_output_.contains(&node)) {
op_name = op_name + ".sink";
}
mlir::OperationState result(GetLocation(node), op_name);
for (int i = 0; i < node.num_outputs(); ++i) {
if (back_edge_node_output_.contains(&node) &&
back_edge_node_output_[&node] == i) {
continue;
}
TF_ASSIGN_OR_RETURN(auto type, InferOutputType(node, i, builder_));
result.types.push_back(type);
}
absl::InlinedVector<const Edge*, 8> in_edges(node.in_edges().size());
absl::c_copy(node.in_edges(), in_edges.begin());
absl::c_stable_sort(in_edges, [](const Edge* e1, const Edge* e2) {
if (e1->IsControlEdge() && !e2->IsControlEdge()) return false;
if (!e1->IsControlEdge() && e2->IsControlEdge()) return true;
if (e1->IsControlEdge() && e2->IsControlEdge())
return e1->src()->id() < e2->src()->id();
return e1->dst_input() < e2->dst_input();
});
result.operands.reserve(in_edges.size());
mlir::SmallVector<mlir::Value, 8> control_operands;
for (const auto* input_edge : in_edges) {
const Node& input_node = *input_edge->src();
if (input_node.IsSource()) {
if (in_edges.size() != 1) {
return errors::FailedPrecondition(
"The node has other inputs besides the _Source node");
}
continue;
}
if (input_node.IsArg() && input_edge->IsControlEdge()) {
continue;
}
if (node_values_.find(input_node.id()) == node_values_.end())
return errors::FailedPrecondition(
"Graph not traversed in reverse post order; use seen before def!");
mlir::Operation* inst = node_values_[input_node.id()];
if (input_edge->IsControlEdge())
control_operands.push_back(inst->getResult(inst->getNumResults() - 1));
else
result.operands.push_back(inst->getResult(input_edge->src_output()));
}
using FuncPairType = std::pair<const std::string*, const AttrValue*>;
std::vector<FuncPairType> funcs;
result.attributes.reserve(node.attrs().size() + 2);
auto abstract_op = result.name.getRegisteredInfo();
auto derived_op =
abstract_op
? abstract_op->getInterface<mlir::DerivedAttributeOpInterface>()
: nullptr;
for (const auto& name_and_value : node.attrs()) {
const auto& attr_name = name_and_value.first;
if (derived_op && derived_op->isDerivedAttribute(attr_name)) continue;
const AttrValue& attr_value = name_and_value.second;
if (IsOutputShapesAttribute(attr_value, attr_name)) continue;
if (attr_value.value_case() == AttrValue::kFunc) {
funcs.emplace_back(&attr_name, &attr_value);
} else {
TF_ASSIGN_OR_RETURN(auto attr, ConvertAttributeValue(attr_value));
result.attributes.push_back(builder_.getNamedAttr(attr_name, attr));
}
}
auto comparator = [](const FuncPairType& a, const FuncPairType& b) {
return *a.first < *b.first;
};
std::sort(funcs.begin(), funcs.end(), comparator);
for (const auto& func : funcs) {
TF_RETURN_IF_ERROR(ConvertFunctionCallAttribute(*func.first, *func.second,
&result.attributes));
}
const auto& node_def = node.def();
DeviceNameUtils::ParsedName parsed_name;
if (!DeviceNameUtils::ParseFullName(node_def.device(), &parsed_name)) {
return errors::InvalidArgument(
"Op ", op_name, " has invalid device name: ", node_def.device());
}
if (!node_def.device().empty()) {
if (!parsed_name.has_type) {
parsed_name.type = "CPU";
parsed_name.has_type = true;
}
if (!parsed_name.has_id) {
parsed_name.id = 0;
parsed_name.has_id = true;
}
}
result.attributes.push_back(builder_.getNamedAttr(
"device", builder_.getStringAttr(
DeviceNameUtils::ParsedNameToString(parsed_name))));
if (convert_to_legacy_call) {
result.name = mlir::OperationName(get_full_op_name("LegacyCall"), context_);
mlir::SymbolRefAttr val =
mlir::SymbolRefAttr::get(builder_.getContext(), node_type_name);
result.addAttribute("f", val);
if (!result.attributes.get("_disable_call_shape_inference")) {
result.addAttribute("_disable_call_shape_inference",
builder_.getBoolAttr(false));
}
}
auto composite_control_flow_op = [&](const std::string& name) {
result.name = mlir::OperationName(get_full_op_name(name), context_);
bool stateless = absl::StartsWith(node_type_name, "Stateless");
mlir::BoolAttr val = builder_.getBoolAttr(stateless);
result.attributes.push_back(builder_.getNamedAttr("is_stateless", val));
};
if (node.IsCaseNode()) composite_control_flow_op("Case");
if (node.IsIfNode()) composite_control_flow_op("If");
if (node.IsWhileNode()) {
composite_control_flow_op("While");
auto* output_shapes = node.attrs().Find("output_shapes");
if (output_shapes && !output_shapes->list().shape().empty())
result.attributes.push_back(
builder_.getNamedAttr("shape_invariant", builder_.getUnitAttr()));
}
node_values_[node.id()] =
CreateOperation(node, node_type_name, result, control_operands);
return absl::OkStatus();
}
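// Restores the back edges removed during import by recreating each Merge node
// with the corresponding NextIteration source value as an extra operand.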
Status ImporterBase::AddBackedges() {
for (auto it : back_edge_dst_inputs_) {
BackEdge& edge = it.second;
if (!edge.src->IsNextIteration() || !edge.dst->IsMerge()) {
return errors::FailedPrecondition(
"Invalid backedge; should be from NextIteration to Merge!");
}
auto* sink = node_values_[edge.src->id()];
auto* dst = node_values_[edge.dst->id()];
TF_RETURN_IF_ERROR(AddBackedge(sink, dst, edge.dst_input));
}
return absl::OkStatus();
}
Status ImporterBase::AddBackedge(mlir::Operation* sink, mlir::Operation* dst,
int dst_input) {
mlir::Operation* source = sink->getOperand(0).getDefiningOp();
mlir::OperationState state(dst->getLoc(), dst->getName());
auto num_operands = dst->getNumOperands();
state.operands.reserve(num_operands + 1);
for (int input = 0, e = num_operands + 1; input != e; ++input) {
if (input < dst_input) {
state.operands.push_back(dst->getOperand(input));
} else if (input == dst_input) {
state.operands.push_back(source->getResult(0));
} else {
state.operands.push_back(dst->getOperand(input - 1));
}
}
state.attributes.assign(dst->getAttrs().begin(), dst->getAttrs().end());
state.types.assign(dst->getResultTypes().begin(),
dst->getResultTypes().end());
builder_.setInsertionPoint(dst);
auto* new_dst = builder_.create(state);
for (unsigned i = 0, e = dst->getNumResults(); i != e; ++i) {
auto new_output = new_dst->getResult(i);
dst->getResult(i).replaceAllUsesWith(new_output);
}
dst->dropAllReferences();
dst->erase();
return absl::OkStatus();
}
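// Infers the MLIR function type of a library function body, honoring
// user-specified input shapes and dtypes when provided.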
absl::StatusOr<mlir::FunctionType> ImporterBase::InferLibFunctionType(
const FunctionBody& fbody) {
mlir::Builder builder(context_);
llvm::SmallVector<mlir::Type, 4> arg_types;
if (specs_.inputs.empty()) {
arg_types.reserve(fbody.arg_types.size());
for (auto arg : fbody.arg_nodes) {
auto* node = graph_->FindNodeId(arg->id());
TF_ASSIGN_OR_RETURN(auto type,
InferOutputType(*node, 0, builder));
arg_types.push_back(type);
}
} else {
arg_types.reserve(fbody.arg_types.size());
for (const auto& it : llvm::enumerate(specs_.inputs)) {
mlir::Type element_type;
const auto& node_info = it.value().second;
DataType dtype = node_info.imported_dtype;
if (dtype == DT_INVALID) {
auto arg = fbody.arg_nodes[it.index()];
auto* node = graph_->FindNodeId(arg->id());
dtype = node->output_type(0);
if (dtype == DT_INVALID) {
return errors::InvalidArgument("Input ", it.index(),
"has invalid data type");
}
}
TF_RETURN_IF_ERROR(
::tensorflow::ConvertDataType(dtype, builder, &element_type));
if (node_info.shape.unknown_rank()) {
arg_types.push_back(mlir::UnrankedTensorType::get(element_type));
} else {
llvm::SmallVector<int64_t, 4> shape;
TF_RETURN_IF_ERROR(ConvertToMlirShape(node_info.shape, &shape));
arg_types.push_back(GetTypeFromTFTensorShape(shape, element_type));
}
}
}
llvm::SmallVector<mlir::Type, 4> ret_types;
ret_types.reserve(fbody.ret_types.size());
for (auto ret : fbody.ret_nodes) {
auto* node = graph_->FindNodeId(ret->id());
TF_ASSIGN_OR_RETURN(auto type, InferInputType(*node, 0, builder));
ret_types.push_back(type);
}
return builder.getFunctionType(arg_types, ret_types);
}
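// Importer that converts a Graph and its function library into an MLIR module
// with a single entry function.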
class GraphDefImporter : public ImporterBase {
public:
static absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> Convert(
mlir::MLIRContext* context, const Graph& graph,
const GraphDebugInfo& debug_info,
const FunctionLibraryDefinition& flib_def, const GraphImportConfig& specs,
std::unordered_map<std::string, std::string>& tf_name_to_mlir_name,
bool disable_crash_analysis = false);
private:
explicit GraphDefImporter(
const FunctionLibraryDefinition& flib, const GraphDebugInfo& debug_info,
const GraphImportConfig& specs, mlir::ModuleOp module,
std::unordered_map<std::string, std::string>* tf_name_to_mlir_name,
NameUniquifier* function_name_uniquifier)
: ImporterBase(flib, debug_info, specs, module, tf_name_to_mlir_name,
function_name_uniquifier) {}
absl::StatusOr<mlir::FunctionType> InferMainFunctionType(
const GraphImportConfig& specs, mlir::MLIRContext* context,
absl::InlinedVector<OutputTensor, 4>* arg_nodes,
absl::InlinedVector<OutputTensor, 4>* ret_nodes);
absl::StatusOr<mlir::FunctionType> GetArgsRetsAndTypesFromFunctionGraph(
mlir::MLIRContext* context,
absl::InlinedVector<OutputTensor, 4>* arg_nodes,
absl::InlinedVector<OutputTensor, 4>* ret_nodes);
Status GetControlRetsFromGraph(
llvm::ArrayRef<std::string> control_outputs,
absl::InlinedVector<Node*, 4>* control_ret_nodes);
};
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GraphDefImporter::Convert(
mlir::MLIRContext* context, const Graph& graph,
const GraphDebugInfo& debug_info, const FunctionLibraryDefinition& flib_def,
const GraphImportConfig& specs,
std::unordered_map<std::string, std::string>& tf_name_to_mlir_name,
bool disable_crash_analysis) {
LoadImporterDialects(*context);
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::ModuleOp::create(mlir::UnknownLoc::get(context));
NameUniquifier function_name_uniquifier(flib_def);
auto graph_def = std::make_unique<GraphDef>();
graph.ToGraphDef(graph_def.get(), false);
auto scope_exit = [&]() {
std::function<void()> cleanup = []() {};
if (!disable_crash_analysis) {
static std::atomic<uint32> counter(0);
uint32 current_file_prefix = counter++;
const auto* graph_crash_handle = crash_analysis::ReportProtoDataOnCrash(
absl::StrCat(current_file_prefix, "_mlir_import_graph.pbtxt"),
*graph_def);
auto reachable_flib = flib_def.ReachableDefinitions(*graph_def);
const auto* flib_crash_handle = crash_analysis::ReportProtoDataOnCrash(
absl::StrCat(current_file_prefix, "_mlir_import_flib.pbtxt"),
reachable_flib.ToProto());
cleanup = [=]() {
crash_analysis::RemoveReportData(graph_crash_handle);
crash_analysis::RemoveReportData(flib_crash_handle);
};
}
return llvm::make_scope_exit(std::move(cleanup));
}();
VLOG(2) << "Importing: "
<< ::tensorflow::DumpGraphToFile("tf_mlir_importer_base", graph,
&flib_def);
GraphDefImporter importer(flib_def, debug_info, specs, module.get(),
&tf_name_to_mlir_name, &function_name_uniquifier);
TF_RETURN_IF_ERROR(importer.PrepareConvert(graph, std::move(graph_def)));
mlir::FunctionType func_type;
absl::InlinedVector<OutputTensor, 4> arg_nodes;
absl::InlinedVector<OutputTensor, 4> ret_nodes;
absl::InlinedVector<Node*, 4> control_ret_nodes;
llvm::SmallVector<mlir::NamedAttribute, 1> attrs;
if (specs.graph_as_function) {
if (specs.prune_unused_nodes || !specs.inputs.empty() ||
!specs.outputs.empty())
return errors::InvalidArgument(
"Pruning of graph is currently unsupported when the main graph is "
"converted to a function.");
TF_ASSIGN_OR_RETURN(func_type,
importer.GetArgsRetsAndTypesFromFunctionGraph(
context, &arg_nodes, &ret_nodes));
TF_RETURN_IF_ERROR(importer.GetControlRetsFromGraph(specs.control_outputs,
&control_ret_nodes));
mlir::Builder b(context);
std::string s;
llvm::raw_string_ostream ss(s);
auto node_name = [&](const OutputTensor& tensor) {
ss << tensor.node->name();
};
llvm::interleave(arg_nodes, ss, node_name, ",");
auto inputs = b.getNamedAttr("inputs", b.getStringAttr(ss.str()));
s.clear();
llvm::interleave(ret_nodes, ss, node_name, ",");
auto outputs = b.getNamedAttr("outputs", b.getStringAttr(ss.str()));
s.clear();
llvm::interleave(specs.control_outputs, ss, ",");
auto control_outputs =
b.getNamedAttr("control_outputs", b.getStringAttr(ss.str()));
attrs.push_back(b.getNamedAttr(
"tf.entry_function",
b.getDictionaryAttr({inputs, outputs, control_outputs})));
if (!specs.xla_compile_device_type.empty()) {
attrs.push_back(
b.getNamedAttr("_xla_compile_device_type",
b.getStringAttr(specs.xla_compile_device_type)));
}
attrs.push_back(b.getNamedAttr("allow_soft_placement",
b.getBoolAttr(specs.enable_soft_placement)));
} else {
TF_ASSIGN_OR_RETURN(func_type, importer.InferMainFunctionType(
specs, context, &arg_nodes, &ret_nodes));
TF_RETURN_IF_ERROR(importer.GetControlRetsFromGraph(specs.control_outputs,
&control_ret_nodes));
if (!specs.inputs.empty() || !specs.outputs.empty() ||
!specs.control_outputs.empty()) {
mlir::Builder b(context);
std::string s;
llvm::raw_string_ostream ss(s);
llvm::interleave(
specs.inputs, ss,
[&](const std::pair<std::string, ArrayInfo>& v) { ss << v.first; },
",");
auto inputs = b.getNamedAttr("inputs", b.getStringAttr(ss.str()));
s.clear();
llvm::interleave(specs.outputs, ss, ",");
auto outputs = b.getNamedAttr("outputs", b.getStringAttr(ss.str()));
s.clear();
llvm::interleave(specs.control_outputs, ss, ",");
auto control_outputs =
b.getNamedAttr("control_outputs", b.getStringAttr(ss.str()));
attrs.push_back(b.getNamedAttr(
"tf.entry_function",
b.getDictionaryAttr({inputs, outputs, control_outputs})));
}
}
PopulateTfVersions(module.get(), graph.versions());
const llvm::StringRef& graph_func_name =
specs.graph_func_name.empty() ? kImportModelDefaultGraphFuncName
: specs.graph_func_name;
TF_RETURN_IF_ERROR(importer.ImporterBase::Convert(graph_func_name, func_type,
arg_nodes, ret_nodes,
control_ret_nodes, attrs));
if (specs.convert_all_functions_to_mlir) {
auto fn_names = graph.flib_def().ListFunctionNames();
for (const auto& fn_name : fn_names) {
TF_RETURN_IF_ERROR(importer.ConvertLibFunction(fn_name));
}
}
TF_RETURN_IF_ERROR(importer.ImporterBase::ConvertDeferredFunctions());
for (auto function : module.get().getOps<mlir::func::FuncOp>()) {
auto visibility = function.getName() == graph_func_name
? mlir::func::FuncOp::Visibility::Public
: mlir::func::FuncOp::Visibility::Private;
function.setVisibility(visibility);
}
VLOG(2) << "Imported: "
<< tensorflow::DumpMlirOpToFile("tf_mlir_imported_base",
module.get());
return module;
}
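// Determines the main function's signature from the user-specified input and
// output arrays, resolving remapped feeds and validating node/output indices.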
absl::StatusOr<mlir::FunctionType> GraphDefImporter::InferMainFunctionType(
const GraphImportConfig& specs, mlir::MLIRContext* context,
absl::InlinedVector<OutputTensor, 4>* arg_nodes,
absl::InlinedVector<OutputTensor, 4>* ret_nodes) {
absl::flat_hash_map<absl::string_view, int> inputs;
for (const auto& input_and_idx : llvm::enumerate(specs.inputs)) {
TensorId tensor = ParseTensorName(input_and_idx.value().first);
auto remapped_it = remapped_feeds_.find(tensor);
if (remapped_it != remapped_feeds_.end()) {
inputs.insert({remapped_it->second, input_and_idx.index()});
} else {
inputs.insert({tensor.node(), input_and_idx.index()});
}
}
absl::flat_hash_set<absl::string_view> output_node_names;
std::vector<TensorId> outputs;
output_node_names.reserve(specs.outputs.size());
for (const auto& output : specs.outputs) {
TensorId tensor = ParseTensorName(output);
auto remapped_it = remapped_feeds_.find(tensor);
if (remapped_it != remapped_feeds_.end()) {
output_node_names.insert(remapped_it->second);
outputs.push_back({remapped_it->second, 0});
} else {
output_node_names.insert(tensor.node());
outputs.push_back(tensor);
}
}
if (!inputs.empty() || !outputs.empty()) {
arg_nodes->resize(inputs.size());
ret_nodes->resize(outputs.size());
for (Node* n : GetOrderedNodes()) {
auto input_it = inputs.find(n->name());
if (input_it != inputs.end()) {
(*arg_nodes)[input_it->second] = {n, 0};
}
if (output_node_names.contains(n->name())) {
for (int i = 0, e = outputs.size(); i != e; ++i) {
TensorId tensor = outputs[i];
if (n->name() != tensor.node()) continue;
(*ret_nodes)[i] = {n, tensor.index()};
}
}
}
}
mlir::Builder builder(context);
llvm::SmallVector<mlir::Type, 4> arg_types;
arg_types.reserve(specs.inputs.size());
int i = 0;
for (const auto& it : specs.inputs) {
Node* arg_node = arg_nodes->at(i).node;
if (arg_node == nullptr) {
return errors::InvalidArgument("Input ", it.first,
" was not found in graph");
}
mlir::Type element_type;
const auto& node_info = it.second;
DataType imported_dtype = node_info.imported_dtype;
if (imported_dtype == DT_INVALID) {
imported_dtype = arg_node->output_type(0);
if (imported_dtype == DT_INVALID) {
return errors::InvalidArgument("Input ", i, "has invalid data type");
}
}
if (!node_info.subtypes.empty()) {
std::vector<mlir::TensorType> subtypes;
for (const auto& st : node_info.subtypes) {
mlir::Type st_data_type;
llvm::SmallVector<int64_t> shape;
TF_RETURN_IF_ERROR(ConvertToMlirShape(st.shape, &shape));
TF_RETURN_IF_ERROR(
ConvertDataType(st.imported_dtype, builder, &st_data_type));
subtypes.push_back(GetTypeFromTFTensorShape(shape, st_data_type));
}
if (imported_dtype == DT_RESOURCE) {
element_type =
mlir::TF::ResourceType::get(subtypes, builder.getContext());
} else if (imported_dtype == DT_VARIANT) {
element_type =
mlir::TF::VariantType::get(subtypes, builder.getContext());
} else {
return errors::InvalidArgument(DataType_Name(imported_dtype),
" takes no subtypes.");
}
} else {
TF_RETURN_IF_ERROR(
ConvertDataType(imported_dtype, builder, &element_type));
}
if (node_info.shape.unknown_rank()) {
arg_types.push_back(mlir::UnrankedTensorType::get(element_type));
} else {
llvm::SmallVector<int64_t, 4> shape;
TF_RETURN_IF_ERROR(ConvertToMlirShape(node_info.shape, &shape));
arg_types.push_back(GetTypeFromTFTensorShape(shape, element_type));
}
i++;
}
llvm::SmallVector<mlir::Type, 4> ret_types;
ret_types.reserve(specs.outputs.size());
for (int i = 0, e = specs.outputs.size(); i != e; ++i) {
if (ret_nodes->at(i).node == nullptr) {
return errors::InvalidArgument("Output ", specs.outputs[i],
" was not found in graph");
}
}
for (const auto& ret : *ret_nodes) {
if (ret.node->num_outputs() <= ret.index) {
return errors::InvalidArgument("Invalid output index ", ret.index,
" specified for node: ", ret.node->name());
}
TF_ASSIGN_OR_RETURN(auto type,
InferOutputType(*ret.node, ret.index, builder));
ret_types.push_back(type);
}
return builder.getFunctionType(arg_types, ret_types);
}
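// Derives the function signature from _Arg/_Retval nodes when the graph
// itself is imported as a function, ordering them by their "index" attribute.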
absl::StatusOr<mlir::FunctionType>
GraphDefImporter::GetArgsRetsAndTypesFromFunctionGraph(
mlir::MLIRContext* context, absl::InlinedVector<OutputTensor, 4>* arg_nodes,
absl::InlinedVector<OutputTensor, 4>* ret_nodes) {
auto add_node = [](Node* node, absl::InlinedVector<OutputTensor, 4>* nodes) {
auto* attr = node->attrs().Find("index");
if (!attr)
return errors::InvalidArgument(node->type_string(), " node '",
node->name(),
"' is missing attribute 'index'");
auto index = attr->i();
const int num_nodes = nodes->size();
if (num_nodes < index + 1) nodes->resize(index + 1);
if ((*nodes)[index].node != nullptr)
return errors::InvalidArgument(node->type_string(), " node '",
node->name(), "' has attribute 'index' ",
index, " that conflicts with node '",
(*nodes)[index].node->name(), "'");
(*nodes)[index] = {node, 0};
return absl::OkStatus();
};
for (auto* node : GetOrderedNodes())
if (node->IsArg())
TF_RETURN_IF_ERROR(add_node(node, arg_nodes));
else if (node->IsRetval())
TF_RETURN_IF_ERROR(add_node(node, ret_nodes));
mlir::Builder builder(context);
llvm::SmallVector<mlir::Type, 4> arg_types;
arg_types.reserve(arg_nodes->size());
for (const auto& arg_node_and_idx : llvm::enumerate(*arg_nodes)) {
auto& arg_node = arg_node_and_idx.value();
if (arg_node.node == nullptr)
return errors::InvalidArgument("Graph missing _Arg at index ",
arg_node_and_idx.index());
TF_ASSIGN_OR_RETURN(auto type,
InferOutputType(*arg_node.node, 0, builder));
arg_types.push_back(type);
}
llvm::SmallVector<mlir::Type, 4> ret_types;
ret_types.reserve(ret_nodes->size());
for (const auto& ret_node_and_idx : llvm::enumerate(*ret_nodes)) {
auto& ret_node = ret_node_and_idx.value();
if (ret_node.node == nullptr)
return errors::InvalidArgument("Graph missing _Retval at index ",
ret_node_and_idx.index());
TF_ASSIGN_OR_RETURN(auto type,
InferInputType(*ret_node.node, 0, builder));
ret_types.push_back(type);
}
return builder.getFunctionType(arg_types, ret_types);
}
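// Resolves the requested control output names to graph nodes, preserving the
// requested order and rejecting duplicates or missing names.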
Status GraphDefImporter::GetControlRetsFromGraph(
llvm::ArrayRef<std::string> control_outputs,
absl::InlinedVector<Node*, 4>* control_ret_nodes) {
if (control_outputs.empty()) return absl::OkStatus();
llvm::SmallDenseMap<llvm::StringRef, int32_t> controls_to_idx;
for (const auto& control_and_idx : llvm::enumerate(control_outputs))
controls_to_idx.insert({control_and_idx.value(), control_and_idx.index()});
if (controls_to_idx.size() != control_outputs.size())
return errors::InvalidArgument("Control outputs must be unique");
control_ret_nodes->resize(controls_to_idx.size());
for (auto* node : GetOrderedNodes()) {
auto it = controls_to_idx.find(node->name());
if (it != controls_to_idx.end()) (*control_ret_nodes)[it->second] = node;
}
for (auto node_and_name : llvm::zip(*control_ret_nodes, control_outputs))
if (std::get<0>(node_and_name) == nullptr)
return errors::InvalidArgument(
"Control output '", std::get<1>(node_and_name), "' is missing");
return absl::OkStatus();
}
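// Assigns exported names and symbol-table names to SavedObjectGraph nodes by
// recursively walking the object graph's child references.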
class ObjectNames {
public:
explicit ObjectNames(const SavedObjectGraph& object_graph,
absl::Span<std::string> exported_names);
llvm::ArrayRef<llvm::StringRef> GetExportedNames(int node_id) const;
llvm::StringRef GetSymbolTableName(int node_id) const;
private:
std::string GetDefaultSymbolTableName(int node_id) const;
bool IsExported(const std::string& name);
void RecursivelyVisitObjectGraph(int node_id);
llvm::StringRef SaveString(const std::string& s) const;
const SavedObjectGraph& object_graph_;
std::unordered_set<std::string> names_to_export_;
llvm::SmallVector<std::string, 8> path_segments_;
absl::flat_hash_set<int> on_stack_nodes_;
absl::flat_hash_map<int, std::vector<std::string>> object_names_;
absl::flat_hash_map<int, llvm::SmallVector<llvm::StringRef, 1>>
exported_names_;
absl::flat_hash_map<int, llvm::StringRef> pretty_symbol_table_name_;
mutable std::unordered_set<std::string> saved_strings_;
};
ObjectNames::ObjectNames(const SavedObjectGraph& object_graph,
absl::Span<std::string> exported_names)
: object_graph_(object_graph),
names_to_export_(exported_names.begin(), exported_names.end()) {
RecursivelyVisitObjectGraph(0);
for (auto& kv : object_names_) {
std::sort(kv.second.begin(), kv.second.end(),
[](absl::string_view a, absl::string_view b) {
return std::make_tuple(isdigit(a.back()), a.size(), a) <
std::make_tuple(isdigit(b.back()), b.size(), b);
});
for (const std::string& name : kv.second) {
if (IsExported(name)) {
exported_names_[kv.first].push_back(SaveString(name));
}
}
}
for (auto& kv : object_names_) {
int node_id = kv.first;
std::string internal_name =
absl::StrCat(GetDefaultSymbolTableName(node_id), "__");
if (exported_names_.find(node_id) != exported_names_.end()) {
internal_name += exported_names_[node_id][0].str();
} else {
internal_name += object_names_[node_id][0];
}
pretty_symbol_table_name_[node_id] = SaveString(internal_name);
}
}
llvm::ArrayRef<llvm::StringRef> ObjectNames::GetExportedNames(
int node_id) const {
auto it = exported_names_.find(node_id);
if (it != exported_names_.end()) {
return it->second;
}
return {};
}
llvm::StringRef ObjectNames::GetSymbolTableName(int node_id) const {
auto it = pretty_symbol_table_name_.find(node_id);
if (it != pretty_symbol_table_name_.end()) {
return it->second;
}
return SaveString(GetDefaultSymbolTableName(node_id));
}
std::string ObjectNames::GetDefaultSymbolTableName(int node_id) const {
return absl::StrCat("__sm_node", node_id);
}
bool ObjectNames::IsExported(const std::string& name) {
if (names_to_export_.empty()) {
return true;
}
return names_to_export_.find(name) != names_to_export_.end();
}
void ObjectNames::RecursivelyVisitObjectGraph(int node_id) {
const SavedObject& object = object_graph_.nodes(node_id);
switch (object.kind_case()) {
case SavedObject::kConstant:
case SavedObject::kFunction:
case SavedObject::kVariable: {
object_names_[node_id].push_back(absl::StrJoin(path_segments_, "."));
break;
}
default:
break;
}
for (const auto& child_ref : object.children()) {
bool on_stack = !on_stack_nodes_.insert(child_ref.node_id()).second;
if (on_stack) {
continue;
}
path_segments_.push_back(child_ref.local_name());
RecursivelyVisitObjectGraph(child_ref.node_id());
path_segments_.pop_back();
on_stack_nodes_.erase(child_ref.node_id());
}
}
llvm::StringRef ObjectNames::SaveString(const std::string& s) const {
return llvm::StringRef(*saved_strings_.insert(s).first);
}
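// Returns the "value" tensor attribute of the node named `op_name` in
// `graph_def`, or nullptr if no such node or attribute exists.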
const TensorProto* ExtractConstTensorFromGraph(const GraphDef& graph_def,
const std::string& op_name) {
const NodeDef* match_node = nullptr;
for (const auto& node : graph_def.node()) {
if (node.name() == op_name) {
match_node = &node;
}
}
if (!match_node) {
return nullptr;
}
auto value_it = match_node->attr().find("value");
if (value_it == match_node->attr().end()) {
return nullptr;
}
if (!value_it->second.has_tensor()) {
return nullptr;
}
return &value_it->second.tensor();
}
const TrackableObjectGraph::TrackableObject::SerializedTensor*
FindSerializedTensorInTrackable(
const TrackableObjectGraph::TrackableObject& trackable_object,
StringPiece name) {
for (const auto& maybe_serialized_tensor : trackable_object.attributes()) {
if (maybe_serialized_tensor.name() == name) {
return &maybe_serialized_tensor;
}
}
return nullptr;
}
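// Rejects exported tf.functions that have more than one concrete function,
// since the importer expects exactly one concrete function per exported
// function.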
Status DiagnoseMultipleConcreteFunctions(const SavedObjectGraph& object_graph,
const ObjectNames& object_names) {
for (int node_id = 0; node_id < object_graph.nodes_size(); node_id++) {
const SavedObject& object = object_graph.nodes(node_id);
if (object_names.GetExportedNames(node_id).empty()) {
continue;
}
if (object.kind_case() == SavedObject::kFunction) {
if (object.function().concrete_functions_size() != 1) {
llvm::SmallVector<std::string, 4> names;
for (llvm::StringRef s : object_names.GetExportedNames(node_id)) {
names.push_back("'" + s.str() + "'");
}
return errors::InvalidArgument(
"Exported function with exported name(s) ",
absl::StrJoin(names, ", "),
" with multiple concrete functions. Add "
"@tf.function(input_signature=[...]) on this function, or use a "
"narrower list of exported names that excludes this function.");
}
}
}
return absl::OkStatus();
}
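// Flattens a StructuredValue into the index paths of its tensor leaves,
// recording an error message for unsupported value kinds.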
class StructuredValueLinearizer {
public:
StructuredValueLinearizer(const StructuredValue& value,
mlir::MLIRContext* context);
absl::StatusOr<llvm::ArrayRef<mlir::ArrayAttr>> GetLeafIndexPaths(
llvm::StringRef error_context) const;
private:
void RecursivelyFindLeaves(const StructuredValue& value);
mlir::Builder builder_;
llvm::SmallVector<mlir::Attribute, 4> current_index_path_;
llvm::SmallVector<mlir::ArrayAttr, 4> leaf_index_paths_;
std::string error_message_;
};
StructuredValueLinearizer::StructuredValueLinearizer(
const StructuredValue& value, mlir::MLIRContext* context)
: builder_(context) {
RecursivelyFindLeaves(value);
}
absl::StatusOr<llvm::ArrayRef<mlir::ArrayAttr>>
StructuredValueLinearizer::GetLeafIndexPaths(
llvm::StringRef error_context) const {
if (error_message_.empty()) {
return llvm::ArrayRef(leaf_index_paths_);
}
return errors::InvalidArgument(
error_context.str(), error_message_,
"This likely means that you have @tf.function "
"on an exported function instead of "
"@tf.function(input_signature=[...]). Consider annotating an "
"input_signature or narrowing your set of "
"exported names to not include this function.");
}
void StructuredValueLinearizer::RecursivelyFindLeaves(
const StructuredValue& value) {
switch (value.kind_case()) {
case StructuredValue::kDictValue: {
const DictValue& dict = value.dict_value();
using FieldTy = protobuf::MapPair<std::string, StructuredValue>;
llvm::SmallVector<const FieldTy*, 4> fields;
for (auto& field : dict.fields()) {
fields.push_back(&field);
}
llvm::sort(fields, [](const FieldTy* a, const FieldTy* b) {
return a->first < b->first;
});
for (auto& field : fields) {
current_index_path_.push_back(builder_.getStringAttr(field->first));
RecursivelyFindLeaves(field->second);
current_index_path_.pop_back();
}
return;
}
case StructuredValue::kTupleValue: {
const TupleValue& tuple = value.tuple_value();
for (int i = 0, e = tuple.values_size(); i < e; i++) {
current_index_path_.push_back(builder_.getI64IntegerAttr(i));
RecursivelyFindLeaves(tuple.values(i));
current_index_path_.pop_back();
}
return;
}
case StructuredValue::kListValue: {
const ListValue& list = value.list_value();
for (int i = 0, e = list.values_size(); i < e; i++) {
current_index_path_.push_back(builder_.getI64IntegerAttr(i));
RecursivelyFindLeaves(list.values(i));
current_index_path_.pop_back();
}
return;
}
case StructuredValue::kTensorSpecValue: {
leaf_index_paths_.push_back(builder_.getArrayAttr(current_index_path_));
return;
}
case StructuredValue::kNoneValue: {
return;
}
default: {
llvm::raw_string_ostream os(error_message_);
os << "Unhandled structured value kind " << value.kind_case()
<< " at index path: <value>";
for (auto path_element : current_index_path_) {
os << ".";
if (auto integer = mlir::dyn_cast<mlir::IntegerAttr>(path_element)) {
os << integer.getValue();
} else {
auto str = mlir::cast<mlir::StringAttr>(path_element);
os << str.getValue();
}
}
os << "\n";
}
}
}
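// Rewrites exported-function arguments that are bound to global tensors to
// use their bound-input types, inserting a CastOp or ReadVariableOp so that
// existing uses keep their original types.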
void AdjustBoundInputArgTypes(mlir::ModuleOp module) {
mlir::SymbolTable symbol_table(module);
for (auto func : module.getOps<mlir::func::FuncOp>()) {
if (!mlir::tf_saved_model::IsExported(func)) continue;
mlir::OpBuilder builder(func.getBody());
llvm::SmallVector<mlir::Type, 4> new_input_types;
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
auto arg = func.getArgument(i);
auto global_tensor = mlir::tf_saved_model::LookupBoundInputOfType<
mlir::tf_saved_model::GlobalTensorOp>(func, i, symbol_table);
if (global_tensor) {
auto old_type = arg.getType();
auto new_type =
mlir::tf_saved_model::GetBoundInputArgTypeFor(global_tensor);
arg.setType(new_type);
if (global_tensor.getIsMutable()) {
auto arg_with_original_type = builder.create<mlir::TF::CastOp>(
global_tensor.getLoc(), old_type, arg,
builder.getBoolAttr(false));
arg.replaceAllUsesWith(arg_with_original_type);
arg_with_original_type.setOperand(arg);
} else {
auto arg_with_original_type =
builder.create<mlir::TF::ReadVariableOp>(global_tensor.getLoc(),
old_type, arg);
arg.replaceAllUsesWith(arg_with_original_type);
arg_with_original_type.setOperand(arg);
}
}
new_input_types.push_back(arg.getType());
}
func.setType(mlir::FunctionType::get(module.getContext(), new_input_types,
func.getFunctionType().getResults()));
}
}
void MarkSavedModelFunctionVisibility(mlir::ModuleOp module) {
for (auto func : module.getOps<mlir::func::FuncOp>()) {
auto visibility = mlir::tf_saved_model::IsExported(func)
? mlir::func::FuncOp::Visibility::Public
: mlir::func::FuncOp::Visibility::Private;
func.setVisibility(visibility);
}
}
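// Reorders the module for deterministic output: the session initializer
// first, then assets, global tensors, exported functions (sorted by exported
// name), and finally private functions (sorted by symbol name).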
void SortSavedModelModule(mlir::ModuleOp module) {
struct NamedGlobalTensor {
llvm::StringRef name;
GlobalTensorOp global_tensor;
};
llvm::SmallVector<NamedGlobalTensor, 8> named_global_tensors;
for (auto global_tensor : module.getOps<GlobalTensorOp>()) {
auto exported_names = mlir::tf_saved_model::GetExportedNames(global_tensor);
named_global_tensors.push_back(
{exported_names.empty() ? "" : exported_names.front(), global_tensor});
}
llvm::stable_sort(named_global_tensors,
[](const NamedGlobalTensor& a, const NamedGlobalTensor& b) {
return std::make_tuple(a.name.empty(), a.name) <
std::make_tuple(b.name.empty(), b.name);
});
struct NamedFunc {
llvm::StringRef name;
mlir::func::FuncOp func;
};
llvm::SmallVector<NamedFunc, 8> named_funcs;
llvm::SmallVector<mlir::func::FuncOp, 8> private_funcs;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
auto exported_names = mlir::tf_saved_model::GetExportedNames(func);
if (!exported_names.empty())
named_funcs.push_back({exported_names.front(), func});
else
private_funcs.push_back(func);
}
llvm::stable_sort(named_funcs, [](const NamedFunc& a, const NamedFunc& b) {
return a.name < b.name;
});
llvm::stable_sort(private_funcs,
[](mlir::func::FuncOp a, mlir::func::FuncOp b) {
return a.getName() < b.getName();
});
struct NamedAsset {
llvm::StringRef name;
AssetOp asset;
};
llvm::SmallVector<NamedAsset, 4> assets;
for (auto asset : module.getOps<AssetOp>()) {
assets.push_back({asset.getName(), asset});
}
llvm::stable_sort(assets, [](const NamedAsset& a, const NamedAsset& b) {
return a.name < b.name;
});
for (auto func : llvm::reverse(private_funcs)) {
func.getOperation()->moveBefore(&module.getBody()->front());
}
for (auto named_func : llvm::reverse(named_funcs)) {
named_func.func.getOperation()->moveBefore(&module.getBody()->front());
}
for (auto named_global_tensor : llvm::reverse(named_global_tensors)) {
named_global_tensor.global_tensor.getOperation()->moveBefore(
&module.getBody()->front());
}
for (auto asset : assets) {
asset.asset.getOperation()->moveBefore(&module.getBody()->front());
}
auto initializers = module.getOps<SessionInitializerOp>();
if (!initializers.empty()) {
(*initializers.begin())
.getOperation()
->moveBefore(&module.getBody()->front());
}
}
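// Builds the tf_saved_model constructs on top of the imported functions:
// attaches exported names, argument/result index paths and bound inputs, and
// materializes variables and constants from the object graph as
// GlobalTensorOps.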
Status CreateSavedModelIR(
const ObjectNames& object_names, mlir::ModuleOp module,
const SavedObjectGraph& object_graph,
const std::unordered_map<std::string, std::string>& tf_name_to_mlir_name,
SavedModelV2Bundle* saved_model, MLIRImportOptions import_options) {
mlir::OpBuilder builder(module.getBodyRegion());
mlir::SymbolTable symbol_table(module);
absl::flat_hash_map<int, const TrackableObjectGraph::TrackableObject*>
restored_objects;
TF_RETURN_IF_ERROR(saved_model->VisitObjectsToRestore(
[&](int saved_node_id,
const TrackableObjectGraph::TrackableObject& trackable_object) {
restored_objects.insert(
std::make_pair(saved_node_id, &trackable_object));
return absl::OkStatus();
}));
for (int node_id = 0; node_id < object_graph.nodes_size(); node_id++) {
const SavedObject& object = object_graph.nodes(node_id);
if (object.kind_case() == SavedObject::kFunction) {
if (object_names.GetExportedNames(node_id).empty()) {
continue;
}
std::string error_context =
"While importing SavedModel function '" +
object_names.GetExportedNames(node_id)[0].str() + "': ";
const SavedFunction& function = object.function();
auto orig_func = symbol_table.lookup<mlir::func::FuncOp>(
tf_name_to_mlir_name.find(function.concrete_functions(0))->second);
mlir::func::FuncOp func = orig_func;
if (!mlir::SymbolTable::symbolKnownUseEmpty(orig_func.getSymNameAttr(),
&module.getBodyRegion())) {
func = orig_func.cloneWithoutRegions();
module.insert(module.getBody()->begin(), func);
func.addEntryBlock();
func.setName(builder.getStringAttr("__sm_exported_" +
orig_func.getName().str()));
llvm::SmallVector<mlir::Value, 4> args_as_values;
for (auto block_argument : func.getArguments()) {
args_as_values.push_back(block_argument);
}
mlir::OpBuilder body_builder(&func.getBody());
auto call = body_builder.create<mlir::TF::StatefulPartitionedCallOp>(
func.getLoc(), orig_func.getFunctionType().getResults(),
args_as_values,
mlir::SymbolRefAttr::get(builder.getContext(), orig_func.getName()),
builder.getStringAttr(""),
builder.getStringAttr(""),
builder.getStringAttr(""));
body_builder.create<mlir::func::ReturnOp>(func.getLoc(),
call.getResults());
}
func->setAttr(
kTfSavedModelExportedNamesAttr,
builder.getStrArrayAttr(object_names.GetExportedNames(node_id)));
const SavedConcreteFunction& concrete_function =
object_graph.concrete_functions().at(function.concrete_functions(0));
auto positional_arg_structure =
concrete_function.canonicalized_input_signature()
.tuple_value()
.values(0);
StructuredValueLinearizer input_linearizer(positional_arg_structure,
builder.getContext());
int bound_input_base =
func.getNumArguments() - concrete_function.bound_inputs_size();
TF_ASSIGN_OR_RETURN(auto input_index_paths,
input_linearizer.GetLeafIndexPaths(
error_context + "in input signature: "));
const int input_index_paths_size = input_index_paths.size();
if (bound_input_base != input_index_paths_size) {
return errors::InvalidArgument(
error_context,
"Argument mismatch between concrete function input signature "
"vs underlying FunctionDef for concrete function '",
function.concrete_functions(0), "' (", input_index_paths.size(),
" vs ", bound_input_base, ")");
}
for (const auto& index_path : llvm::enumerate(input_index_paths)) {
func.setArgAttr(index_path.index(), kTfSavedModelIndexPathAttr,
index_path.value());
}
for (const auto& bound_input :
llvm::enumerate(concrete_function.bound_inputs())) {
int arg_index = bound_input_base + bound_input.index();
auto symbol_ref = mlir::SymbolRefAttr::get(
builder.getContext(),
object_names.GetSymbolTableName(bound_input.value()));
func.setArgAttr(arg_index, "tf_saved_model.bound_input", symbol_ref);
}
StructuredValueLinearizer output_linearizer(
concrete_function.output_signature(), builder.getContext());
TF_ASSIGN_OR_RETURN(auto output_index_paths,
output_linearizer.GetLeafIndexPaths(
error_context + "in output signature: "));
if (func.getNumResults() != output_index_paths.size()) {
return errors::InvalidArgument(
error_context,
"Result mismatch between concrete function output signature "
"vs underlying FunctionDef for concrete function '",
function.concrete_functions(0), "' (", output_index_paths.size(),
" vs ", func.getNumResults(), ")");
}
for (const auto& index_path : llvm::enumerate(output_index_paths)) {
func.setResultAttr(index_path.index(), kTfSavedModelIndexPathAttr,
index_path.value());
}
} else if (object.kind_case() == SavedObject::kVariable) {
const SavedVariable& variable = object.variable();
auto variable_trackable_it = restored_objects.find(node_id);
TF_ASSIGN_OR_RETURN(
auto type, ConvertToMlirTensorType(variable.shape(), variable.dtype(),
&builder));
if (variable_trackable_it == restored_objects.end()) {
if (!import_options.allow_uninitialized_variables) {
return errors::FailedPrecondition(
"Could not restore saved variable: ", variable.name());
}
auto op = builder.create<mlir::tf_saved_model::GlobalTensorOp>(
builder.getUnknownLoc(),
builder.getStringAttr(object_names.GetSymbolTableName(node_id)),
mlir::ElementsAttr(),
mlir::TypeAttr::get(type),
builder.getUnitAttr());
op->setAttr(
kTfSavedModelExportedNamesAttr,
builder.getStrArrayAttr(object_names.GetExportedNames(node_id)));
} else {
const auto* serialized_tensor_attr = FindSerializedTensorInTrackable(
*variable_trackable_it->second, "VARIABLE_VALUE");
if (!serialized_tensor_attr) {
return errors::FailedPrecondition(
"Could not find serialized tensor for saved variable: ",
variable.name());
}
const auto& checkpoint_key = serialized_tensor_attr->checkpoint_key();
Tensor value;
TF_RETURN_WITH_CONTEXT_IF_ERROR(
saved_model->variable_reader()->Lookup(checkpoint_key, &value),
"Could not read checkpoint key from variables bundle: ",
checkpoint_key);
TF_ASSIGN_OR_RETURN(auto value_attr, ConvertTensor(value, &builder));
auto op = builder.create<GlobalTensorOp>(
builder.getUnknownLoc(),
builder.getStringAttr(object_names.GetSymbolTableName(node_id)),
value_attr,
mlir::TypeAttr::get(type),
builder.getUnitAttr());
op->setAttr(
kTfSavedModelExportedNamesAttr,
builder.getStrArrayAttr(object_names.GetExportedNames(node_id)));
}
} else if (object.kind_case() == SavedObject::kConstant) {
const SavedConstant& constant = object.constant();
const TensorProto* value = ExtractConstTensorFromGraph(
saved_model->meta_graph_def().graph_def(), constant.operation());
if (!value) {
return errors::FailedPrecondition(
"Unable to find const node referenced in object graph: ",
constant.operation());
}
TF_ASSIGN_OR_RETURN(auto value_attr,
ConvertTensorProto(*value, &builder));
auto op = builder.create<GlobalTensorOp>(
builder.getUnknownLoc(),
builder.getStringAttr(object_names.GetSymbolTableName(node_id)),
value_attr,
mlir::TypeAttr::get(value_attr.getType()),
nullptr);
op->setAttr(
kTfSavedModelExportedNamesAttr,
builder.getStrArrayAttr(object_names.GetExportedNames(node_id)));
}
}
AdjustBoundInputArgTypes(module);
module->setAttr("tf_saved_model.semantics", builder.getUnitAttr());
SortSavedModelModule(module);
MarkSavedModelFunctionVisibility(module);
return absl::OkStatus();
}
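// Imports a TF2 object-graph SavedModel: converts its GraphDef to MLIR,
// removes internal save/restore and signature-wrapper functions, and then
// builds the tf_saved_model IR from the object graph.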
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelObjectGraph(
SavedModelV2Bundle* saved_model, absl::Span<std::string> exported_names,
mlir::MLIRContext* context, MLIRImportOptions import_options) {
LoadImporterDialects(*context);
GraphDebugInfo dummy_debug_info;
const GraphDebugInfo& debug_info =
saved_model->debug_info() ? *saved_model->debug_info() : dummy_debug_info;
GraphImportConfig specs;
specs.prune_unused_nodes = true;
specs.unconditionally_use_set_output_shapes =
import_options.unconditionally_use_set_output_shapes;
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::ModuleOp::create(mlir::UnknownLoc::get(context));
std::unordered_map<std::string, std::string> tf_name_to_mlir_name;
const auto& graphdef = saved_model->meta_graph_def().graph_def();
PopulateTfVersions(module.get(), graphdef.versions());
GraphConstructorOptions options;
options.allow_internal_ops = true;
options.add_default_attributes = import_options.add_default_attributes;
Graph graph(OpRegistry::Global());
GraphDef preprocessed_graphdef(graphdef);
if (import_options.add_default_attributes) {
TF_RETURN_IF_ERROR(PreprocessGraphDef(nullptr, &preprocessed_graphdef));
}
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
options, std::move(preprocessed_graphdef), &graph));
NameUniquifier function_name_uniquifier(graph.flib_def());
for (const auto& fn_name : graph.flib_def().ListFunctionNames()) {
std::string mlir_func_name(function_name_uniquifier.GetUniqueName(fn_name));
    tf_name_to_mlir_name[std::string(fn_name)] = mlir_func_name;
}
specs.convert_all_functions_to_mlir = true;
TF_ASSIGN_OR_RETURN(
module, ConvertGraphToMlir(graph, debug_info, graph.flib_def(), specs,
module->getContext()));
if (!saved_model->meta_graph_def().has_object_graph_def()) {
return errors::InvalidArgument(
"SavedModel does not have an object graph. Please use TF2.");
}
auto& object_graph = saved_model->meta_graph_def().object_graph_def();
ObjectNames object_names(object_graph, exported_names);
for (auto func :
llvm::make_early_inc_range(module->getOps<mlir::func::FuncOp>())) {
if (func.getName().starts_with("__inference__traced_save_") ||
func.getName().starts_with("__inference__traced_restore_") ||
func.getName().starts_with("__inference_signature_wrapper_") ||
func.getName().starts_with("main")) {
func.erase();
}
}
TF_RETURN_IF_ERROR(
DiagnoseMultipleConcreteFunctions(object_graph, object_names));
TF_RETURN_IF_ERROR(CreateSavedModelIR(object_names, module.get(),
object_graph, tf_name_to_mlir_name,
saved_model, import_options));
assert(mlir::succeeded(mlir::verify(module.get())));
return module;
}
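// Import input backed by a single in-memory Graph built from the
// MetaGraphDef; every requested signature is imported from that one shared
// graph.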
class SimpleSavedModelMLIRImportInput : public SavedModelMLIRImportInput {
public:
static absl::StatusOr<SimpleSavedModelMLIRImportInput> Create(
const MLIRImportOptions& import_options,
const MetaGraphDef* meta_graph_def, const GraphDebugInfo& debug_info) {
DCHECK(meta_graph_def);
GraphDef graph_def(meta_graph_def->graph_def());
auto graph = std::make_unique<Graph>(OpRegistry::Global());
if (import_options.upgrade_legacy) {
TF_RETURN_IF_ERROR(GenerateResourceSharedNameIfEmpty(
graph_def, graph->flib_def().default_registry()));
}
GraphConstructorOptions graph_ctor_options;
graph_ctor_options.allow_internal_ops = true;
graph_ctor_options.add_default_attributes = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
graph_ctor_options, std::move(graph_def), graph.get()));
if (import_options.upgrade_legacy) {
TF_RETURN_IF_ERROR(UpgradeLegacyGraph(
graph.get(),
const_cast<FunctionLibraryDefinition*>(&graph->flib_def()),
false));
}
return SimpleSavedModelMLIRImportInput(meta_graph_def, debug_info,
std::move(graph));
}
SimpleSavedModelMLIRImportInput(const MetaGraphDef* meta_graph_def,
const GraphDebugInfo& debug_info,
std::unique_ptr<Graph> graph)
: SavedModelMLIRImportInput(meta_graph_def, debug_info),
graph_(std::move(graph)) {}
absl::StatusOr<const Graph*> GetSubGraph(absl::string_view name,
GraphImportConfig& specs) override {
DCHECK(CheckGraphNameValidity(name));
DCHECK(CheckGraphContainsFeedsAndFetches(specs));
return graph_.get();
}
private:
bool CheckGraphContainsFeedsAndFetches(const GraphImportConfig& specs) const {
absl::flat_hash_set<std::string> feed_fetch_nodes;
for (const auto& iter : specs.inputs) {
TensorId tensor_id = ParseTensorName(iter.first);
feed_fetch_nodes.insert(std::string(tensor_id.node()));
}
for (const auto& output : llvm::concat<const std::string>(
specs.outputs, specs.control_outputs)) {
TensorId tensor_id = ParseTensorName(output);
feed_fetch_nodes.insert(std::string(tensor_id.node()));
}
for (Node* node : graph_->op_nodes()) {
feed_fetch_nodes.erase(node->name());
}
return feed_fetch_nodes.empty();
}
bool CheckGraphNameValidity(absl::string_view name) const {
const auto& signature_defs = meta_graph_def().signature_def();
if (signature_defs.contains(std::string(name))) return true;
if (meta_graph_def().has_saver_def() &&
meta_graph_def().saver_def().restore_op_name() == name)
return true;
std::string init_op_name;
if (internal::GetInitOp("", meta_graph_def(), &init_op_name).ok()) {
if (init_op_name == name) return true;
}
return false;
}
std::unique_ptr<Graph> graph_;
};
static absl::flat_hash_set<std::string> GetOriginalTfFuncNamesFromGraphDef(
const GraphDef& graph_def) {
absl::flat_hash_set<std::string> original_func_tf_names;
for (const auto& function : graph_def.library().function()) {
original_func_tf_names.insert(function.signature().name());
}
return original_func_tf_names;
}
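// Imports a V1 SavedModel from its SignatureDefs only. Each signature,
// initializer, and asset is converted into the output module separately;
// variables are not lifted here.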
class SavedModelSignatureDefImporterLite {
public:
static absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> Convert(
SavedModelMLIRImportInput& input,
std::optional<absl::Span<const std::string>> exported_names,
mlir::MLIRContext* context, bool import_restore = true,
bool unconditionally_use_set_output_shapes = false) {
SavedModelSignatureDefImporterLite importer(
input, exported_names, context, import_restore,
unconditionally_use_set_output_shapes);
return importer.ConvertSignatures();
}
private:
SavedModelSignatureDefImporterLite(
SavedModelMLIRImportInput& input,
std::optional<absl::Span<const std::string>> exported_names,
mlir::MLIRContext* context, bool import_restore,
bool unconditionally_use_set_output_shapes)
: input_(input),
original_func_tf_names_(GetOriginalTfFuncNamesFromGraphDef(
input.meta_graph_def().graph_def())),
exported_names_(exported_names),
module_(mlir::ModuleOp::create(mlir::UnknownLoc::get(context))),
symbol_table_(module_.get()),
import_restore_(import_restore),
unconditionally_use_set_output_shapes_(
unconditionally_use_set_output_shapes) {}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSignatures();
Status ConvertSignature(const std::string& sig_def_key,
const SignatureDef& signature_def);
struct AssetInfo {
std::string tensor_name;
mlir::tf_saved_model::AssetOp op;
};
absl::StatusOr<std::vector<AssetInfo>> ConvertAssets();
Status ConvertInitializer(const std::string& target_node_name,
const std::vector<AssetInfo>& assets,
llvm::StringRef initializer_type);
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertGraph(
const std::string& name,
const std::vector<std::pair<std::string, TensorInfo>>& inputs,
const std::vector<std::pair<std::string, TensorInfo>>& outputs,
std::vector<std::string> control_outputs,
std::unordered_map<std::string, std::string>& tf_name_to_mlir_name);
Status MoveConvertedFunctionsToModule(
absl::string_view name, mlir::ModuleOp sub_module,
const std::unordered_map<std::string, std::string>& tf_name_to_mlir_name);
absl::StatusOr<GraphImportConfig::InputArrays> ParseInputArrays(
llvm::ArrayRef<std::pair<std::string, TensorInfo>> inputs);
private:
SavedModelMLIRImportInput& input_;
absl::flat_hash_set<std::string> original_func_tf_names_;
std::optional<absl::Span<const std::string>> exported_names_;
mlir::OwningOpRef<mlir::ModuleOp> module_;
absl::Mutex symbol_table_mu_;
mlir::SymbolTable symbol_table_ ABSL_GUARDED_BY(symbol_table_mu_);
bool import_restore_ = true;
bool unconditionally_use_set_output_shapes_ = false;
};
absl::StatusOr<std::vector<SavedModelSignatureDefImporterLite::AssetInfo>>
SavedModelSignatureDefImporterLite::ConvertAssets() {
std::vector<AssetFileDef> asset_file_defs;
TF_RETURN_IF_ERROR(
internal::GetAssetFileDefs(input_.meta_graph_def(), &asset_file_defs));
std::vector<AssetInfo> results;
results.reserve(asset_file_defs.size());
mlir::OpBuilder builder(module_->getBodyRegion());
unsigned i = 0;
for (const auto& asset : asset_file_defs) {
auto asset_op = builder.create<mlir::tf_saved_model::AssetOp>(
module_->getLoc(),
builder.getStringAttr(
absl::StrCat("__tf_saved_model_asset", i++, "_", asset.filename())),
builder.getStringAttr(
io::JoinPath(kSavedModelAssetsDirectory, asset.filename())));
results.push_back({asset.tensor_info().name(), asset_op});
}
return results;
}
Status SavedModelSignatureDefImporterLite::MoveConvertedFunctionsToModule(
absl::string_view name, mlir::ModuleOp sub_module,
const std::unordered_map<std::string, std::string>& tf_name_to_mlir_name) {
mlir::Builder builder(sub_module.getContext());
mlir::SymbolTable sub_module_symbol_table(sub_module);
absl::flat_hash_set<std::string> original_func_mlir_names;
for (const auto& kv : tf_name_to_mlir_name) {
if (original_func_tf_names_.contains(kv.first))
original_func_mlir_names.insert(kv.second);
}
for (auto func : sub_module.getOps<mlir::func::FuncOp>()) {
if (mlir::tf_saved_model::IsExported(func)) continue;
if (original_func_mlir_names.count(func.getSymName().str())) continue;
std::string new_sym_name = absl::StrCat(name, "/", func.getSymName().str());
mlir::StringAttr new_sym_name_attr = builder.getStringAttr(new_sym_name);
if (mlir::failed(sub_module_symbol_table.replaceAllSymbolUses(
func, new_sym_name_attr, sub_module)))
return tensorflow::errors::InvalidArgument(absl::StrCat(
"SavedModelSignatureDefImporterLite: failed to assign a unique "
"name to the private function used in a signature: ",
func.getSymName().str()));
mlir::SymbolTable::setSymbolName(func, new_sym_name);
}
for (auto func : sub_module.getOps<mlir::func::FuncOp>()) {
absl::MutexLock l(&symbol_table_mu_);
symbol_table_.insert(func.clone());
}
return absl::OkStatus();
}
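// Converts an initializer graph (init op or restore op) into an exported
// initializer function whose arguments are bound to the given asset ops.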
Status SavedModelSignatureDefImporterLite::ConvertInitializer(
const std::string& target_node_name, const std::vector<AssetInfo>& assets,
llvm::StringRef initializer_type) {
std::vector<std::pair<std::string, TensorInfo>> inputs;
inputs.reserve(assets.size());
for (const auto& asset : assets) {
TensorInfo tensor_info;
tensor_info.set_name(asset.tensor_name);
tensor_info.set_dtype(DT_STRING);
tensor_info.mutable_tensor_shape();
inputs.push_back({asset.tensor_name, tensor_info});
}
std::unordered_map<std::string, std::string> tf_name_to_mlir_name;
TF_ASSIGN_OR_RETURN(auto sub_module,
ConvertGraph(target_node_name, inputs, {},
{target_node_name}, tf_name_to_mlir_name));
mlir::SymbolTable sub_symbol_table(*sub_module);
auto init_func_op =
sub_symbol_table.lookup<mlir::func::FuncOp>(target_node_name);
init_func_op->removeAttr("tf.entry_function");
mlir::OpBuilder builder(module_->getBodyRegion());
DCHECK_EQ(init_func_op.getNumArguments(), assets.size());
for (const auto& iter : llvm::enumerate(assets)) {
auto asset_op = iter.value().op;
init_func_op.setArgAttr(
iter.index(), "tf_saved_model.bound_input",
mlir::SymbolRefAttr::get(builder.getContext(), asset_op.getName()));
}
init_func_op->setAttr(
kTfSavedModelExportedNamesAttr,
builder.getStrArrayAttr({absl::StrCat(
"__tf_saved_model_session_initializer_", target_node_name)}));
init_func_op->setAttr(kTfSavedModelInitializerTypeAttr,
builder.getStringAttr(initializer_type));
return MoveConvertedFunctionsToModule(target_node_name, *sub_module,
tf_name_to_mlir_name);
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>>
SavedModelSignatureDefImporterLite::ConvertGraph(
const std::string& name,
const std::vector<std::pair<std::string, TensorInfo>>& inputs,
const std::vector<std::pair<std::string, TensorInfo>>& outputs,
const std::vector<std::string> control_outputs,
std::unordered_map<std::string, std::string>& tf_name_to_mlir_name) {
VLOG(1) << "Importing Signature: " << name;
GraphImportConfig specs;
specs.graph_func_name = name;
specs.prune_unused_nodes = true;
TF_ASSIGN_OR_RETURN(specs.inputs, ParseInputArrays(inputs));
for (auto& output : outputs) {
TF_ASSIGN_OR_RETURN(std::string name,
GetDenseTensorNameFromTensorInfo(output.second));
specs.outputs.push_back(std::move(name));
}
specs.control_outputs = control_outputs;
specs.enable_shape_inference = false;
specs.unconditionally_use_set_output_shapes =
unconditionally_use_set_output_shapes_;
TF_ASSIGN_OR_RETURN(const auto* subgraph, input_.GetSubGraph(name, specs));
return GraphDefImporter::Convert(module_->getContext(), *subgraph,
input_.debug_info(), subgraph->flib_def(),
specs, tf_name_to_mlir_name,
true);
}
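// Converts one SignatureDef into an exported function, attaching index-path
// attributes keyed by the signature's input and output names (sorted by
// fingerprint for a stable order).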
Status SavedModelSignatureDefImporterLite::ConvertSignature(
const std::string& sig_def_key, const SignatureDef& signature_def) {
std::vector<std::pair<std::string, TensorInfo>> inputs(
signature_def.inputs().begin(), signature_def.inputs().end());
llvm::sort(inputs, [](const auto& lhs, const auto& rhs) {
return tensorflow::Fingerprint64(lhs.first) <
tensorflow::Fingerprint64(rhs.first);
});
std::vector<std::pair<std::string, TensorInfo>> outputs(
signature_def.outputs().begin(), signature_def.outputs().end());
llvm::sort(outputs, [](const auto& lhs, const auto& rhs) {
return tensorflow::Fingerprint64(lhs.first) <
tensorflow::Fingerprint64(rhs.first);
});
std::unordered_map<std::string, std::string> tf_name_to_mlir_name;
TF_ASSIGN_OR_RETURN(
auto sub_module,
ConvertGraph(sig_def_key, inputs, outputs, {}, tf_name_to_mlir_name));
mlir::OpBuilder builder(sub_module->getBodyRegion());
mlir::SymbolTable sub_symbol_table(*sub_module);
auto func_op = sub_symbol_table.lookup<mlir::func::FuncOp>(sig_def_key);
TF_RET_CHECK(func_op)
<< "Graphdef importer should have created a function named "
<< sig_def_key << ".";
func_op->setAttr(kTfSavedModelExportedNamesAttr,
builder.getStrArrayAttr({sig_def_key}));
for (const auto& input_and_idx : llvm::enumerate(inputs)) {
func_op.setArgAttr(input_and_idx.index(), kTfSavedModelIndexPathAttr,
builder.getStrArrayAttr({input_and_idx.value().first}));
}
for (const auto& output_and_idx : llvm::enumerate(outputs)) {
func_op.setResultAttr(
output_and_idx.index(), kTfSavedModelIndexPathAttr,
builder.getStrArrayAttr({output_and_idx.value().first}));
}
for (const auto& [tf_name, mlir_name] : tf_name_to_mlir_name) {
auto func_op = sub_symbol_table.lookup<mlir::func::FuncOp>(mlir_name);
TF_RET_CHECK(func_op)
<< "Graphdef importer should have created a function named "
<< mlir_name << ".";
func_op->setAttr("tf._original_func_name", builder.getStringAttr(tf_name));
}
return MoveConvertedFunctionsToModule(sig_def_key, *sub_module,
tf_name_to_mlir_name);
}
absl::StatusOr<GraphImportConfig::InputArrays>
SavedModelSignatureDefImporterLite::ParseInputArrays(
llvm::ArrayRef<std::pair<std::string, TensorInfo>> inputs) {
GraphImportConfig::InputArrays results;
for (const auto& iter : inputs) {
const auto& tensor_info = iter.second;
TF_ASSIGN_OR_RETURN(std::string name,
GetDenseTensorNameFromTensorInfo(tensor_info));
VLOG(1) << "Importing Signature Input: input_name = " << iter.first
<< ", tensor_info = " << tensor_info.DebugString();
ArrayInfo array_info;
array_info.imported_dtype = tensor_info.dtype();
if (tensor_info.has_tensor_shape()) {
array_info.shape = tensor_info.tensor_shape();
} else {
array_info.shape.set_unknown_rank(true);
}
results.insert(std::pair<std::string, ArrayInfo>(std::move(name),
std::move(array_info)));
}
return results;
}
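// Converts all requested signatures (in parallel), then emits asset ops, the
// restore and init session initializers, and the module-level
// tf_saved_model.semantics attribute.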
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>>
SavedModelSignatureDefImporterLite::ConvertSignatures() {
LoadImporterDialects(*module_->getContext());
const auto& signatures = input_.meta_graph_def().signature_def();
PopulateTfVersions(module_.get(),
input_.meta_graph_def().graph_def().versions());
llvm::DenseSet<llvm::StringRef> exported_name_set;
bool import_all_signatures = !exported_names_.has_value();
if (exported_names_.has_value()) {
exported_name_set.insert(exported_names_->begin(), exported_names_->end());
}
absl::Mutex error_status_mu;
tensorflow::Status error_status;
{
thread::ThreadPool thread_pool(Env::Default(), "ConvertSignatures",
kNumThreadToConvertSignatures);
for (const auto& key_and_signature_def : signatures) {
const std::string& sig_def_key = key_and_signature_def.first;
const SignatureDef& signature_def = key_and_signature_def.second;
if (sig_def_key == "__saved_model_init_op") {
continue;
}
if (!import_all_signatures && exported_name_set.count(sig_def_key) == 0) {
continue;
}
thread_pool.Schedule([&]() {
auto status = ConvertSignature(sig_def_key, signature_def);
if (!status.ok()) {
absl::MutexLock l(&error_status_mu);
error_status = std::move(status);
}
});
}
}
TF_RETURN_IF_ERROR(error_status);
TF_ASSIGN_OR_RETURN(auto assets, ConvertAssets());
mlir::OpBuilder builder(module_->getBodyRegion());
llvm::SmallVector<mlir::Attribute, 2> init_sym_refs;
if (import_restore_ && input_.meta_graph_def().has_saver_def()) {
std::vector<AssetInfo> variable_and_assets;
auto variable_filename_op = builder.create<mlir::tf_saved_model::AssetOp>(
module_->getLoc(),
builder.getStringAttr("__tf_saved_model_variables"),
builder.getStringAttr(io::JoinPath(kSavedModelVariablesDirectory,
kSavedModelVariablesFilename)));
variable_and_assets.push_back(
{input_.meta_graph_def().saver_def().filename_tensor_name(),
variable_filename_op});
variable_and_assets.insert(variable_and_assets.end(), assets.begin(),
assets.end());
const auto& restore_op_name =
input_.meta_graph_def().saver_def().restore_op_name();
TF_RETURN_IF_ERROR(ConvertInitializer(restore_op_name, variable_and_assets,
kTfSavedModelInitializerRestoreType));
init_sym_refs.push_back(
mlir::SymbolRefAttr::get(builder.getContext(), restore_op_name));
}
std::string init_op_name;
TF_RETURN_IF_ERROR(
internal::GetInitOp("", input_.meta_graph_def(), &init_op_name));
if (!init_op_name.empty()) {
TF_RETURN_IF_ERROR(ConvertInitializer(init_op_name, assets,
kTfSavedModelInitializerInitType));
init_sym_refs.push_back(
mlir::SymbolRefAttr::get(builder.getContext(), init_op_name));
}
builder.create<mlir::tf_saved_model::SessionInitializerOp>(
module_->getLoc(), builder.getArrayAttr(init_sym_refs));
(*module_)->setAttr("tf_saved_model.semantics", builder.getUnitAttr());
SortSavedModelModule(*module_);
MarkSavedModelFunctionVisibility(*module_);
return std::move(module_);
}
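// Full V1 importer: runs the lite signature-def importer and then lifts or
// initializes variables using the session stored in the bundle.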
class SavedModelSignatureDefImporter {
public:
static absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> Convert(
const SavedModelBundle& bundle,
std::optional<absl::Span<const std::string>> exported_names,
mlir::MLIRContext* context, tensorflow::MLIRImportOptions options) {
GraphDebugInfo debug_info;
if (bundle.debug_info != nullptr) debug_info = *bundle.debug_info;
TF_ASSIGN_OR_RETURN(auto input,
SimpleSavedModelMLIRImportInput::Create(
options, &bundle.meta_graph_def, debug_info));
TF_ASSIGN_OR_RETURN(auto module,
SavedModelSignatureDefImporterLite::Convert(
input, exported_names, context,
false));
mlir::OpBuilder builder(module->getContext());
(*module)->setAttr("tf_saved_model.under_construction",
builder.getUnitAttr());
TF_RETURN_IF_ERROR(
LiftVariables(bundle, *module, options.lift_variables,
options.include_variables_in_initializers));
(*module)->removeAttr("tf_saved_model.under_construction");
return module;
}
private:
static Status LiftVariables(const SavedModelBundle& bundle,
mlir::ModuleOp module,
bool lift_varhandle_ops_to_args,
bool include_variables_in_initializers);
};
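// Prepares the module (graph pruning, executor-to-functional conversion) and
// then either promotes initialized VarHandleOps to bound-input arguments or
// folds variable initialization into the session initializer.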
Status SavedModelSignatureDefImporter::LiftVariables(
const SavedModelBundle& bundle, mlir::ModuleOp module,
const bool lift_varhandle_ops_to_args,
const bool include_variables_in_initializers) {
mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
mlir::PassManager pm(module.getContext());
SetCrashReproducer(pm);
pm.addNestedPass<mlir::func::FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass());
pm.addNestedPass<mlir::func::FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
if (!include_variables_in_initializers) {
pm.addPass(
mlir::tf_saved_model::CreateRemoveVariablesInSessionInitializerPass());
}
pm.addNestedPass<mlir::func::FuncOp>(
mlir::TF::
CreateConvertReadonlyReferenceVariablesToResourceVariablesPass());
if (mlir::failed(pm.run(module)))
return diag_handler.Combine(
errors::Internal("Failed to prepare to lift variables."));
if (lift_varhandle_ops_to_args) {
if (failed(mlir::tf_saved_model::MarkInitializedVariablesInFunction(
module, bundle.GetSession())))
return diag_handler.Combine(
errors::Internal("Failed to prepare to mark initialized variables."));
pm.clear();
pm.addPass(mlir::TF::CreatePromoteVarHandlesToArgsPass());
if (mlir::failed(pm.run(module)))
return diag_handler.Combine(
errors::Internal("Failed to promote var handles to args."));
if (failed(
mlir::tf_saved_model::LiftVariables(module, bundle.GetSession())))
return diag_handler.Combine(
errors::Internal("Failed to lift variables."));
} else {
if (failed(mlir::tf_saved_model::InitializeVariablesInSessionInitializer(
module, bundle.GetSession())))
return diag_handler.Combine(
errors::Internal("Failed to initialize variables in session init."));
}
pm.clear();
pm.addNestedPass<mlir::func::FuncOp>(
mlir::tf_saved_model::CreateDedupBoundInputBindingPass());
if (mlir::failed(pm.run(module)))
return diag_handler.Combine(
errors::Internal("Failed to dedup bound inputs."));
return absl::OkStatus();
}
}
SavedModelMLIRImportInput::~SavedModelMLIRImportInput() = default;
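// Converts a GraphDef into an MLIR module, preprocessing the graph and
// optionally upgrading legacy control flow first.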
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertGraphdefToMlir(
const GraphDef& graphdef, const GraphDebugInfo& debug_info,
const GraphImportConfig& specs, mlir::MLIRContext* context) {
GraphConstructorOptions options;
options.allow_internal_ops = true;
Graph graph(OpRegistry::Global());
GraphDef preprocessed_graphdef(graphdef);
TF_RETURN_IF_ERROR(PreprocessGraphDef(&specs, &preprocessed_graphdef));
if (specs.upgrade_legacy) {
TF_RETURN_IF_ERROR(GenerateResourceSharedNameIfEmpty(
preprocessed_graphdef, graph.flib_def().default_registry()));
}
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
options, std::move(preprocessed_graphdef), &graph));
return ConvertGraphToMlir(graph, debug_info, graph.flib_def(), specs,
context);
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertGraphToMlir(
const Graph& graph, const GraphDebugInfo& debug_info,
const FunctionLibraryDefinition& flib_def, const GraphImportConfig& specs,
mlir::MLIRContext* context) {
if (specs.upgrade_legacy) {
TF_RETURN_IF_ERROR(
UpgradeLegacyGraph(const_cast<Graph*>(&graph),
const_cast<FunctionLibraryDefinition*>(&flib_def),
specs.restrict_functionalization_to_compiled_nodes));
}
std::unordered_map<std::string, std::string> tf_name_to_mlir_name;
TF_ASSIGN_OR_RETURN(auto module, GraphDefImporter::Convert(
context, graph, debug_info, flib_def,
specs, tf_name_to_mlir_name));
if (specs.set_original_tf_func_name) {
mlir::Builder builder(module->getContext());
mlir::SymbolTable symbol_table(*module);
for (const auto& [tf_name, mlir_name] : tf_name_to_mlir_name) {
auto func_op = symbol_table.lookup<mlir::func::FuncOp>(mlir_name);
TF_RET_CHECK(func_op)
<< "Graphdef importer should have created a function named "
<< mlir_name << ".";
func_op->setAttr("tf._original_func_name",
builder.getStringAttr(tf_name));
}
}
return module;
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertFunctionToMlir(
const FunctionBody* fbody, const FunctionLibraryDefinition& flib_def,
mlir::MLIRContext* context) {
tensorflow::GraphDebugInfo dummy_debug_info;
tensorflow::GraphImportConfig specs;
specs.graph_func_name = fbody->record->fdef().signature().name();
specs.enable_shape_inference = false;
specs.graph_as_function = true;
for (const auto* control_ret_node : fbody->control_ret_nodes)
specs.control_outputs.push_back(control_ret_node->name());
return ConvertGraphToMlir(*fbody->graph, dummy_debug_info, flib_def, specs,
context);
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelToMlir(
SavedModelV2Bundle* saved_model, mlir::MLIRContext* context,
absl::Span<std::string> exported_names, MLIRImportOptions options) {
return ConvertSavedModelObjectGraph(saved_model, exported_names, context,
options);
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelV1ToMlir(
const SavedModelBundle& saved_model, absl::Span<std::string> exported_names,
mlir::MLIRContext* context, MLIRImportOptions options) {
std::optional<absl::Span<const std::string>> optional_exported_names;
if (!exported_names.empty()) optional_exported_names = exported_names;
return SavedModelSignatureDefImporter::Convert(
saved_model, optional_exported_names, context, options);
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelV1ToMlirLite(
const MetaGraphDef& meta_graph_def, const GraphDebugInfo& debug_info,
std::optional<absl::Span<const std::string>> exported_names,
mlir::MLIRContext* context, MLIRImportOptions options) {
TF_ASSIGN_OR_RETURN(auto input, SimpleSavedModelMLIRImportInput::Create(
options, &meta_graph_def, debug_info));
return ConvertSavedModelV1ToMlirLite(
input, exported_names, context,
options.unconditionally_use_set_output_shapes);
}
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> ConvertSavedModelV1ToMlirLite(
SavedModelMLIRImportInput& input,
std::optional<absl::Span<const std::string>> exported_names,
mlir::MLIRContext* context, bool unconditionally_use_set_output_shapes) {
return SavedModelSignatureDefImporterLite::Convert(
input, exported_names, context,
true, unconditionally_use_set_output_shapes);
}
std::string MlirModuleToString(mlir::ModuleOp module,
mlir::OpPrintingFlags flags) {
std::string txt_module;
{
llvm::raw_string_ostream os{txt_module};
module.print(os, flags);
}
return txt_module;
}
std::string MlirModuleToString(mlir::ModuleOp module, bool show_debug_info) {
mlir::OpPrintingFlags flags;
if (show_debug_info) flags.enableDebugInfo();
return MlirModuleToString(module, flags);
}
} | #include "tensorflow/compiler/mlir/tfrt/translate/import_model.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/mlir/tfrt/translate/tfrt_compile_options.h"
namespace tensorflow {
namespace {
TEST(GetTfrtPipelineOptions, BatchPaddingPolicy) {
tensorflow::TfrtCompileOptions options;
options.batch_padding_policy = "PAD_TEST_OPTION";
auto pipeline_options = GetTfrtPipelineOptions(options);
EXPECT_EQ(pipeline_options->batch_padding_policy, "PAD_TEST_OPTION");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/import_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/import_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6d6e20f-9b2b-4830-9059-b6284133322f | cpp | tensorflow/tensorflow | mlir_to_bytecode | tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.cc | tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode_test.cc | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstdint>
#include <cstring>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Region.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/function.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/kernel.h"
namespace mlrt {
namespace {
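// An attribute can be stored inline in a kernel's attribute list if it is an
// integer, float, or flat symbol reference whose encoding fits in 4 bytes.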
bool CanBeInlined(mlir::Attribute attr, absl::string_view data) {
return mlir::isa<mlir::IntegerAttr, mlir::FloatAttr, mlir::FlatSymbolRefAttr>(
attr) &&
data.size() <= sizeof(uint32_t);
}
template <typename T>
std::string EncodeIntegerOrFloat(T attr) {
std::string data(sizeof(attr), '\0');
std::memcpy(data.data(), &attr, sizeof(attr));
return data;
}
template <typename T>
std::optional<std::string> EncodeListOfInteger(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
mlir::Type type;
for (int i = 0; i < array.size(); ++i) {
if (auto integer_attr = mlir::dyn_cast<mlir::IntegerAttr>(array[i])) {
if (type && integer_attr.getType() != type) return std::nullopt;
type = integer_attr.getType();
llvm::APInt value = integer_attr.getValue();
if (value.getBitWidth() != sizeof(T) * 8) return std::nullopt;
ctor.ConstructAt(i, value.getZExtValue());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeListOfSymbolRef(
const ModuleEmitterContext& module_context, mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<uint32_t>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto symbol_ref = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(array[i])) {
ctor.ConstructAt(i, module_context.GetFunctionId(symbol_ref.getValue()));
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
template <typename T>
std::optional<std::string> EncodeDenseArray(llvm::ArrayRef<T> array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<T>>(&allocator, array.size());
if (!array.empty()) {
ctor.Place(reinterpret_cast<const char*>(array.data()),
array.size() * sizeof(T));
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeDenseBoolArray(llvm::ArrayRef<bool> array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<uint8_t>>(&allocator, array.size());
if (!array.empty()) {
std::vector<uint8_t> data(array.size());
int i = 0;
for (auto v : array) {
data[i++] = static_cast<uint8_t>(v);
}
ctor.Place(reinterpret_cast<const char*>(data.data()), data.size());
}
return std::string(buffer.data(), buffer.size());
}
std::optional<std::string> EncodeListOfString(mlir::ArrayAttr array) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto ctor = bc::New<bc::Vector<bc::String>>(&allocator, array.size());
for (int i = 0; i < array.size(); ++i) {
if (auto string_attr = mlir::dyn_cast<mlir::StringAttr>(array[i])) {
ctor.ConstructAt(i, string_attr.getValue().str());
} else {
return std::nullopt;
}
}
return std::string(buffer.data(), buffer.size());
}
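// Per-function emission state. Tracks the remaining number of uses of every
// SSA value and recycles register ids once a value's last use is emitted.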
struct FunctionEmitterContext {
explicit FunctionEmitterContext(const ModuleEmitterContext* module_context)
: module_context(*module_context) {}
const ModuleEmitterContext& module_context;
struct RegInfo {
int num_uses = 0;
int id = -1;
};
int next_reg_id = 0;
llvm::DenseMap<mlir::Value, RegInfo> register_table;
std::vector<int> free_regs;
int AssignRegId() {
if (free_regs.empty()) {
return next_reg_id++;
}
int id = free_regs.back();
free_regs.pop_back();
return id;
}
void FreeRegId(int id) { free_regs.push_back(id); }
};
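// Emits one operation as a bytecode kernel: allocates result registers,
// records operand registers with last-use flags, and either inlines small
// attributes or references them by id.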
void EmitKernel(FunctionEmitterContext& function_context,
bc::Kernel::Constructor& constructor, mlir::Operation& op,
std::vector<uint32_t>& function_output_regs,
std::vector<uint8_t>& function_output_last_uses) {
std::vector<uint32_t> results;
results.reserve(op.getNumResults());
for (auto result : op.getResults()) {
auto iter = function_context.register_table.find(result);
CHECK(iter != function_context.register_table.end());
CHECK_EQ(iter->second.id, -1);
iter->second.id = function_context.AssignRegId();
results.push_back(iter->second.id);
}
constructor.construct_results(results.size())
.Assign(results.begin(), results.end());
std::vector<uint32_t> arguments;
std::vector<uint8_t> last_uses;
arguments.reserve(op.getNumOperands());
last_uses.reserve(op.getNumOperands());
for (auto operand : op.getOperands()) {
auto iter = function_context.register_table.find(operand);
CHECK(iter != function_context.register_table.end());
int id = iter->second.id;
CHECK_NE(id, -1);
last_uses.push_back(0);
if (--iter->second.num_uses == 0) {
function_context.FreeRegId(id);
last_uses.back() = 1;
}
arguments.push_back(id);
}
constructor.construct_arguments(arguments.size())
.Assign(arguments.begin(), arguments.end());
constructor.construct_last_uses(last_uses.size())
.Assign(last_uses.begin(), last_uses.end());
std::vector<uint32_t> attributes;
attributes.reserve(op.getAttrs().size());
for (auto attr : op.getAttrs()) {
int attr_id =
function_context.module_context.GetAttributeId(attr.getValue());
absl::string_view attr_data =
function_context.module_context.attributes().at(attr_id);
if (CanBeInlined(attr.getValue(), attr_data)) {
uint32_t data = 0;
std::memcpy(&data, attr_data.data(), attr_data.size());
attributes.push_back(data);
} else {
attributes.push_back(attr_id);
}
}
constructor.construct_attributes(attributes.size())
.Assign(attributes.begin(), attributes.end());
if (llvm::isa<mlir::func::ReturnOp>(&op)) {
constructor.set_code(function_context.module_context.GetKernelId("return"));
function_output_regs = std::move(arguments);
function_output_last_uses = std::move(last_uses);
} else if (llvm::isa<mlir::func::CallOp>(&op)) {
constructor.set_code(function_context.module_context.GetKernelId("call"));
} else {
llvm::StringRef op_name = op.getName().getStringRef();
constructor.set_code(function_context.module_context.GetKernelId(op_name));
}
}
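// Emits a single-block function. Block arguments receive the first registers
// and the return op's operands become the function's output registers.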
void EmitFunction(const ModuleEmitterContext& module_context,
bc::Function::Constructor& constructor, llvm::StringRef name,
mlir::Region& region) {
FunctionEmitterContext function_context(&module_context);
constructor.construct_name(name.str());
DCHECK(llvm::hasSingleElement(region)) << "should have a single block";
auto& block = region.front();
auto& register_table = function_context.register_table;
std::vector<uint32_t> input_regs;
input_regs.reserve(block.getNumArguments());
for (auto arg : block.getArguments()) {
int id = function_context.AssignRegId();
input_regs.push_back(id);
register_table[arg] = {static_cast<int>(std::distance(arg.getUses().begin(),
arg.getUses().end())),
id};
}
constructor.construct_input_regs(input_regs);
for (auto& op : block) {
for (auto result : op.getResults()) {
register_table[result] = {static_cast<int>(
std::distance(result.getUses().begin(), result.getUses().end()))};
}
}
auto kernels_constructor =
constructor.construct_kernels(block.getOperations().size());
std::vector<uint32_t> output_regs;
std::vector<uint8_t> output_last_uses;
for (const auto& iter : llvm::enumerate(block.getOperations())) {
int i = iter.index();
mlir::Operation& op = iter.value();
auto kernel_ctor = kernels_constructor.ConstructAt(i);
EmitKernel(function_context, kernel_ctor, op, output_regs,
output_last_uses);
}
constructor.set_num_regs(function_context.next_reg_id);
constructor.construct_output_regs(output_regs);
constructor.construct_output_last_uses(output_last_uses);
}
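// Collects functions, kernel names, and attributes from the module, then
// emits every function body into the executable.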
absl::Status EmitExecutable(ModuleEmitterContext& module_context,
bc::Executable::Constructor& constructor,
mlir::ModuleOp module) {
module.walk(
[&](mlir::func::FuncOp func) { module_context.AddFunction(func); });
auto functions = module_context.functions();
for (auto func : functions) {
if (!llvm::hasSingleElement(func.getRegion())) {
return absl::InvalidArgumentError("function should have a single block.");
}
auto& block = func.getRegion().front();
for (auto& op : block) {
if (llvm::isa<mlir::func::CallOp>(&op)) {
module_context.AddKernelName("call");
} else if (llvm::isa<mlir::func::ReturnOp>(&op)) {
if (op.getNumResults() != 0) {
return absl::InvalidArgumentError(
"Block terminator must be a return op.");
}
module_context.AddKernelName("return");
} else {
module_context.AddKernelName(op.getName().getStringRef().str());
}
for (auto attr : op.getAttrs()) {
if (auto status = module_context.AddAttribute(&op, attr.getValue());
!status.ok()) {
return status;
}
}
}
}
constructor.construct_kernel_names(module_context.kernels().size())
.Assign(module_context.kernels().begin(), module_context.kernels().end());
auto functions_constructor =
constructor.construct_functions(functions.size());
for (int i = 0; i < functions.size(); ++i) {
auto func = functions[i];
auto function_ctor = functions_constructor.ConstructAt(i);
EmitFunction(module_context, function_ctor, func.getSymName(),
func.getRegion());
}
constructor.construct_attributes(module_context.attributes().size())
.Assign(module_context.attributes().begin(),
module_context.attributes().end());
return absl::OkStatus();
}
}
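// Encodes an attribute (using a dialect-specific encoder when registered,
// otherwise the default encoder) and deduplicates identical encodings.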
absl::Status ModuleEmitterContext::AddAttribute(mlir::Operation* op,
mlir::Attribute attr) {
absl::StatusOr<std::string> attr_data;
if (auto* encoder = attribute_encoder_registry_.Get(
op->getName().getDialectNamespace())) {
attr_data = (*encoder)(*this, attr);
} else {
attr_data = DefaultEncodeAttribute(attr);
}
if (!attr_data.ok()) return std::move(attr_data).status();
int id = AddData(std::move(*attr_data), attributes_, attribute_data_id_map_);
attribute_id_map_[attr] = id;
return absl::OkStatus();
}
int ModuleEmitterContext::AddFunction(mlir::func::FuncOp func) {
int id = functions_.size();
functions_.push_back(func);
DCHECK(!function_name_id_map_.contains(func.getSymName()));
function_name_id_map_[func.getSymName()] = id;
return id;
}
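// Default encoding for the attribute kinds the bytecode natively supports;
// returns std::nullopt for anything it cannot handle.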
std::optional<std::string> EncodeSimpleAttribute(
const ModuleEmitterContext& module_context, mlir::Attribute attr) {
return llvm::TypeSwitch<mlir::Attribute, std::optional<std::string>>(attr)
.Case<mlir::StringAttr>(
[](const auto& str_attr) { return str_attr.str(); })
.Case<mlir::IntegerAttr>(
[](const auto& integer_attr) -> std::optional<std::string> {
switch (llvm::APInt value = integer_attr.getValue();
value.getBitWidth()) {
case 1:
return EncodeIntegerOrFloat<uint8_t>(value.getZExtValue());
case 32:
return EncodeIntegerOrFloat<uint32_t>(value.getZExtValue());
case 64:
return EncodeIntegerOrFloat<uint64_t>(value.getZExtValue());
default:
return std::nullopt;
}
})
.Case<mlir::FloatAttr>(
[](const auto& float_attr) -> std::optional<std::string> {
llvm::APFloat value = float_attr.getValue();
if (float_attr.getType().isF32()) {
return EncodeIntegerOrFloat<float>(value.convertToFloat());
}
return std::nullopt;
})
.Case<mlir::ArrayAttr>([&](const auto& array_attr)
-> std::optional<std::string> {
if (auto encoded_list_i32 = EncodeListOfInteger<uint32_t>(array_attr)) {
return std::move(*encoded_list_i32);
} else if (auto encoded_list_i64 =
EncodeListOfInteger<uint64_t>(array_attr)) {
return std::move(*encoded_list_i64);
} else if (auto encoded_list_string = EncodeListOfString(array_attr)) {
return std::move(*encoded_list_string);
} else if (auto encoded_list_symbol_ref =
EncodeListOfSymbolRef(module_context, array_attr)) {
return std::move(*encoded_list_symbol_ref);
} else {
return std::nullopt;
}
})
.Case<mlir::DenseI32ArrayAttr>(
[](const auto& dense_array_i32) -> std::optional<std::string> {
return EncodeDenseArray<int32_t>(dense_array_i32);
})
.Case<mlir::DenseI64ArrayAttr>(
[](const auto& dense_array_i64) -> std::optional<std::string> {
return EncodeDenseArray<int64_t>(dense_array_i64);
})
.Case<mlir::DenseBoolArrayAttr>(
[](const auto& dense_array_bool) -> std::optional<std::string> {
return EncodeDenseBoolArray(dense_array_bool.asArrayRef());
})
.Case<mlir::FlatSymbolRefAttr>([&](const auto& symbol_ref) {
return EncodeIntegerOrFloat<uint32_t>(
module_context.GetFunctionId(symbol_ref.getValue()));
})
.Default([](const auto& attr) { return std::nullopt; });
}
absl::StatusOr<std::string> ModuleEmitterContext::DefaultEncodeAttribute(
mlir::Attribute attr) {
if (auto result = EncodeSimpleAttribute(*this, attr)) {
return std::move(*result);
}
  std::string attr_str;
llvm::raw_string_ostream os(attr_str);
attr.print(os);
return absl::InvalidArgumentError(
absl::StrCat("Try to encode unsupported attribute: ", attr_str));
}
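// Serializes an MLIR module into an mlrt bytecode buffer.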
absl::StatusOr<bc::Buffer> EmitExecutable(
const AttributeEncoderRegistry& attribute_encoder_registry,
mlir::ModuleOp module) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
ModuleEmitterContext module_context(&attribute_encoder_registry);
auto executable_ctor = bc::New<bc::Executable>(&allocator);
if (auto status = EmitExecutable(module_context, executable_ctor, module);
!status.ok()) {
return status;
}
buffer.shrink_to_fit();
return buffer;
}
} | #include "tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.h"
#include <cstring>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
#include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include "tensorflow/core/tfrt/mlrt/interpreter/attribute_span.h"
#include "tsl/platform/resource_loader.h"
#include "tsl/platform/status_matchers.h"
namespace mlrt {
namespace {
using ::testing::ElementsAreArray;
using ::testing::FloatEq;
using ::testing::IsEmpty;
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
TEST(MlirToByteCodeTest, Basic) {
constexpr char kBasicMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/basic.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto kernel_names = executable.kernel_names();
EXPECT_THAT(kernel_names,
ElementsAreArray({"test_mlbc.add.i32", "test_mlbc.sub.i32",
"call", "return"}));
auto functions = executable.functions();
ASSERT_GE(functions.size(), 1);
auto function = functions[0];
EXPECT_EQ(function.name().str(), "add_i32_10");
EXPECT_EQ(function.num_regs(), 5);
EXPECT_THAT(function.input_regs(), ElementsAreArray({0}));
EXPECT_THAT(function.output_regs(), ElementsAreArray({0, 2, 2}));
EXPECT_THAT(function.output_last_uses(),
ElementsAreArray({true, false, true}));
auto kernels = function.kernels();
ASSERT_EQ(kernels.size(), 11);
EXPECT_EQ(kernels[0].code(), 0);
EXPECT_THAT(kernels[0].arguments(), ElementsAreArray({0, 0}));
EXPECT_THAT(kernels[0].results(), ElementsAreArray({1}));
EXPECT_THAT(kernels[0].last_uses(), ElementsAreArray({0, 0}));
for (int i = 1; i < 9; i++) {
EXPECT_EQ(kernels[i].code(), i % 2);
EXPECT_THAT(kernels[i].arguments(), ElementsAreArray({(i - 1) % 2 + 1, 0}));
EXPECT_THAT(kernels[i].results(), ElementsAreArray({i % 2 + 1}));
EXPECT_THAT(kernels[i].last_uses(), ElementsAreArray({1, 0}));
}
EXPECT_EQ(kernels[9].code(), 2);
EXPECT_THAT(kernels[9].arguments(), ElementsAreArray({1}));
EXPECT_THAT(kernels[9].last_uses(), ElementsAreArray({true}));
EXPECT_THAT(kernels[9].results(), ElementsAreArray({2, 3, 4}));
EXPECT_EQ(kernels[10].code(), 3);
EXPECT_THAT(kernels[10].arguments(), ElementsAreArray({0, 2, 2}));
EXPECT_THAT(kernels[10].last_uses(), ElementsAreArray({true, false, true}));
EXPECT_TRUE(kernels[10].results().empty());
}
template <typename T>
absl::StatusOr<T> DecodeAttribute(absl::string_view data) {
if (data.size() < sizeof(T))
return absl::InvalidArgumentError("Invalid data size for attribute.");
T value;
std::memcpy(&value, data.data(), sizeof(T));
return value;
}
TEST(MlirToByteCodeTest, BasicAttributes) {
constexpr char kBasicAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"basic_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kBasicAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 15);
auto attr_iter = attributes.begin();
EXPECT_EQ(*attr_iter, "test string");
++attr_iter;
EXPECT_EQ(*attr_iter, "ts");
++attr_iter;
EXPECT_THAT(DecodeAttribute<int32_t>(*attr_iter), IsOkAndHolds(100));
++attr_iter;
EXPECT_THAT(DecodeAttribute<int64_t>(*attr_iter), IsOkAndHolds(200));
++attr_iter;
EXPECT_THAT(DecodeAttribute<float>(*attr_iter), IsOkAndHolds(FloatEq(3.0)));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint8_t>(*attr_iter), IsOkAndHolds(0));
++attr_iter;
bc::Vector<int64_t> list_of_i64((*attr_iter).data());
EXPECT_THAT(list_of_i64, ElementsAreArray({0, 1, 2, 3, 4}));
++attr_iter;
bc::Vector<int32_t> list_of_i32((*attr_iter).data());
EXPECT_THAT(list_of_i32, ElementsAreArray({0, 1, 2, 3}));
++attr_iter;
bc::Vector<bc::String> list_of_str((*attr_iter).data());
EXPECT_THAT(list_of_str, ElementsAreArray({"string 0", "string 1"}));
++attr_iter;
EXPECT_THAT(DecodeAttribute<uint32_t>(*attr_iter), IsOkAndHolds(1));
EXPECT_EQ(executable.functions()[1].name().Get(), "callee");
++attr_iter;
bc::Vector<int32_t> list_of_symbol_ref((*attr_iter).data());
EXPECT_EQ(executable.functions()[2].name().Get(), "callee0");
EXPECT_EQ(executable.functions()[3].name().Get(), "callee1");
EXPECT_THAT(list_of_symbol_ref, ElementsAreArray({2, 3}));
++attr_iter;
bc::Vector<int32_t> dense_array_of_i32((*attr_iter).data());
EXPECT_THAT(dense_array_of_i32, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int64_t> dense_array_of_i64((*attr_iter).data());
EXPECT_THAT(dense_array_of_i64, ElementsAreArray({0, 1, 2}));
++attr_iter;
bc::Vector<int32_t> empty_dense_array((*attr_iter).data());
EXPECT_TRUE(empty_dense_array.empty());
++attr_iter;
bc::Vector<uint8_t> dense_array_of_bool((*attr_iter).data());
EXPECT_THAT(dense_array_of_bool, ElementsAreArray({true, false}));
auto kernels = executable.functions()[0].kernels();
ASSERT_EQ(kernels.size(), 16);
auto kernel_iter = kernels.begin();
auto attribute_span = [&](auto kernel_iter) {
return mlrt::AttributeSpan((*kernel_iter).attributes(), attributes);
};
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(),
"test string");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<bc::String>(0).Get(), "ts");
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int32_t>(0), 100);
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<int64_t>(0), 200);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<float>(0), FloatEq(3.0));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint8_t>(0), false);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2, 3, 4}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bc::String>>(0),
ElementsAreArray({"string 0", "string 1"}));
++kernel_iter;
EXPECT_EQ(attribute_span(kernel_iter).GetAs<uint32_t>(0), 1);
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({2, 3}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int64_t>>(0),
ElementsAreArray({0, 1, 2}));
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<int32_t>>(0),
IsEmpty());
++kernel_iter;
EXPECT_THAT(attribute_span(kernel_iter).GetAs<bc::Vector<bool>>(0),
ElementsAreArray({true, false}));
}
TEST(MlirToByteCodeTest, UnsupportedAttributes) {
constexpr char kUnsupportedAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"unsupported_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kUnsupportedAttributesMlir),
&mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
EXPECT_THAT(EmitExecutable(attribute_encoder_registry, mlir_module.get()),
StatusIs(absl::StatusCode::kInvalidArgument,
"Try to encode unsupported attribute: unit"));
}
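// Test-only attribute encoded with the bytecode helpers: a shape vector plus a
// uint32 data vector, used to exercise the custom attribute encoder below.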
class CustomDense {
public:
struct StorageType {
using Self = StorageType;
DEFINE_BYTECODE_FIELD(bc::Vector<int64_t>, shape);
DEFINE_BYTECODE_FIELD(bc::Vector<uint32_t>, data);
};
class Constructor {
public:
Constructor(bc::Allocator* allocator, bc::BcAddr_t address)
: allocator_(allocator), address_(address) {}
template <typename... Args>
auto construct_shape(Args&&... args) {
return StorageType::construct_shape(allocator_, address_,
std::forward<Args>(args)...);
}
template <typename... Args>
auto construct_data(Args&&... args) {
return StorageType::construct_data(allocator_, address_,
std::forward<Args>(args)...);
}
bc::BcAddr_t address() const { return address_; }
private:
bc::Allocator* allocator_;
bc::BcAddr_t address_;
};
using NonTrivialConstructorType = Constructor;
explicit CustomDense(const char* p) : p_(p) {}
bc::Vector<int64_t> shape() const { return StorageType::read_shape(p_); }
bc::Vector<uint32_t> data() const { return StorageType::read_data(p_); }
private:
const char* p_ = nullptr;
};
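// Encodes a 32-bit integer DenseIntElementsAttr into the CustomDense bytecode
// layout (shape followed by the raw element data).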
absl::StatusOr<std::string> EncodeCustomDense(const ModuleEmitterContext&,
mlir::Attribute attr) {
auto dense_int_attr = mlir::dyn_cast<mlir::DenseIntElementsAttr>(attr);
if (!dense_int_attr)
return absl::InvalidArgumentError(
"The element of the custom dense attribute must be an integer.");
if (mlir::cast<mlir::IntegerType>(dense_int_attr.getElementType())
.getWidth() != 32) {
return absl::InvalidArgumentError(
"The element of the custom dense attribute must be an i32 integer.");
}
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
auto custom_dense_ctor = bc::New<CustomDense>(&allocator);
auto shaped_type = dense_int_attr.getType();
std::vector<int64_t> shape(shaped_type.getShape().begin(),
shaped_type.getShape().end());
custom_dense_ctor.construct_shape(shape);
custom_dense_ctor.construct_data(shaped_type.getNumElements())
.Place(dense_int_attr.getRawData().data(),
dense_int_attr.getRawData().size());
return std::string(buffer.data(), buffer.size());
}
TEST(MlirToByteCodeTest, CustomDense) {
constexpr char kCustomAttributesMlir[] =
"tensorflow/compiler/mlir/tfrt/translate/mlrt/testdata/"
"custom_attributes.mlir";
mlir::DialectRegistry registry;
registry.insert<mlir::func::FuncDialect>();
mlir::MLIRContext mlir_context(registry);
mlir_context.allowUnregisteredDialects();
auto mlir_module = mlir::parseSourceFile<mlir::ModuleOp>(
tsl::GetDataDependencyFilepath(kCustomAttributesMlir), &mlir_context);
AttributeEncoderRegistry attribute_encoder_registry;
attribute_encoder_registry.Register("test_custom", &EncodeCustomDense);
bc::Buffer buffer =
EmitExecutable(attribute_encoder_registry, mlir_module.get()).value();
bc::Executable executable(buffer.data());
auto attributes = executable.attributes();
ASSERT_EQ(attributes.size(), 10);
for (int i = 0; i < 10; ++i) {
bc::String attr_data = attributes[i];
CustomDense custom_dense(attr_data.data());
EXPECT_THAT(custom_dense.shape(), ElementsAreArray({1}));
EXPECT_THAT(custom_dense.data(), ElementsAreArray({i}));
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/translate/mlrt/mlir_to_bytecode_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
2f9e7e83-870e-495c-80a5-8d17755d9c9a | cpp | tensorflow/tensorflow | tfrt_fallback_util | tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.cc | tensorflow/compiler/mlir/tfrt/tests/ir/tfrt_fallback_util_test.cc |
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
namespace tfrt {
namespace fallback_async {
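// Returns true if any user of the function argument at `arg_index` belongs to
// the fallback_async dialect.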
bool IsArgConsumedByFallback(mlir::func::FuncOp func, int arg_index) {
auto arg = func.getArgument(arg_index);
for (mlir::Operation *user : arg.getUsers()) {
if (llvm::isa<FallbackAsyncDialect>(user->getDialect())) return true;
}
return false;
}
void ForEachArgConsumedByFallback(
mlir::func::FuncOp func, llvm::function_ref<void(int arg_index)> action) {
for (int arg_index = 0; arg_index < func.getNumArguments(); ++arg_index) {
if (IsArgConsumedByFallback(func, arg_index)) action(arg_index);
}
}
void ForEachArgConsumedByFallback(
mlir::ModuleOp module,
llvm::function_ref<void(llvm::StringRef func_name, int arg_index)> action) {
for (auto func : module.getOps<mlir::func::FuncOp>()) {
ForEachArgConsumedByFallback(
func, [func_name = func.getName(), action](int arg_index) {
action(func_name, arg_index);
});
}
}
}
} |
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.h"
#include <string>
#include <utility>
#include <vector>
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tfrt/init_tfrt_dialects.h"
namespace tfrt {
namespace fallback_async {
namespace {
TEST(SavedModelTest, MapFallbackArgs) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/ir/testdata/test.mlir");
mlir::DialectRegistry registry;
RegisterTFRTDialects(registry);
registry.insert<tfrt::fallback_async::FallbackAsyncDialect>();
registry.insert<tfrt::fallback_sync::FallbackSyncDialect>();
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
std::vector<std::pair<std::string, int>> func_and_index;
ForEachArgConsumedByFallback(
module.get(),
[&func_and_index](llvm::StringRef func_name, int arg_index) {
func_and_index.push_back({func_name.str(), arg_index});
});
ASSERT_EQ(func_and_index.size(), 1);
EXPECT_EQ(func_and_index[0].first, "test");
EXPECT_EQ(func_and_index[0].second, 2);
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/tests/ir/tfrt_fallback_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
204d4e8c-692c-433f-a27f-07a914eac2a7 | cpp | tensorflow/tensorflow | update_op_cost_in_tfrt_mlir | tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.cc | tensorflow/compiler/mlir/tfrt/tests/analysis/update_op_cost_in_tfrt_mlir_test.cc |
#include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/compiler/mlir/tfrt/analysis/cost_analysis.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
namespace tensorflow {
namespace tfrt_compiler {
constexpr char kCostAttrName[] = "_tfrt_cost";
constexpr char kOpKeyAttrName[] = "op_key";
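// Overwrites each op's "_tfrt_cost" attribute with the cost recorded for its
// "op_key", skipping ops that already have a registered cost function.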
void UpdateOpCostInTfrtMlir(mlir::ModuleOp op,
const tfrt_stub::CostRecorder& cost_recorder) {
mlir::Builder builder(op);
op.walk([&](mlir::Operation* op) {
if (HasCostFunctionRegistered(op->getName().getStringRef())) return;
const auto cost_attr = op->getAttrOfType<mlir::IntegerAttr>(kCostAttrName);
if (!cost_attr) return;
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (!op_key_attr) return;
const int64_t op_key = op_key_attr.getInt();
op->setAttr(kCostAttrName, builder.getI64IntegerAttr(
cost_recorder.GetCost(op_key)));
});
}
}
} |
#include "tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.h"
#include <cstdint>
#include <cstdlib>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_async.h"
#include "tensorflow/compiler/mlir/tfrt/ir/tfrt_fallback_sync.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/fallback/cost_recorder.h"
#include "tfrt/init_tfrt_dialects.h"
namespace tensorflow {
namespace {
constexpr char kCostAttrName[] = "_tfrt_cost";
constexpr char kOpKeyAttrName[] = "op_key";
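// Collects the op_key -> cost mapping from every op in the module that carries
// both the cost and op_key attributes.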
absl::flat_hash_map<int64_t, uint64_t> GetOpCostMap(mlir::ModuleOp op) {
absl::flat_hash_map<int64_t, uint64_t> op_cost_map;
op.walk([&](mlir::Operation* op) {
const auto cost_attr = op->getAttrOfType<mlir::IntegerAttr>(kCostAttrName);
if (!cost_attr) return;
const auto op_key_attr =
op->getAttrOfType<mlir::IntegerAttr>(kOpKeyAttrName);
if (!op_key_attr) return;
op_cost_map[op_key_attr.getInt()] = cost_attr.getInt();
});
return op_cost_map;
}
TEST(CostUpdateTest, Basic) {
std::string saved_model_mlir_path = tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tfrt/tests/analysis/testdata/test.mlir");
mlir::DialectRegistry registry;
tfrt::RegisterTFRTDialects(registry);
registry.insert<tfrt::fallback_async::FallbackAsyncDialect>();
registry.insert<tfrt::fallback_sync::FallbackSyncDialect>();
mlir::MLIRContext context(registry);
auto module =
mlir::parseSourceFile<mlir::ModuleOp>(saved_model_mlir_path, &context);
ASSERT_TRUE(module);
auto expected_op_cost_map = GetOpCostMap(module.get());
EXPECT_EQ(expected_op_cost_map.size(), 1);
unsigned int seed = 23579;
for (auto& [op_key, cost] : expected_op_cost_map) {
cost = rand_r(&seed) % 1000;
}
tensorflow::tfrt_stub::CostRecorder cost_recorder;
for (const auto& [op_key, cost] : expected_op_cost_map) {
cost_recorder.RecordCost(op_key, cost);
}
tfrt_compiler::UpdateOpCostInTfrtMlir(module.get(), cost_recorder);
const auto got_op_cost_map = GetOpCostMap(module.get());
EXPECT_THAT(got_op_cost_map, ::testing::ContainerEq(expected_op_cost_map));
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/update_op_cost_in_tfrt_mlir.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/tests/analysis/update_op_cost_in_tfrt_mlir_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4b6aa12d-4153-40bd-b957-b2c2f6d2f921 | cpp | tensorflow/tensorflow | tf2hlo | tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.cc | tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo_test.cc |
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/legalize_tf.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_constants.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/layout_util.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/client/client_library.h"
#include "xla/hlo/translate/hlo_to_mhlo/hlo_to_mlir_hlo.h"
#include "xla/python/ifrt/client.h"
#include "xla/service/computation_placer.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/tpu/kernels/tpu_compile_op_support.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
static constexpr absl::string_view kEntryFuncName = "main";
}
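// Validates the TPU compile metadata against the runtime inputs and fills in
// each argument's shape from the corresponding input.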
absl::Status UpdateCompileMetadata(
tensorflow::tpu::TPUCompileMetadataProto& metadata,
absl::Span<const DtypeAndShape> inputs) {
VLOG(3) << "TpuCompileMetadata before shape is populated " << metadata;
if (metadata.num_replicas() < 1 || metadata.num_cores_per_replica() < 1) {
return absl::InternalError(
absl::StrCat("Number of replicas ", metadata.num_replicas(),
" and number of cores per replica ",
metadata.num_cores_per_replica(), " must be >= 1"));
}
if (metadata.args_size() != inputs.size()) {
return absl::InternalError(
absl::StrCat("Number of inputs mismatched! Expected ",
metadata.args_size(), " got ", inputs.size()));
}
for (int i = 0; i < metadata.args_size(); ++i) {
if (metadata.args(i).kind() !=
tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER) {
return absl::InternalError(absl::StrCat(
"Only support PARAMETER, but got ", metadata.args(i).kind()));
}
if (metadata.args(i).dtype() != inputs[i].dtype) {
return absl::InternalError(absl::StrCat("Dtype mismatched! Expected ",
metadata.args(i).dtype(), " got ",
inputs[i].dtype));
}
*metadata.mutable_args(i)->mutable_shape() = inputs[i].shape.AsProto();
}
return absl::OkStatus();
}
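// Parses the TPU compile metadata from the entry function's text attribute
// and, if it lacks a device assignment, fills in the IFRT client's default.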
absl::StatusOr<tensorflow::tpu::TPUCompileMetadataProto> GetCompileMetadata(
mlir::ModuleOp module, const xla::ifrt::Client& ifrt_client) {
tensorflow::tpu::TPUCompileMetadataProto metadata;
auto op = module.lookupSymbol<mlir::func::FuncOp>(kEntryFuncName);
if (!op) {
return absl::InternalError("Could not find entry function in MLIR Module.");
}
auto metadata_text_attr =
op->getAttrOfType<mlir::StringAttr>(kMetadataTextAttrName);
if (metadata_text_attr && !metadata_text_attr.getValue().empty()) {
VLOG(1) << "Parsing from attribute " << kMetadataTextAttrName
<< metadata_text_attr.getValue().str();
if (!tsl::protobuf::TextFormat::ParseFromString(
metadata_text_attr.getValue().str(), &metadata)) {
return absl::InvalidArgumentError(absl::StrCat(
"Attribute ", kMetadataTextAttrName, ":",
metadata_text_attr.getValue().str(), " cannot be parsed"));
}
} else {
return absl::InvalidArgumentError(
absl::StrCat("Missing ", kMetadataTextAttrName));
}
if (!metadata.has_device_assignment()) {
TF_ASSIGN_OR_RETURN(
auto device_assignment,
ifrt_client.GetDefaultDeviceAssignment(
metadata.num_replicas(), metadata.num_cores_per_replica()));
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
*metadata.mutable_device_assignment() = device_assignment_proto;
}
return metadata;
}
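// Serializes the module, runs the TF2XLA bridge (LegalizeMlirToHlo) targeting
// XLA_TPU_JIT, and converts the resulting HLO back into an MLIR HLO module,
// rejecting programs whose per-core argument shapes differ.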
absl::StatusOr<Tf2HloResult> CompileTfToHlo(
mlir::ModuleOp module, absl::Span<const DtypeAndShape> inputs,
absl::string_view entry_function_name, const xla::ifrt::Client& ifrt_client,
const tensorflow::tpu::TPUCompileMetadataProto& compile_metadata,
tensorflow::XlaHelpers::ShapeRepresentationFn shape_representation_fn) {
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_before_bridge_phase2", module);
}
tpu::MlirToHloArgs mlir_to_hlo_args;
std::string module_str = tensorflow::SerializeMlirModule(module);
mlir_to_hlo_args.mlir_module = module_str;
mlir_to_hlo_args.rollout_state =
ConfigProto::Experimental::MLIR_BRIDGE_ROLLOUT_DISABLED;
TF_ASSIGN_OR_RETURN(
auto* platform,
stream_executor::PlatformManager::PlatformWithName("Host"));
TF_ASSIGN_OR_RETURN(
auto* client, xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform));
std::vector<TensorShape> arg_shapes;
for (const auto& input : inputs) {
arg_shapes.push_back(input.shape);
}
bool use_tuple_args = false;
std::vector<tpu::ShardingAndIndex> arg_core_mapping;
std::vector<std::vector<xla::Shape>> per_core_arg_shapes;
std::vector<std::unique_ptr<mlir::Pass>> custom_legalization_passes;
TF_ASSIGN_OR_RETURN(
tensorflow::XlaCompiler::CompilationResult compilation_result,
tensorflow::tf2xla::v2::LegalizeMlirToHlo(
mlir_to_hlo_args, compile_metadata, use_tuple_args,
"XLA_TPU_JIT", custom_legalization_passes,
tensorflow::XlaShapeLayoutHelpers::ShapeDeterminationFns(
tensorflow::UseNoPreferenceLayoutFn(), shape_representation_fn),
arg_shapes, &arg_core_mapping, &per_core_arg_shapes, client));
for (auto arg_shapes_iter = per_core_arg_shapes.begin() + 1;
arg_shapes_iter != per_core_arg_shapes.end(); ++arg_shapes_iter) {
if (per_core_arg_shapes.front() != *arg_shapes_iter) {
return absl::UnimplementedError(
"Only support even sharding SPMD, but get "
"different shapes across cores");
}
}
Tf2HloResult result;
result.mlir_hlo_module = xla::llvm_ir::CreateMlirModuleOp(module->getLoc());
result.compile_metadata = std::move(compile_metadata);
result.host_compute_metadata = compilation_result.host_compute_metadata;
TF_RETURN_IF_ERROR(xla::ConvertHloToMlirHlo(
*result.mlir_hlo_module, &compilation_result.computation->proto()));
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_after_bridge_phase2",
result.mlir_hlo_module.get());
}
return result;
}
}
} |
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.h"
#include <memory>
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/AsmState.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_types.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
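// gMock matcher that compares protobuf messages by exact serialized-string
// equality.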
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p,
::testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
TEST(Tf2HloTest, Empty) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_empty.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, {}));
auto result =
CompileTfToHlo(mlir_module.get(), {}, "main", *client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
TF_ASSERT_OK(result.status());
}
TEST(Tf2HloTest, Tuple) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_tuple.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {1, 3}});
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {3, 1}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
TF_ASSERT_OK(result.status());
}
TEST(Tf2HloTest, Spmd) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_spmd_with_device_assignment.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {4, 64}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
LOG(INFO) << result->compile_metadata;
TF_ASSERT_OK(result.status());
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape {
dim { size: 4 }
dim { size: 64 }
}
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
device_assignment {
replica_count: 1
computation_count: 2
computation_devices { replica_device_ids: 0 }
computation_devices { replica_device_ids: 1 }
}
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(result->compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST(Tf2HloTest, UsingDefaultDeviceAssignment) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/tf2hlo_spmd_no_device_assignment.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {4, 64}});
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {64, 10}});
dtype_and_shapes.push_back(DtypeAndShape{DT_FLOAT, {1, 4}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
LOG(INFO) << result->compile_metadata;
TF_ASSERT_OK(result.status());
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape {
dim { size: 4 }
dim { size: 64 }
}
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
args {
dtype: DT_FLOAT
shape {
dim { size: 64 }
dim { size: 10 }
}
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
args {
dtype: DT_FLOAT
shape {
dim { size: 1 }
dim { size: 4 }
}
kind: PARAMETER
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
device_assignment {
replica_count: 1
computation_count: 2
computation_devices { replica_device_ids: 0 }
computation_devices { replica_device_ids: 1 }
}
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(result->compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST(Tf2HloTest, XlaCallHostCallback) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/xla_call_host_callback.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path,
mlir::ParserConfig(&context));
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::vector<DtypeAndShape> dtype_and_shapes;
dtype_and_shapes.push_back(DtypeAndShape{DT_INT32, {1}});
dtype_and_shapes.push_back(DtypeAndShape{DT_INT32, {1}});
TF_ASSERT_OK_AND_ASSIGN(
tensorflow::tpu::TPUCompileMetadataProto compile_metadata,
GetCompileMetadata(mlir_module.get(), *client));
TF_ASSERT_OK(UpdateCompileMetadata(compile_metadata, dtype_and_shapes));
auto result = CompileTfToHlo(mlir_module.get(), dtype_and_shapes, "main",
*client, compile_metadata,
tensorflow::IdentityShapeRepresentationFn());
TF_ASSERT_OK(result.status());
ASSERT_EQ((*result).host_compute_metadata.device_to_host().size(), 1);
ASSERT_EQ(
(*result).host_compute_metadata.device_to_host().begin()->metadata_size(),
2);
ASSERT_EQ((*result).host_compute_metadata.host_to_device().size(), 0);
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf2hlo_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
6217fc7f-afb5-4911-bd8b-acdfb33518e1 | cpp | tensorflow/tensorflow | ifrt_backend_compiler | tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.cc | tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler_test.cc |
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/Verifier.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/visitor.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/cluster_tf.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/tf_ifrt_passes.h"
#include "tensorflow/compiler/mlir/tfrt/transforms/tpu_passes.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_executable_registry.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_executable.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
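// Outlines every function carrying a "tfrt_ifrt_serving.program_id" attribute
// into its own pruned module, renames its entry point to "main", compiles it
// into an IfrtServingExecutable, and registers it under that program id.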
absl::StatusOr<std::vector<ServingExecutableRegistry::Handle>>
CompileAndRegisterIfrtPrograms(absl::string_view model_name,
mlir::ModuleOp module,
IfrtModelContext& ifrt_model_context) {
std::vector<ServingExecutableRegistry::Handle> handles;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
int64_t program_id;
if (auto attr = func->getAttrOfType<mlir::IntegerAttr>(
"tfrt_ifrt_serving.program_id")) {
program_id = attr.getInt();
} else {
continue;
}
mlir::StatusScopedDiagnosticHandler diag_handler(module->getContext());
auto entry_function_name = func.getSymName();
auto submodule = mlir::TF::CreatePrunedModule(module, entry_function_name);
if (mlir::failed(submodule)) {
return diag_handler.ConsumeStatus();
}
submodule->get()->removeAttr("tf_saved_model.semantics");
submodule->get().walk([&](mlir::func::FuncOp func) {
if (func.getSymName() == entry_function_name) {
func.setName("main");
func.setSymName("main");
func.setPublic();
}
});
TF_ASSIGN_OR_RETURN(
auto executable,
IfrtServingExecutable::Create(
program_id, model_name, entry_function_name.str(),
*std::move(submodule), ifrt_model_context.GetClient(),
&ifrt_model_context.GetThreadPool(),
&ifrt_model_context.GetLoadedVariableRegistry(),
&ifrt_model_context.GetRestoreTensorRegistry(),
ifrt_model_context.checkpoint_loader_queue(),
ifrt_model_context.GetDeviceMgr(),
ifrt_model_context.GetShapeRepresentationFn(),
ifrt_model_context.GetIfrtServingCoreSelector(),
ifrt_model_context.GetCompilationEnvironmentProto()));
TF_ASSIGN_OR_RETURN(auto handle, ServingExecutableRegistry::Register(
program_id, std::move(executable)));
handles.push_back(std::move(handle));
}
return handles;
}
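// Runs the cluster-to-IFRT-runtime-ops pipeline on the module, then compiles
// and registers the resulting IFRT programs, handing their registry handles to
// the model context.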
absl::Status CompileTensorflowForIfrtServing(
absl::string_view model_name, IfrtModelContext& ifrt_model_context,
mlir::ModuleOp module) {
tsl::profiler::TraceMe trace_me("CompileTensorflowForIfrtServing");
mlir::Builder builder(module.getContext());
TF_RETURN_IF_ERROR(
RunClusterToIfrtRuntimeOpsPassPipeline(module, model_name));
TF_ASSIGN_OR_RETURN(
auto handles,
CompileAndRegisterIfrtPrograms(model_name, module, ifrt_model_context));
for (auto& handle : handles) {
ifrt_model_context.RegisterHandle(std::move(handle));
}
return absl::OkStatus();
}
}
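// Lowers a module for IFRT serving: optional legacy TPU backward-compat
// conversion, TF2XLA clustering bridge, IFRT program outlining and
// registration, removal of the outlined program functions, and a final
// module verification.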
absl::Status IfrtBackendCompiler::CompileTensorflow(
tensorflow::tfrt_stub::ModelRuntimeContext& model_context,
mlir::ModuleOp module) const {
auto ifrt_model_context =
model_context.resource_context().GetResource<IfrtModelContext>(
kIfrtModelContextName);
if (!ifrt_model_context.has_value()) {
return absl::InternalError(
"Failed to find model context for ifrt serving.");
}
if ((*ifrt_model_context)->IsFrozen()) {
return absl::FailedPreconditionError(
"Cannot compile IFRT programs after the model is frozen. Please make "
"sure warmup covers all signatures by following go/tf-model-warmup.");
}
mlir::StatusScopedDiagnosticHandler diag_handler(module->getContext());
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_tpu_bct_conversion_before", module);
}
TfrtTpuCompileOptions options;
options.disable_set_default_tpu_device_and_device_assignment_attributes =
compile_options_
.disable_set_default_tpu_device_and_device_assignment_attributes;
options.support_multi_dims_sharding = true;
if (tpu_compiler_ != nullptr) {
if (mlir::failed(
tpu_compiler_->RunTPUBackwardCompatConversion(module, options))) {
return diag_handler.Combine(
absl::InternalError("Failed to handle legacy TPU Ops"));
}
}
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("ifrt_tpu_bct_conversion_after", module);
}
TF_RETURN_IF_ERROR(tensorflow::tf2xla::v2::RunFunctionTf2xlaClusteringBridge(
module, true,
false));
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("before_ifrt_outlining", module);
}
TF_RETURN_IF_ERROR(CompileTensorflowForIfrtServing(
model_context.name(), **ifrt_model_context, module));
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("after_ifrt_outlining", module);
}
llvm::SmallVector<mlir::func::FuncOp> to_erase;
for (auto func : module.getOps<mlir::func::FuncOp>()) {
if (func->getAttr("tfrt_ifrt_serving.program_id")) {
to_erase.push_back(func);
}
}
for (auto func : to_erase) {
func->erase();
}
if (VLOG_IS_ON(1)) {
tensorflow::DumpMlirOpToFile("after_ifrt_program_removal", module);
}
if (mlir::failed(mlir::verify(module))) {
return diag_handler.ConsumeStatus();
}
return absl::OkStatus();
}
}
} |
#include "tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.h"
#include <memory>
#include <optional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/InitAllDialects.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "xla/python/ifrt/client.h"
#include "xla/python/ifrt/test_util.h"
#include "xla/tsl/framework/test_util/mock_serving_device_selector.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tfrt/graph_executor/graph_execution_options.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_model_context.h"
#include "tensorflow/core/tfrt/ifrt/ifrt_serving_core_selector.h"
#include "tensorflow/core/tfrt/runtime/runtime.h"
#include "tensorflow/core/tfrt/saved_model/saved_model_testutil.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tfrt/host_context/resource_context.h"
namespace tensorflow {
namespace ifrt_serving {
namespace {
using ::testing::HasSubstr;
using ::tsl::testing::StatusIs;
tsl::thread::ThreadPool& GetThreadPool() {
constexpr int kMaxParallelism = 16;
static tsl::thread::ThreadPool* thread_pool =
new tsl::thread::ThreadPool(tsl::Env::Default(), tsl::ThreadOptions(),
"IfrtSharding", kMaxParallelism);
return *thread_pool;
}
TEST(IfrtBackendCompilerTest, Basic) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/ifrt_cluster.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::unique_ptr<tensorflow::tfrt_stub::Runtime> runtime =
tensorflow::tfrt_stub::DefaultTfrtRuntime(1);
tensorflow::tfrt_stub::GraphExecutionOptions graph_execution_options(
runtime.get());
tfrt::ResourceContext resource_context;
tensorflow::tfrt_stub::ModelRuntimeContext runtime_context(
&graph_execution_options, "", &resource_context);
tsl::test_util::MockServingDeviceSelector mock_serving_device_selector;
IfrtServingCoreSelector core_selector(&mock_serving_device_selector,
client->addressable_device_count());
runtime_context.resource_context().CreateResource<IfrtModelContext>(
"IfrtModelContext", client, &core_selector, &GetThreadPool(),
nullptr);
IfrtBackendCompiler compiler;
TF_ASSERT_OK(compiler.CompileTensorflow(runtime_context, mlir_module.get()));
}
TEST(IfrtBackendCompilerTest, CompileShallFailAfterModelIsFrozen) {
constexpr absl::string_view kDataDirectory =
"tensorflow/compiler/mlir/tfrt/transforms/ifrt/testdata";
std::string mlir_module_path = tensorflow::GetDataDependencyFilepath(
absl::StrCat(kDataDirectory, "/ifrt_cluster.mlir"));
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::MLIRContext context(registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
ASSERT_TRUE(mlir_module);
ASSERT_TRUE(mlir_module.get() != nullptr);
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<xla::ifrt::Client> client,
xla::ifrt::test_util::GetClient());
std::unique_ptr<tensorflow::tfrt_stub::Runtime> runtime =
tensorflow::tfrt_stub::DefaultTfrtRuntime(1);
tensorflow::tfrt_stub::GraphExecutionOptions graph_execution_options(
runtime.get());
tfrt::ResourceContext resource_context;
tensorflow::tfrt_stub::ModelRuntimeContext runtime_context(
&graph_execution_options, "", &resource_context);
tsl::test_util::MockServingDeviceSelector mock_serving_device_selector;
IfrtServingCoreSelector core_selector(&mock_serving_device_selector,
client->addressable_device_count());
runtime_context.resource_context().CreateResource<IfrtModelContext>(
"IfrtModelContext", client, &core_selector, &GetThreadPool(),
nullptr);
IfrtBackendCompiler compiler;
TF_ASSERT_OK(compiler.CompileTensorflow(runtime_context, mlir_module.get()));
std::optional<IfrtModelContext*> ifrt_model_context =
runtime_context.resource_context().GetResource<IfrtModelContext>(
"IfrtModelContext");
ASSERT_TRUE(ifrt_model_context.has_value());
TF_ASSERT_OK((*ifrt_model_context)->Freeze());
mlir::OwningOpRef<mlir::ModuleOp> another_mlir_module =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context);
EXPECT_THAT(
compiler.CompileTensorflow(runtime_context, another_mlir_module.get()),
StatusIs(
absl::StatusCode::kFailedPrecondition,
HasSubstr("Cannot compile IFRT programs after the model is frozen")));
}
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tfrt/transforms/ifrt/ifrt_backend_compiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
c68067e9-e2ae-494f-9ab3-807b249c39ba | cpp | tensorflow/tensorflow | node_order | tensorflow/compiler/mlir/tensorflow/translate/node_order.cc | tensorflow/compiler/mlir/tensorflow/translate/node_order_test.cc |
#include "tensorflow/compiler/mlir/tensorflow/translate/node_order.h"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iterator>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
namespace tensorflow {
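// Emits the graph's nodes in a topological order, greedily staying within the
// current group (as computed by get_grouping_key) while it still has ready
// nodes before switching to another ready group.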
void TopologicalOrdering(
const Graph& g, const std::function<void(Node*)>& emit,
const std::function<std::string(Node*)>& get_grouping_key) {
std::unordered_map<std::string, int> group_key_string_to_integer;
absl::flat_hash_map<Node*, int> node_to_group;
absl::flat_hash_map<Node*, int> remaining_incoming_nodes;
absl::flat_hash_map<Node*, int> node_to_position;
using Ready = std::vector<Node*>;
std::vector<Ready> group_members_that_are_ready;
std::set<int> groups_that_are_ready;
int i = 0;
DFS(
g, [](Node*) {},
[&](Node* n) {
std::string group_key_string = get_grouping_key(n);
auto entry = group_key_string_to_integer.try_emplace(
group_key_string, group_key_string_to_integer.size());
int group_key = entry.first->second;
node_to_position[n] = i++;
node_to_group[n] = group_key;
if (entry.second) {
group_members_that_are_ready.push_back({});
}
auto in_nodes = n->in_nodes();
int num_incoming = std::distance(in_nodes.begin(), in_nodes.end());
remaining_incoming_nodes[n] = num_incoming;
if (num_incoming == 0) {
group_members_that_are_ready[group_key].push_back(n);
groups_that_are_ready.emplace(group_key);
}
},
[](const Node* n1, const Node* n2) { return n1->name() < n2->name(); });
assert(group_key_string_to_integer.size() ==
group_members_that_are_ready.size());
int num_nodes = remaining_incoming_nodes.size();
int current_group = 0;
for (int i = 0; i < num_nodes; i++) {
if (groups_that_are_ready.find(current_group) ==
groups_that_are_ready.end()) {
current_group = *groups_that_are_ready.begin();
}
int size = group_members_that_are_ready[current_group].size();
assert(size);
Node* node = group_members_that_are_ready[current_group][--size];
group_members_that_are_ready[current_group].pop_back();
if (size == 0) {
groups_that_are_ready.erase(current_group);
}
emit(node);
auto out_nodes = node->out_nodes();
std::vector<Node*> nodes_sorted(out_nodes.begin(), out_nodes.end());
std::sort(nodes_sorted.begin(), nodes_sorted.end(), [&](Node* a, Node* b) {
return node_to_position[a] < node_to_position[b];
});
for (Node* out : nodes_sorted) {
remaining_incoming_nodes[out]--;
if (remaining_incoming_nodes[out] == 0) {
int group_key = node_to_group[out];
if (group_members_that_are_ready[group_key].empty()) {
groups_that_are_ready.emplace(group_key);
}
group_members_that_are_ready[group_key].push_back(out);
}
}
}
}
} |
#include "tensorflow/compiler/mlir/tensorflow/translate/node_order.h"
#include <string>
#include <utility>
#include <vector>
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
REGISTER_OP("TestParams").Output("o: float");
REGISTER_OP("TestInput").Output("a: float").Output("b: float");
REGISTER_OP("TestMul").Input("a: float").Input("b: float").Output("o: float");
REGISTER_OP("TestUnary").Input("a: float").Output("o: float");
REGISTER_OP("TestTwoOutputs").Output("a: float").Output("b: float");
REGISTER_OP("TestBinary")
.Input("a: float")
.Input("b: float")
.Output("o: float");
bool ExpectBefore(const std::vector<std::pair<string, string>>& ordered_pairs,
const std::vector<Node*>& inputs, string* error) {
for (const std::pair<string, string>& pair : ordered_pairs) {
const string& before_node = pair.first;
const string& after_node = pair.second;
bool seen_before = false;
bool seen_both = false;
for (const Node* node : inputs) {
if (!seen_before && after_node == node->name()) {
*error = std::string("Saw ") + after_node + std::string(" before ") +
before_node;
return false;
}
if (before_node == node->name()) {
seen_before = true;
} else if (after_node == node->name()) {
seen_both = seen_before;
break;
}
}
if (!seen_both) {
*error = std::string("didn't see either ") + before_node +
std::string(" or ") + after_node;
return false;
}
}
return true;
}
TEST(AlgorithmTest, TopologicalOrdering) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestParams", b.opts().WithName("n1"));
Node* n2 =
SourceOp("TestParams", b.opts().WithName("n2").WithControlInput(n1));
Node* n3 =
SourceOp("TestParams", b.opts().WithName("n3").WithControlInput(n2));
Node* n4 = BinaryOp("TestMul", n1, {n3, 0}, b.opts().WithName("n4"));
Node* n5 = BinaryOp("TestMul", n1, {n3, 0},
b.opts().WithName("n5").WithControlInput(n1));
Node* n6 = BinaryOp("TestMul", n2, {n3, 0}, b.opts().WithName("n6"));
n3->set_requested_device("a");
n4->set_requested_device("a");
n5->set_requested_device("b");
n6->set_requested_device("b");
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<std::pair<string, string>> desired_order = {
{"n1", "n2"},
{"n2", "n3"},
{"n3", "n4"},
{"n1", "n4"},
{"n1", "n5"},
{"n2", "n6"},
{"n3", "n4"},
{"n3", "n5"},
{"n3", "n6"},
};
string error;
EXPECT_TRUE(ExpectBefore(desired_order, order, &error)) << error;
}
TEST(AlgorithmTest, TopologicalOrderingOnShallowTree) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestParams", b.opts().WithName("n1").WithDevice("a"));
Node* n2 =
SourceOp("TestParams",
b.opts().WithName("n2").WithDevice("b").WithControlInput(n1));
Node* n3 =
SourceOp("TestParams",
b.opts().WithName("n3").WithDevice("c").WithControlInput(n2));
Node* n4 =
SourceOp("TestParams",
b.opts().WithName("n4").WithDevice("a").WithControlInput(n1));
Node* n5 =
SourceOp("TestParams",
b.opts().WithName("n5").WithDevice("b").WithControlInput(n2));
Node* n6 =
SourceOp("TestParams",
b.opts().WithName("n6").WithDevice("c").WithControlInput(n3));
Node* n7 =
SourceOp("TestParams",
b.opts().WithName("n7").WithDevice("a").WithControlInput(n4));
Node* n8 =
SourceOp("TestParams",
b.opts().WithName("n8").WithDevice("b").WithControlInput(n5));
Node* n9 =
SourceOp("TestParams",
b.opts().WithName("n9").WithDevice("c").WithControlInput(n6));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<Node*> desired_order = {
g.source_node(), n1, n4, n7, n2, n5, n8, n3, n6, n9, g.sink_node()};
for (int i = 0; i < desired_order.size(); i++) {
desired_order[i] = g.FindNodeId(desired_order[i]->id());
}
EXPECT_EQ(order, desired_order);
}
TEST(AlgorithmTest, TopologicalOrderingGivesTheSameResultIfCalledTwice) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
SourceOp("TestParams", b.opts().WithName("n1"));
SourceOp("TestParams", b.opts().WithName("n2"));
SourceOp("TestParams", b.opts().WithName("n3"));
SourceOp("TestParams", b.opts().WithName("n4"));
SourceOp("TestParams", b.opts().WithName("n5"));
SourceOp("TestParams", b.opts().WithName("n6"));
SourceOp("TestParams", b.opts().WithName("n7"));
SourceOp("TestParams", b.opts().WithName("n8"));
SourceOp("TestParams", b.opts().WithName("n9"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order1;
std::vector<Node*> order2;
TopologicalOrdering(
g, [&](Node* n) { order1.push_back(n); },
[&](const Node* node) { return std::string("same"); });
TopologicalOrdering(
g, [&](Node* n) { order2.push_back(n); },
[&](const Node* node) { return std::string("same"); });
EXPECT_EQ(order1, order2);
}
TEST(AlgorithmTest, TopologicalOrderingOnChain) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestParams", b.opts().WithName("n1"));
Node* n2 = UnaryOp("TestUnary", n1, b.opts().WithName("n2"));
Node* n3 = UnaryOp("TestUnary", n2, b.opts().WithName("n3"));
Node* n4 = UnaryOp("TestUnary", n3, b.opts().WithName("n4"));
Node* n5 = UnaryOp("TestUnary", n4, b.opts().WithName("n5"));
Node* n6 = UnaryOp("TestUnary", n5, b.opts().WithName("n6"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<Node*> desired_order = {g.source_node(), n1, n2, n3, n4, n5, n6,
g.sink_node()};
for (int i = 0; i < desired_order.size(); i++) {
desired_order[i] = g.FindNodeId(desired_order[i]->id());
}
EXPECT_EQ(order, desired_order);
}
TEST(AlgorithmTest, TopologicalOrderingOnMultipleOutputs) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n1 = SourceOp("TestTwoOutputs", b.opts().WithName("n1"));
UnaryOp("TestUnary", {n1, 0}, b.opts().WithName("n2"));
UnaryOp("TestUnary", {n1, 1}, b.opts().WithName("n3"));
UnaryOp("TestUnary", {n1, 0}, b.opts().WithName("n4"));
UnaryOp("TestUnary", {n1, 1}, b.opts().WithName("n5"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<std::pair<string, string>> desired_order = {
{"n1", "n2"},
{"n1", "n3"},
{"n1", "n4"},
{"n1", "n5"},
};
string error;
EXPECT_TRUE(ExpectBefore(desired_order, order, &error)) << error;
}
TEST(AlgorithmTest, TopologicalOrderingSameAsReversePostOrder) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
Node* n = SourceOp("TestTwoOutputs", b.opts().WithName("n"));
Node* n0 = UnaryOp("TestUnary", {n, 0}, b.opts().WithName("n2"));
Node* n1 = UnaryOp("TestUnary", {n, 1}, b.opts().WithName("n1"));
UnaryOp("TestUnary", n0, b.opts().WithName("n1a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n8a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n2a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n7a"));
UnaryOp("TestUnary", n1, b.opts().WithName("n1b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n8b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n2b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n7b"));
UnaryOp("TestUnary", n0, b.opts().WithName("n3a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n6a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n4a"));
UnaryOp("TestUnary", n0, b.opts().WithName("n5a"));
UnaryOp("TestUnary", n1, b.opts().WithName("n3b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n6b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n4b"));
UnaryOp("TestUnary", n1, b.opts().WithName("n5b"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
std::vector<Node*> order;
TopologicalOrdering(g, [&](Node* n) { order.push_back(n); }, GroupByDevice());
std::vector<Node*> desired_order;
GetReversePostOrder(g, &desired_order, [](const Node* n1, const Node* n2) {
return n1->name() < n2->name();
});
EXPECT_EQ(desired_order, order);
}
TEST(AlgorithmTest, TopologicalOrderingWithEachDeviceUsedOnce) {
GraphDefBuilder b(GraphDefBuilder::kFailImmediately);
using namespace ::tensorflow::ops;
SourceOp("TestParams", b.opts().WithName("n1").WithDevice("a"));
SourceOp("TestParams", b.opts().WithName("n2").WithDevice("b"));
SourceOp("TestParams", b.opts().WithName("n3").WithDevice("c"));
SourceOp("TestParams", b.opts().WithName("n4").WithDevice("d"));
Graph g(OpRegistry::Global());
TF_ASSERT_OK(GraphDefBuilderToGraph(b, &g));
int count = 0;
TopologicalOrdering(g, [&](Node* n) { count++; }, GroupByDevice());
EXPECT_EQ(count, 6);
}
}
} |
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/node_order.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/node_order_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0b41c07f-45b3-4650-abe4-781be34d4dd6 | cpp | tensorflow/tensorflow | tf_mlir_translate_registration | tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc | tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration_test.cc |
#include <memory>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Tools/mlir-translate/Translation.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/client_library.h"
#include "xla/client/compile_only_client.h"
#include "xla/stream_executor/host/host_platform_id.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tsl/platform/protobuf.h"
namespace mlir {
using tsl::Status;
using tsl::StatusOr;
static constexpr char kMlirToGraphCompilationCheckName[] =
"mlir-to-graph-compilation-check";
static constexpr char kArbitraryDeviceName[] = "XLA_CPU_JIT";
namespace {
inline absl::string_view StringRefToView(llvm::StringRef ref) {
return {ref.data(), ref.size()};
}
}
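// Adapts the GraphDef-to-MLIR import to the mlir-translate framework,
// forwarding the translation command-line flags; returns nullptr if the
// import fails.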
static OwningOpRef<mlir::ModuleOp> GraphdefToMlirTranslateFunction(
llvm::StringRef input, MLIRContext* context) {
tensorflow::GraphdefToMlirOptions options{
debug_info_file, xla_compile_device_type,
prune_unused_nodes, convert_legacy_fed_inputs,
graph_as_function, upgrade_legacy,
enable_shape_inference, unconditionally_use_set_output_shapes,
enable_soft_placement, set_original_tf_func_name};
auto module_or = tensorflow::GraphdefToMlirTranslateFunction(
input, input_arrays, input_dtypes, input_shapes, output_arrays,
control_output_arrays, options, context);
if (!module_or.status().ok()) return nullptr;
return std::move(module_or).value();
}
static TranslateToMLIRRegistration GraphdefToMlirTranslate(
"graphdef-to-mlir", "graphdef-to-mlir", GraphdefToMlirTranslateFunction);
static OwningOpRef<mlir::ModuleOp> GraphdefToSplattedMlirTranslateFunction(
llvm::StringRef input, MLIRContext* context) {
tensorflow::GraphdefToMlirOptions options{
debug_info_file, xla_compile_device_type,
prune_unused_nodes, convert_legacy_fed_inputs,
graph_as_function, upgrade_legacy,
enable_shape_inference, unconditionally_use_set_output_shapes};
auto module_or = tensorflow::GraphdefToSplattedMlirTranslateFunction(
input, input_arrays, input_dtypes, input_shapes, output_arrays,
control_output_arrays, options, context);
if (!module_or.status().ok()) return nullptr;
return std::move(module_or).value();
}
static TranslateToMLIRRegistration GraphdefToSplattedMlirTranslate(
"graphdef-to-splatted-mlir", "graphdef-to-splatted-mlir",
GraphdefToSplattedMlirTranslateFunction);
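// Sanity-checks an exported graph by running a copy of it through
// XlaCompiler::CompileGraph with the given compile-only client.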
static Status CompileGraph(tensorflow::Graph* graph,
xla::CompileOnlyClient* client) {
if (!graph || !client) {
return Status(absl::StatusCode::kInvalidArgument,
"Invalid graph or client");
}
tensorflow::FunctionDefLibrary flib;
auto flib_def = std::make_unique<tensorflow::FunctionLibraryDefinition>(
tensorflow::OpRegistry::Global(), flib);
tensorflow::XlaCompiler::Options options;
options.device_type = tensorflow::DeviceType(kArbitraryDeviceName);
options.client = client;
options.flib_def = flib_def.get();
tensorflow::XlaCompiler compiler(options);
std::unique_ptr<tensorflow::Graph> graph_copy(
new tensorflow::Graph(tensorflow::OpRegistry::Global()));
tensorflow::CopyGraph(*graph, graph_copy.get());
tensorflow::XlaCompiler::CompileOptions compile_options;
tensorflow::XlaCompiler::CompilationResult result;
return compiler.CompileGraph(compile_options,
kMlirToGraphCompilationCheckName,
std::move(graph_copy), {}, &result);
}
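// mlir-to-graph: exports the tf_executor module to a Graph, verifies the
// result still compiles with XLA, and prints the resulting GraphDef.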
static LogicalResult MlirToGraphTranslateFunction(ModuleOp module,
llvm::raw_ostream& output) {
if (!module) return failure();
tensorflow::GraphExportConfig confs;
confs.export_entry_func_to_flib = export_entry_func_to_flib;
confs.export_original_tf_func_name = export_original_tf_func_name;
std::unique_ptr<tensorflow::FunctionLibraryDefinition> flib_def;
auto graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
absl::flat_hash_set<tensorflow::Node*> control_ret_nodes;
auto status = tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module, confs, &graph, flib_def.get(), &control_ret_nodes);
if (!status.ok()) {
LOG(ERROR) << "Export to Graph failed: " << status;
return mlir::failure();
}
auto platform = stream_executor::PlatformManager::PlatformWithId(
stream_executor::host::kHostPlatformId);
auto client =
xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform.value());
tensorflow::XlaOpRegistry::RegisterCompilationKernels();
if (!CompileGraph(graph.get(), client.value()).ok()) {
return mlir::failure();
}
auto graphdef = std::make_unique<tensorflow::GraphDef>();
graph->ToGraphDef(graphdef.get());
output << tsl::LegacyUnredactedDebugString(*graphdef);
return success();
}
static TranslateFromMLIRRegistration mlir_to_graph_translate(
"mlir-to-graph", "convert mlir to graph",
MlirToGraphTranslateFunction, [](DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
});
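// mlir-to-graphdef: same export path as above, but without the XLA
// compilation check.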
static LogicalResult MlirToGraphdefTranslateFunction(
ModuleOp module, llvm::raw_ostream& output) {
if (!module) return failure();
tensorflow::GraphExportConfig confs;
confs.export_entry_func_to_flib = export_entry_func_to_flib;
confs.export_original_tf_func_name = export_original_tf_func_name;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), tensorflow::FunctionDefLibrary());
auto graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
absl::flat_hash_set<tensorflow::Node*> control_ret_nodes;
auto status = tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module, confs, &graph, &flib_def, &control_ret_nodes);
if (!status.ok()) {
LOG(ERROR) << "Export to Graph failed: " << status;
return mlir::failure();
}
tensorflow::GraphDef graphdef;
graph->ToGraphDef(&graphdef);
output << tsl::LegacyUnredactedDebugString(graphdef);
return success();
}
static TranslateFromMLIRRegistration mlir_to_graphdef_translate(
"mlir-to-graphdef", "mlir-to-graphdef", MlirToGraphdefTranslateFunction,
[](DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
});
} | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Tools/mlir-translate/Translation.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace {
class MlirTranslationTest : public ::testing::Test {
private:
static constexpr char kMlirToGraphFlag[] = "-mlir-to-graph";
public:
MlirTranslationTest() : translation_(RegisterTranslation()) {
std::vector<const char*> argv = {""};
argv.push_back(kMlirToGraphFlag);
llvm::cl::ParseCommandLineOptions(argv.size(), &argv[0],
"TF MLIR translation test\n");
}
LogicalResult Translate(StringRef source, std::string& sink) {
auto source_manager = std::make_shared<llvm::SourceMgr>();
auto source_buffer = llvm::MemoryBuffer::getMemBuffer(source);
source_manager->AddNewSourceBuffer(std::move(source_buffer), llvm::SMLoc());
mlir::MLIRContext context;
llvm::raw_string_ostream os(sink);
return (**translation_)(source_manager, os, &context);
}
private:
llvm::cl::opt<const mlir::Translation*, false, mlir::TranslationParser>*
RegisterTranslation() {
static const auto requested_translation =
new llvm::cl::opt<const mlir::Translation*, false,
mlir::TranslationParser>(
llvm::cl::desc("Translation to perform"));
return requested_translation;
}
llvm::cl::opt<const mlir::Translation*, false, mlir::TranslationParser>*
translation_;
};
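// Round-trips a small tf_executor module with two constants through
// -mlir-to-graph and expects GraphDef text ("node {") in the output.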
TEST_F(MlirTranslationTest, TranslatesMlirToGraph) {
static constexpr char kMlirSource[] = R"(
func.func @main() -> (tensor<1x2xf16>, tensor<2xf16>) {
%graph:2 = tf_executor.graph {
%0:2 = tf_executor.island wraps "tf.Const"() {device = "", dtype = "tfdtype$DT_HALF", value = dense<1.0> : tensor<1x2xf16>} : () -> tensor<1x2xf16> loc("const1")
%1:2 = tf_executor.island wraps "tf.Const"() {device = "", dtype = "tfdtype$DT_HALF", value = dense<[1.0, 2.0]> : tensor<2xf16>} : () -> tensor<2xf16> loc("const2")
tf_executor.fetch %0#0, %1#0 : tensor<1x2xf16>, tensor<2xf16>
}
func.return %graph#0, %graph#1 : tensor<1x2xf16>, tensor<2xf16>
})";
std::string result;
auto status = Translate(kMlirSource, result);
ASSERT_TRUE(status.succeeded());
EXPECT_TRUE(absl::StrContains(result, "node {"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1a4c8fab-2f1c-4fda-8ce2-fa1ddfe8ac15 | cpp | tensorflow/tensorflow | tf_saved_model | tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc | tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model_test.cc | #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include <algorithm>
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
namespace mlir {
namespace tf_saved_model {
static bool IsStrArrayAttr(Attribute attr) {
auto array = mlir::dyn_cast<ArrayAttr>(attr);
if (!array) return false;
return llvm::all_of(
array, [](Attribute attr) { return mlir::isa<StringAttr>(attr); });
}
LogicalResult VerifyTensorTypesCompatible(Type t1, Type t2) {
if (!mlir::isa<TensorType>(t1) || !mlir::isa<TensorType>(t2)) {
return failure();
}
return verifyCompatibleShape(mlir::cast<TensorType>(t1),
mlir::cast<TensorType>(t2));
}
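// 'value' (when present) must have a tensor type compatible with 'type';
// immutable global tensors additionally require a statically shaped 'type'.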
LogicalResult GlobalTensorOp::verify() {
GlobalTensorOp global_tensor = *this;
if (global_tensor.getValue()) {
if (failed(VerifyTensorTypesCompatible(
global_tensor.getType(), global_tensor.getValue()->getType()))) {
return global_tensor.emitError()
<< "'type' and 'value' attributes should "
"have compatible tensor types";
}
}
if (!global_tensor.getIsMutable()) {
if (!mlir::cast<TensorType>(global_tensor.getType()).hasStaticShape()) {
return global_tensor.emitError()
<< "'type' attribute for immutable 'tf_saved_model.global_tensor' "
"should have a static shape";
}
}
return success();
}
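// Every initializer symbol must resolve to a function that returns nothing
// and is exported under exactly one name.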
LogicalResult SessionInitializerOp::verify() {
SessionInitializerOp session_initializer = *this;
mlir::SymbolTable symbol_table(
session_initializer->getParentOfType<ModuleOp>());
for (auto sym_ref : session_initializer.getInitializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue());
if (!init_func_op)
return session_initializer.emitOpError()
<< "the initializer function does not exist";
if (!init_func_op.getFunctionType().getResults().empty())
return session_initializer.emitOpError()
<< "the initializer function should have no output";
auto exported_names = GetExportedNames(init_func_op);
if (exported_names.empty())
return session_initializer.emitOpError()
<< "the initializer function should be exported";
if (exported_names.size() != 1)
return session_initializer.emitOpError()
<< "the initializer function should have only one exported names";
}
return success();
}
}
}
#define GET_OP_CLASSES
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc.inc"
namespace mlir {
namespace tf_saved_model {
TensorFlowSavedModelDialect::TensorFlowSavedModelDialect(MLIRContext *context)
: Dialect("tf_saved_model", context,
TypeID::get<TensorFlowSavedModelDialect>()) {
context->loadDialect<TF::TensorFlowDialect>();
addOperations<
#define GET_OP_LIST
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc.inc"
>();
}
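// An index path must be an ArrayAttr whose elements are strings or 64-bit
// integers.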
static LogicalResult VerifyIndexPath(Operation *op, NamedAttribute named_attr) {
auto attr = mlir::dyn_cast<ArrayAttr>(named_attr.getValue());
if (!attr) {
return op->emitError() << "'" << kTfSavedModelIndexPathAttr
<< "' attribute should be an ArrayAttr";
}
for (auto element : attr) {
if (mlir::isa<StringAttr>(element)) {
continue;
}
if (auto integer = mlir::dyn_cast<IntegerAttr>(element)) {
if (integer.getValue().getBitWidth() == 64) {
continue;
}
}
return op->emitError() << "'" << kTfSavedModelIndexPathAttr
<< "' elements should be strings or 64-bit integers";
}
return mlir::success();
}
Type GetBoundInputArgTypeFor(mlir::Operation *op) {
if (auto global_tensor = llvm::dyn_cast<GlobalTensorOp>(op)) {
auto type = mlir::cast<TensorType>(global_tensor.getType());
return RankedTensorType::get(
{}, TF::ResourceType::get({type}, type.getContext()));
}
if (auto asset = llvm::dyn_cast<AssetOp>(op)) {
return RankedTensorType::get({}, TF::StringType::get(asset.getContext()));
}
op->emitError() << "unknown symbol operation";
return {};
}
static LogicalResult VerifyBoundInputArgType(Operation *op_for_diagnostics,
Type arg_type,
mlir::Operation *symbol_op) {
auto expected_type = GetBoundInputArgTypeFor(symbol_op);
if (!expected_type) return failure();
if (arg_type != expected_type) {
return op_for_diagnostics->emitError()
<< "bound input with type " << arg_type << " expected to have type "
<< expected_type;
}
return success();
}
LogicalResult TensorFlowSavedModelDialect::verifyRegionArgAttribute(
Operation *op, unsigned region_index, unsigned arg_index,
NamedAttribute named_attr) {
if (named_attr.getName() == "tf_saved_model.bound_input") {
if (!mlir::isa<FlatSymbolRefAttr>(named_attr.getValue())) {
return op->emitError() << "'tf_saved_model.bound_input' attribute should "
"be a FlatSymbolRefAttr";
}
auto symbol_name =
mlir::cast<FlatSymbolRefAttr>(named_attr.getValue()).getValue();
auto module = op->getParentOfType<ModuleOp>();
mlir::Operation *symbol_op = module.lookupSymbol(symbol_name);
if (!symbol_op) {
return op->emitError() << "'tf_saved_model.bound_input' attribute must "
"reference a valid symbol, got invalid symbol '"
<< symbol_name << "'";
}
auto arg_type = cast<func::FuncOp>(op).getArgument(arg_index).getType();
return VerifyBoundInputArgType(op, arg_type, symbol_op);
}
if (named_attr.getName() == kTfSavedModelIndexPathAttr) {
return VerifyIndexPath(op, named_attr);
}
return op->emitError() << "unknown tf_saved_model dialect arg attribute '"
<< named_attr.getName().getValue() << "'";
}
LogicalResult TensorFlowSavedModelDialect::verifyRegionResultAttribute(
Operation *op, unsigned region_index, unsigned result_index,
NamedAttribute named_attr) {
if (named_attr.getName() == kTfSavedModelIndexPathAttr) {
return VerifyIndexPath(op, named_attr);
}
return op->emitError() << "unknown tf_saved_model dialect result attribute '"
<< named_attr.getName().getValue() << "'";
}
LogicalResult VerifySessionInitOp(SessionInitializerOp session_init_op,
SymbolTable &symbol_table) {
llvm::SmallDenseSet<StringAttr> init_types{};
for (auto init_sym :
session_init_op.getInitializers().getAsValueRange<FlatSymbolRefAttr>()) {
auto init_func = symbol_table.lookup<func::FuncOp>(init_sym);
if (!init_func) continue;
auto init_type =
init_func->getAttrOfType<StringAttr>(kTfSavedModelInitializerTypeAttr);
if (!init_type) continue;
if (init_types.contains(init_type)) {
return init_func->emitError()
<< "Attribute tf_saved_model.initializer_type should not have "
"duplicate values. Found duplicate: "
<< init_type;
}
init_types.insert(init_type);
}
return success();
}
static bool HasAnyTfSavedModelArgAttr(func::FuncOp func) {
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (func.getArgAttr(i, kTfSavedModelIndexPathAttr) ||
func.getArgAttr(i, "tf_saved_model.bound_input")) {
return true;
}
}
for (int i = 0, e = func.getNumResults(); i < e; i++) {
if (func.getResultAttr(i, kTfSavedModelIndexPathAttr) ||
func.getResultAttr(i, "tf_saved_model.bound_input")) {
return true;
}
}
return false;
}
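// Whole-module invariants: exported names are unique, exported functions are
// public (non-exported ones are private and carry no tf_saved_model argument
// attributes), there is at most one session_initializer, and exported
// functions are never referenced internally.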
static LogicalResult VerifySavedModelModule(
ModuleOp module, TensorFlowSavedModelDialect *dialect) {
auto exported_names_ident =
StringAttr::get(dialect->getContext(), kTfSavedModelExportedNamesAttr);
DenseMap<StringRef, Operation *> exported_name_to_op;
for (auto &op : module) {
auto attr = op.getAttr(exported_names_ident);
if (!attr) continue;
if (failed(dialect->verifyOperationAttribute(
&op, {exported_names_ident, attr}))) {
return failure();
}
for (auto str : mlir::cast<ArrayAttr>(attr)) {
auto exported_name = mlir::cast<StringAttr>(str).getValue();
auto p = exported_name_to_op.insert({exported_name, &op});
if (!p.second) {
return op.emitError()
.append("duplicate exported name '", exported_name, "'")
.attachNote(p.first->getSecond()->getLoc())
.append("previously seen here");
}
}
}
for (auto func : module.getOps<func::FuncOp>()) {
const bool is_exported = IsExported(func);
if (is_exported && !func.isPublic()) {
return func.emitError()
<< "exported function @" << func.getName() << " should be public";
}
if (!is_exported && func.isPublic()) {
return func.emitError() << "non-exported function @" << func.getName()
<< " should be private";
}
if (!is_exported && HasAnyTfSavedModelArgAttr(func)) {
return func.emitError() << "can only apply 'tf_saved_model' argument "
"attributes to exported functions";
}
}
SymbolTable symbol_table(module);
auto session_initializers = module.getOps<SessionInitializerOp>();
if (!session_initializers.empty()) {
if (!llvm::hasSingleElement(session_initializers)) {
return (*++session_initializers.begin()).emitError()
<< "there must be no more than one session_initializer op";
}
if (failed(
VerifySessionInitOp(*session_initializers.begin(), symbol_table))) {
return failure();
}
}
auto is_init = [&session_initializers](mlir::func::FuncOp func) {
if (session_initializers.empty()) return false;
auto init_syms = (*session_initializers.begin()).getInitializers();
return std::any_of(
init_syms.begin(), init_syms.end(), [&](Attribute sym_ref) {
return mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue() ==
func.getName();
});
};
auto symbol_uses = SymbolTable::getSymbolUses(&module.getBodyRegion());
if (!symbol_uses.has_value()) {
return module.emitError() << "modules with 'tf_saved_model.semantics' must "
"have analyzable symbol uses";
}
for (auto symbol_use : *symbol_uses) {
auto func = symbol_table.lookupNearestSymbolFrom<func::FuncOp>(
symbol_use.getUser(), symbol_use.getSymbolRef());
if (func && IsExported(func)) {
if (is_init(func) &&
llvm::isa<SessionInitializerOp>(symbol_use.getUser())) {
if (!func->getAttr(kTfSavedModelInitializerTypeAttr)) {
LOG(WARNING)
<< "func op in session_initializer op's initializers attribute "
<< "should have tf_saved_model.initializer_type attribute.";
}
continue;
}
return symbol_use.getUser()
->emitError("exported function cannot be internally referenced")
.attachNote(func.getLoc())
.append("references this exported function");
}
}
return success();
}
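// Exported-function signature checks: index_path arguments come before
// bound_input arguments, bound inputs are not duplicated, and every result
// carries an index_path attribute.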
LogicalResult VerifyExportedFunc(func::FuncOp func) {
bool reached_bound_inputs = false;
auto module = func->getParentOfType<ModuleOp>();
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (func.getArgAttr(i, "tf_saved_model.bound_input")) {
reached_bound_inputs = true;
continue;
}
if (func.getArgAttr(i, kTfSavedModelIndexPathAttr)) {
if (reached_bound_inputs) {
return func.emitError()
<< "all '" << kTfSavedModelIndexPathAttr
<< "' arg attributes should precede all "
"'tf_saved_model.bound_input' arg attributes";
}
continue;
}
if (func.getArgAttr(i, "tf.resource_name")) {
if (module->getAttr("tf_saved_model.under_construction")) continue;
return func.emitError() << "'tf.resource_name' attribute is not allowed "
"unless it is being under construction";
}
return func.emitError()
<< "all arguments should have '" << kTfSavedModelIndexPathAttr
<< "', 'tf_saved_model.bound_input' or 'tf.resource_name' "
"attributes";
}
llvm::SmallDenseSet<StringRef, 8> unique_bound_inputs;
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>(
i, "tf_saved_model.bound_input")) {
if (!unique_bound_inputs.insert(attr.getValue()).second) {
if (module->getAttr("tf_saved_model.under_construction")) continue;
return func.emitError()
<< "duplicate 'tf_saved_model.bound_input' binding";
}
}
}
for (int i = 0, e = func.getNumResults(); i < e; i++) {
if (!func.getResultAttr(i, kTfSavedModelIndexPathAttr)) {
return func.emitError() << "all results should have '"
<< kTfSavedModelIndexPathAttr << "' attributes";
}
}
return success();
}
bool IsValidInitializerType(StringRef initializer_type) {
return initializer_type == kTfSavedModelInitializerRestoreType ||
initializer_type == kTfSavedModelInitializerInitType;
}
LogicalResult VerifyInitializerTypeAttr(Operation *op,
const NamedAttribute named_attr) {
if (!isa<func::FuncOp>(op)) {
return op->emitError() << "Attribute tf_saved_model.initializer_type "
<< "should be on a func::FuncOp.";
}
auto initializer_type_attr_value =
mlir::dyn_cast_or_null<StringAttr>(named_attr.getValue());
if (!initializer_type_attr_value) {
return op->emitError() << "Attribute tf_saved_model.initializer_type "
<< "should be a StringAttr.";
}
if (!IsValidInitializerType(initializer_type_attr_value)) {
return op->emitError() << "tf_saved_model.initializer_type should be one "
"of 'restore_op' or 'init_op'. Got: "
<< initializer_type_attr_value.str();
}
return success();
}
LogicalResult TensorFlowSavedModelDialect::verifyOperationAttribute(
Operation *op, NamedAttribute named_attr) {
if (named_attr.getName() == kTfSavedModelExportedNamesAttr) {
if (!isa<func::FuncOp, GlobalTensorOp>(op)) {
return op->emitError()
<< "'" << kTfSavedModelExportedNamesAttr
<< "' must be on a 'func' or 'tf_saved_model.global_tensor' op";
}
if (!IsStrArrayAttr(named_attr.getValue())) {
return op->emitError() << "'" << kTfSavedModelExportedNamesAttr
<< "' must be an array of strings";
}
if (!op->getParentOp()->getAttr("tf_saved_model.semantics")) {
return op->emitError() << "'" << kTfSavedModelExportedNamesAttr
<< "' must be on an op whose immediate parent has "
"attribute 'tf_saved_model.semantics'";
}
if (auto func = dyn_cast<func::FuncOp>(op)) {
if (failed(VerifyExportedFunc(func))) {
return failure();
}
}
return success();
}
if (named_attr.getName() == "tf_saved_model.semantics") {
auto module = dyn_cast<ModuleOp>(op);
if (!module) {
return op->emitError() << "'tf_saved_model.semantics' must "
"be on a module op";
}
return VerifySavedModelModule(module, this);
}
if (named_attr.getName() == "tf_saved_model.under_construction") {
return success();
}
if (named_attr.getName() == kTfSavedModelInitializerTypeAttr) {
return VerifyInitializerTypeAttr(op, named_attr);
}
return op->emitError() << "unknown tf_saved_model dialect attribute '"
<< named_attr.getName().getValue() << "'";
}
SmallVector<StringRef, 2> GetExportedNames(Operation *op) {
SmallVector<StringRef, 2> ret;
auto exported_names =
op->getAttrOfType<ArrayAttr>(kTfSavedModelExportedNamesAttr);
if (exported_names) {
for (auto name : exported_names) {
ret.push_back(mlir::cast<StringAttr>(name).getValue());
}
}
return ret;
}
bool IsExported(Operation *op) {
auto exported_names =
op->getAttrOfType<ArrayAttr>(kTfSavedModelExportedNamesAttr);
return exported_names && !exported_names.empty();
}
bool HasTfSavedModelSemantics(ModuleOp module) {
return module->getAttr("tf_saved_model.semantics") != nullptr;
}
Operation *LookupBoundInput(func::FuncOp func, int arg_index,
const SymbolTable &symbol_table) {
auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>(
arg_index, "tf_saved_model.bound_input");
if (!attr) return nullptr;
return symbol_table.lookup(attr.getValue());
}
SessionInitializerOp GetSessionInitializerOp(mlir::ModuleOp op) {
auto initializers = op.getOps<SessionInitializerOp>();
if (initializers.empty()) return {};
return *initializers.begin();
}
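// Canonicalization pattern that erases initializer functions whose body is
// empty (or just a tf.NoOp) and drops the session_initializer op once no
// initializers remain.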
class OptimizeSessionInitializerPattern
: public OpRewritePattern<SessionInitializerOp> {
public:
using OpRewritePattern::OpRewritePattern;
LogicalResult matchAndRewrite(SessionInitializerOp op,
PatternRewriter &rewriter) const override {
SymbolTable symbol_table(op->getParentOfType<ModuleOp>());
SmallVector<func::FuncOp, 2> to_remove;
SmallVector<mlir::Attribute, 2> to_keep;
for (auto sym_ref : op.getInitializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue());
auto &operations = init_func_op.front().getOperations();
if ((operations.size() == 1 &&
operations.front().hasTrait<OpTrait::IsTerminator>()) ||
(operations.size() == 2 &&
dyn_cast<mlir::TF::NoOp>(operations.front()) &&
operations.back().hasTrait<OpTrait::IsTerminator>())) {
to_remove.push_back(init_func_op);
} else {
to_keep.push_back(sym_ref);
}
}
for (auto func_op : to_remove) rewriter.eraseOp(func_op);
if (to_keep.empty())
rewriter.eraseOp(op);
else
op->setAttr("initializers", rewriter.getArrayAttr(to_keep));
return success();
}
};
void SessionInitializerOp::getCanonicalizationPatterns(
RewritePatternSet &results, MLIRContext *context) {
results.add<OptimizeSessionInitializerPattern>(context);
}
SmallVector<StringRef, 2> GetSessionInitializerExportedName(ModuleOp op) {
auto session_initializer_op = GetSessionInitializerOp(op);
if (!session_initializer_op) return {};
SymbolTable symbol_table(op);
SmallVector<StringRef, 2> results;
for (auto sym_ref : session_initializer_op.getInitializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
mlir::cast<FlatSymbolRefAttr>(sym_ref).getValue());
auto exported_names = GetExportedNames(init_func_op);
assert(exported_names.size() == 1);
results.push_back(exported_names[0]);
}
return results;
}
SmallVector<func::FuncOp, 2> GetInitializerFunctions(ModuleOp module_op) {
SessionInitializerOp session_initializer_op =
GetSessionInitializerOp(module_op);
if (!session_initializer_op) return {};
SymbolTable symbol_table(module_op);
SmallVector<func::FuncOp, 2> init_func_ops;
for (auto init_func_sym : session_initializer_op.getInitializers()
.getAsValueRange<FlatSymbolRefAttr>()) {
auto init_func_op = symbol_table.lookup<func::FuncOp>(init_func_sym);
init_func_ops.push_back(init_func_op);
}
return init_func_ops;
}
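// Returns the initializer function whose tf_saved_model.initializer_type
// matches `initializer_type`, or nullptr if there is none.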
func::FuncOp GetInitializerFunction(ModuleOp module_op,
const StringRef initializer_type) {
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
auto init_func_itr = absl::c_find_if(
init_func_ops, [initializer_type](const func::FuncOp init_func_op) {
const auto init_type_attr = init_func_op->getAttrOfType<StringAttr>(
kTfSavedModelInitializerTypeAttr);
return init_type_attr && init_type_attr == initializer_type;
});
return init_func_itr == init_func_ops.end() ? nullptr : *init_func_itr;
}
bool IsRestoreGraph(ModuleOp module) {
return module
.walk([](mlir::Operation *op) {
if (llvm::isa<mlir::TF::RestoreV2Op>(op)) {
return mlir::WalkResult::interrupt();
}
return mlir::WalkResult::advance();
})
.wasInterrupted();
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tf_saved_model {
namespace {
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::IsNull;
using ::testing::NotNull;
using ::testing::SizeIs;
class TfSavedModelTest : public ::testing::Test {
protected:
TfSavedModelTest() : ctx_() {
ctx_.loadDialect<TensorFlowSavedModelDialect, func::FuncDialect>();
}
MLIRContext ctx_;
};
ModuleOp ParseModuleOp(const StringRef module_op_str, Block& block,
MLIRContext& ctx) {
const LogicalResult parse_result =
parseSourceString(module_op_str, &block, ParserConfig(&ctx));
EXPECT_TRUE(succeeded(parse_result));
return cast<ModuleOp>(block.front());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionReturnsNullWhenNoSessionInitializerOp) {
constexpr StringRef kModuleOpStr =
R"mlir(module attributes {tf_saved_model.semantics} {})mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, IsNull());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionReturnsNullWhenInitializersEmpty) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, IsNull());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionReturnsFuncOpMatchingInitializerType) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func]} : () -> ()
func.func @init_func() attributes {tf_saved_model.exported_names = ["init_func"], tf_saved_model.initializer_type = "init_op"} {
func.return
}
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, NotNull());
EXPECT_THAT(init_func_op.getSymName(), "init_func");
EXPECT_THAT(
init_func_op->getAttrOfType<StringAttr>(kTfSavedModelInitializerTypeAttr),
kTfSavedModelInitializerInitType);
}
TEST_F(TfSavedModelTest, GetInitializerFunctionNoMatchingInitializerType) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func]} : () -> ()
func.func @init_func() attributes {tf_saved_model.exported_names = ["init_func"], tf_saved_model.initializer_type = "restore_op"} {
func.return
}
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
func::FuncOp init_func_op = GetInitializerFunction(
module_op, kTfSavedModelInitializerInitType);
EXPECT_THAT(init_func_op, IsNull());
}
TEST_F(TfSavedModelTest, GetInitializerFunctionsEmptyWhenNoInitFunctions) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = []} : () -> ()
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
EXPECT_THAT(init_func_ops, IsEmpty());
}
TEST_F(TfSavedModelTest,
GetInitializerFunctionsEmptyWhenNoSessionInitializerOp) {
constexpr StringRef kModuleOpStr =
R"mlir(module attributes {tf_saved_model.semantics} {})mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
EXPECT_THAT(init_func_ops, IsEmpty());
}
TEST_F(TfSavedModelTest, GetInitializerFunctionsReturnsMultipleFuncOps) {
constexpr StringRef kModuleOpStr = R"mlir(
module attributes {tf_saved_model.semantics} {
"tf_saved_model.session_initializer"() {initializers = [@init_func1, @init_func2]} : () -> ()
func.func @init_func1() attributes {tf_saved_model.exported_names = ["init_func1"], tf_saved_model.initializer_type = "init_op"} {
func.return
}
func.func @init_func2() attributes {tf_saved_model.exported_names = ["init_func2"], tf_saved_model.initializer_type = "restore_op"} {
func.return
}
}
)mlir";
Block block;
ModuleOp module_op = ParseModuleOp(kModuleOpStr, block, ctx_);
SmallVector<func::FuncOp, 2> init_func_ops =
GetInitializerFunctions(module_op);
EXPECT_THAT(init_func_ops, SizeIs(2));
EXPECT_THAT(init_func_ops[0].getSymName(), Eq("init_func1"));
EXPECT_THAT(init_func_ops[1].getSymName(), Eq("init_func2"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98b2faaa-6d2b-4983-9116-c31a4abe2d4e | cpp | tensorflow/tensorflow | call_graph_util | tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.cc | tensorflow/compiler/mlir/tensorflow/utils/call_graph_util_test.cc | #include <vector>
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
namespace mlir {
std::vector<llvm::StringRef> GetEntryFunctionAttributeNames() {
return {"tf.entry_function",
tf_saved_model::kTfSavedModelInitializerTypeAttr};
}
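// A function is treated as an entry point if it carries any of the
// attributes listed above.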
bool IsEntryFunction(func::FuncOp func) {
for (const auto &attr : GetEntryFunctionAttributeNames()) {
if (func->hasAttr(attr)) {
return true;
}
}
return false;
}
llvm::SmallVector<func::FuncOp> GetEntryFunctions(ModuleOp module) {
llvm::SmallVector<func::FuncOp> entry_funcs;
module.walk([&](func::FuncOp func) {
if (IsEntryFunction(func)) {
entry_funcs.push_back(func);
}
});
return entry_funcs;
}
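// Collects, in attribute order, every function referenced by a symbol
// attribute on `op`; fails if a referenced function does not exist.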
LogicalResult GetCallees(SymbolUserOpInterface op, SymbolTable &symtab,
llvm::SmallVector<func::FuncOp> &callees) {
for (auto attr : op->getAttrs()) {
auto sym = mlir::dyn_cast<SymbolRefAttr>(attr.getValue());
if (!sym) continue;
auto callee = symtab.lookup<func::FuncOp>(sym.getRootReference());
if (!callee) {
return op->emitError()
<< "Cannot find function " << sym.getRootReference();
}
callees.push_back(callee);
}
return success();
}
bool HasSingleBlock(func::FuncOp func) {
return func->getNumRegions() == 1 && func.getBody().hasOneBlock();
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
TEST(CallGraphUtilTest, GetEntryFunctionAttributeNames) {
auto attr_names = mlir::GetEntryFunctionAttributeNames();
EXPECT_EQ(attr_names.size(), 2);
EXPECT_EQ(attr_names[0], "tf.entry_function");
EXPECT_EQ(attr_names[1],
mlir::tf_saved_model::kTfSavedModelInitializerTypeAttr);
}
TEST(CallGraphUtilTest, GetEntryFunctions) {
const char *const code = R"mlir(
func.func @entry_func_1(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @entry_func_2(%arg0: tensor<i32>) -> tensor<i32> attributes {tf_saved_model.initializer_type = ""} {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
func.return %arg0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
auto entry_funcs = GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 2);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func_1");
EXPECT_EQ(entry_funcs[1].getSymName(), "entry_func_2");
}
TEST(CallGraphUtilTest, GetCallees) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf_saved_model.initializer_type = ""} {
%0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1>
func.return %0 : tensor<i1>
}
func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.Const"() {value = dense<0> : tensor<i32>} : () -> tensor<i32>
func.return %0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> callees;
module->walk([&](mlir::SymbolUserOpInterface op) {
auto result = GetCallees(op, symtab, callees).succeeded();
ASSERT_TRUE(result);
EXPECT_EQ(callees.size(), 2);
EXPECT_EQ(callees[0].getSymName(), "while_body_func");
EXPECT_EQ(callees[1].getSymName(), "while_cond_func");
});
}
TEST(CallGraphUtilTest, GetFirstOpsOfType) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1>
func.return %0 : tensor<i1>
}
func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @outer_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @outer_stateful_pcall_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @inner_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @inner_stateful_pcall_func(%arg0: tensor<i32>) -> tensor<i32> {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
func.return %arg0 : tensor<i32>
}
)mlir";
auto has_compile_device_type = [](mlir::SymbolUserOpInterface op) {
return op->hasAttr(tensorflow::kCompileDeviceTypeAttr);
};
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
llvm::SmallVector<mlir::SymbolUserOpInterface> outermost_pcall_ops;
auto result =
mlir::GetFirstOpsOfType<mlir::TF::StatefulPartitionedCallOp,
mlir::TF::PartitionedCallOp>(
entry_funcs[0], symtab, has_compile_device_type, outermost_pcall_ops)
.succeeded();
ASSERT_TRUE(result);
EXPECT_EQ(outermost_pcall_ops.size(), 1);
auto func =
llvm::dyn_cast<mlir::func::FuncOp>(outermost_pcall_ops[0]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "outer_stateful_pcall_func");
}
TEST(CallGraphUtilTest, GetOpsOfTypeUntilMiss) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
%0 = "tf.While"(%arg0) {cond = @while_cond_func, body = @while_body_func, is_stateless = true} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @while_cond_func(%arg0: tensor<i32>) -> tensor<i1> {
%0 = "tf.Const"() {value = dense<0> : tensor<i1>} : () -> tensor<i1>
func.return %0 : tensor<i1>
}
func.func @while_body_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @outer_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @outer_stateful_pcall_func(%arg0: tensor<i32>) -> (tensor<i32>) {
%0 = "tf.StatefulPartitionedCall"(%arg0) {config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @inner_stateful_pcall_func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @inner_stateful_pcall_func(%arg0: tensor<i32>) -> tensor<i32> {
%0 = "tf.StatefulPartitionedCall"(%arg0) {_xla_compile_device_type = "CPU", config = "", config_proto = "", device = "/device:CPU:0", executor_type = "", f = @func} : (tensor<i32>) -> (tensor<i32>)
func.return %0 : tensor<i32>
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
func.return %arg0 : tensor<i32>
}
)mlir";
auto has_no_compile_device_type = [](mlir::SymbolUserOpInterface op) {
return !op->hasAttr(tensorflow::kCompileDeviceTypeAttr);
};
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
llvm::SmallVector<mlir::SymbolUserOpInterface> noinline_pcall_ops,
outermost_pcall_ops;
auto result =
mlir::GetOpsOfTypeUntilMiss<mlir::TF::StatefulPartitionedCallOp,
mlir::TF::PartitionedCallOp>(
entry_funcs[0], symtab, has_no_compile_device_type,
noinline_pcall_ops, outermost_pcall_ops)
.succeeded();
ASSERT_TRUE(result);
EXPECT_EQ(noinline_pcall_ops.size(), 2);
auto func =
llvm::dyn_cast<mlir::func::FuncOp>(noinline_pcall_ops[0]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "while_body_func");
func =
llvm::dyn_cast<mlir::func::FuncOp>(noinline_pcall_ops[1]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "outer_stateful_pcall_func");
EXPECT_EQ(outermost_pcall_ops.size(), 1);
func =
llvm::dyn_cast<mlir::func::FuncOp>(outermost_pcall_ops[0]->getParentOp());
ASSERT_TRUE(func);
EXPECT_EQ(func.getSymName(), "inner_stateful_pcall_func");
}
TEST(CallGraphUtilTest, SingleBlockEntryFunction) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
func.return %arg0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
llvm::errs() << "module:\n";
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
EXPECT_TRUE(HasSingleBlock(entry_funcs[0]));
}
TEST(CallGraphUtilTest, MultipleBlocksEntryFunction) {
const char *const code = R"mlir(
func.func @entry_func(%arg0: tensor<i32>) -> tensor<i32> attributes {tf.entry_function = {}} {
cf.br ^bb1
^bb1:
func.return %arg0 : tensor<i32>
}
)mlir";
mlir::MLIRContext context;
context.loadDialect<mlir::cf::ControlFlowDialect, mlir::func::FuncDialect,
mlir::TF::TensorFlowDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
llvm::errs() << "module:\n";
ASSERT_TRUE(module);
mlir::SymbolTable symtab(*module);
llvm::SmallVector<mlir::func::FuncOp> entry_funcs =
GetEntryFunctions(*module);
EXPECT_EQ(entry_funcs.size(), 1);
EXPECT_EQ(entry_funcs[0].getSymName(), "entry_func");
EXPECT_FALSE(HasSingleBlock(entry_funcs[0]));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/call_graph_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/call_graph_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
536b4950-2778-4946-b56e-c6ad1c3e215f | cpp | tensorflow/tensorflow | bridge_logger | tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.cc | tensorflow/compiler/mlir/tensorflow/utils/bridge_logger_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include <atomic>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/str_split.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
namespace tensorflow {
static std::atomic<int> log_counter(0);
BridgeLoggerConfig::BridgeLoggerConfig(bool print_module_scope,
bool print_after_only_on_change,
mlir::OpPrintingFlags op_printing_flags)
: mlir::PassManager::IRPrinterConfig(
print_module_scope, print_after_only_on_change,
false, op_printing_flags),
pass_filter_(GetFilter("MLIR_BRIDGE_LOG_PASS_FILTER")),
string_filter_(GetFilter("MLIR_BRIDGE_LOG_STRING_FILTER")) {}
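// Writes the IR for one pass invocation to a counter-prefixed dump file and
// logs the file path.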
inline static void Log(BridgeLoggerConfig::PrintCallbackFn print_callback,
mlir::Pass* pass, mlir::Operation* op,
llvm::StringRef file_suffix) {
std::string pass_name = pass->getName().str();
std::string name = llvm::formatv("{0,0+4}_mlir_bridge_{1}_{2}", log_counter++,
pass_name, file_suffix);
std::unique_ptr<llvm::raw_ostream> os;
std::string filepath;
if (CreateFileForDumping(name, &os, &filepath).ok()) {
print_callback(*os);
LOG(INFO) << "Dumped MLIR module to " << filepath;
}
}
void BridgeLoggerConfig::printBeforeIfEnabled(mlir::Pass* pass,
mlir::Operation* op,
PrintCallbackFn print_callback) {
if (ShouldPrint(pass, op)) Log(print_callback, pass, op, "before");
}
void BridgeLoggerConfig::printAfterIfEnabled(mlir::Pass* pass,
mlir::Operation* op,
PrintCallbackFn print_callback) {
if (ShouldPrint(pass, op)) Log(print_callback, pass, op, "after");
}
std::vector<std::string> BridgeLoggerConfig::GetFilter(
const std::string& env_var) {
std::vector<std::string> filter;
const char* filter_str = getenv(env_var.c_str());
if (filter_str) {
filter = absl::StrSplit(filter_str, ';', absl::SkipWhitespace());
}
return filter;
}
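// Empty filters accept everything; otherwise match exactly or by substring,
// depending on `exact_match`.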
bool BridgeLoggerConfig::MatchesFilter(const std::string& str,
const std::vector<std::string>& filter,
bool exact_match) {
if (filter.empty()) return true;
for (const std::string& filter_str : filter) {
if (str == filter_str) return true;
if (!exact_match && str.find(filter_str) != std::string::npos) return true;
}
return false;
}
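// A pass invocation is logged only if the pass name passes
// MLIR_BRIDGE_LOG_PASS_FILTER and the serialized operation passes
// MLIR_BRIDGE_LOG_STRING_FILTER.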
bool BridgeLoggerConfig::ShouldPrint(mlir::Pass* pass, mlir::Operation* op) {
std::string pass_name = pass->getName().str();
if (!MatchesFilter(pass_name, pass_filter_, true)) {
VLOG(1) << "Not logging invocation of pass `" << pass_name
<< "` because the pass name does not match any string in "
"`MLIR_BRIDGE_LOG_PASS_FILTER`";
return false;
}
if (!string_filter_.empty()) {
std::string serialized_op;
llvm::raw_string_ostream os(serialized_op);
op->print(os);
if (!MatchesFilter(serialized_op, string_filter_, false)) {
VLOG(1) << "Not logging invocation of pass `" << pass_name
<< "` because the serialized operation on which the pass is "
"invoked does not contain any of the strings specified by "
"MLIR_BRIDGE_LOG_STRING_FILTER";
return false;
}
}
return true;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.h"
#include <memory>
#include "mlir/IR/MLIRContext.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static const char *const module_with_add =
R"(module {
func.func @main(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x4x5xf32>) -> tensor<3x4x5xf32> {
%0 = "tf.AddV2"(%arg0, %arg1) : (tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>
func.return %0 : tensor<3x4x5xf32>
}
}
)";
static const char *const module_with_sub =
R"(module {
func.func @main(%arg0: tensor<7x8x9xi8>, %arg1: tensor<7x8x9xi8>) -> tensor<7x8x9xi8> {
%0 = "tf.Sub"(%arg0, %arg1) : (tensor<7x8x9xi8>, tensor<7x8x9xi8>) -> tensor<7x8x9xi8>
func.return %0 : tensor<7x8x9xi8>
}
}
)";
TEST(BridgeLoggerFilters, TestPassFilter) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
std::unique_ptr<mlir::Pass> partitioning_pass =
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass();
std::unique_ptr<mlir::Pass> shape_inference_pass =
mlir::TF::CreateTFShapeInferencePass();
std::unique_ptr<mlir::Pass> inliner_pass = mlir::createInlinerPass();
setenv("MLIR_BRIDGE_LOG_PASS_FILTER",
"TPUResourceReadsWritesPartitioningPass;TensorFlowShapeInferencePass",
1);
BridgeLoggerConfig logger_config;
EXPECT_TRUE(logger_config.ShouldPrint(partitioning_pass.get(),
mlir_module_with_add.get()));
EXPECT_TRUE(logger_config.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
EXPECT_FALSE(logger_config.ShouldPrint(inliner_pass.get(),
mlir_module_with_add.get()));
}
TEST(BridgeLoggerFilters, TestStringFilter) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add, mlir_module_with_sub;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
TF_ASSERT_OK(DeserializeMlirModule(module_with_sub, &mlir_context,
&mlir_module_with_sub));
std::unique_ptr<mlir::Pass> dummy_pass =
mlir::TF::CreateTFShapeInferencePass();
setenv("MLIR_BRIDGE_LOG_STRING_FILTER", "func @main(%arg0: tensor;XXX", 1);
BridgeLoggerConfig logger_config1;
EXPECT_TRUE(
logger_config1.ShouldPrint(dummy_pass.get(), mlir_module_with_add.get()));
EXPECT_TRUE(
logger_config1.ShouldPrint(dummy_pass.get(), mlir_module_with_sub.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER", "func @main(%arg0:tensor;XXX", 1);
BridgeLoggerConfig logger_config2;
EXPECT_FALSE(
logger_config2.ShouldPrint(dummy_pass.get(), mlir_module_with_add.get()));
EXPECT_FALSE(
logger_config2.ShouldPrint(dummy_pass.get(), mlir_module_with_sub.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER",
"\"tf.AddV2\"(%arg0, %arg1) : (tensor<3x4x5xf32>", 1);
BridgeLoggerConfig logger_config3;
EXPECT_TRUE(
logger_config3.ShouldPrint(dummy_pass.get(), mlir_module_with_add.get()));
EXPECT_FALSE(
logger_config3.ShouldPrint(dummy_pass.get(), mlir_module_with_sub.get()));
}
TEST(BridgeLoggerFilters, TestBothFilters) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
std::unique_ptr<mlir::Pass> shape_inference_pass =
mlir::TF::CreateTFShapeInferencePass();
setenv("MLIR_BRIDGE_LOG_STRING_FILTER",
"(tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>", 1);
setenv("MLIR_BRIDGE_LOG_PASS_FILTER", "ensorFlowShapeInferencePass", 1);
BridgeLoggerConfig logger_config1;
EXPECT_FALSE(logger_config1.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER", "XXX", 1);
setenv("MLIR_BRIDGE_LOG_PASS_FILTER", "TensorFlowShapeInferencePass", 1);
BridgeLoggerConfig logger_config2;
EXPECT_FALSE(logger_config2.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
setenv("MLIR_BRIDGE_LOG_STRING_FILTER",
"(tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>", 1);
setenv("MLIR_BRIDGE_LOG_PASS_FILTER", "TensorFlowShapeInferencePass", 1);
BridgeLoggerConfig logger_config3;
EXPECT_TRUE(logger_config3.ShouldPrint(shape_inference_pass.get(),
mlir_module_with_add.get()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/bridge_logger.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/bridge_logger_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
71175d7d-af7a-41c7-8175-be39824b744d | cpp | tensorflow/tensorflow | tpu_rewrite_device_util | tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc | tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_structs.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/utils/string_container_utils.h"
#include "xla/array4d.h"
#include "xla/service/computation_placer.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
constexpr int kTPUTopologyRank = 4;
constexpr char kDeviceTPUSystem[] = "TPU_SYSTEM";
constexpr char kDeviceTPU[] = "TPU";
constexpr char kTPUReplicatedCore[] = "TPU_REPLICATED_CORE";
constexpr char kTPUReplicatedHost[] = "TPU_REPLICATED_HOST";
constexpr char kBadIntArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not an int";
using ParsedDevice = DeviceNameUtils::ParsedName;
using ParsedDevices = llvm::ArrayRef<DeviceNameUtils::ParsedName>;
namespace {
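// Returns all devices from `devices` that match the partially specified
// `spec`.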
llvm::SmallVector<ParsedDevice, 8> FindMatchingDevices(
ParsedDevices devices, const ParsedDevice& spec) {
llvm::SmallVector<ParsedDevice, 8> matching_devices;
for (const auto& device : devices) {
if (DeviceNameUtils::IsCompleteSpecification(spec, device)) {
matching_devices.push_back(device);
}
}
return matching_devices;
}
template <typename T>
absl::Status MismatchedTPUSystemAttributeErr(absl::string_view attribute, T a,
T b) {
return absl::InvalidArgumentError(
absl::StrCat("found ", kDeviceTPUSystem, " devices with conflicting ",
attribute, "s '", a, "' and '", b, "'"));
}
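// Collects the TPU_SYSTEM:0 device of every task, requiring a consistent job
// and replica across them, and returns the devices sorted by task id.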
absl::StatusOr<llvm::SmallVector<ParsedDevice, 8>> GetTPUSystemDevices(
ParsedDevices devices) {
ParsedDevice spec;
spec.type = kDeviceTPUSystem;
spec.has_type = true;
spec.id = 0;
spec.has_id = true;
llvm::SmallVector<ParsedDevice, 8> system_devices =
FindMatchingDevices(devices, spec);
if (system_devices.empty())
return absl::InvalidArgumentError(
absl::StrCat("no ", kDeviceTPUSystem, " devices found"));
const auto& job = system_devices[0].job;
auto replica = system_devices[0].replica;
for (const auto& device : llvm::make_range(std::next(system_devices.begin()),
system_devices.end())) {
if (device.job != job)
return MismatchedTPUSystemAttributeErr("job", job, device.job);
if (device.replica != replica)
return MismatchedTPUSystemAttributeErr("replica", replica,
device.replica);
}
std::sort(system_devices.begin(), system_devices.end(),
[](const ParsedDevice& a, const ParsedDevice& b) {
return a.task < b.task;
});
return system_devices;
}
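// For each TPU_SYSTEM device, collects the TPU devices on the same host
// (sorted by device id) and verifies that every host exposes the same number
// of TPUs.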
absl::StatusOr<llvm::SmallVector<llvm::SmallVector<ParsedDevice, 8>, 8>>
GetTPUDevices(ParsedDevices devices,
llvm::ArrayRef<ParsedDevice> system_devices) {
llvm::SmallVector<llvm::SmallVector<ParsedDevice, 8>, 8> tpu_devices;
tpu_devices.reserve(system_devices.size());
auto lookup = [&devices](ParsedDevice device_spec) {
device_spec.has_type = true;
device_spec.type = kDeviceTPU;
device_spec.has_id = false;
llvm::SmallVector<ParsedDevice, 8> host_tpu_devices =
FindMatchingDevices(devices, device_spec);
std::sort(host_tpu_devices.begin(), host_tpu_devices.end(),
[](const ParsedDevice& i, const ParsedDevice& j) {
return i.id < j.id;
});
return host_tpu_devices;
};
int num_tpus_per_host = 0;
{
const auto& device = system_devices[0];
auto host_tpu_devices = lookup(device);
num_tpus_per_host = host_tpu_devices.size();
tpu_devices.push_back(std::move(host_tpu_devices));
}
for (const auto& device_spec : llvm::make_range(
std::next(system_devices.begin()), system_devices.end())) {
auto host_tpu_devices = lookup(device_spec);
const int64_t host_tpu_devices_size = host_tpu_devices.size();
if (num_tpus_per_host != host_tpu_devices_size)
return absl::InvalidArgumentError(
absl::StrCat("expected the number of TPU devices per host to be ",
num_tpus_per_host, ", got ", host_tpu_devices.size()));
tpu_devices.push_back(std::move(host_tpu_devices));
}
return tpu_devices;
}
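// Compilation is placed on the host CPU that corresponds to the TPU_SYSTEM
// device, so the device type is rewritten to CPU.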
std::string GetTPUCompilationDevice(ParsedDevice system_device) {
system_device.type = tensorflow::DEVICE_CPU;
return DeviceNameUtils::ParsedNameToString(system_device);
}
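// Maps a TPU device to the CPU host device on the same task and checks that
// the host device actually exists in `devices`.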
absl::StatusOr<std::string> GetCPUHostDeviceForTPUDevice(
ParsedDevice tpu_device, ParsedDevices devices) {
tpu_device.type = DEVICE_CPU;
bool enable_multiple_local_cpu_devices =
tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_multiple_local_cpu_devices;
if (!enable_multiple_local_cpu_devices) {
tpu_device.id = 0;
}
if (FindMatchingDevices(devices, tpu_device).empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Can't find device: ", DeviceNameUtils::ParsedNameToString(tpu_device),
" in the devices list."));
}
return DeviceNameUtils::ParsedNameToString(tpu_device);
}
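// Builds the execution device assignment for the full-mesh case (no
// 'topology' attribute): one core per replica, with replicas assigned to TPU
// devices in task-major, device-minor order.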
absl::StatusOr<TPUDevicesAndHosts> GetFullMeshTPUExecutionDeviceAssignment(
int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<llvm::SmallVector<ParsedDevice, 8>> tpu_devices,
ParsedDevices devices) {
const int num_tasks = tpu_devices.size();
const int num_tpus_per_task = tpu_devices[0].size();
const int num_tpu_devices = num_tasks * num_tpus_per_task;
if (num_replicas != 1 && num_replicas != num_tpu_devices)
return absl::InvalidArgumentError(
absl::StrCat("'num_replicas' must be equal to 1 or ", num_tpu_devices,
", got ", num_replicas));
if (num_cores_per_replica != 1)
return absl::InvalidArgumentError(
absl::StrCat("'num_cores_per_replica' must be equal to 1, got ",
num_cores_per_replica));
TPUDevicesAndHosts devices_and_hosts;
devices_and_hosts.reserve(num_replicas);
for (int i = 0; i < num_replicas; ++i) {
const int task = i / num_tpus_per_task;
const int device = i % num_tpus_per_task;
const auto& tpu_device = tpu_devices[task][device];
devices_and_hosts.push_back({TPUDeviceAndHost(
tensorflow::DeviceNameUtils::ParsedNameToString(tpu_device),
*GetCPUHostDeviceForTPUDevice(tpu_device, devices))});
}
return devices_and_hosts;
}
struct TaskAndDevice {
TaskAndDevice() = default;
TaskAndDevice(int task, int device) : task(task), device(device) {}
int task = -1;
int device = -1;
};
bool DeviceCoordinateOutOfBound(int x, int y, int z, int core, int bound_x,
int bound_y, int bound_z, int bound_core) {
return x < 0 || x >= bound_x || y < 0 || y >= bound_y || z < 0 ||
z >= bound_z || core < 0 || core >= bound_core;
}
absl::Status DeviceCoordinateErrorMsg(absl::string_view attribute, int x, int y,
int z, int core, int bound_x, int bound_y,
int bound_z, int bound_core) {
return absl::InvalidArgumentError(
absl::StrCat("device coordinate (", x, ", ", y, ", ", z, ", ", core,
") in '", attribute, "' is outside of mesh shape (", bound_x,
", ", bound_y, ", ", bound_z, ", ", bound_core, ")"));
}
absl::Status DuplicateCoordinateErrorMsg(absl::string_view attribute, int x,
int y, int z, int core) {
return absl::InvalidArgumentError(
absl::StrCat("'", attribute, "' has duplicate device coordinate (", x,
", ", y, ", ", z, ", ", core, ")"));
}
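// Parses the serialized TopologyProto from the 'topology' attribute and
// builds a rank-4 array mapping mesh coordinates (x, y, z, core) to the
// owning (task, device) pair, validating shapes and coordinates on the way.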
absl::StatusOr<xla::Array4D<TaskAndDevice>> ParseTopologyAttr(
llvm::StringRef topology_attr, int num_tasks, int num_tpus_per_task) {
tpu::TopologyProto topology_proto;
if (!topology_proto.ParseFromString(topology_attr.str()))
return absl::InvalidArgumentError(absl::StrCat(
"failed to parse '", kTopologyAttr, "' attribute to TopologyProto"));
if (topology_proto.mesh_shape_size() != kTPUTopologyRank)
return absl::InvalidArgumentError(absl::StrCat(
"'", kTopologyAttr, "' 'mesh_shape' must be rank ", kTPUTopologyRank,
", got rank ", topology_proto.mesh_shape_size()));
for (auto mesh_shape_dim : llvm::enumerate(topology_proto.mesh_shape()))
if (mesh_shape_dim.value() <= 0)
return absl::InvalidArgumentError(
absl::StrCat("'", kTopologyAttr, "' 'mesh_shape' dimension ",
mesh_shape_dim.index(), " must be positive, got ",
mesh_shape_dim.value()));
if (topology_proto.num_tasks() != num_tasks)
return absl::InvalidArgumentError(absl::StrCat(
"number of tasks from available TPU devices must be 'num_tasks' in '",
kTopologyAttr, "' (", topology_proto.num_tasks(), "), got ",
num_tasks));
if (topology_proto.num_tpu_devices_per_task() != num_tpus_per_task)
return absl::InvalidArgumentError(absl::StrCat(
"number of TPU devices available per task must be "
"'num_tpu_devices_per_task' in '",
kTopologyAttr, "' (", topology_proto.num_tpu_devices_per_task(),
"), got ", num_tpus_per_task));
const int expected_device_coordinates_size =
num_tasks * num_tpus_per_task * kTPUTopologyRank;
if (topology_proto.device_coordinates_size() !=
expected_device_coordinates_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of 'device_coordinates' in '", kTopologyAttr,
"' must be 'num_tasks' * 'num_tpus_per_task' * ", kTPUTopologyRank,
" (", num_tasks, " * ", num_tpus_per_task, " * ", kTPUTopologyRank,
"), got ", topology_proto.device_coordinates_size()));
const int bound_x = topology_proto.mesh_shape(0);
const int bound_y = topology_proto.mesh_shape(1);
const int bound_z = topology_proto.mesh_shape(2);
const int bound_core = topology_proto.mesh_shape(3);
xla::Array4D<TaskAndDevice> topology(bound_x, bound_y, bound_z, bound_core);
int pos = 0;
for (int task = 0; task < num_tasks; ++task) {
for (int device = 0; device < num_tpus_per_task; ++device) {
int x = topology_proto.device_coordinates(pos++);
int y = topology_proto.device_coordinates(pos++);
int z = topology_proto.device_coordinates(pos++);
int core = topology_proto.device_coordinates(pos++);
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kTopologyAttr, x, y, z, core, bound_x,
bound_y, bound_z, bound_core);
auto& task_and_device = topology(x, y, z, core);
if (task_and_device.task != -1)
return DuplicateCoordinateErrorMsg(kTopologyAttr, x, y, z, core);
task_and_device = {task, device};
}
}
return topology;
}
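// Builds the execution device assignment for the general case: the
// 'topology' and 'device_assignment' attributes map every (replica, logical
// core) pair to a physical TPU device and its host, and an
// xla::DeviceAssignment is produced alongside.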
absl::StatusOr<std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>>
GetGeneralTPUExecutionDeviceAssignment(
int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<llvm::SmallVector<ParsedDevice, 8>> tpu_devices,
ParsedDevices devices, llvm::StringRef topology_attr,
llvm::ArrayRef<int64_t> device_assignment_attr) {
const int num_tasks = tpu_devices.size();
const int num_tpus_per_task = tpu_devices[0].size();
TF_ASSIGN_OR_RETURN(auto topology, ParseTopologyAttr(topology_attr, num_tasks,
num_tpus_per_task));
const int expected_device_assignment_size =
num_replicas * num_cores_per_replica * kTPUTopologyRank;
const int device_assignment_attr_size = device_assignment_attr.size();
if (device_assignment_attr_size != expected_device_assignment_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of '", kDeviceAssignmentAttr,
"' must be 'num_replicas' * 'num_cores_per_replica' * ",
kTPUTopologyRank, " (", num_replicas, " * ", num_cores_per_replica,
" * ", kTPUTopologyRank, "), got ", device_assignment_attr.size()));
const int bound_x = topology.n1();
const int bound_y = topology.n2();
const int bound_z = topology.n3();
const int bound_core = topology.n4();
auto location_to_id = [&](int x, int y, int z, int core) {
return (x + bound_x * (y + bound_y * z)) * bound_core + core;
};
std::vector<bool> used_device_ids(bound_x * bound_y * bound_z * bound_core,
false);
TPUDevicesAndHosts devices_and_hosts(
num_replicas, llvm::SmallVector<TPUDeviceAndHost, 8>(
num_cores_per_replica, TPUDeviceAndHost()));
xla::DeviceAssignment device_assignment(num_replicas, num_cores_per_replica);
int pos = 0;
for (int replica = 0; replica < num_replicas; ++replica) {
for (int logical_core = 0; logical_core < num_cores_per_replica;
++logical_core) {
int x = device_assignment_attr[pos++];
int y = device_assignment_attr[pos++];
int z = device_assignment_attr[pos++];
int core = device_assignment_attr[pos++];
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z, core,
bound_x, bound_y, bound_z, bound_core);
TaskAndDevice task_and_device = topology(x, y, z, core);
const int task = task_and_device.task;
const int device = task_and_device.device;
if (task == -1 || device == -1)
return absl::InvalidArgumentError(absl::StrCat(
"no TPU device found for '", kDeviceAssignmentAttr,
"' device coordinate (", x, ", ", y, ", ", z, ", ", core, ")"));
const int device_id = location_to_id(x, y, z, core);
if (used_device_ids[device_id])
return DuplicateCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z,
core);
used_device_ids[device_id] = true;
device_assignment(replica, logical_core) = device_id;
auto& device_and_host = devices_and_hosts[replica][logical_core];
const auto& tpu_device = tpu_devices[task][device];
device_and_host.device = DeviceNameUtils::ParsedNameToString(tpu_device);
device_and_host.host = *GetCPUHostDeviceForTPUDevice(tpu_device, devices);
}
}
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
return std::pair<TPUDevicesAndHosts, xla::DeviceAssignmentProto>(
std::move(devices_and_hosts), std::move(device_assignment_proto));
}
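// Reads the required 'topology' attribute from the cluster, emitting an
// error if it is missing.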
mlir::LogicalResult GetTopology(mlir::tf_device::ClusterOp cluster,
std::string& topology) {
mlir::StringAttr topology_attr =
cluster->getAttrOfType<mlir::StringAttr>(tensorflow::kTopologyAttr);
if (topology_attr) {
topology = topology_attr.getValue();
return mlir::success();
} else {
return cluster.emitOpError(
llvm::formatv("requires attribute '{0}'", tensorflow::kTopologyAttr)
.str());
}
}
mlir::LogicalResult GetDeviceAssignmentCoordinates(
mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<int64_t, 8>& device_coordinates) {
mlir::ArrayAttr device_assignment_attr =
cluster->getAttrOfType<mlir::ArrayAttr>(
tensorflow::kDeviceAssignmentAttr);
if (!device_assignment_attr)
return cluster.emitOpError(llvm::formatv("requires attribute '{0}'",
tensorflow::kDeviceAssignmentAttr)
.str());
if (absl::StatusOr<llvm::SmallVector<int64_t, 8>> fetched_device_coordinates =
tensorflow::GetDeviceCoordinates(device_assignment_attr);
fetched_device_coordinates.ok()) {
device_coordinates = *fetched_device_coordinates;
return mlir::success();
} else {
return cluster.emitError() << "error in fetching tpu device coordinates: "
<< fetched_device_coordinates.status().message();
}
}
int GetNumCoresPerReplica(mlir::tf_device::ClusterOp cluster) {
mlir::IntegerAttr num_cores_per_replica_attr =
cluster->getAttrOfType<mlir::IntegerAttr>(kNumCoresPerReplicaAttr);
if (num_cores_per_replica_attr) {
return num_cores_per_replica_attr.getInt();
} else {
return 1;
}
}
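// Computes the TPU devices and hosts for a cluster that is not wrapped in a
// tf_device.replicate op, i.e. a single replica.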
mlir::LogicalResult GetTPUDevicesAndHostsNotReplicated(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
tensorflow::TPUDevicesAndHosts& devices_and_hosts) {
std::string topology;
if (failed(GetTopology(cluster, topology))) {
return mlir::failure();
}
llvm::SmallVector<int64_t, 8> device_coordinates;
if (failed(GetDeviceAssignmentCoordinates(cluster, device_coordinates))) {
return mlir::failure();
}
if (absl::StatusOr<TPUDeviceAssignment> tpu_device_assignment =
tensorflow::GetTPUCompilationAndExecutionDevices(
devices.device_names(), 1,
GetNumCoresPerReplica(cluster), topology, device_coordinates);
tpu_device_assignment.ok()) {
devices_and_hosts = tpu_device_assignment->tpu_devices;
return mlir::success();
} else {
return cluster.emitError()
<< "error in fetching TPU compilation/execution devices: "
<< tpu_device_assignment.status().message();
}
}
mlir::LogicalResult GetHostDeviceOCInTPUPipeline(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
std::string& host_device) {
mlir::tf_device::ReplicateOp replicate =
cluster->getParentOfType<mlir::tf_device::ReplicateOp>();
if (replicate) {
host_device = GetDeviceAliasForHostOfLogicalCore(0);
return mlir::success();
}
tensorflow::TPUDevicesAndHosts devices_and_hosts;
if (failed(GetTPUDevicesAndHostsNotReplicated(devices, cluster,
devices_and_hosts))) {
return mlir::failure();
} else {
host_device = devices_and_hosts[0][0].host;
return mlir::success();
}
}
llvm::SmallVector<std::string, 8> GetTPUToHostMapReplicated(
mlir::tf_device::ClusterOp cluster) {
int num_cores_per_replica = GetNumCoresPerReplica(cluster);
llvm::SmallVector<std::string, 8> core_to_host;
core_to_host.reserve(num_cores_per_replica);
for (int core = 0; core < num_cores_per_replica; ++core) {
core_to_host.push_back(GetDeviceAliasForHostOfLogicalCore(core));
}
return core_to_host;
}
mlir::LogicalResult GetTPUToHostMapNotReplicated(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host) {
tensorflow::TPUDevicesAndHosts devices_and_hosts;
if (failed(GetTPUDevicesAndHostsNotReplicated(devices, cluster,
devices_and_hosts))) {
return mlir::failure();
}
core_to_host.reserve(GetNumCoresPerReplica(cluster));
for (const auto& device_and_host : devices_and_hosts[0]) {
core_to_host.push_back(device_and_host.host);
}
return mlir::success();
}
mlir::LogicalResult GetTPUToHostMap(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host) {
if (cluster->getParentOfType<mlir::tf_device::ReplicateOp>()) {
core_to_host = GetTPUToHostMapReplicated(cluster);
return mlir::success();
}
return GetTPUToHostMapNotReplicated(devices, cluster, core_to_host);
}
}
absl::StatusOr<llvm::SmallVector<int64_t, 8>> GetDeviceCoordinates(
mlir::ArrayAttr device_assignment_attr) {
llvm::SmallVector<int64_t, 8> device_coordinates;
device_coordinates.reserve(device_assignment_attr.size());
for (auto device_coordinate_and_idx :
llvm::enumerate(device_assignment_attr)) {
auto device_coordinate =
mlir::dyn_cast<mlir::IntegerAttr>(device_coordinate_and_idx.value());
if (!device_coordinate)
return absl::InvalidArgumentError(
llvm::formatv(kBadIntArrayElementMsg, kDeviceAssignmentAttr,
device_coordinate_and_idx.index())
.str());
device_coordinates.push_back(device_coordinate.getInt());
}
return device_coordinates;
}
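// Builds an xla::DeviceAssignmentProto directly from the topology and device
// assignment attributes without resolving concrete device names.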
absl::StatusOr<xla::DeviceAssignmentProto> GetXlaDeviceAssignmentProto(
llvm::StringRef topology_attr, int num_replicas, int num_cores_per_replica,
llvm::ArrayRef<int64_t> device_assignment_attr) {
tpu::TopologyProto topology_proto;
if (!topology_proto.ParseFromString(topology_attr.str()))
return absl::InvalidArgumentError(absl::StrCat(
"failed to parse '", kTopologyAttr, "' attribute to TopologyProto"));
if (topology_proto.mesh_shape_size() < 4) {
return absl::InvalidArgumentError(absl::StrCat(
"The size of mesh_shape must be larger than or equal to 4, but got ",
topology_proto.mesh_shape_size()));
}
const int bound_x = topology_proto.mesh_shape(0);
const int bound_y = topology_proto.mesh_shape(1);
const int bound_z = topology_proto.mesh_shape(2);
const int bound_core = topology_proto.mesh_shape(3);
const int expected_device_assignment_size =
num_replicas * num_cores_per_replica * kTPUTopologyRank;
const int device_assignment_attr_size = device_assignment_attr.size();
if (device_assignment_attr_size != expected_device_assignment_size)
return absl::InvalidArgumentError(absl::StrCat(
"length of '", kDeviceAssignmentAttr,
"' must be 'num_replicas' * 'num_cores_per_replica' * ",
kTPUTopologyRank, " (", num_replicas, " * ", num_cores_per_replica,
" * ", kTPUTopologyRank, "), got ", device_assignment_attr.size()));
auto location_to_id = [&](int x, int y, int z, int core) {
return (x + bound_x * (y + bound_y * z)) * bound_core + core;
};
std::vector<bool> used_device_ids(bound_x * bound_y * bound_z * bound_core,
false);
xla::DeviceAssignment device_assignment(num_replicas, num_cores_per_replica);
int pos = 0;
for (int replica = 0; replica < num_replicas; ++replica) {
for (int logical_core = 0; logical_core < num_cores_per_replica;
++logical_core) {
int x = device_assignment_attr[pos++];
int y = device_assignment_attr[pos++];
int z = device_assignment_attr[pos++];
int core = device_assignment_attr[pos++];
if (DeviceCoordinateOutOfBound(x, y, z, core, bound_x, bound_y, bound_z,
bound_core))
return DeviceCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z, core,
bound_x, bound_y, bound_z, bound_core);
const int device_id = location_to_id(x, y, z, core);
if (used_device_ids[device_id])
return DuplicateCoordinateErrorMsg(kDeviceAssignmentAttr, x, y, z,
core);
used_device_ids[device_id] = true;
device_assignment(replica, logical_core) = device_id;
}
}
xla::DeviceAssignmentProto device_assignment_proto;
device_assignment.Serialize(&device_assignment_proto);
return device_assignment_proto;
}
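// Main entry point: picks the compilation device and, depending on whether
// 'topology' is set, computes either a full-mesh or a general execution
// device assignment.
//
// A minimal usage sketch (assuming `devices` was already collected with
// GetDevicesFromOp and the cluster has no 'topology' attribute):
//
//   auto assignment = GetTPUCompilationAndExecutionDevices(
//       devices, /*num_replicas=*/8, /*num_cores_per_replica=*/1,
//       /*topology_attr=*/"", /*device_assignment_attr=*/{});
//   if (assignment.ok()) {
//     const std::string& compile_device = assignment->compilation_device;
//     const auto& replica0 = assignment->tpu_devices[0];  // devices + hosts
//   }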
absl::StatusOr<TPUDeviceAssignment> GetTPUCompilationAndExecutionDevices(
ParsedDevices devices, int num_replicas, int num_cores_per_replica,
llvm::StringRef topology_attr,
llvm::ArrayRef<int64_t> device_assignment_attr) {
TF_ASSIGN_OR_RETURN(auto system_devices, GetTPUSystemDevices(devices));
TF_ASSIGN_OR_RETURN(auto tpu_devices, GetTPUDevices(devices, system_devices));
std::string compilation_device = GetTPUCompilationDevice(system_devices[0]);
if (topology_attr.empty()) {
if (!device_assignment_attr.empty())
return absl::InvalidArgumentError(
absl::StrCat("'", kDeviceAssignmentAttr, "' must not be set when '",
kTopologyAttr, "' is not set"));
TF_ASSIGN_OR_RETURN(
auto execution_devices,
GetFullMeshTPUExecutionDeviceAssignment(
num_replicas, num_cores_per_replica, tpu_devices, devices));
return TPUDeviceAssignment(compilation_device,
std::move(execution_devices));
}
TF_ASSIGN_OR_RETURN(auto devices_and_ids,
GetGeneralTPUExecutionDeviceAssignment(
num_replicas, num_cores_per_replica, tpu_devices,
devices, topology_attr, device_assignment_attr));
return TPUDeviceAssignment(compilation_device,
std::move(devices_and_ids.first),
std::move(devices_and_ids.second));
}
std::string GetDeviceAliasForLogicalCore(const int core_index) {
return llvm::formatv("{0}_{1}", kTPUReplicatedCore, core_index).str();
}
std::string GetDeviceAliasForHostOfLogicalCore(const int core_index) {
return llvm::formatv("{0}_{1}", kTPUReplicatedHost, core_index).str();
}
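// A cluster uses model parallelism iff its 'num_cores_per_replica' attribute
// is present and not equal to 1.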
bool HasModelParallelism(mlir::tf_device::ClusterOp cluster) {
mlir::IntegerAttr num_cores_per_replica_attr =
cluster->getAttrOfType<mlir::IntegerAttr>(
tensorflow::kNumCoresPerReplicaAttr);
if (!num_cores_per_replica_attr) return false;
return num_cores_per_replica_attr.getInt() != 1;
}
bool HasTPUDevice(const mlir::TF::RuntimeDevices& devices) {
for (const auto& device : devices.device_names()) {
if (device.has_type && device.type == "TPU") return true;
}
return false;
}
mlir::LogicalResult GetHostDeviceOutsideCompilationInGenericPipeline(
mlir::TF::RuntimeDevices devices, std::string* host_device) {
for (const auto& device : devices.device_names()) {
if (device.has_type && device.type == "CPU" && device.id == 0) {
if (!host_device->empty()) {
LOG(WARNING) << "Found multiple CPU:0 host devices";
if (device.job == "chief")
*host_device =
tensorflow::DeviceNameUtils::ParsedNameToString(device);
continue;
}
*host_device = tensorflow::DeviceNameUtils::ParsedNameToString(device);
}
}
if (host_device->empty()) {
LOG(ERROR) << "Did not find any CPU:0 host devices";
return mlir::failure();
}
return mlir::success();
}
mlir::LogicalResult GetHostDeviceOutsideComputation(
mlir::TF::RuntimeDevices devices, mlir::tf_device::ClusterOp cluster,
std::string* host_device) {
if (HasTPUDevice(devices) ||
cluster->getParentOfType<mlir::tf_device::ReplicateOp>()) {
return GetHostDeviceOCInTPUPipeline(devices, cluster, *host_device);
} else {
return GetHostDeviceOutsideCompilationInGenericPipeline(devices,
host_device);
}
}
bool IsTPUDevice(llvm::StringRef device) {
ParsedDevice parsed_device;
if (!DeviceNameUtils::ParseFullName(mlir::StringRefToView(device),
&parsed_device))
return false;
return parsed_device.has_type && parsed_device.type == kDeviceTPU;
}
bool IsTPUReplicatedCore(llvm::StringRef device) {
ParsedDevice parsed_device;
if (!DeviceNameUtils::ParseFullName(mlir::StringRefToView(device),
&parsed_device))
return false;
return parsed_device.has_type && parsed_device.type == kTPUReplicatedCore;
}
bool TypeValidForXLA(const mlir::Type& type) {
const mlir::Type elem = getElementTypeOrSelf(type);
return !mlir::isa<mlir::TF::ResourceType>(elem) &&
!mlir::isa<mlir::TF::StringType>(elem);
}
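// Populates `core_to_host` with the host device of each logical core,
// handling both the TPU pipeline and the generic CPU-only pipeline.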
mlir::LogicalResult GetDeviceToHostMap(
mlir::tf_device::ClusterOp cluster,
llvm::SmallVector<std::string, 8>& core_to_host) {
mlir::TF::RuntimeDevices devices;
if (failed(tensorflow::GetDevicesFromOp(
cluster->getParentOfType<mlir::ModuleOp>(), &devices))) {
return mlir::failure();
}
if (tensorflow::HasTPUDevice(devices) ||
cluster->getParentOfType<mlir::tf_device::ReplicateOp>()) {
return GetTPUToHostMap(devices, cluster, core_to_host);
}
std::string host_device;
if (failed(tensorflow::GetHostDeviceOutsideCompilationInGenericPipeline(
devices, &host_device))) {
return mlir::failure();
} else {
core_to_host.push_back(host_device);
return mlir::success();
}
}
mlir::LogicalResult GetNonReplicatedTPU0(mlir::Operation* op,
std::string* tpu0_device) {
mlir::ModuleOp moduleOp = op->getParentOfType<mlir::ModuleOp>();
mlir::TF::RuntimeDevices devices;
if (failed(tensorflow::GetDevicesFromOp(moduleOp, &devices)))
return moduleOp.emitOpError() << "No available devices.";
llvm::ArrayRef<tensorflow::DeviceNameUtils::ParsedName> device_names =
devices.device_names();
auto status_or_system_devices = GetTPUSystemDevices(device_names);
if (!status_or_system_devices.ok())
return moduleOp.emitOpError()
<< "error in fetching TPU_SYSTEM devices: "
<< status_or_system_devices.status().message();
auto status_or_tpu_devices =
GetTPUDevices(device_names, status_or_system_devices.value());
if (!status_or_tpu_devices.ok())
return moduleOp.emitOpError() << "error in fetching TPU devices: "
<< status_or_tpu_devices.status().message();
*tpu0_device =
DeviceNameUtils::ParsedNameToString(status_or_tpu_devices.value()[0][0]);
return mlir::success();
}
mlir::LogicalResult GetNonReplicatedCPU0(mlir::Operation* op,
std::string* cpu0_device) {
std::string tpu0_device;
if (failed(tensorflow::GetNonReplicatedTPU0(op, &tpu0_device)))
return mlir::failure();
auto status = tensorflow::DeviceNameUtils::DeviceNameToCpuDeviceName(
tpu0_device, cpu0_device);
if (!status.ok())
return op->emitError()
<< "error in converting TPU0 to CPU0. The TPU device is "
<< tpu0_device;
return mlir::success();
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.h"
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
using Device = DeviceNameUtils::ParsedName;
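// Parses a list of full device names into ParsedName structs; returns false
// as soon as one name fails to parse.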
bool DeviceNamesToParsedNames(llvm::ArrayRef<std::string> device_names,
llvm::SmallVectorImpl<Device>* parsed_devices) {
parsed_devices->reserve(device_names.size());
for (const auto& device_name : device_names) {
Device parsed_name;
if (!DeviceNameUtils::ParseFullName(device_name, &parsed_name))
return false;
parsed_devices->push_back(parsed_name);
}
return true;
}
using DeviceNames = llvm::SmallVector<std::string, 8>;
struct ParameterizedDeviceSetTest
: ::testing::TestWithParam<std::tuple<DeviceNames, std::string>> {};
TEST_P(ParameterizedDeviceSetTest, BadDeviceSet) {
llvm::SmallVector<Device, 8> devices;
ASSERT_TRUE(DeviceNamesToParsedNames(std::get<0>(GetParam()), &devices));
std::string topology_attr;
std::vector<int64_t> device_assignment_attr;
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, 1, 1, topology_attr,
device_assignment_attr);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(), std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
BadDeviceSet, ParameterizedDeviceSetTest,
::testing::Values(
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:CPU:0"},
"no TPU_SYSTEM devices found"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:0/device:TPU_SYSTEM:0"},
"found TPU_SYSTEM devices with conflicting jobs 'localhost' and "
"'worker'"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:1/task:0/device:TPU_SYSTEM:0"},
"found TPU_SYSTEM devices with conflicting replicas '0' and '1'"),
std::make_tuple<DeviceNames, std::string>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:TPU:1",
"/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:1/device:TPU:0"},
"expected the number of TPU devices per host to be 2, got 1")));
struct ParameterizedMetadataTest
: ::testing::TestWithParam<std::tuple<int, int, std::string,
std::vector<int64_t>, std::string>> {
};
TEST_P(ParameterizedMetadataTest, BadMetadata) {
llvm::SmallVector<Device, 8> devices;
ASSERT_TRUE(DeviceNamesToParsedNames(
{"/job:worker/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:0/device:TPU:0",
"/job:worker/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:1/device:TPU_SYSTEM:0",
"/job:worker/replica:0/task:1/device:TPU:0",
"/job:worker/replica:0/task:1/device:CPU:0"},
&devices));
std::string compilation_device;
llvm::SmallVector<llvm::SmallVector<std::string, 8>, 8> execution_devices;
std::optional<xla::DeviceAssignmentProto> xla_device_assignment;
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, std::get<0>(GetParam()), std::get<1>(GetParam()),
std::get<2>(GetParam()), std::get<3>(GetParam()));
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(), std::get<4>(GetParam()));
}
std::string TopologyWithMeshShape(llvm::ArrayRef<int> mesh_shape) {
tpu::TopologyProto topology_proto;
for (int mesh_dim : mesh_shape) topology_proto.add_mesh_shape(mesh_dim);
return topology_proto.SerializeAsString();
}
std::string TopologyWithMeshShapeAndTasks(llvm::ArrayRef<int> mesh_shape,
int num_tasks,
int num_tpu_devices_per_task) {
tpu::TopologyProto topology_proto;
for (int mesh_dim : mesh_shape) topology_proto.add_mesh_shape(mesh_dim);
topology_proto.set_num_tasks(num_tasks);
topology_proto.set_num_tpu_devices_per_task(num_tpu_devices_per_task);
return topology_proto.SerializeAsString();
}
std::string TopologyWithDeviceCoordinates(
llvm::ArrayRef<int> device_coordinates) {
tpu::TopologyProto topology_proto;
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.set_num_tasks(2);
topology_proto.set_num_tpu_devices_per_task(1);
for (int device_coordinate : device_coordinates)
topology_proto.add_device_coordinates(device_coordinate);
return topology_proto.SerializeAsString();
}
INSTANTIATE_TEST_SUITE_P(
BadFullMeshMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(
2, 1, "", std::vector<int64_t>{0},
"'device_assignment' must not be set when 'topology' is not set"),
std::make_tuple(8, 1, "", std::vector<int64_t>(),
"'num_replicas' must be equal to 1 or 2, got 8"),
std::make_tuple(2, 2, "", std::vector<int64_t>(),
"'num_cores_per_replica' must be equal to 1, got 2")));
INSTANTIATE_TEST_SUITE_P(
BadGeneralTopologyMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(
2, 1, "BAD_TOPOLOGY", std::vector<int64_t>(),
"failed to parse 'topology' attribute to TopologyProto"),
std::make_tuple(4, 2, TopologyWithMeshShape({0}),
std::vector<int64_t>(),
"'topology' 'mesh_shape' must be rank 4, got rank 1"),
std::make_tuple(
2, 1, TopologyWithMeshShape({2, 0, 1, 2}), std::vector<int64_t>(),
"'topology' 'mesh_shape' dimension 1 must be positive, got 0"),
std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 1, 1),
std::vector<int64_t>(),
"number of tasks from available TPU devices must be "
"'num_tasks' in 'topology' (1), got 2"),
std::make_tuple(2, 1, TopologyWithMeshShapeAndTasks({1, 1, 1, 1}, 2, 2),
std::vector<int64_t>(),
"number of TPU devices available per task must be "
"'num_tpu_devices_per_task' in 'topology' (2), got 1"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({}), std::vector<int64_t>(),
"length of 'device_coordinates' in 'topology' must be 'num_tasks' "
"* 'num_tpus_per_task' * 4 (2 * 1 * 4), got 0"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({-1, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (-1, 0, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({2, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (2, 0, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, -1, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, -1, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 1, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 1, 0, 0) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, -1, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 0, 0, -1) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 1, 1, 0, 0, 0}),
std::vector<int64_t>(),
"device coordinate (0, 0, 0, 1) in 'topology' is outside "
"of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 0, 0, 0, 0}),
std::vector<int64_t>(),
"'topology' has duplicate device coordinate (0, 0, 0, 0)")));
INSTANTIATE_TEST_SUITE_P(
BadGeneralDeviceAssignmentMetadata, ParameterizedMetadataTest,
::testing::Values(
std::make_tuple(2, 1,
TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>(),
"length of 'device_assignment' must be 'num_replicas' "
"* 'num_cores_per_replica' * 4 (2 * 1 * 4), got 0"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{-1, 0, 0, 0, 0, 0, 0, 0},
"device coordinate (-1, 0, 0, 0) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{2, 0, 0, 0, 0, 0, 0, 0},
"device coordinate (2, 0, 0, 0) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, -1, 0, 0, 0, 0, 0, 0},
"device coordinate (0, -1, 0, 0) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 1, 0, 0, 0, 0, 0, 0},
"device coordinate (0, 1, 0, 0) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, -1, 0, 0, 0, 0},
"device coordinate (0, 0, 0, -1) in 'device_assignment' "
"is outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(
2, 1, TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, 1, 0, 0, 0, 0},
"device coordinate (0, 0, 0, 1) in 'device_assignment' is "
"outside of mesh shape (2, 1, 1, 1)"),
std::make_tuple(2, 1,
TopologyWithDeviceCoordinates({0, 0, 0, 0, 1, 0, 0, 0}),
std::vector<int64_t>{0, 0, 0, 0, 0, 0, 0, 0},
"'device_assignment' has duplicate device coordinate "
"(0, 0, 0, 0)")));
std::vector<std::string> MakeDeviceSet(int num_tasks,
int num_devices_per_task) {
std::vector<std::string> devices{
"/job:localhost/replica:0/task:0/device:CPU:0"};
devices.reserve(num_tasks * num_devices_per_task + num_tasks + 1);
for (int task = 0; task < num_tasks; ++task) {
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:CPU:0", task)
.str());
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:TPU_SYSTEM:0",
task)
.str());
for (int device = 0; device < num_devices_per_task; ++device)
devices.push_back(
llvm::formatv("/job:worker/replica:0/task:{0}/device:TPU:{1}", task,
device)
.str());
}
return devices;
}
TEST(TPURewriteDeviceUtilTest,
BadGeneralDeviceAssignmentMetadataMissingDevice) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(1);
topology_proto.set_num_tasks(1);
topology_proto.set_num_tpu_devices_per_task(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{1, 0, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
MakeDeviceSet(1, 1);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, 1, 1, topology_attr,
device_assignment_attr);
ASSERT_FALSE(status_or.ok());
EXPECT_EQ(status_or.status().message(),
"no TPU device found for 'device_assignment' device coordinate (1, "
"0, 0, 0)");
}
TEST(TPURewriteDeviceUtilTest, ValidFullMeshDeviceAssignment) {
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
MakeDeviceSet(2, 4);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
std::string topology_attr;
std::vector<int64_t> device_assignment_attr;
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, 8, 1, topology_attr,
device_assignment_attr);
TF_ASSERT_OK(status_or.status());
const auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
const auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 8);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 1);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[2][0].device,
"/job:worker/replica:0/task:0/device:TPU:2");
EXPECT_EQ(tpu_devices[2][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[3][0].device,
"/job:worker/replica:0/task:0/device:TPU:3");
EXPECT_EQ(tpu_devices[3][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[4][0].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[4][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[5][0].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[5][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[6][0].device,
"/job:worker/replica:0/task:1/device:TPU:2");
EXPECT_EQ(tpu_devices[6][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[7][0].device,
"/job:worker/replica:0/task:1/device:TPU:3");
EXPECT_EQ(tpu_devices[7][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_FALSE(tpu_device_assignment.xla_device_assignment.has_value());
}
TEST(TPURewriteDeviceUtilTest, ValidGeneralDeviceAssignmentMesh2x2x2) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.set_num_tasks(2);
topology_proto.set_num_tpu_devices_per_task(4);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0,
0, 1, 1, 1, 0, 0, 1, 1, 0, 1};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
MakeDeviceSet(2, 4);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, 4, 2, topology_attr,
device_assignment_attr);
TF_ASSERT_OK(status_or.status());
const auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
const auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 4);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 2);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[0][1].device,
"/job:worker/replica:0/task:1/device:TPU:3");
EXPECT_EQ(tpu_devices[0][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][1].device,
"/job:worker/replica:0/task:1/device:TPU:2");
EXPECT_EQ(tpu_devices[1][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[2][0].device,
"/job:worker/replica:0/task:0/device:TPU:3");
EXPECT_EQ(tpu_devices[2][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[2][1].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[2][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[3][0].device,
"/job:worker/replica:0/task:0/device:TPU:2");
EXPECT_EQ(tpu_devices[3][0].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[3][1].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[3][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
auto& xla_device_assignment = tpu_device_assignment.xla_device_assignment;
ASSERT_TRUE(xla_device_assignment.has_value());
EXPECT_EQ(xla_device_assignment->replica_count(), 4);
EXPECT_EQ(xla_device_assignment->computation_count(), 2);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 2);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 4);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 4);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 0);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 4);
EXPECT_EQ(computation_device_0.replica_device_ids(2), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(3), 6);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(2), 3);
EXPECT_EQ(computation_device_1.replica_device_ids(3), 7);
}
TEST(TPURewriteDeviceUtilTest, ValidXLADeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
MakeDeviceSet(3, 2);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto xla_device_assignment = GetXlaDeviceAssignmentProto(
topology_attr, 2, 3,
device_assignment_attr);
TF_ASSERT_OK(xla_device_assignment.status());
EXPECT_EQ(xla_device_assignment->replica_count(), 2);
EXPECT_EQ(xla_device_assignment->computation_count(), 3);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 3);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 2);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 2);
const auto& computation_device_2 =
xla_device_assignment->computation_devices(2);
ASSERT_EQ(computation_device_2.replica_device_ids_size(), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 4);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 0);
EXPECT_EQ(computation_device_2.replica_device_ids(0), 2);
EXPECT_EQ(computation_device_2.replica_device_ids(1), 3);
}
TEST(TPURewriteDeviceUtilTest, InvalidXLADeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
MakeDeviceSet(3, 2);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto xla_device_assignment = GetXlaDeviceAssignmentProto(
topology_attr, 2, 2,
device_assignment_attr);
EXPECT_THAT(xla_device_assignment,
testing::StatusIs(
absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"must be 'num_replicas' * 'num_cores_per_replica' * ")));
}
TEST(TPURewriteDeviceUtilTest, ValidGeneralDeviceAssignmentMesh1x2x1x3) {
tpu::TopologyProto topology_proto;
{
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(2);
topology_proto.add_mesh_shape(1);
topology_proto.add_mesh_shape(3);
topology_proto.set_num_tasks(3);
topology_proto.set_num_tpu_devices_per_task(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(1);
topology_proto.add_device_coordinates(0);
topology_proto.add_device_coordinates(2);
}
std::string topology_attr = topology_proto.SerializeAsString();
std::vector<int64_t> device_assignment_attr{
0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 2, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0};
llvm::SmallVector<Device, 8> devices;
std::vector<std::string> device_names =
MakeDeviceSet(3, 2);
ASSERT_TRUE(DeviceNamesToParsedNames(device_names, &devices));
auto status_or = GetTPUCompilationAndExecutionDevices(
devices, 2, 3, topology_attr,
device_assignment_attr);
TF_ASSERT_OK(status_or.status());
auto& tpu_device_assignment = status_or.value();
EXPECT_EQ(tpu_device_assignment.compilation_device,
"/job:worker/replica:0/task:0/device:CPU:0");
auto& tpu_devices = tpu_device_assignment.tpu_devices;
ASSERT_EQ(tpu_devices.size(), 2);
for (const auto& replica_tpu_devices : tpu_devices)
ASSERT_EQ(replica_tpu_devices.size(), 3);
EXPECT_EQ(tpu_devices[0][0].device,
"/job:worker/replica:0/task:1/device:TPU:1");
EXPECT_EQ(tpu_devices[0][0].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[0][1].device,
"/job:worker/replica:0/task:1/device:TPU:0");
EXPECT_EQ(tpu_devices[0][1].host,
"/job:worker/replica:0/task:1/device:CPU:0");
EXPECT_EQ(tpu_devices[0][2].device,
"/job:worker/replica:0/task:2/device:TPU:0");
EXPECT_EQ(tpu_devices[0][2].host,
"/job:worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(tpu_devices[1][0].device,
"/job:worker/replica:0/task:2/device:TPU:1");
EXPECT_EQ(tpu_devices[1][0].host,
"/job:worker/replica:0/task:2/device:CPU:0");
EXPECT_EQ(tpu_devices[1][1].device,
"/job:worker/replica:0/task:0/device:TPU:0");
EXPECT_EQ(tpu_devices[1][1].host,
"/job:worker/replica:0/task:0/device:CPU:0");
EXPECT_EQ(tpu_devices[1][2].device,
"/job:worker/replica:0/task:0/device:TPU:1");
EXPECT_EQ(tpu_devices[1][2].host,
"/job:worker/replica:0/task:0/device:CPU:0");
auto& xla_device_assignment = tpu_device_assignment.xla_device_assignment;
ASSERT_TRUE(xla_device_assignment.has_value());
EXPECT_EQ(xla_device_assignment->replica_count(), 2);
EXPECT_EQ(xla_device_assignment->computation_count(), 3);
ASSERT_EQ(xla_device_assignment->computation_devices_size(), 3);
const auto& computation_device_0 =
xla_device_assignment->computation_devices(0);
ASSERT_EQ(computation_device_0.replica_device_ids_size(), 2);
const auto& computation_device_1 =
xla_device_assignment->computation_devices(1);
ASSERT_EQ(computation_device_1.replica_device_ids_size(), 2);
const auto& computation_device_2 =
xla_device_assignment->computation_devices(2);
ASSERT_EQ(computation_device_2.replica_device_ids_size(), 2);
EXPECT_EQ(computation_device_0.replica_device_ids(0), 1);
EXPECT_EQ(computation_device_0.replica_device_ids(1), 5);
EXPECT_EQ(computation_device_1.replica_device_ids(0), 4);
EXPECT_EQ(computation_device_1.replica_device_ids(1), 0);
EXPECT_EQ(computation_device_2.replica_device_ids(0), 2);
EXPECT_EQ(computation_device_2.replica_device_ids(1), 3);
}
TEST(TPURewriteDeviceUtilTest, TestGetDeviceCoordinates) {
mlir::MLIRContext context;
mlir::Builder builder(&context);
auto device_assignment_attr = builder.getI64ArrayAttr({1, 2, 3});
auto status_or_device_coordinates =
GetDeviceCoordinates(device_assignment_attr);
ASSERT_TRUE(status_or_device_coordinates.ok());
auto device_coordinates = status_or_device_coordinates.value();
EXPECT_EQ(device_coordinates[0], 1);
EXPECT_EQ(device_coordinates[1], 2);
EXPECT_EQ(device_coordinates[2], 3);
}
TEST(TPURewriteDeviceUtilTest, TestInvalidAttrForDeviceAssignmentDisallowed) {
mlir::MLIRContext context;
mlir::Builder builder(&context);
auto device_assignment_attr = builder.getF32ArrayAttr({1.0, 2.0, 3.0});
auto status_or_device_coordinates =
GetDeviceCoordinates(device_assignment_attr);
ASSERT_TRUE(!status_or_device_coordinates.ok());
EXPECT_EQ(status_or_device_coordinates.status().message(),
"bad 'device_assignment' attribute at index 0, not an int");
}
TEST(TPURewriteDeviceUtilTest, TestHasModelParallelismFalse) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_FALSE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest, TestHasModelParallelismTrue) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 5));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_TRUE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest,
TestHasModelParallelismFalseMissingCoresPerReplicaAttr) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
EXPECT_FALSE(HasModelParallelism(cluster));
}
TEST(TPURewriteDeviceUtilTest,
TestGetHostFailNumCoresPerReplicaMissingAttributes) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostFailDeviceMissingAttributes) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
mlir::TF::RuntimeDevices devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailMissingTopology) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailMissingDeviceAssignment) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailBadDeviceAssignment) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr,
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"bad_device_assigment"})));
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceFailBadDeviceName) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(
llvm::ArrayRef<llvm::StringRef>({"bad_device_name"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::failed(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceTPUReplicate) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
llvm::SmallDenseMap<llvm::StringRef, llvm::SmallVector<llvm::StringRef, 4>>
devices;
auto replicate = builder.create<mlir::tf_device::ReplicateOp>(
mlir::UnknownLoc::get(&context), 2, devices,
llvm::ArrayRef<std::pair<mlir::ValueRange, mlir::Type>>{},
mlir::ValueRange{}, mlir::TypeRange{});
builder.setInsertionPoint(&replicate.getBody().front(),
replicate.getBody().front().begin());
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
mlir::TF::RuntimeDevices runtime_devices;
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, GetDeviceAliasForHostOfLogicalCore(0));
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceNotReplicated) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0",
"/job:localhost/replica:0/task:0/device:TPU:0",
"/job:localhost/replica:0/task:0/device:CPU:0",
"/job:worker/replica:0/task:0/device:CPU:0"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
cluster->setAttr(kNumCoresPerReplicaAttr,
builder.getIntegerAttr(builder.getIntegerType(64), 1));
cluster->setAttr(kTopologyAttr, builder.getStringAttr(""));
cluster->setAttr(kDeviceAssignmentAttr, builder.getArrayAttr({}));
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, "/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceInGenericPipeline) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"/job:localhost/replica:0/task:0/device:CPU:0"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, "/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestGetHostDeviceInGenericPipelineMultiCPUs) {
mlir::MLIRContext context;
context.loadDialect<mlir::tf_device::TensorFlowDeviceDialect>();
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
mlir::OpBuilder builder(module_ref->getBodyRegion());
(*module_ref)
->setAttr("tf.devices",
builder.getStrArrayAttr(llvm::ArrayRef<llvm::StringRef>(
{"/job:chief/replica:0/task:0/device:CPU:0",
"/job:ps/replica:0/task:0/device:CPU:0",
"/job:ps/replica:0/task:1/device:CPU:0",
"/job:worker/replica:0/task:2/device:CPU:0"})));
llvm::SmallVector<mlir::Type, 8> result_types;
auto cluster = builder.create<mlir::tf_device::ClusterOp>(
mlir::UnknownLoc::get(&context), result_types);
mlir::TF::RuntimeDevices runtime_devices;
(void)GetDevicesFromOp(*module_ref, &runtime_devices);
std::string host_device;
EXPECT_TRUE(mlir::succeeded(
GetHostDeviceOutsideComputation(runtime_devices, cluster, &host_device)));
EXPECT_EQ(host_device, "/job:chief/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestIsTPUDevice) {
EXPECT_TRUE(IsTPUDevice("/job:localhost/replica:0/task:0/device:TPU:0"));
EXPECT_FALSE(IsTPUDevice("/job:localhost/replica:0/task:0/device:CPU:0"));
EXPECT_FALSE(IsTPUDevice("INVALID_DEVICE"));
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapBadTopology) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {device_assignment = [0, 0, 0, 0, 0, 0, 0, 1], num_cores_per_replica = 2 : i64} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::failed(GetDeviceToHostMap(cluster, core_to_host)));
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapBadDeviceAssignment) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {num_cores_per_replica = 2 : i64, topology = "\0A\04\01\01\01\02\10\01\18\02\22\08\00\00\00\00\00\00\00\01*\02\08\01"} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::failed(GetDeviceToHostMap(cluster, core_to_host)));
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapNotReplicated) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {device_assignment = [0, 0, 0, 0, 0, 0, 0, 1], num_cores_per_replica = 2 : i64, topology = "\0A\04\01\01\01\02\10\01\18\02\22\08\00\00\00\00\00\00\00\01*\02\08\01"} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::succeeded(GetDeviceToHostMap(cluster, core_to_host)));
EXPECT_EQ(core_to_host.size(), 2);
EXPECT_EQ(core_to_host[0], "/job:localhost/replica:0/task:0/device:CPU:0");
EXPECT_EQ(core_to_host[1], "/job:localhost/replica:0/task:0/device:CPU:0");
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapReplicated) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU:2", "/job:localhost/replica:0/task:0/device:TPU:3", "/job:localhost/replica:0/task:0/device:TPU:4", "/job:localhost/replica:0/task:0/device:TPU:5", "/job:localhost/replica:0/task:0/device:TPU:6", "/job:localhost/replica:0/task:0/device:TPU:7", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0"}} {
func.func @main() -> () {
tf_device.replicate() {n = 4 : i32} {
"tf_device.cluster"() ({
tf_device.return
}) {device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1], num_cores_per_replica = 2 : i64, topology = "\0A\04\02\02\01\02\10\01\18\08\22 \00\00\00\00\00\00\00\01\01\00\00\00\01\00\00\01\00\01\00\00\00\01\00\01\01\01\00\00\01\01\00\01*\02\08\01"} : () -> ()
tf_device.return
}
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::succeeded(GetDeviceToHostMap(cluster, core_to_host)));
EXPECT_EQ(core_to_host.size(), 2);
EXPECT_EQ(core_to_host[0], "TPU_REPLICATED_HOST_0");
EXPECT_EQ(core_to_host[1], "TPU_REPLICATED_HOST_1");
}
TEST(TPURewriteDeviceUtilTest, TestDeviceToHostMapCPU) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
llvm::SmallVector<std::string, 8> core_to_host;
EXPECT_TRUE(mlir::succeeded(GetDeviceToHostMap(cluster, core_to_host)));
EXPECT_EQ(core_to_host.size(), 1);
EXPECT_EQ(core_to_host[0], "/job:localhost/replica:0/task:0/device:CPU:0");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/tpu_rewrite_device_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2467c13b-8b56-471c-b969-75f56b4ce64b | cpp | tensorflow/tensorflow | xla_sharding_util | tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc | tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <numeric>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/LogicalResult.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "xla/service/hlo_parser.h"
#include "xla/tsl/lib/math/math_util.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
namespace {
constexpr char kNumSplitAttr[] = "num_split";
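// Returns the total padding required along `split_dim` so that the dimension
// size becomes evenly divisible by `num_splits`; returns 0 when the dimension
// size is unknown or non-positive. For example, a dimension of size 10 split
// 4 ways needs padding of 2 (ceil(10/4) * 4 - 10).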
int64_t GetPadding(const int split_dim, const int num_splits,
const PartialTensorShape& partial_tensor_shape) {
if (partial_tensor_shape.dim_size(split_dim) <= 0) {
return 0;
}
int64_t per_split_size = tsl::MathUtil::CeilOfRatio<int64_t>(
partial_tensor_shape.dim_size(split_dim), num_splits);
int64_t total_padding =
per_split_size * num_splits - partial_tensor_shape.dim_size(split_dim);
return total_padding;
}
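// Creates a tf.Slice op that extracts the leading `shape` elements of `input`,
// removing any padding that was appended to make the tensor evenly splittable.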
mlir::TF::SliceOp CreateSliceOp(mlir::OpBuilder* builder,
const mlir::Location& location,
mlir::Value input,
const PartialTensorShape& shape) {
mlir::SmallVector<int64_t, 4> slice_start_position;
for (int i = 0; i < shape.dims(); ++i) {
slice_start_position.push_back(0);
}
mlir::SmallVector<int64_t, 4> slice_size;
for (int i = 0; i < shape.dims(); ++i) {
slice_size.push_back(shape.dim_size(i));
}
auto start_position_type =
mlir::RankedTensorType::get(shape.dims(), builder->getIntegerType(64));
auto start_position_op = builder->create<mlir::TF::ConstOp>(
input.getLoc(), mlir::DenseIntElementsAttr::get(start_position_type,
slice_start_position));
auto slice_size_op = builder->create<mlir::TF::ConstOp>(
input.getLoc(), mlir::DenseIntElementsAttr::get(
mlir::RankedTensorType::get(
shape.dims(), builder->getIntegerType(64)),
slice_size));
auto slice_result_type =
mlir::RankedTensorType::get(slice_size, getElementTypeOrSelf(input));
return builder->create<mlir::TF::SliceOp>(input.getLoc(), slice_result_type,
input, start_position_op,
slice_size_op);
}
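// Creates a tf.Pad op that appends `padding` elements to `src_input` along
// `split_dim` so the dimension can be split evenly.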
mlir::TF::PadOp CreatePadOp(mlir::OpBuilder* builder,
const mlir::Location& location, int64_t num_dims,
int64_t split_dim, mlir::Value src_input,
int64_t padding) {
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
llvm::SmallVector<int64_t, 4> padding_values;
std::vector<int64_t> padded_shape;
for (int i = 0; i < num_dims; ++i) {
padding_values.push_back(0);
if (i == split_dim) {
padding_values.push_back(padding);
padded_shape.push_back(input_type.getShape()[i] + padding);
} else {
padding_values.push_back(0);
padded_shape.push_back(input_type.getShape()[i]);
}
}
auto padding_type =
mlir::RankedTensorType::get({num_dims, 2}, builder->getIntegerType(64));
auto paddings = mlir::DenseIntElementsAttr::get(padding_type, padding_values);
auto paddings_value = builder->create<mlir::TF::ConstOp>(location, paddings);
mlir::SmallVector<int64_t, 4> expand_shape(padded_shape.begin(),
padded_shape.end());
auto expand_result_type =
mlir::RankedTensorType::get(expand_shape, input_type.getElementType());
return builder->create<mlir::TF::PadOp>(location, expand_result_type,
src_input, paddings_value);
}
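// Creates a tf.Split op that splits `src_input` into `num_split` slices along
// `split_dimension`, inserting a tf.Pad first when `padding` > 0. Emits an
// error if a statically known dimension is not evenly divisible by `num_split`.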
mlir::LogicalResult CreateSplitOp(
const int num_split, const int split_dimension, const int64_t padding,
const mlir::Location& location, mlir::Value src_input,
mlir::OpBuilder* builder, mlir::TF::SplitOp* split_op,
bool is_ici_weight_dist_spmd) {
if (padding > 0) {
int64_t num_dims =
mlir::cast<mlir::TensorType>(src_input.getType()).getRank();
auto pad_op = CreatePadOp(builder, location, num_dims, split_dimension,
src_input, padding);
if (is_ici_weight_dist_spmd) {
pad_op->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
src_input = pad_op.getResult();
}
auto split_dim_type =
mlir::RankedTensorType::get({}, builder->getIntegerType(32));
auto split_dimension_attr =
mlir::DenseElementsAttr::get(split_dim_type, split_dimension);
mlir::Type output_type;
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
if (input_type.hasRank()) {
if (input_type.getShape()[split_dimension] == mlir::ShapedType::kDynamic) {
output_type = input_type;
} else {
auto shape = llvm::to_vector<4>(input_type.getShape());
if (shape[split_dimension] % num_split != 0) {
return mlir::emitError(
location,
llvm::formatv(
"incorrect input sharding configuration received. "
"{0}-th dimension of the input must be evenly divisible by {1}",
split_dimension, num_split));
}
shape[split_dimension] = shape[split_dimension] / num_split;
output_type =
mlir::RankedTensorType::get(shape, input_type.getElementType());
}
} else {
output_type = input_type;
}
auto split_dimension_op = builder->create<mlir::TF::ConstOp>(
location, split_dim_type, split_dimension_attr);
if (is_ici_weight_dist_spmd) {
split_dimension_op->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
*split_op = builder->create<mlir::TF::SplitOp>(
location, output_types, split_dimension_op.getOutput(), src_input);
(*split_op)->setAttr(
kNumSplitAttr,
builder->getIntegerAttr(builder->getIntegerType(32), num_split));
if (is_ici_weight_dist_spmd) {
(*split_op)->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
return mlir::success();
}
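// Creates a tf.Concat op that merges `inputs` along `concat_dimension`.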
mlir::TF::ConcatOp CreateConcatOp(const int concat_dimension,
const mlir::Location& location,
const int64_t padding,
mlir::ArrayRef<mlir::Value> inputs,
mlir::OpBuilder* builder) {
auto concat_dim_type =
mlir::RankedTensorType::get({}, builder->getIntegerType(32));
auto concat_dimension_attr =
mlir::DenseElementsAttr::get(concat_dim_type, concat_dimension);
auto concat_dimension_op = builder->create<mlir::TF::ConstOp>(
location, concat_dim_type, concat_dimension_attr);
mlir::Type output_type;
auto input_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
if (input_type.hasRank()) {
if (input_type.getShape()[concat_dimension] == mlir::ShapedType::kDynamic) {
output_type = input_type;
} else {
auto shape = llvm::to_vector<4>(input_type.getShape());
shape[concat_dimension] = shape[concat_dimension] * inputs.size();
output_type =
mlir::RankedTensorType::get(shape, input_type.getElementType());
}
} else {
output_type = input_type;
}
return builder->create<mlir::TF::ConcatOp>(
location, output_type, concat_dimension_op.getOutput(), inputs);
}
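// Creates an XlaConcatND op that reassembles sharded slices into a full
// tensor, trimming `paddings` from the concatenated shape.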
mlir::TF::XlaConcatNDOp CreateXlaConcatNDOp(
const mlir::Location& location, mlir::ArrayRef<mlir::Value> inputs,
const std::vector<int64_t>& num_concats,
const std::vector<int64_t>& paddings, mlir::OpBuilder& builder) {
llvm::SmallVector<int64_t, 4> output_shape;
if (inputs.empty()) {
mlir::emitError(location, "inputs list to concat ops is empty");
return nullptr;
}
if (num_concats.size() != paddings.size()) {
mlir::emitError(location,
"num_concats and paddings must be of the same length.");
return nullptr;
}
auto input_slice_type = mlir::cast<mlir::TensorType>(inputs[0].getType());
auto element_type = input_slice_type.getElementType();
mlir::Type output_type;
if (input_slice_type.hasRank()) {
const auto& slice_shape = input_slice_type.getShape();
for (int i = 0; i < num_concats.size(); i++) {
auto num_concat = num_concats[i];
const int max_dim_size = slice_shape[i] * num_concat;
output_shape.emplace_back(max_dim_size - paddings[i]);
}
VLOG(2) << "SL: CreateXlaConcatNDOp. output_shape="
<< absl::StrJoin(output_shape, ",")
<< ", Padding=" << absl::StrJoin(paddings, ",");
output_type = mlir::RankedTensorType::get(output_shape, element_type);
} else {
output_type = input_slice_type;
}
auto op = builder.create<mlir::TF::XlaConcatNDOp>(
location, output_type, inputs, builder.getI64ArrayAttr(num_concats),
builder.getI64ArrayAttr(paddings));
return op;
}
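// Creates an XlaSplitND op that splits `src_input` into num_splits[i] slices
// along each dimension i, padding dimensions as needed. Only ranked inputs
// are supported.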
mlir::LogicalResult CreateXlaSplitNDOp(const mlir::Location& location,
mlir::Value src_input,
const std::vector<int64_t>& num_splits,
const std::vector<int64_t>& paddings,
mlir::OpBuilder* builder,
mlir::TF::XlaSplitNDOp* xla_split_op,
bool is_ici_weight_dist_spmd) {
auto input_type = mlir::cast<mlir::TensorType>(src_input.getType());
mlir::Type output_type;
if (!input_type.hasRank()) {
mlir::emitError(
location,
"XLA Split/Concat ops are supported only for Ranked Tensors.");
return mlir::failure();
}
const int rank = input_type.getRank();
const auto& input_shape = input_type.getShape();
auto output_slice_shape = llvm::to_vector<4>(input_type.getShape());
int num_tiles = 1;
if (num_splits.size() != rank || num_splits.size() != paddings.size()) {
return mlir::failure();
}
for (int i = 0; i < rank; ++i) {
if (input_shape[i] == mlir::ShapedType::kDynamic) {
output_slice_shape[i] = input_shape[i];
} else {
output_slice_shape[i] = ((input_shape[i] + paddings[i]) / num_splits[i]);
}
num_tiles *= num_splits[i];
}
output_type = mlir::RankedTensorType::get(output_slice_shape,
input_type.getElementType());
llvm::SmallVector<mlir::Type, 4> output_types(num_tiles, output_type);
VLOG(2) << "SL: CreateXlaSplitNDOp. input_shape="
<< absl::StrJoin(input_shape, ",")
<< ", Padding: " << absl::StrJoin(paddings, ",");
*xla_split_op = builder->create<mlir::TF::XlaSplitNDOp>(
location, output_types, src_input, builder->getI64ArrayAttr(num_splits),
builder->getI64ArrayAttr(paddings));
if (is_ici_weight_dist_spmd) {
(*xla_split_op)
->setAttr(kICIWeightDistributionMlirBridgeMarker,
builder->getBoolAttr(true));
}
return mlir::success();
}
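// Returns true when `type` is ranked and none of its dimensions is dynamic
// (rank-0 tensors return false).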
bool IsShapeKnown(mlir::TensorType type) {
if (!type.hasRank()) return false;
bool shape_known = false;
for (int i = 0; i < type.getRank(); ++i) {
if (type.getShape()[i] == mlir::ShapedType::kDynamic) {
shape_known = false;
break;
} else {
shape_known = true;
}
}
return shape_known;
}
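// Splits a tile-sharded input with a single XlaSplitND op and maps each
// resulting slice to the logical devices in the tile assignment, repeating
// slices when the sharding replicates on the last tile dimension.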
mlir::LogicalResult HandleTileShardedInputsUsingXlaSplitOps(
const mlir::Location& location, const xla::OpSharding& input_sharding,
const mlir::Value& original_source, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<mlir::Value>* tiled_inputs,
bool is_ici_weight_dist_spmd) {
std::vector<int64_t> num_splits(
input_sharding.tile_assignment_dimensions().begin(),
input_sharding.replicate_on_last_tile_dim()
? std::prev(input_sharding.tile_assignment_dimensions().end())
: input_sharding.tile_assignment_dimensions().end());
const int rank = input_sharding.replicate_on_last_tile_dim()
? input_sharding.tile_assignment_dimensions_size() - 1
: input_sharding.tile_assignment_dimensions_size();
std::vector<int64_t> paddings;
paddings.reserve(rank);
auto shape = llvm::to_vector<4>(
original_source.getType().cast<mlir::TensorType>().getShape());
for (int dim = 0; dim < rank; ++dim) {
paddings.push_back(
GetPadding(dim, input_sharding.tile_assignment_dimensions(dim),
PartialTensorShape(shape)));
}
mlir::TF::XlaSplitNDOp xla_split_op;
if (mlir::failed(CreateXlaSplitNDOp(location, original_source, num_splits,
paddings, builder, &xla_split_op,
is_ici_weight_dist_spmd))) {
return mlir::failure();
}
tiled_inputs->clear();
tiled_inputs->reserve(input_sharding.tile_assignment_devices_size());
int64_t repeat_count =
input_sharding.replicate_on_last_tile_dim()
? *input_sharding.tile_assignment_dimensions().rbegin()
: 1;
for (int i = 0; i < xla_split_op.getResults().size(); i++) {
auto split_op_output = xla_split_op.getResults()[i];
for (int64_t j = 0; j < repeat_count; ++j) {
tiled_inputs->push_back(split_op_output);
}
}
return mlir::success();
}
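// Splits a tile-sharded input with a tree of tf.Split ops, one level per
// tiled dimension; the leaf outputs provide the per-logical-device slices.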
mlir::LogicalResult HandleTileShardedInputsUsingTfSplitOps(
const mlir::Location& location, const xla::OpSharding& input_sharding,
const mlir::Value& original_source, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<mlir::Value>* tiled_inputs,
bool is_ici_weight_dist_spmd) {
llvm::SmallVector<mlir::TF::SplitOp, 4> split_ops_for_tiled_input;
split_ops_for_tiled_input.reserve(
input_sharding.tile_assignment_devices_size());
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(input_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
PartialTensorShape shape;
const auto input_type =
mlir::cast<mlir::TensorType>(original_source.getType());
bool input_shape_known = IsShapeKnown(input_type);
if (input_shape_known) {
shape = PartialTensorShape(input_type.getShape());
}
for (const auto& dimension_and_num_splits : *dimension_to_splits_map) {
const int dimension = dimension_and_num_splits.first;
const int num_splits = dimension_and_num_splits.second;
int padding = input_shape_known
? GetPadding(dimension, num_splits,
PartialTensorShape(input_type.getShape()))
: 0;
if (split_ops_for_tiled_input.empty()) {
mlir::TF::SplitOp root_split_op;
auto result = CreateSplitOp(num_splits, dimension, padding, location,
original_source, builder, &root_split_op,
is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
split_ops_for_tiled_input.emplace_back(root_split_op);
continue;
}
llvm::SmallVector<mlir::TF::SplitOp, 4> new_split_ops;
new_split_ops.reserve(split_ops_for_tiled_input.size() * num_splits);
for (auto split_op : split_ops_for_tiled_input) {
for (auto parent_split_output_value : split_op.getResults()) {
mlir::TF::SplitOp child_split_op;
auto result = CreateSplitOp(num_splits, dimension, padding, location,
parent_split_output_value, builder,
&child_split_op, is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
new_split_ops.emplace_back(child_split_op);
}
}
std::swap(new_split_ops, split_ops_for_tiled_input);
}
tiled_inputs->clear();
tiled_inputs->reserve(input_sharding.tile_assignment_devices_size());
for (auto split_op : split_ops_for_tiled_input) {
for (auto split_op_output : split_op.getResults()) {
int64_t repeat_count =
input_sharding.replicate_on_last_tile_dim()
? *input_sharding.tile_assignment_dimensions().rbegin()
: 1;
for (int64_t i = 0; i < repeat_count; ++i) {
tiled_inputs->push_back(split_op_output);
}
}
}
return mlir::success();
}
bool UnsupportedPartitionedShardingType(xla::OpSharding::Type sharding) {
return sharding != xla::OpSharding::REPLICATED &&
sharding != xla::OpSharding::OTHER;
}
}
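// Returns a map from tensor dimension index to the number of splits along
// that dimension, skipping dimensions with a single tile; returns an error
// when the sharding does not tile any dimension. For example,
// tile_assignment_dimensions = [2, 1, 4] yields {0: 2, 2: 4}.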
absl::StatusOr<std::map<int, int>> GetDimensionIndicesAndNumSplitsFromSharding(
const xla::OpSharding& sharding) {
int64_t tensor_tile_rank = sharding.tile_assignment_dimensions_size();
if (sharding.replicate_on_last_tile_dim()) {
tensor_tile_rank--;
}
std::map<int, int> dimension_to_splits_map;
for (int dim_index = 0; dim_index < tensor_tile_rank; ++dim_index) {
if (sharding.tile_assignment_dimensions(dim_index) > 1) {
dimension_to_splits_map.emplace(
dim_index, sharding.tile_assignment_dimensions(dim_index));
}
}
if (dimension_to_splits_map.empty()) {
return absl::InvalidArgumentError(absl::StrCat(
"Arg has unnecessary tiled sharding: ", sharding.DebugString()));
}
return dimension_to_splits_map;
}
int GetDimsFromXLAShardingTiled(const xla::OpSharding& xla_sharding) {
return xla_sharding.tile_assignment_dimensions_size() -
(xla_sharding.replicate_on_last_tile_dim() ? 1 : 0) -
xla_sharding.last_tile_dims_size();
}
bool IsOtherReplicatedSharding(const xla::OpSharding& xla_sharding) {
int max_dim = GetDimsFromXLAShardingTiled(xla_sharding);
for (int i = 0; i < max_dim; ++i) {
if (xla_sharding.tile_assignment_dimensions(i) != 1) {
return false;
}
}
return xla_sharding.type() == xla::OpSharding::OTHER &&
(xla_sharding.replicate_on_last_tile_dim() ||
!xla_sharding.last_tile_dims().empty());
}
bool IsSplitSharding(const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::OTHER &&
!IsOtherReplicatedSharding(sharding);
}
bool IsReplicatedSharding(const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::REPLICATED ||
IsOtherReplicatedSharding(sharding);
}
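// Parses `shard_str` either as a serialized xla::OpSharding proto or as a
// human-readable HLO sharding string.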
mlir::LogicalResult DecodeShardingAttribute(const std::string& shard_str,
xla::OpSharding& sharding,
bool report_error) {
if (sharding.ParseFromString(shard_str)) return mlir::success();
absl::StatusOr<xla::HloSharding> sharding_hlo = xla::ParseSharding(shard_str);
if (sharding_hlo.ok()) {
sharding = sharding_hlo->ToProto();
return mlir::success();
}
if (report_error)
llvm::errs() << std::string(sharding_hlo.status().message()) << "\n";
return mlir::failure();
}
mlir::LogicalResult DecodeShardingAttribute(mlir::Attribute shard_attr,
xla::OpSharding& sharding,
bool report_error) {
if (!mlir::isa<mlir::StringAttr>(shard_attr)) return mlir::failure();
auto shard_str = mlir::cast<mlir::StringAttr>(shard_attr).getValue().str();
return DecodeShardingAttribute(shard_str, sharding, report_error);
}
void EncodeSharding(mlir::Operation* op, llvm::StringRef shard_str) {
if (!op->hasAttrOfType<mlir::StringAttr>(shard_str)) return;
::xla::OpSharding sharding;
auto sharding_proto_str =
op->getAttrOfType<mlir::StringAttr>(shard_str).getValue().str();
if (!sharding.ParseFromString(sharding_proto_str)) return;
auto hlosharding = xla::HloSharding::FromProto(sharding);
if (!hlosharding.ok()) {
op->emitError("Unable to encode sharding to human readable ")
<< hlosharding.status().message();
return;
}
op->setAttr(shard_str,
mlir::StringAttr::get(op->getContext(), hlosharding->ToString()));
}
mlir::LogicalResult ExtractInputsForLogicalDevices(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func, mlir::OpBuilder* builder,
llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list) {
return ExtractInputsForLogicalDevices(num_cores_per_replica, cluster_func,
                                        builder, /*use_xla_nd_ops=*/false,
input_list);
}
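// Assigns the operands of `cluster_func` to `num_cores_per_replica` logical
// devices according to the input sharding configuration, creating split (and
// pad) ops for tile-sharded inputs.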
mlir::LogicalResult ExtractInputsForLogicalDevices(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func, mlir::OpBuilder* builder,
bool use_xla_nd_ops,
llvm::SmallVectorImpl<llvm::SmallVector<mlir::Value, 4>>* input_list) {
input_list->reserve(num_cores_per_replica);
for (int i = 0; i < num_cores_per_replica; ++i)
input_list->emplace_back(llvm::SmallVector<mlir::Value, 4>());
llvm::SmallVector<mlir::Value, 4> cluster_func_inputs(
cluster_func.getOperands());
auto sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kInputShardingAttr);
if (!sharding_attrs) {
(*input_list)[0] = cluster_func_inputs;
return mlir::success();
}
for (const auto& sharding_attr_and_index : llvm::enumerate(sharding_attrs)) {
const auto& sharding_attr = sharding_attr_and_index.value();
const auto input_index = sharding_attr_and_index.index();
const auto& input_value = cluster_func_inputs[input_index];
xla::OpSharding sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(sharding_attr).getValue().str(),
sharding)
.failed()) {
return cluster_func.emitError("incorrect sharding format for inputs");
}
const auto input_sharding_type = sharding.type();
auto tiled_sharding_mismatched = [&](int tiled_input_size) {
return cluster_func.emitError(
llvm::formatv("incorrect {0}-th tiled input sharding received. "
"Product of tile sharding splits({1}) must be equal to "
"number of logical devices : {2}",
input_index, tiled_input_size, num_cores_per_replica));
};
if (auto partitioned_input =
llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
input_value.getDefiningOp())) {
if (UnsupportedPartitionedShardingType(input_sharding_type))
return cluster_func->emitOpError()
<< "unsupported input sharding type "
<< OpSharding_Type_Name(input_sharding_type) << " for "
<< input_index << "-th input";
if (input_sharding_type == xla::OpSharding::REPLICATED) {
for (const auto& index_and_inputs : llvm::enumerate(*input_list)) {
index_and_inputs.value().emplace_back(
partitioned_input.getOperand(index_and_inputs.index()));
}
} else {
assert(input_sharding_type == xla::OpSharding::OTHER);
if (partitioned_input.getInputs().size() != num_cores_per_replica)
return tiled_sharding_mismatched(
partitioned_input.getInputs().size());
for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
const int assigned_logical_device =
sharding.tile_assignment_devices(i);
(*input_list)[assigned_logical_device].emplace_back(
partitioned_input.getInputs()[i]);
}
}
continue;
}
if (IsSplitSharding(sharding)) {
bool is_ici_weight_dist_spmd =
cluster_func.getOperand(input_index).getDefiningOp() &&
cluster_func.getOperand(input_index)
.getDefiningOp()
->hasAttr(kICIWeightDistributionMlirBridgeMarker);
llvm::SmallVector<mlir::Value, 4> tiled_inputs;
if (use_xla_nd_ops) {
auto result = HandleTileShardedInputsUsingXlaSplitOps(
cluster_func.getLoc(), sharding, input_value, builder,
&tiled_inputs, is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
} else {
auto result = HandleTileShardedInputsUsingTfSplitOps(
cluster_func.getLoc(), sharding, input_value, builder,
&tiled_inputs, is_ici_weight_dist_spmd);
if (mlir::failed(result)) return mlir::failure();
}
const int64_t tiled_inputs_size = tiled_inputs.size();
if (tiled_inputs_size != num_cores_per_replica)
return tiled_sharding_mismatched(tiled_inputs.size());
for (int i = 0; i < sharding.tile_assignment_devices_size(); ++i) {
const int assigned_logical_device = sharding.tile_assignment_devices(i);
(*input_list)[assigned_logical_device].emplace_back(tiled_inputs[i]);
}
} else if (IsReplicatedSharding(sharding)) {
for (auto& inputs : *input_list) inputs.emplace_back(input_value);
} else {
assert(input_sharding_type == xla::OpSharding::MAXIMAL);
const int logical_device_id = sharding.tile_assignment_devices(0);
(*input_list)[logical_device_id].emplace_back(input_value);
}
}
return mlir::success();
}
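// Parses the output sharding configuration attribute of `cluster_func` and
// validates each entry against the number of logical devices.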
mlir::LogicalResult ParseAndValidateOutputSharding(
const int num_cores_per_replica,
mlir::tf_device::ClusterFuncOp cluster_func,
mlir::SmallVector<xla::OpSharding, 4>* output_sharding_list) {
output_sharding_list->reserve(cluster_func.getNumResults());
const auto output_sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kOutputShardingAttr);
if (!output_sharding_attrs)
return cluster_func.emitError(
"output_sharding_configuration missing from cluster func");
if (output_sharding_attrs.size() != cluster_func.getNumResults())
return cluster_func.emitError("incorrect number of output sharding");
for (const auto& output_sharding_and_index :
llvm::enumerate(output_sharding_attrs)) {
const auto& output_sharding = output_sharding_and_index.value();
const int sharding_index = output_sharding_and_index.index();
if (!mlir::isa<mlir::StringAttr>(output_sharding))
return cluster_func.emitError(llvm::formatv(
"non-string output sharding at index {0}", sharding_index));
xla::OpSharding sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(output_sharding).getValue().str(),
sharding)
.failed()) {
return cluster_func.emitError("incorrect sharding format for outputs");
}
if (sharding.type() == xla::OpSharding::OTHER &&
sharding.tile_assignment_devices_size() != num_cores_per_replica)
return cluster_func.emitError(llvm::formatv(
"incorrect sharding format for outputs. Number of "
"tiled outputs({0}) must match the number of logical "
"devices({1})",
sharding.tile_assignment_devices_size(), num_cores_per_replica));
if (sharding.type() == xla::OpSharding::MAXIMAL &&
((sharding.tile_assignment_devices(0) >= num_cores_per_replica) ||
(sharding.tile_assignment_devices(0) < 0)))
return cluster_func.emitError(llvm::formatv(
"incorrect sharding format for outputs. Maximal "
"sharding should be assigned to device id in range "
"[0, {0}). Currently assigned to {1}",
num_cores_per_replica, sharding.tile_assignment_devices(0)));
output_sharding_list->emplace_back(std::move(sharding));
}
return mlir::success();
}
namespace {
bool IsAssignedToLogicalDevice(const int core_id,
const xla::OpSharding& sharding) {
return sharding.type() == xla::OpSharding::MAXIMAL &&
sharding.tile_assignment_devices(0) == core_id;
}
mlir::LogicalResult LookupClusterToCoreIndex(
const mlir::Location& location,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const int core_id, const int cluster_func_output_index,
int* core_output_index) {
*core_output_index =
cluster_to_core_index[core_id][cluster_func_output_index];
if (*core_output_index == -1) {
mlir::emitError(
location,
llvm::formatv("Attempted to map cluster_func output index {0} to "
"program assigned to core {1}. The tensor at this output "
"index was not assigned or sharded to this core.",
cluster_func_output_index, core_id));
return mlir::failure();
}
return mlir::success();
}
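// Collects, in tile-assignment order, the per-core outputs of the new
// parallel_execute op that must be merged to reconstruct a tile-sharded
// result.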
mlir::LogicalResult GetTileShardedOutputsToMerge(
const mlir::Location& location, const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
llvm::SmallVector<mlir::Value, 4>* outputs_to_merge) {
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
outputs_to_merge->reserve(sharding.tile_assignment_devices_size());
for (const auto& core_id_and_index :
llvm::enumerate(sharding.tile_assignment_devices())) {
auto core_id = core_id_and_index.value();
auto tile_index = core_id_and_index.index();
int last_tile_dim_size = *sharding.tile_assignment_dimensions().rbegin();
if (sharding.replicate_on_last_tile_dim() &&
tile_index % last_tile_dim_size != 0) {
continue;
}
int region_output_index;
auto status = LookupClusterToCoreIndex(location, cluster_to_core_index,
core_id, cluster_func_output_index,
®ion_output_index);
if (failed(status)) return mlir::failure();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(cluster_idx +
core_id)[region_output_index];
outputs_to_merge->emplace_back(output_from_logical_device);
}
return mlir::success();
}
mlir::LogicalResult HandleTileShardedOutputsUsingXlaConcatOps(
const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const mlir::Location& location, mlir::Value cluster_func_output,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder& builder) {
builder.setInsertionPointAfter(new_parallel_execute);
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
const std::vector<int64_t> num_concats(
sharding.tile_assignment_dimensions().begin(),
sharding.replicate_on_last_tile_dim()
? std::prev(sharding.tile_assignment_dimensions().end())
: sharding.tile_assignment_dimensions().end());
const int rank = sharding.replicate_on_last_tile_dim()
? sharding.tile_assignment_dimensions_size() - 1
: sharding.tile_assignment_dimensions_size();
std::vector<int64_t> paddings;
paddings.reserve(rank);
auto output_type =
mlir::cast<mlir::TensorType>(cluster_func_output.getType());
if (output_type.hasRank()) {
auto shape = llvm::to_vector<4>(output_type.getShape());
for (int dim = 0; dim < rank; ++dim) {
paddings.push_back(GetPadding(dim,
sharding.tile_assignment_dimensions(dim),
PartialTensorShape(shape)));
}
} else {
mlir::emitError(
location, "XLA concat/split ops are supported only for Ranked tensor.");
return mlir::failure();
}
llvm::SmallVector<mlir::Value, 4> outputs_to_merge;
auto status = GetTileShardedOutputsToMerge(
location, cluster_func_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&outputs_to_merge);
if (failed(status)) return mlir::failure();
mlir::TF::XlaConcatNDOp concat_op = CreateXlaConcatNDOp(
location, outputs_to_merge, num_concats, paddings, builder);
cluster_func_output.replaceAllUsesWith(concat_op.getResult());
return mlir::success();
}
mlir::LogicalResult HandleTileShardedOutputsUsingTfConcatOps(
const int cluster_func_output_index,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
const mlir::Location& location, mlir::Value cluster_func_output,
int cluster_idx, mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder* builder) {
builder->setInsertionPointAfter(new_parallel_execute);
llvm::SmallVector<mlir::Value, 4> outputs_to_merge;
auto status = GetTileShardedOutputsToMerge(
location, cluster_func_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&outputs_to_merge);
if (failed(status)) return mlir::failure();
const xla::OpSharding& sharding =
output_sharding_config[cluster_func_output_index];
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
auto output_type =
mlir::cast<mlir::TensorType>(cluster_func_output.getType());
PartialTensorShape shape;
bool output_shape_known = IsShapeKnown(output_type);
if (output_shape_known) {
shape = PartialTensorShape(output_type.getShape());
}
bool has_paddings = false;
std::vector<int64_t> paddings;
for (auto it = dimension_to_splits_map->rbegin();
it != dimension_to_splits_map->rend(); ++it) {
int concat_dimension = it->first;
int num_splits = it->second;
llvm::SmallVector<mlir::Value, 4> new_outputs;
new_outputs.reserve(num_splits);
for (int i = 0, end = outputs_to_merge.size(); i < end;
i = i + num_splits) {
int64_t padding;
if (output_shape_known) {
padding = GetPadding(concat_dimension, num_splits, shape);
} else {
padding = 0;
}
mlir::TF::ConcatOp concat_op =
CreateConcatOp(concat_dimension, location, padding,
llvm::ArrayRef<mlir::Value>{
outputs_to_merge.begin() + i,
outputs_to_merge.begin() + i + num_splits},
builder);
paddings.push_back(padding);
has_paddings |= padding > 0;
new_outputs.emplace_back(concat_op.getResult());
}
std::swap(new_outputs, outputs_to_merge);
}
assert(outputs_to_merge.size() == 1);
if (has_paddings) {
mlir::TF::SliceOp slice_op =
CreateSliceOp(builder, location, outputs_to_merge[0], shape);
cluster_func_output.replaceAllUsesWith(slice_op.getResult());
}
cluster_func_output.replaceAllUsesWith(outputs_to_merge[0]);
return mlir::success();
}
mlir::LogicalResult ValidateAndGetTiledExecuteOutputShape(
const mlir::Location& location,
const mlir::TensorType cluster_func_output_type,
const xla::OpSharding& output_sharding, bool use_xla_nd_ops,
mlir::Type* tiled_logical_computation_type) {
const auto output_shape = cluster_func_output_type.getShape();
auto new_output_shape = llvm::to_vector<4>(output_shape);
auto dimension_to_splits_map =
GetDimensionIndicesAndNumSplitsFromSharding(output_sharding);
if (!dimension_to_splits_map.ok()) {
LOG(ERROR) << dimension_to_splits_map.status();
return mlir::failure();
}
for (const auto& dimension_and_output_splits : *dimension_to_splits_map) {
const auto dimension = dimension_and_output_splits.first;
const auto output_splits = dimension_and_output_splits.second;
if (output_shape[dimension] == mlir::ShapedType::kDynamic) {
*tiled_logical_computation_type = cluster_func_output_type;
break;
}
if (output_shape[dimension] % output_splits == 0) {
new_output_shape[dimension] = output_shape[dimension] / output_splits;
} else {
new_output_shape[dimension] =
(output_shape[dimension] / output_splits) + 1;
}
}
*tiled_logical_computation_type = mlir::RankedTensorType::get(
new_output_shape, cluster_func_output_type.getElementType());
return mlir::success();
}
}
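// Returns true only if every split-sharded input and output of `cluster_func`
// has a statically known shape.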
bool AreInputOutputShapesStaticallyKnownForSplitSharding(
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func) {
bool sharded_input_output_shape_statically_known = true;
llvm::SmallVector<mlir::Value, 4> cluster_func_inputs(
cluster_func.getOperands());
auto sharding_attrs =
cluster_func.getOperation()->getAttrOfType<mlir::ArrayAttr>(
kInputShardingAttr);
if (sharding_attrs) {
for (const auto& sharding_attr_and_index :
llvm::enumerate(sharding_attrs)) {
const auto& sharding_attr = sharding_attr_and_index.value();
const auto input_index = sharding_attr_and_index.index();
const auto& input_value = cluster_func_inputs[input_index];
const auto input_type =
mlir::cast<mlir::TensorType>(input_value.getType());
xla::OpSharding input_sharding;
if (DecodeShardingAttribute(
mlir::cast<mlir::StringAttr>(sharding_attr).getValue().str(),
input_sharding)
.failed()) {
sharded_input_output_shape_statically_known = false;
}
if (IsSplitSharding(input_sharding)) {
sharded_input_output_shape_statically_known &= IsShapeKnown(input_type);
}
}
}
for (const auto& result_and_index :
llvm::enumerate(cluster_func.getResults())) {
const auto output_index = result_and_index.index();
const auto& output_sharding = output_sharding_config[output_index];
const auto cluster_func_output_type =
mlir::cast<mlir::TensorType>(result_and_index.value().getType());
if (IsSplitSharding(output_sharding)) {
sharded_input_output_shape_statically_known &=
IsShapeKnown(cluster_func_output_type);
}
}
return sharded_input_output_shape_statically_known;
}
mlir::LogicalResult GetOutputTypesForLogicalDeviceComputation(
const int core_id, llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func,
llvm::SmallVectorImpl<mlir::Type>* output_types,
llvm::SmallVectorImpl<int>* cluster_to_core_index) {
return GetOutputTypesForLogicalDeviceComputation(
core_id, output_sharding_config, cluster_func, output_types,
      /*use_xla_nd_ops=*/false, cluster_to_core_index);
}
mlir::LogicalResult GetOutputTypesForLogicalDeviceComputation(
const int core_id, llvm::ArrayRef<xla::OpSharding> output_sharding_config,
mlir::tf_device::ClusterFuncOp cluster_func,
llvm::SmallVectorImpl<mlir::Type>* output_types, bool use_xla_nd_ops,
llvm::SmallVectorImpl<int>* cluster_to_core_index) {
output_types->reserve(cluster_func.getNumResults());
int core_index = 0;
for (const auto& result_and_index :
llvm::enumerate(cluster_func.getResults())) {
const auto output_index = result_and_index.index();
const auto& output_sharding = output_sharding_config[output_index];
const auto cluster_func_output_type =
mlir::cast<mlir::TensorType>(result_and_index.value().getType());
if (IsSplitSharding(output_sharding)) {
mlir::Type tiled_logical_computation_type;
if (cluster_func_output_type.hasRank()) {
auto result = ValidateAndGetTiledExecuteOutputShape(
cluster_func.getLoc(), cluster_func_output_type, output_sharding,
use_xla_nd_ops, &tiled_logical_computation_type);
if (mlir::failed(result)) return mlir::failure();
} else {
tiled_logical_computation_type = cluster_func_output_type;
}
cluster_to_core_index->emplace_back(core_index++);
output_types->emplace_back(tiled_logical_computation_type);
} else if (IsReplicatedSharding(output_sharding) ||
IsAssignedToLogicalDevice(core_id, output_sharding)) {
cluster_to_core_index->emplace_back(core_index++);
output_types->emplace_back(cluster_func_output_type);
} else {
cluster_to_core_index->emplace_back(-1);
}
}
return mlir::success();
}
mlir::LogicalResult RemapOutputsFromLogicalDevices(
const mlir::Location& location,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int num_results_pre_cluster,
mlir::tf_device::ParallelExecuteOp old_parallel_execute, int cluster_idx,
mlir::tf_device::ParallelExecuteOp new_parallel_execute,
mlir::OpBuilder* builder) {
return RemapOutputsFromLogicalDevices(
location, output_sharding_config, cluster_to_core_index,
num_results_pre_cluster, old_parallel_execute, cluster_idx,
      new_parallel_execute, /*use_xla_nd_ops=*/false, builder);
}
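// Remaps uses of the old parallel_execute results to the corresponding
// results of the new parallel_execute op, inserting concat (and slice) ops to
// reassemble tile-sharded outputs.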
mlir::LogicalResult RemapOutputsFromLogicalDevices(
const mlir::Location& location,
llvm::ArrayRef<xla::OpSharding> output_sharding_config,
llvm::SmallVector<llvm::SmallVector<int, 4>, 4> cluster_to_core_index,
int num_results_pre_cluster,
mlir::tf_device::ParallelExecuteOp old_parallel_execute, int cluster_idx,
mlir::tf_device::ParallelExecuteOp new_parallel_execute,
bool use_xla_nd_ops, mlir::OpBuilder* builder) {
for (auto [output_index, old_parallel_execute_output] :
llvm::enumerate(old_parallel_execute.getResults())) {
if (output_index < num_results_pre_cluster) {
for (auto& use : llvm::make_early_inc_range(
old_parallel_execute->getResult(output_index).getUses())) {
use.set(new_parallel_execute->getResult(output_index));
}
continue;
}
int tpu_cluster_output_index = output_index - num_results_pre_cluster;
const auto& output_sharding =
output_sharding_config[tpu_cluster_output_index];
const auto output_sharding_type = output_sharding.type();
mlir::TF::TPUPartitionedOutputV2Op partitioned_output;
for (auto user : old_parallel_execute_output.getUsers()) {
if (auto partitioned_output_user =
llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedOutputV2Op>(
user)) {
partitioned_output = partitioned_output_user;
break;
}
}
if (partitioned_output) {
if (!old_parallel_execute_output.hasOneUse())
return partitioned_output.emitOpError()
<< "must be a unique user of TPU Cluster "
"(tf_device.old_parallel_execute) output "
<< *old_parallel_execute_output.getOwner();
if (UnsupportedPartitionedShardingType(output_sharding_type))
return old_parallel_execute.emitOpError()
<< "unsupported output sharding type "
<< OpSharding_Type_Name(output_sharding_type) << " for "
<< output_index << "-th output";
if (output_sharding_type == xla::OpSharding::REPLICATED) {
for (const auto& index_and_output :
llvm::enumerate(partitioned_output.getOutput())) {
auto idx = (cluster_idx + index_and_output.index()) %
new_parallel_execute->getNumRegions();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(
idx)[tpu_cluster_output_index];
index_and_output.value().replaceAllUsesWith(
output_from_logical_device);
}
} else {
assert(output_sharding_type == xla::OpSharding::OTHER);
llvm::SmallVector<mlir::Value, 4> tile_sharded_outputs;
if (failed(GetTileShardedOutputsToMerge(
location, tpu_cluster_output_index, output_sharding_config,
cluster_to_core_index, cluster_idx, new_parallel_execute,
&tile_sharded_outputs)))
return mlir::failure();
for (auto result :
llvm::zip(partitioned_output.getOutput(), tile_sharded_outputs))
std::get<0>(result).replaceAllUsesWith(std::get<1>(result));
}
continue;
}
if (IsSplitSharding(output_sharding)) {
if (use_xla_nd_ops) {
auto result = HandleTileShardedOutputsUsingXlaConcatOps(
tpu_cluster_output_index, output_sharding_config,
cluster_to_core_index, location, old_parallel_execute_output,
cluster_idx, new_parallel_execute, *builder);
if (mlir::failed(result)) return mlir::failure();
} else {
auto result = HandleTileShardedOutputsUsingTfConcatOps(
tpu_cluster_output_index, output_sharding_config,
cluster_to_core_index, location, old_parallel_execute_output,
cluster_idx, new_parallel_execute, builder);
if (failed(result)) return mlir::failure();
}
continue;
}
int logical_device_id = 0;
if (output_sharding_type == xla::OpSharding::MAXIMAL)
logical_device_id = output_sharding.tile_assignment_devices(0);
int region_output_index;
if (failed(LookupClusterToCoreIndex(
location, cluster_to_core_index, logical_device_id,
tpu_cluster_output_index, ®ion_output_index)))
return mlir::failure();
const auto output_from_logical_device =
new_parallel_execute.GetRegionOutputs(
cluster_idx + logical_device_id)[region_output_index];
old_parallel_execute_output.replaceAllUsesWith(output_from_logical_device);
}
return mlir::success();
}
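// Returns, for each logical core, the indices of the metadata arguments that
// the core receives, derived from each argument's sharding.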
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> GetMetadataArgumentMapping(
const tpu::TPUCompileMetadataProto& metadata) {
llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> input_mappings(
metadata.num_cores_per_replica(), llvm::SmallVector<int64_t, 4>());
if (metadata.num_cores_per_replica() == 1) {
input_mappings.front().resize(metadata.args_size());
std::iota(input_mappings.front().begin(), input_mappings.front().end(), 0);
return input_mappings;
}
for (const auto& arg_and_idx : llvm::enumerate(metadata.args())) {
const auto& sharding = arg_and_idx.value().sharding();
const int64_t idx = arg_and_idx.index();
const auto sharding_type = sharding.type();
if (sharding_type == xla::OpSharding::OTHER) {
for (const auto& device : sharding.tile_assignment_devices()) {
CHECK(device >= 0 && device < input_mappings.size());
input_mappings[device].push_back(idx);
}
} else if (sharding_type == xla::OpSharding::REPLICATED) {
for (auto& input : input_mappings) input.push_back(idx);
} else {
assert(sharding_type == xla::OpSharding::MAXIMAL);
CHECK(sharding.tile_assignment_devices(0) >= 0 &&
sharding.tile_assignment_devices(0) < input_mappings.size());
input_mappings[sharding.tile_assignment_devices(0)].push_back(idx);
}
}
return input_mappings;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tsl/platform/statusor.h"
namespace {
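// Deserializes an MLIR module from `string` with all TensorFlow dialects
// registered on `context`.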
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
TEST(XLAShardingUtilTest, TestShapesCheckForSplitSharding) {
static const char* const module_str =
R"(
module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:1/device:CPU:0", "/job:localhost/replica:0/task:1/device:TPU:0", "/job:localhost/replica:0/task:1/device:TPU:1", "/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0"]} {
func.func @parallel_execute_with_tiled_input(%arg0: tensor<128x9xf32>, %arg1: tensor<128x9xf32>, %arg2: tensor<128x10xi32>, %arg3: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%0:2, %1:2 = tf_device.replicate([%arg0, %arg1] as %ri_1: tensor<128x9xf32>, [%arg2, %arg3] as %ri_2: tensor<128x10xi32>) {n = 2 : i32} {
%1 = "tf_device.launch"() <{device = "TPU_REPLICATED_HOST_0"}> ({
%identity = "tf.Identity"(%ri_1) {ici_weight_distribution_mlir_bridge_marker = true} : (tensor<128x9xf32>) -> tensor<128x9xf32>
tf_device.return %identity : tensor<128x9xf32>
}) {ici_weight_distribution_mlir_bridge_marker = true} : () -> tensor<128x9xf32>
%2, %3 = "tf_device.cluster_func"(%1, %ri_2) {_xla_compile_device_type = "TPU", _replication_info = "cluster0", func = @tpu0_func, num_cores_per_replica = 2, step_marker_location = "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP", topology = "\0A\04\01\02\01\02\10\02\18\02\22\10\00\00\00\00\00\00\00\01\00\01\00\00\00\01\00\01", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], input_sharding_configuration = ["\08\03\1A\02\01\02\22\02\00\01", "\08\01\1A\01\01\22\01\01"], output_sharding_configuration = ["\08\01\1A\01\01\22\01\00", ""], use_spmd_for_xla_partitioning = false} : (tensor<128x9xf32>, tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
tf_device.return %2, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.return %0#0, %1#0 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.func @tpu0_func(%arg0: tensor<128x9xf32>, %arg1: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%1, %2 = "tf.A"(%arg0) : (tensor<128x9xf32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
%4 = "tf.B"(%1, %arg1) : (tensor<128x10xi32>, tensor<128x10xi32>) -> (tensor<128x10xi32>)
%3 = "tf.XlaSharding"(%2) { _XlaSharding = "", sharding = "" } : (tensor<10x5xi1>) -> tensor<10x5xi1>
func.return %4, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
auto cluster_func_op = cluster_func_ops[0];
llvm::SmallVector<xla::OpSharding, 4> output_shardings;
auto result = tensorflow::ParseAndValidateOutputSharding(2, cluster_func_op,
&output_shardings);
ASSERT_TRUE(succeeded(result));
ASSERT_TRUE(tensorflow::AreInputOutputShapesStaticallyKnownForSplitSharding(
output_shardings, cluster_func_op));
}
TEST(XLAShardingUtilTest, TestShapesCheckForSplitShardingWithUnknownShapes) {
static const char* const module_str =
R"(
module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:1/device:CPU:0", "/job:localhost/replica:0/task:1/device:TPU:0", "/job:localhost/replica:0/task:1/device:TPU:1", "/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0"]} {
func.func @parallel_execute_with_tiled_input(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>, %arg2: tensor<128x10xi32>, %arg3: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%0:2, %1:2 = tf_device.replicate([%arg0, %arg1] as %ri_1: tensor<*xf32>, [%arg2, %arg3] as %ri_2: tensor<128x10xi32>) {n = 2 : i32} {
%1 = "tf_device.launch"() <{device = "TPU_REPLICATED_HOST_0"}> ({
%identity = "tf.Identity"(%ri_1) {ici_weight_distribution_mlir_bridge_marker = true} : (tensor<*xf32>) -> tensor<*xf32>
tf_device.return %identity : tensor<*xf32>
}) {ici_weight_distribution_mlir_bridge_marker = true} : () -> tensor<*xf32>
%2, %3 = "tf_device.cluster_func"(%1, %ri_2) {_xla_compile_device_type = "TPU", _replication_info = "cluster0", func = @tpu0_func, num_cores_per_replica = 2, step_marker_location = "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP", topology = "\0A\04\01\02\01\02\10\02\18\02\22\10\00\00\00\00\00\00\00\01\00\01\00\00\00\01\00\01", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], input_sharding_configuration = ["\08\03\1A\02\01\02\22\02\00\01", "\08\01\1A\01\01\22\01\01"], output_sharding_configuration = ["\08\01\1A\01\01\22\01\00", ""], use_spmd_for_xla_partitioning = false} : (tensor<*xf32>, tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
tf_device.return %2, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.return %0#0, %1#0 : tensor<128x10xi32>, tensor<10x5xi1>
}
func.func @tpu0_func(%arg0: tensor<*xf32>, %arg1: tensor<128x10xi32>) -> (tensor<128x10xi32>, tensor<10x5xi1>) {
%1, %2 = "tf.A"(%arg0) : (tensor<*xf32>) -> (tensor<128x10xi32>, tensor<10x5xi1>)
%4 = "tf.B"(%1, %arg1) : (tensor<128x10xi32>, tensor<128x10xi32>) -> (tensor<128x10xi32>)
%3 = "tf.XlaSharding"(%2) { _XlaSharding = "", sharding = "" } : (tensor<10x5xi1>) -> tensor<10x5xi1>
func.return %4, %3 : tensor<128x10xi32>, tensor<10x5xi1>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
auto cluster_func_op = cluster_func_ops[0];
llvm::SmallVector<xla::OpSharding, 4> output_shardings;
auto result = tensorflow::ParseAndValidateOutputSharding(2, cluster_func_op,
&output_shardings);
ASSERT_TRUE(succeeded(result));
ASSERT_FALSE(tensorflow::AreInputOutputShapesStaticallyKnownForSplitSharding(
output_shardings, cluster_func_op));
}
TEST(XLAShardingUtilTest, NotDivisibleShardingSplitOpTest) {
static const char* const module_str =
R"(
module attributes {tf.versions = {producer = 888 : i32}, tf.devices = ["/job:localhost/replica:0/task:0/device:CPU:0", "/job:localhost/replica:0/task:0/device:TPU:0", "/job:localhost/replica:0/task:0/device:TPU:1", "/job:localhost/replica:0/task:0/device:TPU_SYSTEM:0", "/job:localhost/replica:0/task:1/device:CPU:0", "/job:localhost/replica:0/task:1/device:TPU:0", "/job:localhost/replica:0/task:1/device:TPU:1", "/job:localhost/replica:0/task:1/device:TPU_SYSTEM:0"]} {
func.func @uneven_input_sharding_disallowed(%arg0: tensor<128x10xf32>, %arg1: tensor<128x10xf32>, %arg2: tensor<*xi32>, %arg3: tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>) {
%0:2, %1:2 = tf_device.replicate([%arg0, %arg1] as %ri_1: tensor<128x10xf32>, [%arg2, %arg3] as %ri_2: tensor<*xi32>) {n = 2 : i32} {
%1, %2 = "tf_device.cluster_func"(%ri_1, %ri_2) {_xla_compile_device_type = "TPU", _replication_info = "cluster0", func = @tpu0_func, num_cores_per_replica = 2, step_marker_location = "STEP_MARK_AT_TOP_LEVEL_WHILE_LOOP", topology = "\0A\04\01\02\01\02\10\02\18\02\22\10\00\00\00\00\00\00\00\01\00\01\00\00\00\01\00\01", device_assignment = [0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0], input_sharding_configuration = ["\08\03\12\12\10\0b\1a\02\01\04\2a\06\0a\02\01\00\20\01\32\02\00\00\1a\02\01\04\22\04\00\01\02\03", "\08\01\1A\01\01\22\01\01"], output_sharding_configuration = ["\08\01\1A\01\01\22\01\00", ""], use_spmd_for_xla_partitioning = false} : (tensor<128x10xf32>, tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>)
tf_device.return %1, %2 : tensor<*xi32>, tensor<*xi1>
}
func.return %0#0, %1#0 : tensor<*xi32>, tensor<*xi1>
}
func.func @tpu0_func(%arg0: tensor<128x10xf32>, %arg1: tensor<*xi32>) -> (tensor<*xi32>, tensor<*xi1>) {
%1, %2 = "tf.A"(%arg0) : (tensor<128x10xf32>) -> (tensor<*xi32>, tensor<*xi1>)
%4 = "tf.B"(%1, %arg1) : (tensor<*xi32>, tensor<*xi32>) -> (tensor<*xi32>)
%3 = "tf.XlaSharding"(%2) { _XlaSharding = "", sharding = "" } : (tensor<*xi1>) -> tensor<*xi1>
func.return %4, %3 : tensor<*xi32>, tensor<*xi1>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
auto& cluster_func_op = cluster_func_ops[0];
int num_cores_per_replica = 4;
mlir::OpBuilder builder(&context);
bool use_xla_nd_ops = true;
llvm::SmallVector<llvm::SmallVector<mlir::Value, 4>, 4> input_list;
auto result = tensorflow::ExtractInputsForLogicalDevices(
num_cores_per_replica, cluster_func_op, &builder, use_xla_nd_ops,
&input_list);
ASSERT_TRUE(succeeded(result));
ASSERT_EQ(input_list.size(), num_cores_per_replica);
ASSERT_GT(input_list.front().size(), 0);
auto* op = input_list.front().front().getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::XlaSplitNDOp>(op));
op->destroy();
input_list.clear();
result = tensorflow::ExtractInputsForLogicalDevices(
num_cores_per_replica, cluster_func_op, &builder, false, &input_list);
ASSERT_TRUE(succeeded(result));
auto* split_op = input_list.front().front().getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::SplitOp>(split_op));
llvm::SmallVector<mlir::Value, 4> split_inputs(split_op->getOperands());
auto* const_op = split_inputs[0].getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::ConstOp>(const_op));
auto* pad_op = split_inputs[1].getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::PadOp>(pad_op));
llvm::SmallVector<mlir::Value, 4> pad_inputs(pad_op->getOperands());
auto* const_pad_value = pad_inputs[1].getDefiningOp();
ASSERT_TRUE(mlir::isa<mlir::TF::ConstOp>(const_pad_value));
split_op->destroy();
const_op->destroy();
pad_op->destroy();
const_pad_value->destroy();
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd5b418b-318e-4f6d-837b-518a563c09b1 | cpp | tensorflow/tensorflow | xla_rewrite_util | tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.cc | tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.h"
namespace tensorflow {
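// Erases each cluster_func together with its enclosing
// tf_device.parallel_execute: TPUPartitionedOutputV2 users of the
// parallel_execute results and single-use TPUPartitionedInputV2 producers of
// the cluster operands are deleted first, and failure is returned if the
// parent is not a parallel_execute or if it still has other uses.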
mlir::LogicalResult EraseClusterFuncs(
llvm::MutableArrayRef<mlir::tf_device::ClusterFuncOp> to_be_erased) {
for (auto cluster : to_be_erased) {
auto old_parallel_execute =
cluster->getParentOfType<mlir::tf_device::ParallelExecuteOp>();
if (!old_parallel_execute) {
LOG(ERROR) << "Parent op of cluster " << cluster.getOperationName().str()
<< " is not ParallelExecuteOp.";
return mlir::failure();
}
for (auto result : old_parallel_execute.getExecuteOutputs()) {
for (mlir::Operation* user :
llvm::make_early_inc_range(result.getUsers())) {
if (llvm::isa<mlir::TF::TPUPartitionedOutputV2Op>(user)) {
assert(user->use_empty());
user->erase();
}
}
}
for (auto operand : cluster.getOperands()) {
mlir::Operation* def = operand.getDefiningOp();
if (operand.hasOneUse() &&
llvm::isa_and_nonnull<mlir::TF::TPUPartitionedInputV2Op>(def)) {
operand.dropAllUses();
def->erase();
}
}
if (!old_parallel_execute->use_empty()) {
LOG(ERROR) << "Use of parallel execute op "
<< old_parallel_execute.getOperationName().str()
<< " is not empty.";
return mlir::failure();
}
old_parallel_execute->erase();
}
return mlir::success();
}
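// Builds the replacement parallel_execute with (#old regions - 1) preserved
// children plus num_cores_per_replica new ones, moves every non-cluster
// region body across (children after the cluster shift by
// num_cores_per_replica), and returns the region index the cluster_func
// occupied in the old op.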
int MovePreservedParallelExecuteChildren(
int num_cores_per_replica,
llvm::SmallVector<mlir::Type, 8>& concatenated_output_types,
mlir::OpBuilder* builder, mlir::tf_device::ClusterFuncOp cluster_func,
mlir::tf_device::ParallelExecuteOp old_parallel_execute,
mlir::tf_device::ParallelExecuteOp* new_parallel_execute) {
const size_t num_moved_children =
old_parallel_execute.getRegions().size() - 1;
*new_parallel_execute = builder->create<mlir::tf_device::ParallelExecuteOp>(
old_parallel_execute->getLoc(),
num_moved_children + num_cores_per_replica, concatenated_output_types);
int cluster_idx = -1;
for (size_t child_idx = 0;
child_idx < old_parallel_execute.getRegions().size(); ++child_idx) {
auto& block = old_parallel_execute.GetRegionBlockWithIndex(child_idx);
if (cluster_func->getBlock() == &block) {
assert(cluster_idx == -1);
cluster_idx = child_idx;
}
}
assert(cluster_idx != -1);
for (int child_idx = 0; child_idx < num_moved_children; ++child_idx) {
int old_idx = child_idx >= cluster_idx ? child_idx + 1 : child_idx;
int new_idx = child_idx >= cluster_idx ? child_idx + num_cores_per_replica
: child_idx;
new_parallel_execute->getRegions()[new_idx].takeBody(
old_parallel_execute.getRegions()[old_idx]);
}
return cluster_idx;
}
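// Wraps `op` in a tf_device.launch pinned to `device`: the launch gets op's
// result types, op is moved in front of the launch terminator, and the
// caller's insertion point is restored afterwards.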
mlir::tf_device::LaunchOp WrapOpInLaunch(mlir::OpBuilder* builder,
mlir::Location loc,
mlir::Operation* op,
llvm::StringRef device) {
mlir::OpBuilder::InsertPoint insert_point = builder->saveInsertionPoint();
auto launch = builder->create<mlir::tf_device::LaunchOp>(
loc, builder->getStringAttr(device), op->getResultTypes());
launch.getBody().push_back(new mlir::Block);
builder->setInsertionPointToEnd(&launch.GetBody());
builder->create<mlir::tf_device::ReturnOp>(loc, op->getResults());
op->moveBefore(launch.GetBody().getTerminator());
builder->restoreInsertionPoint(insert_point);
return launch;
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.h"
#include <string>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
namespace {
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> GetMlirModuleFromString(
llvm::StringRef string, mlir::MLIRContext* context) {
mlir::DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
TEST(XlaRewriteUtilTest, TestEraseClusterFuncs) {
static const char* const module_str =
R"(
module attributes {tf.devices = ["/job:worker/replica:0/task:0/device:CPU:0", "/job:worker/replica:0/task:0/device:GPU:0"]} {
func.func @convert_cluster_func(%arg0: tensor<i32>) -> () {
%2 = "tf_device.parallel_execute"() ({
%3 = "tf_device.cluster_func"(%arg0) {device = "/job:localhost/replica:0/task:0/device:GPU:0", func = @func} : (tensor<i32>) -> tensor<i32>
tf_device.return %3 : tensor<i32>
}) : () -> tensor<i32>
return
}
func.func @func(%arg0: tensor<i32>) -> tensor<i32> {
return %arg0 : tensor<i32>
}
}
)";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(cluster_func_ops.size(), 1);
EXPECT_TRUE(mlir::succeeded(tensorflow::EraseClusterFuncs(cluster_func_ops)));
llvm::SmallVector<mlir::tf_device::ClusterFuncOp, 4> new_cluster_func_ops;
module->walk([&](mlir::tf_device::ClusterFuncOp cluster_func) {
new_cluster_func_ops.push_back(cluster_func);
});
EXPECT_EQ(new_cluster_func_ops.size(), 0);
}
TEST(XlaRewriteUtilTest, TestWrapOpInLaunch) {
static const char* const module_str =
R"(
module attributes {tf.devices = {"/job:localhost/replica:0/task:0/device:CPU:0"}} {
func.func @main() -> () {
"tf_device.cluster"() ({
tf_device.return
}) {} : () -> ()
func.return
}
})";
mlir::MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(mlir::OwningOpRef<mlir::ModuleOp> module,
GetMlirModuleFromString(module_str, &context));
mlir::tf_device::ClusterOp cluster;
std::string device = "/job:localhost/replica:0/task:0/device:CPU:0";
module->walk(
[&](mlir::tf_device::ClusterOp descendant) { cluster = descendant; });
mlir::OpBuilder builder(&context);
auto loc = cluster->getLoc();
auto launch_op = tensorflow::WrapOpInLaunch(&builder, loc, cluster, device);
EXPECT_TRUE(llvm::isa<mlir::tf_device::LaunchOp>(launch_op));
launch_op->erase();
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/xla_rewrite_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6c806b30-785a-4e6a-bc6a-047845ca3876 | cpp | tensorflow/tensorflow | device_util | tensorflow/compiler/jit/device_util.cc | tensorflow/compiler/jit/device_util_test.cc | #include "tensorflow/compiler/jit/device_util.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "xla/status_macros.h"
namespace tensorflow {
namespace jit {
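// DeviceSet is a growable bitmap over device ids: id / kWordSize selects the
// uint64 word and id % kWordSize the bit within it, so Insert sets a single
// bit and UnionWith is a word-wise OR.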
void DeviceSet::Insert(DeviceId device_id) {
int word_index = device_id.id() / kWordSize;
int bit_index = device_id.id() % kWordSize;
const int storage_size = storage_.size();
if (word_index >= storage_size) {
storage_.resize(word_index + 1, 0);
}
storage_[word_index] |= (1ull << bit_index);
}
void DeviceSet::UnionWith(const DeviceSet& other) {
if (other.storage_.size() > storage_.size()) {
storage_.resize(other.storage_.size(), 0);
}
for (int i = 0, end = other.storage_.size(); i < end; i++) {
storage_[i] |= other.storage_[i];
}
}
bool DeviceSet::IsEmpty() const {
return absl::c_all_of(storage_, [&](uint64 val) { return val == 0; });
}
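// Interns a device name: returns the existing dense id if the name was seen
// before, otherwise assigns the next id and caches the parsed DeviceType, the
// is-CPU/is-GPU flags, and the XLA compilation device registration (nullptr
// when the device type has none).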
absl::StatusOr<DeviceId> DeviceInfoCache::GetIdFor(absl::string_view name) {
TF_RET_CHECK(!name.empty());
auto it = name_to_id_.find(name);
if (it != name_to_id_.end()) {
return it->second;
}
int new_id = names_.size();
names_.push_back(string(name));
id_to_device_type_.push_back(std::make_unique<DeviceType>(""));
DeviceType* device_type = id_to_device_type_.back().get();
TF_RETURN_IF_ERROR(DeviceNameToDeviceType(names_.back(), device_type));
is_cpu_.push_back(device_type->type_string() == DEVICE_CPU);
is_gpu_.push_back(device_type->type_string() == DEVICE_GPU);
name_to_id_.emplace(string(name), DeviceId(new_id));
const XlaOpRegistry::DeviceRegistration* compilation_device;
if (!XlaOpRegistry::GetCompilationDevice(device_type->type(),
&compilation_device)) {
compilation_device = nullptr;
}
id_to_compilation_device_.push_back(compilation_device);
return DeviceId(new_id);
}
string DeviceInfoCache::DebugString(const DeviceSet& device_set) const {
std::vector<string> names;
device_set.ForEach([&](DeviceId device_id) {
names.push_back(string(GetNameFor(device_id)));
return true;
});
return absl::StrCat("[", absl::StrJoin(names, ","), "]");
}
}
Status DeviceNameToDeviceType(const string& device, DeviceType* device_type) {
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(device, &parsed)) {
return errors::Internal("Malformed assigned device '", device, "'");
}
*device_type = DeviceType(parsed.type);
return absl::OkStatus();
}
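// Core device-picking heuristic. The set may contain at most one CPU, one
// GPU, and one "unknown" (neither CPU nor GPU) device, where a fully
// specified name may subsume a compatible partial one. Preference order is
// GPU, then unknown, then CPU; GPU together with an unknown device is always
// an error, and unknown together with CPU is an error unless
// allow_mixing_unknown_and_cpu. When failure_to_pick_is_error is false,
// failures yield std::nullopt instead of an error status.
// For example (mirroring the unit tests): {GPU:0, CPU:0} -> GPU:0, while
// {XPU:0, CPU:0} only resolves (to XPU:0) when mixing unknown and CPU is
// allowed.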
absl::StatusOr<std::optional<jit::DeviceId>> PickDeviceForXlaImpl(
const jit::DeviceInfoCache& device_info_cache,
const jit::DeviceSet& devices, bool allow_mixing_unknown_and_cpu,
bool failure_to_pick_is_error) {
#define FAILED_TO_PICK_DEVICE(failing_status) \
do { \
if (failure_to_pick_is_error) { \
return failing_status; \
} else { \
return {std::nullopt}; \
} \
} while (false)
std::optional<jit::DeviceId> maybe_gpu_device;
std::optional<jit::DeviceId> maybe_cpu_device;
std::optional<jit::DeviceId> maybe_unknown_device;
bool multiple_cpu_devices = false;
bool multiple_gpu_devices = false;
bool multiple_unknown_devices = false;
const auto is_multiple_devices =
[&](const jit::DeviceId& d0, std::optional<jit::DeviceId>* d1) -> bool {
const absl::string_view name0 = device_info_cache.GetNameFor(d0);
const absl::string_view name1 = device_info_cache.GetNameFor(d1->value());
DeviceNameUtils::ParsedName parsed0, parsed1;
if (!DeviceNameUtils::ParseFullName(name0, &parsed0) ||
!DeviceNameUtils::ParseFullName(name1, &parsed1) ||
!DeviceNameUtils::AreCompatibleDevNames(parsed0, parsed1)) {
return true;
}
if (DeviceNameUtils::IsSpecification(parsed0, parsed1)) {
return false;
}
if (DeviceNameUtils::IsSpecification(parsed1, parsed0)) {
*d1 = d0;
return false;
}
return true;
};
devices.ForEach([&](jit::DeviceId device) {
if (device_info_cache.IsGpu(device)) {
if (maybe_gpu_device) {
multiple_gpu_devices = is_multiple_devices(device, &maybe_gpu_device);
if (multiple_gpu_devices) return false;
} else {
maybe_gpu_device = device;
}
} else if (device_info_cache.IsCpu(device)) {
if (maybe_cpu_device) {
multiple_cpu_devices = is_multiple_devices(device, &maybe_cpu_device);
if (multiple_cpu_devices) return false;
} else {
maybe_cpu_device = device;
}
} else {
if (maybe_unknown_device) {
multiple_unknown_devices = true;
return false;
}
maybe_unknown_device = device;
}
return true;
});
if (multiple_cpu_devices) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Multiple CPU devices ", device_info_cache.DebugString(devices)));
}
if (multiple_gpu_devices) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Multiple GPU devices ", device_info_cache.DebugString(devices)));
}
if (multiple_unknown_devices) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Multiple unknown devices ", device_info_cache.DebugString(devices)));
}
if (maybe_unknown_device && maybe_gpu_device) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Found both unknown and GPU devices: ",
device_info_cache.GetNameFor(*maybe_unknown_device), ", ",
device_info_cache.GetNameFor(*maybe_gpu_device)));
}
if (!allow_mixing_unknown_and_cpu) {
if (maybe_unknown_device && maybe_cpu_device) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Found both unknown and CPU devices: ",
device_info_cache.GetNameFor(*maybe_unknown_device), ", ",
device_info_cache.GetNameFor(*maybe_cpu_device)));
}
}
if (maybe_gpu_device) {
return {*maybe_gpu_device};
} else if (maybe_unknown_device) {
return {*maybe_unknown_device};
} else if (maybe_cpu_device) {
return {*maybe_cpu_device};
}
FAILED_TO_PICK_DEVICE(errors::Internal("Empty device set!"));
#undef FAILED_TO_PICK_DEVICE
}
absl::StatusOr<jit::DeviceId> PickDeviceForXla(
const jit::DeviceInfoCache& device_info_cache,
const jit::DeviceSet& devices, bool allow_mixing_unknown_and_cpu) {
TF_ASSIGN_OR_RETURN(std::optional<jit::DeviceId> device_id,
PickDeviceForXlaImpl(device_info_cache, devices,
allow_mixing_unknown_and_cpu,
true));
return *device_id;
}
absl::StatusOr<std::optional<jit::DeviceId>> MaybePickDeviceForXla(
const jit::DeviceInfoCache& device_info_cache,
const jit::DeviceSet& devices, bool allow_mixing_unknown_and_cpu) {
return PickDeviceForXlaImpl(device_info_cache, devices,
allow_mixing_unknown_and_cpu,
false);
}
} | #include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
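// Test helpers: PickDeviceHelper builds a DeviceInfoCache/DeviceSet from the
// given device names and returns the name picked by PickDeviceForXla, so the
// checks below can compare plain strings or expect an error.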
Status PickDeviceHelper(bool allow_mixing_unknown_and_cpu,
absl::Span<const absl::string_view> device_names,
string* result) {
jit::DeviceInfoCache cache;
jit::DeviceSet device_set;
for (absl::string_view name : device_names) {
TF_ASSIGN_OR_RETURN(jit::DeviceId device_id, cache.GetIdFor(name));
device_set.Insert(device_id);
}
TF_ASSIGN_OR_RETURN(
jit::DeviceId result_id,
PickDeviceForXla(cache, device_set, allow_mixing_unknown_and_cpu));
*result = string(cache.GetNameFor(result_id));
return absl::OkStatus();
}
void CheckPickDeviceResult(absl::string_view expected_result,
bool allow_mixing_unknown_and_cpu,
absl::Span<const absl::string_view> inputs) {
string result;
TF_ASSERT_OK(PickDeviceHelper(allow_mixing_unknown_and_cpu, inputs, &result))
<< "inputs = [" << absl::StrJoin(inputs, ", ")
<< "], allow_mixing_unknown_and_cpu=" << allow_mixing_unknown_and_cpu
<< ", expected_result=" << expected_result;
EXPECT_EQ(result, expected_result);
}
void CheckPickDeviceHasError(bool allow_mixing_unknown_and_cpu,
absl::Span<const absl::string_view> inputs) {
string result;
EXPECT_FALSE(
PickDeviceHelper(allow_mixing_unknown_and_cpu, inputs, &result).ok());
}
const char* kCPU0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const char* kGPU0 = "/job:localhost/replica:0/task:0/device:GPU:0";
const char* kXPU0 = "/job:localhost/replica:0/task:0/device:XPU:0";
const char* kYPU0 = "/job:localhost/replica:0/task:0/device:YPU:0";
const char* kCPU1 = "/job:localhost/replica:0/task:0/device:CPU:1";
const char* kGPU1 = "/job:localhost/replica:0/task:0/device:GPU:1";
const char* kXPU1 = "/job:localhost/replica:0/task:0/device:XPU:1";
const char* kCPU0Partial = "/device:CPU:0";
const char* kGPU0Partial = "/device:GPU:0";
const char* kXPU0Partial = "/device:XPU:0";
TEST(PickDeviceForXla, UniqueDevice) {
CheckPickDeviceResult(kGPU0, false, {kGPU0, kGPU0});
}
TEST(PickDeviceForXla, MoreSpecificDevice) {
CheckPickDeviceResult(kCPU0, false, {kCPU0, kCPU0Partial});
CheckPickDeviceResult(kGPU0, false, {kGPU0, kGPU0Partial});
CheckPickDeviceHasError(false, {kXPU1, kXPU0Partial});
}
TEST(PickDeviceForXla, DeviceOrder) {
CheckPickDeviceResult(kGPU0, false, {kGPU0, kCPU0});
CheckPickDeviceResult(kGPU0, false, {kCPU0, kGPU0});
CheckPickDeviceResult(kXPU0, true, {kXPU0, kCPU0});
}
TEST(PickDeviceForXla, MultipleUnknownDevices) {
CheckPickDeviceHasError(false, {kXPU0, kYPU0});
}
TEST(PickDeviceForXla, GpuAndUnknown) {
CheckPickDeviceHasError(false, {kGPU0, kXPU1});
}
TEST(PickDeviceForXla, UnknownAndCpu) {
CheckPickDeviceHasError(false, {kXPU0, kCPU1});
}
TEST(PickDeviceForXla, MultipleDevicesOfSameType) {
CheckPickDeviceHasError(true, {kCPU0, kCPU1});
CheckPickDeviceHasError(false, {kCPU0, kCPU1});
CheckPickDeviceHasError(false, {kGPU0, kGPU1});
CheckPickDeviceHasError(false, {kXPU0, kXPU1});
CheckPickDeviceHasError(false, {kCPU0, kCPU1, kGPU0});
}
void SimpleRoundTripTestForDeviceSet(int num_devices) {
jit::DeviceSet device_set;
jit::DeviceInfoCache device_info_cache;
std::vector<string> expected_devices, actual_devices;
for (int i = 0; i < num_devices; i++) {
string device_name =
absl::StrCat("/job:localhost/replica:0/task:0/device:XPU:", i);
TF_ASSERT_OK_AND_ASSIGN(jit::DeviceId device_id,
device_info_cache.GetIdFor(device_name));
device_set.Insert(device_id);
expected_devices.push_back(device_name);
}
device_set.ForEach([&](jit::DeviceId device_id) {
actual_devices.push_back(string(device_info_cache.GetNameFor(device_id)));
return true;
});
EXPECT_EQ(expected_devices, actual_devices);
}
TEST(DeviceSetTest, SimpleRoundTrip_One) { SimpleRoundTripTestForDeviceSet(1); }
TEST(DeviceSetTest, SimpleRoundTrip_Small) {
SimpleRoundTripTestForDeviceSet(8);
}
TEST(DeviceSetTest, SimpleRoundTrip_Large) {
SimpleRoundTripTestForDeviceSet(800);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f59b481f-e9c6-46ac-944c-564850fc6d2d | cpp | tensorflow/tensorflow | dump_mlir_util | tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc | tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/Pass/Pass.h"
#include "xla/tsl/lib/io/buffered_file.h"
#include "tensorflow/core/platform/crash_analysis.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/path.h"
using llvm::raw_ostream;
namespace tensorflow {
namespace {
struct NameCounts {
mutex counts_mutex;
llvm::StringMap<int64_t> counts;
};
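// Sanitizes the name (replacing '/', '[', ']', '*', '?' and '\\' with '_'),
// appends a per-name counter after the first use, and adds a ".mlir" suffix;
// e.g. "foo/bar" becomes "foo_bar.mlir" and, on a second call,
// "foo_bar_1.mlir".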
std::string MakeUniqueFilename(string name) {
static NameCounts& instance = *new NameCounts;
for (int i = 0, e = name.size(); i < e; ++i) {
char ch = name[i];
if (ch == '/' || ch == '[' || ch == ']' || ch == '*' || ch == '?' ||
ch == '\\') {
name[i] = '_';
}
}
int count;
{
mutex_lock lock(instance.counts_mutex);
count = instance.counts[name]++;
}
std::string filename = name;
if (count > 0) {
filename = llvm::formatv("{0}_{1}", filename, count).str();
}
filename = llvm::Twine(filename).concat(".mlir").str();
return filename;
}
struct LogInfoRawStream : public llvm::raw_ostream {
LogInfoRawStream() { SetUnbuffered(); }
~LogInfoRawStream() override = default;
uint64_t current_pos() const override { return 0; }
void write_impl(const char* ptr, size_t size) override {
fprintf(stderr, "%.*s", static_cast<int>(size), ptr);
}
};
struct WritableFileRawStream : public llvm::raw_ostream {
explicit WritableFileRawStream(std::unique_ptr<WritableFile> file)
: file(std::move(file)) {
SetUnbuffered();
}
~WritableFileRawStream() override = default;
uint64_t current_pos() const override {
int64_t position;
if (file->Tell(&position).ok()) {
return position;
} else {
LOG(WARNING)
<< "Couldn't query file position. Stream might be malformed.\n";
return -1;
}
}
void write_impl(const char* ptr, size_t size) override {
if (file && !file->Append(StringPiece(ptr, size)).ok()) {
file = nullptr;
}
}
std::unique_ptr<WritableFile> file;
};
struct CrashReproducerStream : public mlir::ReproducerStream {
CrashReproducerStream(llvm::StringRef name,
std::unique_ptr<llvm::raw_ostream> file)
: name(name), ostream(std::move(file)) {}
llvm::StringRef description() override { return name; }
raw_ostream& os() override { return *ostream; }
private:
std::string name;
std::unique_ptr<llvm::raw_ostream> ostream;
};
struct CrashAnalysisCrashReproducerStream : public mlir::ReproducerStream {
public:
CrashAnalysisCrashReproducerStream()
: internal_str(""), string_stream(internal_str) {}
~CrashAnalysisCrashReproducerStream() override {
crash_analysis::ReportEvent(
"mlir_crash_reproducer.mlir",
"Pass pipeline failure; crash reproducer attached",
string_stream.str());
}
llvm::StringRef description() override { return "mlir_crash_reproducer"; }
raw_ostream& os() override { return string_stream; }
private:
std::string internal_str;
llvm::raw_string_ostream string_stream;
};
}
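// Resolves the dump directory (explicit `dirname`, else TF_DUMP_GRAPH_PREFIX)
// and opens a stream for writing. An empty prefix yields an InvalidArgument
// status, kCrashReproducerStdErr routes the dump to stderr, and otherwise the
// directory is created and a buffered writable file under a unique filename
// is returned together with its path.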
Status CreateFileForDumping(llvm::StringRef name,
std::unique_ptr<raw_ostream>* os,
std::string* filepath, llvm::StringRef dirname) {
std::string dir;
if (!dirname.empty())
dir = std::string(dirname);
else
dir = GetDumpDirFromEnvVar();
if (dir.empty()) {
return Status(absl::StatusCode::kInvalidArgument,
"(TF_DUMP_GRAPH_PREFIX not specified)");
}
if (dir == kCrashReproducerStdErr) {
*os = std::make_unique<LogInfoRawStream>();
*filepath =
llvm::formatv("(stderr; requested filename: '{0}')", name).str();
return Status();
}
Env* env = Env::Default();
Status status = env->RecursivelyCreateDir(dir);
if (!status.ok()) {
LOG(WARNING) << "Failed to create '" << dir
<< "' directory for dumping: " << status;
return Status(absl::StatusCode::kUnavailable, "(unavailable)");
}
*filepath = io::JoinPath(dir, MakeUniqueFilename(std::string(name)));
std::unique_ptr<WritableFile> file;
status = env->NewWritableFile(*filepath, &file);
if (!status.ok()) {
LOG(WARNING) << "Failed to create file '" << filepath << "': " << status;
return Status(absl::StatusCode::kUnavailable, "(unavailable)");
}
file = std::make_unique<tsl::BufferedWritableFile>(std::move(file));
*os = std::make_unique<WritableFileRawStream>(std::move(file));
return Status();
}
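// Prints the pass manager's textual pipeline as an MLIR reproducer footer
// ({-# external_resources: { mlir_reproducer: ... } #-}) so the dumped module
// records which passes were about to run on it.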
void PrintPassPipeline(const mlir::PassManager& pass_manager,
mlir::Operation* op, llvm::raw_ostream& os) {
std::string str;
llvm::raw_string_ostream passOS(str);
llvm::interleaveComma(
pass_manager.getPasses(), passOS,
[&](mlir::Pass& pass) { pass.printAsTextualPipeline(passOS); });
os << "{-# external_resources: { mlir_reproducer: { pipeline: "
"\"builtin.module("
<< passOS.str() << ")\", ";
os << "disable_threading: true, ";
os << "verify_each: true } } #-}";
os << "\n\n";
}
std::string DumpMlirOpToFile(llvm::StringRef name, mlir::Operation* op,
llvm::StringRef dirname,
const mlir::PassManager* pass_manager) {
std::unique_ptr<raw_ostream> os;
std::string filepath;
Status result = CreateFileForDumping(name, &os, &filepath, dirname);
if (!result.ok()) return std::string(result.message());
LOG(INFO) << "Dumping MLIR operation '" << op->getName().getStringRef().str()
<< "' to '" << filepath << "'";
if (pass_manager) PrintPassPipeline(*pass_manager, op, *os);
op->print(*os, mlir::OpPrintingFlags().useLocalScope());
return filepath;
}
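// Reads TF_DUMP_GRAPH_PREFIX; the special value "sponge" is redirected to the
// undeclared test outputs directory, and an empty result means dumping stays
// disabled.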
std::string GetDumpDirFromEnvVar() {
const char* prefix_env = getenv("TF_DUMP_GRAPH_PREFIX");
if (!prefix_env) {
LOG(WARNING)
<< "Failed to dump MLIR module because dump location is not "
<< "specified through TF_DUMP_GRAPH_PREFIX environment variable.";
return "";
}
std::string result = prefix_env;
if (absl::EqualsIgnoreCase(result, "sponge") &&
!io::GetTestUndeclaredOutputsDir(&result)) {
LOG(WARNING) << "TF_DUMP_GRAPH_PREFIX=sponge but "
"TEST_UNDECLARED_OUTPUT_DIRS is not set";
return "";
}
return result;
}
std::string DumpRawStringToFile(llvm::StringRef name, llvm::StringRef content,
llvm::StringRef dirname) {
std::unique_ptr<raw_ostream> os;
std::string filepath;
Status result = CreateFileForDumping(name, &os, &filepath, dirname);
if (!result.ok()) return std::string(result.message());
(*os) << content;
LOG(INFO) << "Outputted requested string to '" << filepath << "'";
return filepath;
}
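// Chooses where crash reproducers go (the explicit dir_path, else
// MLIR_CRASH_REPRODUCER_DIRECTORY or a "sponge" redirection to the test
// outputs directory), creates a unique mlir_reproducer_*.mlir file when a
// real directory is used, and installs the matching ReproducerStream factory
// on the pass manager via enableCrashReproducerGeneration.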
void SetCrashReproducer(mlir::PassManager& pm, llvm::StringRef dir_path) {
std::string path = dir_path.str();
if (path.empty() || path == kCrashReproducerCrashAnalysis) {
if (getenv("MLIR_CRASH_REPRODUCER_DIRECTORY"))
path = getenv("MLIR_CRASH_REPRODUCER_DIRECTORY");
else if (getenv("TEST_UNDECLARED_OUTPUTS_DIR"))
path = "sponge";
}
if (path.empty()) {
LOG_FIRST_N(INFO, 1) << "disabling MLIR crash reproducer, set env var "
"`MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.";
return;
}
string lower_path = absl::AsciiStrToLower(path);
if (lower_path == "sponge") {
if (!tensorflow::io::GetTestUndeclaredOutputsDir(&path)) {
LOG(ERROR) << "MLIR crash reproducer is set to '" << dir_path.str()
<< "', but environment variable TEST_UNDECLARED_OUTPUTS_DIR "
"is not set, so cannot dump anywhere.";
return;
}
}
if (path != kCrashReproducerStdErr && path != kCrashReproducerCrashAnalysis) {
auto* env = tensorflow::Env::Default();
auto status = env->RecursivelyCreateDir(path);
if (!status.ok()) {
LOG(WARNING) << "cannot create directory '" << path
<< "': " << status.message();
return;
}
path += "/mlir_reproducer_";
if (!tensorflow::Env::Default()->CreateUniqueFileName(&path, ".mlir")) {
LOG(WARNING) << "cannot create unique filename, won't enable MLIR crash "
"reproducer.";
return;
}
}
mlir::ReproducerStreamFactory factory =
[path](std::string& error) -> std::unique_ptr<mlir::ReproducerStream> {
if (path == kCrashReproducerStdErr)
return std::make_unique<CrashReproducerStream>(
"(stderr)", std::make_unique<LogInfoRawStream>());
if (path == kCrashReproducerCrashAnalysis) {
return std::make_unique<CrashAnalysisCrashReproducerStream>();
}
std::unique_ptr<WritableFile> file;
Status status = tensorflow::Env::Default()->NewWritableFile(path, &file);
file = std::make_unique<tsl::BufferedWritableFile>(std::move(file));
if (!status.ok()) {
error = absl::StrCat("Failed to create file '", path,
"': ", status.message());
return nullptr;
}
return std::make_unique<CrashReproducerStream>(
path, std::make_unique<WritableFileRawStream>(std::move(file)));
};
pm.enableCrashReproducerGeneration(factory, false);
}
void applyTensorflowAndCLOptions(mlir::PassManager& pm,
llvm::StringRef dir_path) {
mlir::registerPassManagerCLOptions();
if (!mlir::succeeded(mlir::applyPassManagerCLOptions(pm))) {
LOG(ERROR) << "cannot apply MLIR pass manager CL options";
return;
}
SetCrashReproducer(pm, dir_path);
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllPasses.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/FileUtilities.h"
#include "mlir/Tools/mlir-opt/MlirOptMain.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/bridge.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
using ::testing::IsNull;
TEST(DumpMlirModuleTest, NoEnvPrefix) {
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
unsetenv("TF_DUMP_GRAPH_PREFIX");
std::string filepath = DumpMlirOpToFile("module", module_ref.get());
EXPECT_EQ(filepath, "(TF_DUMP_GRAPH_PREFIX not specified)");
}
TEST(DumpMlirModuleTest, LogInfo) {
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
setenv("TF_DUMP_GRAPH_PREFIX", "-", 1);
std::string filepath = DumpMlirOpToFile("module", module_ref.get());
EXPECT_EQ(filepath, "(stderr; requested filename: 'module')");
}
TEST(DumpMlirModuleTest, Valid) {
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
std::string expected_txt_module;
{
llvm::raw_string_ostream os(expected_txt_module);
module_ref->getOperation()->print(os,
mlir::OpPrintingFlags().useLocalScope());
os.flush();
}
std::string filepath = DumpMlirOpToFile("module", module_ref.get());
ASSERT_NE(filepath, "(TF_DUMP_GRAPH_PREFIX not specified)");
ASSERT_NE(filepath, "LOG(INFO)");
ASSERT_NE(filepath, "(unavailable)");
Env* env = Env::Default();
std::string file_txt_module;
TF_ASSERT_OK(ReadFileToString(env, filepath, &file_txt_module));
EXPECT_EQ(file_txt_module, expected_txt_module);
}
TEST(DumpCrashReproducerTest, RoundtripDumpAndReadValid) {
mlir::registerPassManagerCLOptions();
mlir::MLIRContext context;
mlir::OwningOpRef<mlir::ModuleOp> module_ref =
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context));
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
std::string filepath =
testing::TmpDir() + "/" + mlir::TF::kStandardPipelineBefore + ".mlir";
std::string output_dump = testing::TmpDir() + "/" + "output_dump.txt";
TF_ASSERT_OK(mlir::TF::RunBridgeWithStandardPipeline(
module_ref.get(),
true, false));
std::string errorMessage;
auto input_file = mlir::openInputFile(filepath, &errorMessage);
EXPECT_THAT(input_file, Not(IsNull()));
auto output_stream = mlir::openOutputFile(output_dump, &errorMessage);
EXPECT_THAT(output_stream, Not(IsNull()));
mlir::PassPipelineCLParser passPipeline(
"", "Compiler passes to run", "p");
mlir::DialectRegistry registry;
mlir::registerAllDialects(registry);
mlir::RegisterAllTensorFlowDialects(registry);
mlir::registerAllPasses();
mlir::registerTensorFlowPasses();
EXPECT_TRUE(mlir::MlirOptMain(output_stream->os(), std::move(input_file),
registry,
mlir::MlirOptMainConfig{}
.splitInputFile("")
.verifyDiagnostics(false)
.verifyPasses(false)
.allowUnregisteredDialects(false)
.setPassPipelineParser(passPipeline))
.succeeded());
}
TEST(DumpRawStringToFileTest, Valid) {
llvm::StringRef example = "module {\n}";
setenv("TF_DUMP_GRAPH_PREFIX", testing::TmpDir().c_str(), 1);
std::string filepath = DumpRawStringToFile("example", example);
ASSERT_NE(filepath, "(TF_DUMP_GRAPH_PREFIX not specified)");
ASSERT_NE(filepath, "LOG(INFO)");
ASSERT_NE(filepath, "(unavailable)");
Env* env = Env::Default();
std::string file_txt_module;
TF_ASSERT_OK(ReadFileToString(env, filepath, &file_txt_module));
EXPECT_EQ(file_txt_module, example);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa5f9250-0c55-434f-b6b0-db857da5921c | cpp | tensorflow/tensorflow | data_dumper_logger_config | tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.cc | tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include <functional>
#include <memory>
#include <string>
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
namespace tensorflow {
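// Pass instrumentation that dumps the module before/after each pass. File
// names come from `get_filename` applied to pass_prefix + "before_"/"after_"
// + the pass name, and a dump is only written when the inherited
// BridgeLoggerConfig::ShouldPrint filter accepts the pass (see the
// MLIR_BRIDGE_LOG_PASS_FILTER usage in the test below).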
DataDumperLoggerConfig::DataDumperLoggerConfig(
std::function<std::string(const std::string &, mlir::Operation *op)>
get_filename,
const std::string &pass_prefix, bool print_module_scope,
bool print_after_only_on_change, mlir::OpPrintingFlags op_printing_flags)
: ::tensorflow::BridgeLoggerConfig(
print_module_scope, print_after_only_on_change, op_printing_flags),
get_filename_(get_filename),
pass_prefix_(pass_prefix) {}
void DataDumperLoggerConfig::printBeforeIfEnabled(
mlir::Pass *pass, mlir::Operation *op, PrintCallbackFn print_callback) {
std::string pass_name = pass->getName().str();
std::string filename =
get_filename_(pass_prefix_ + "before_" + pass_name, op);
if (ShouldPrint(pass, op)) DumpMlir(filename, print_callback);
}
void DataDumperLoggerConfig::printAfterIfEnabled(
mlir::Pass *pass, mlir::Operation *op, PrintCallbackFn print_callback) {
std::string pass_name = pass->getName().str();
std::string filename = get_filename_(pass_prefix_ + "after_" + pass_name, op);
if (ShouldPrint(pass, op)) DumpMlir(filename, print_callback);
}
void DataDumperLoggerConfig::DumpMlir(
const std::string &filename,
BridgeLoggerConfig::PrintCallbackFn print_callback) {
std::unique_ptr<llvm::raw_ostream> os;
std::string filepath;
if (tensorflow::CreateFileForDumping(filename, &os, &filepath).ok()) {
print_callback(*os);
LOG(INFO) << "Dumped MLIR module to " << filepath;
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
static const char *const module_with_add =
R"(module {
func.func @main(%arg0: tensor<3x4x5xf32>, %arg1: tensor<3x4x5xf32>) -> tensor<3x4x5xf32> {
%0 = "tf.AddV2"(%arg0, %arg1) : (tensor<3x4x5xf32>, tensor<3x4x5xf32>) -> tensor<3x4x5xf32>
func.return %0 : tensor<3x4x5xf32>
}
}
)";
TEST(DataDumperLoggerConfig, TestPassFilter) {
mlir::DialectRegistry mlir_registry;
mlir::RegisterAllTensorFlowDialects(mlir_registry);
mlir::MLIRContext mlir_context(mlir_registry);
mlir::OwningOpRef<mlir::ModuleOp> mlir_module_with_add;
TF_ASSERT_OK(DeserializeMlirModule(module_with_add, &mlir_context,
&mlir_module_with_add));
std::unique_ptr<mlir::Pass> partitioning_pass =
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass();
std::unique_ptr<mlir::Pass> shape_inference_pass =
mlir::TF::CreateTFShapeInferencePass();
std::unique_ptr<mlir::Pass> inliner_pass = mlir::createInlinerPass();
setenv("MLIR_BRIDGE_LOG_PASS_FILTER",
"TPUResourceReadsWritesPartitioningPass;TensorFlowShapeInferencePass",
1);
setenv("TF_DUMP_GRAPH_PREFIX", "sponge", 1);
const string kTestFilename = "test.txt";
int print_callback_count = 0;
auto get_filename_fn = [](const string &filename, mlir::Operation *op) {
return filename;
};
auto print_callback = [&](llvm::raw_ostream &out) {
print_callback_count++;
return;
};
DataDumperLoggerConfig data_dumper_logger_config(get_filename_fn);
data_dumper_logger_config.printBeforeIfEnabled(
partitioning_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 1);
data_dumper_logger_config.printBeforeIfEnabled(
shape_inference_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 2);
data_dumper_logger_config.printBeforeIfEnabled(
inliner_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 2);
data_dumper_logger_config.printAfterIfEnabled(
partitioning_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 3);
data_dumper_logger_config.printAfterIfEnabled(
shape_inference_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 4);
data_dumper_logger_config.printAfterIfEnabled(
inliner_pass.get(), mlir_module_with_add.get(), print_callback);
EXPECT_EQ(print_callback_count, 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c8ba4521-9d25-4c76-a227-837e51e9995a | cpp | tensorflow/tensorflow | cluster_util | tensorflow/compiler/mlir/tensorflow/utils/cluster_util.cc | tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc | #include "tensorflow/compiler/mlir/tensorflow/utils/cluster_util.h"
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/IR/Block.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/Matchers.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"
#include "tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h"
namespace mlir::TF {
namespace {
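// Collects the ops that appear after cluster `c`'s first op in the same block
// and (transitively) consume values defined inside the cluster, then widens
// the set with every member of any other cluster those ops already belong to.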
llvm::SetVector<Operation*> GetAllOpsDependOnCluster(
const Cluster& c,
const llvm::DenseMap<Operation*, Cluster*>& op_to_cluster_map) {
llvm::SetVector<Operation*> ops_depend_on_cluster;
for (Operation& op : *c.ops.front()->getBlock()) {
if (op.isBeforeInBlock(c.ops.front()) || c.ops.contains(&op)) {
continue;
}
llvm::SetVector<Value> live_ins(op.operand_begin(), op.operand_end());
getUsedValuesDefinedAbove(op.getRegions(), live_ins);
if (llvm::any_of(live_ins, [&](Value value) {
Operation* defining_op = value.getDefiningOp();
if (!defining_op) {
return false;
}
return c.ops.contains(defining_op) ||
ops_depend_on_cluster.contains(defining_op);
})) {
ops_depend_on_cluster.insert(&op);
}
}
llvm::SetVector<Operation*> same_cluster_ops_with_dependency(
ops_depend_on_cluster.begin(), ops_depend_on_cluster.end());
for (Operation* op : ops_depend_on_cluster) {
Cluster* cluster = op_to_cluster_map.lookup(op);
if (cluster == nullptr) {
continue;
}
for (Operation* ops_in_same_cluster : cluster->ops) {
same_cluster_ops_with_dependency.insert(ops_in_same_cluster);
}
}
return same_cluster_ops_with_dependency;
}
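// An op may join cluster `c` only if it has no side-effecting control
// predecessor that sits after c's last op in the block, and none of its
// operands are produced by ops that depend on the cluster -- either situation
// would create a cycle once the cluster is outlined.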
bool CanMergeIntoCluster(
const Cluster& c, Operation* to_merge,
const TF::SideEffectAnalysis::Info& side_effect_analysis,
std::function<std::string(Operation*)> get_target,
const llvm::DenseMap<Operation*, Cluster*>& op_to_cluster_map) {
const bool has_control_predecessors_after_cluster =
!side_effect_analysis
.DirectControlPredecessors(
to_merge,
[&c](Operation* pred) {
Operation* const last_c_op = c.ops.back();
return last_c_op->getBlock() == pred->getBlock() &&
last_c_op->isBeforeInBlock(pred);
})
.empty();
if (has_control_predecessors_after_cluster) {
return false;
}
llvm::SetVector<Operation*> ops_depend_on_cluster =
GetAllOpsDependOnCluster(c, op_to_cluster_map);
return llvm::none_of(to_merge->getOperands(), [&](Value value) {
Operation* defining_op = value.getDefiningOp();
return defining_op && ops_depend_on_cluster.contains(defining_op);
});
}
}
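// Single forward walk over the block: each non-ignored op either joins the
// nearest open cluster for its target (when CanMergeIntoCluster allows) or
// finalizes that cluster and opens a new one. The returned map holds, per
// target name, the finished clusters in program order.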
llvm::StringMap<SmallVector<Cluster>> BuildAllClusters(
Block& block, const TF::SideEffectAnalysis::Info& side_effect_analysis,
std::function<std::string(Operation*)> get_target,
std::function<bool(Operation*)> is_ignored_op) {
llvm::StringMap<SmallVector<Cluster>> all_clusters;
llvm::DenseMap<Operation*, Cluster*> op_to_cluster_map;
llvm::StringMap<Cluster> nearest_clusters;
for (Operation& op : llvm::make_early_inc_range(block)) {
if (is_ignored_op(&op)) {
continue;
}
std::string target_name = get_target(&op);
auto it = nearest_clusters.find(target_name);
if (it == nearest_clusters.end()) {
SetVector<Operation*> new_cluster_op_set;
new_cluster_op_set.insert(&op);
nearest_clusters[target_name] = Cluster{new_cluster_op_set, target_name};
op_to_cluster_map[&op] = &nearest_clusters[target_name];
continue;
}
Cluster& nearest_cluster = it->second;
if (CanMergeIntoCluster(nearest_cluster, &op, side_effect_analysis,
get_target, op_to_cluster_map)) {
nearest_cluster.ops.insert(&op);
op_to_cluster_map[&op] = &nearest_cluster;
continue;
}
all_clusters[target_name].push_back(nearest_cluster);
SetVector<Operation*> new_cluster_op_set;
new_cluster_op_set.insert(&op);
nearest_clusters[target_name] = Cluster{new_cluster_op_set, target_name};
op_to_cluster_map[&op] = &nearest_clusters[target_name];
}
for (auto& target_cluster : nearest_clusters) {
all_clusters[target_cluster.first()].push_back(target_cluster.second);
}
return all_clusters;
}
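// Moves every op that currently sits before `cluster` but (transitively)
// consumes one of its results to just after the cluster, preserving the ops'
// relative order.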
void ReorderOpResultUses(mlir::Operation* cluster) {
mlir::Block* const cluster_block = cluster->getBlock();
llvm::SetVector<mlir::Operation*> ops_to_reorder;
llvm::SmallVector<mlir::Value> worklist;
llvm::append_range(worklist, cluster->getResults());
while (!worklist.empty()) {
mlir::Value value = worklist.back();
worklist.pop_back();
for (mlir::Operation* const user : value.getUsers()) {
mlir::Operation* const op = cluster_block->findAncestorOpInBlock(*user);
if (op == nullptr || !op->isBeforeInBlock(cluster)) {
continue;
}
if (ops_to_reorder.insert(op)) {
llvm::append_range(worklist, op->getResults());
}
}
}
llvm::SmallVector<mlir::Operation*, 0> sorted = ops_to_reorder.takeVector();
llvm::sort(sorted, [](mlir::Operation* lhs, mlir::Operation* rhs) {
return lhs->isBeforeInBlock(rhs);
});
for (mlir::Operation* const op : llvm::reverse(sorted)) {
op->moveAfter(cluster);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/utils/cluster_util.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/serialize_mlir_module_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace mlir::TF {
namespace {
constexpr StringRef kTestClusterName = "tpu0";
absl::StatusOr<OwningOpRef<ModuleOp>> GetMlirModuleFromString(
StringRef string, MLIRContext* context) {
DialectRegistry mlir_registry;
RegisterAllTensorFlowDialects(mlir_registry);
context->appendDialectRegistry(mlir_registry);
OwningOpRef<ModuleOp> mlir_module;
auto status =
tensorflow::DeserializeMlirModule(string, context, &mlir_module);
if (!status.ok()) {
return status;
}
return mlir_module;
}
std::string GetDevice(Operation* op) {
auto device_attr = op->getAttrOfType<StringAttr>("device");
return device_attr ? device_attr.getValue().str() : "";
}
bool CanBeIgnoredInCluster(Operation* op) {
auto device_attr = op->getAttrOfType<StringAttr>("device");
return !device_attr || device_attr.getValue().empty();
}
llvm::StringMap<SmallVector<Cluster>> GetClusters(ModuleOp module) {
TF::SideEffectAnalysis side_effect_analysis(module);
auto main_func = module.lookupSymbol<func::FuncOp>("main");
const TF::SideEffectAnalysis::Info& info =
side_effect_analysis.GetAnalysisForFunc(main_func);
llvm::StringMap<SmallVector<Cluster>> clusters = BuildAllClusters(
main_func.front(), info, GetDevice, CanBeIgnoredInCluster);
return clusters;
}
TEST(BuildClusters, TestSingleCluster) {
static const char* const module_with_single_cluster =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.C"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.D"(%2) : (tensor<?xi32>) -> tensor<?xi32>
func.return %3 : tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_single_cluster, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters.lookup(kTestClusterName).size(), 1);
EXPECT_EQ(clusters.lookup(kTestClusterName)[0].ops.size(), 2);
}
TEST(BuildClusters, TestMultipleClusters) {
static const char* const module_with_two_clusters =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.C"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.D"(%2) : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.E"(%3) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.F"(%3, %4) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %5 : tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_two_clusters, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters[kTestClusterName].size(), 2);
EXPECT_EQ(clusters[kTestClusterName][0].ops.size(), 2);
EXPECT_EQ(clusters[kTestClusterName][1].ops.size(), 2);
}
TEST(BuildClusters, TestMultipleTargets) {
static const char* const module_with_two_clusters =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> tensor<?xi32> {
%0 = "tf.A"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.B"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.C"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.D"(%2) : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.E"(%3) {device = "tpu1"} : (tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.F"(%3, %4) {device = "tpu1"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %5 : tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_two_clusters, &context));
auto clusters = GetClusters(module.get());
constexpr StringRef kTarget0 = "tpu0";
EXPECT_EQ(clusters.count(kTarget0), 1);
EXPECT_EQ(clusters[kTarget0].size(), 1);
EXPECT_EQ(clusters[kTarget0][0].ops.size(), 2);
constexpr StringRef kTarget1 = "tpu1";
EXPECT_EQ(clusters.count(kTarget1), 1);
EXPECT_EQ(clusters[kTarget1].size(), 1);
EXPECT_EQ(clusters[kTarget1][0].ops.size(), 2);
}
TEST(BuildClusters, TestMergedClusters) {
static const char* const module_with_single_cluster =
R"(module {
func.func @main(%arg0: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
%0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.Relu"(%2) : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.Relu"(%1) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.Add"(%1, %2) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %3, %5 : tensor<?xi32>, tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_single_cluster, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters[kTestClusterName].size(), 1);
EXPECT_EQ(clusters[kTestClusterName][0].ops.size(), 4);
}
TEST(BuildClusters, TestMergedClustersWithDataDependen) {
static const char* const module_with_single_cluster =
R"(module {
func.func @main(%arg0: tensor<?xi32>, %arg1: tensor<?xi32>) -> (tensor<?xi32>, tensor<?xi32>) {
%0 = "tf.Relu"(%arg0) : (tensor<?xi32>) -> tensor<?xi32>
%1 = "tf.Relu"(%0) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%2 = "tf.Add"(%0, %1) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%3 = "tf.Relu"(%arg1) {device = "tpu1"} : (tensor<?xi32>) -> tensor<?xi32>
%4 = "tf.Add"(%3, %arg1) {device = "tpu1"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
%5 = "tf.Relu"(%4) {device = "tpu0"} : (tensor<?xi32>) -> tensor<?xi32>
%6 = "tf.Add"(%4, %5) {device = "tpu0"} : (tensor<?xi32>, tensor<?xi32>) -> tensor<?xi32>
func.return %3, %5 : tensor<?xi32>, tensor<?xi32>
}
}
)";
MLIRContext context;
TF_ASSERT_OK_AND_ASSIGN(
OwningOpRef<ModuleOp> module,
GetMlirModuleFromString(module_with_single_cluster, &context));
auto clusters = GetClusters(module.get());
EXPECT_EQ(clusters.count(kTestClusterName), 1);
EXPECT_EQ(clusters[kTestClusterName].size(), 1);
EXPECT_EQ(clusters[kTestClusterName][0].ops.size(), 4);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/cluster_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/utils/cluster_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f8652a50-48a4-49b0-8ae4-073028700611 | cpp | tensorflow/tensorflow | gpu_fusion | tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc | third_party/xla/xla/service/gpu/tests/gpu_fusion_test.cc | #include "llvm/ADT/STLExtras.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#define DEBUG_TYPE "tf-gpu-op-fusion"
namespace mlir {
namespace TF {
namespace {
#define GEN_PASS_DEF_TENSORFLOWGPUFUSION
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc"
class GpuOpFusionPass : public impl::TensorflowGPUFusionBase<GpuOpFusionPass> {
public:
void runOnOperation() final;
};
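// Fuses Relu(FusedBatchNormV3(...)) and Relu(AddV2(FusedBatchNormV3(...),
// side_input)) patterns into a single _FusedBatchNormEx op. Only matches
// inference-mode batch norms whose main output has a single use.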
struct ReluToFusedBatchNorm : public OpRewritePattern<ReluOp> {
using OpRewritePattern<ReluOp>::OpRewritePattern;
LogicalResult matchAndRewrite(ReluOp relu_op,
PatternRewriter &rewriter) const override {
Operation *relu_input = relu_op.getFeatures().getDefiningOp();
if (!relu_input) return failure();
auto batch_norm = dyn_cast_or_null<FusedBatchNormV3Op>(relu_input);
AddV2Op add_op;
Value side_input;
if (!batch_norm) {
add_op = dyn_cast_or_null<AddV2Op>(relu_input);
if (!add_op) return failure();
batch_norm =
dyn_cast_or_null<FusedBatchNormV3Op>(add_op.getX().getDefiningOp());
if (batch_norm) {
side_input = add_op.getY();
} else {
batch_norm =
dyn_cast_or_null<FusedBatchNormV3Op>(add_op.getY().getDefiningOp());
if (!batch_norm) return failure();
side_input = add_op.getX();
}
}
assert(batch_norm);
if (batch_norm.getIsTraining()) return failure();
if (!batch_norm.getY().hasOneUse()) return failure();
OperationState state(batch_norm.getLoc(),
_FusedBatchNormExOp::getOperationName());
state.addOperands(batch_norm.getOperands());
if (side_input) state.operands.push_back(side_input);
state.addTypes(batch_norm.getResultTypes());
state.addAttributes(batch_norm->getAttrs());
Operation *op = rewriter.create(state);
rewriter.replaceOp(batch_norm, op->getResults());
if (!add_op || add_op.getZ().hasOneUse()) {
op->setAttr("activation_mode", rewriter.getStringAttr("Relu"));
rewriter.replaceOp(relu_op, op->getResult(0));
}
if (add_op) {
rewriter.replaceOp(add_op, op->getResult(0));
}
return success();
}
};
void GpuOpFusionPass::runOnOperation() {
func::FuncOp func = getOperation();
RewritePatternSet patterns(&getContext());
patterns.add<ReluToFusedBatchNorm>(&getContext());
(void)applyPatternsAndFoldGreedily(func, std::move(patterns));
}
}
std::unique_ptr<OperationPass<func::FuncOp>> CreateGpuOpFusionPass() {
return std::make_unique<GpuOpFusionPass>();
}
}
} | #include <cstdint>
#include <optional>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/gpu/transforms/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace {
class GpuFusionTest : public GpuCodegenTest {};
TEST_F(GpuFusionTest, FusedReshape) {
const char* hlo_text = R"(
HloModule test_module
fused_computation {
p0.param_0 = f32[4,1,1]{2,1,0} parameter(0)
p1.param_1 = f32[4,1]{1,0} parameter(1)
reshape = f32[4,1]{1,0} reshape(p0.param_0)
ROOT add = f32[4,1] add(reshape, p1.param_1)
}
ENTRY BroadcastIntoAdd {
p0 = f32[4,1,1]{2,1,0} parameter(0)
p1 = f32[4,1]{1,0} parameter(1)
ROOT fusion = f32[4,1]{1,0} fusion(p0, p1), kind=kLoop,
calls=fused_computation
}
)";
CompileAndVerifyIr(hlo_text,
R"(
; CHECK-LABEL: @fusion
; CHECK: fadd
; CHECK: }
)",
false,
false);
}
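// Builds a concatenate of MaxOperandsAndOutputsPerFusion() + 1 slices of a
// single parameter and checks that instruction fusion still fuses away every
// slice and leaves a fusion at the entry computation root.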
TEST_F(GpuFusionTest, FusedBiggerThanThresholdButDoNotChangeTheFusion) {
constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1;
auto module = CreateNewVerifiedModule();
HloComputation::Builder b(TestName());
Shape input_shape = ShapeUtil::MakeShape(F32, {10, 100});
Shape slice_shape = ShapeUtil::MakeShape(F32, {10, 2});
Shape concat_shape = ShapeUtil::MakeShape(F32, {10, 2 * kNumParams});
HloInstruction* input =
b.AddInstruction(HloInstruction::CreateParameter(0, input_shape, "p"));
std::vector<HloInstruction*> slice_params;
for (int64_t i = 0; i < kNumParams; ++i) {
slice_params.push_back(b.AddInstruction(HloInstruction::CreateSlice(
slice_shape, input, {0, 0}, {10, 2}, {1, 1})));
}
b.AddInstruction(
HloInstruction::CreateConcatenate(concat_shape, slice_params, 1));
module->AddEntryComputation(b.Build());
EXPECT_TRUE(GpuInstructionFusion(false,
TestGpuDeviceInfo::RTXA6000DeviceInfo())
.Run(module.get())
.value());
EXPECT_TRUE(module->entry_computation()->root_instruction()->opcode() ==
HloOpcode::kFusion);
for (HloInstruction* instr : module->entry_computation()->instructions()) {
EXPECT_TRUE(instr->opcode() != HloOpcode::kSlice);
}
}
class TransposeFusionTest : public GpuFusionTest {
public:
void CheckGpuFusion(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(
hlo,
GpuInstructionFusion{true,
TestGpuDeviceInfo::RTXA6000DeviceInfo()},
expected);
}
};
TEST_F(TransposeFusionTest, ElementaryWithTranspose) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
s = sqrt(p)
ROOT t = f32[32,16]{1,0} transpose(s), dimensions={1,0}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ReshapeAfterTransposeFused) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
s = sqrt(p)
t = f32[32,16]{1,0} transpose(s), dimensions={1,0}
ROOT r = f32[32,16,1]{2,1,0} reshape(t)
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ReshapeSimpleFusion) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[256,16]{1,0} parameter(0)
r = f32[16,16,16]{2,1,0} reshape(p)
s = sqrt(r)
ROOT t = f32[16,16,16]{2,1,0} transpose(s), dimensions={0,2,1}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ElementaryLogical) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[1,16,32]{2,1,0} parameter(0)
s = f32[1,16,32]{2,1,0} sqrt(p)
ROOT c = f32[1,32,16]{2,1,0} transpose(s), dimensions={0,2,1}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
TEST_F(TransposeFusionTest, ReshapeSimpleFusionLogical) {
const char* hlo = R"(
HloModule module
ENTRY main {
p = f32[256,16]{1,0} parameter(0)
r = f32[16,16,16]{2,1,0} reshape(p)
s = sqrt(r)
ROOT c = f32[16,16,16]{2,1,0} transpose(s), dimensions={1,0,2}
}
)";
CheckGpuFusion(hlo, R"(
)");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/tests/gpu_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
885df65a-587e-4769-8329-7922bd1595c9 | cpp | tensorflow/tensorflow | verify_no_outside_compilation_markers_pass | tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass.cc | tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass_test.cc | #include <memory>
#include <string>
#include "mlir/Pass/Pass.h"
#include "absl/log/log.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
namespace mlir {
namespace TFDevice {
namespace {
constexpr char kXlaOutsideCompilationAttr[] = "_xla_outside_compilation";
#define GEN_PASS_DEF_VERIFYNOOUTSIDECOMPILATIONMARKERSPASS
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.h.inc"
class VerifyNoOutsideCompilationMarkersPass
: public impl::VerifyNoOutsideCompilationMarkersPassBase<
VerifyNoOutsideCompilationMarkersPass> {
public:
void runOnOperation() override;
};
bool IsLaunchOp(Operation& op) {
return dyn_cast<tf_device::LaunchOp>(op) != nullptr;
}
bool IsDeviceClusterOp(Operation& op) {
return dyn_cast<tf_device::ClusterOp>(op) != nullptr;
}
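// Returns true if `op` is a tf_device.cluster that still contains a
// tf_device.launch op somewhere in its body.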
bool HasChildLaunchDeviceOp(Operation& op) {
auto cluster_op = dyn_cast<tf_device::ClusterOp>(op);
if (cluster_op == nullptr) return false;
auto walk_result = cluster_op->walk([&](Operation* op) {
if (IsLaunchOp(*op)) return WalkResult::interrupt();
return WalkResult::advance();
});
return walk_result.wasInterrupted();
}
bool HasXlaOutsideCompilationMarker(Operation& op) {
return op.getAttrOfType<StringAttr>(kXlaOutsideCompilationAttr) != nullptr;
}
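// Fails the pass if any tf_device.cluster still wraps a tf_device.launch op or
// if any op still carries the _xla_outside_compilation attribute; both should
// have been removed by the outside compilation passes.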
void VerifyNoOutsideCompilationMarkersPass::runOnOperation() {
Operation* func_op = getOperation();
auto walk_result = func_op->walk([&](Operation* op) {
if (IsDeviceClusterOp(*op) && HasChildLaunchDeviceOp(*op)) {
      std::string launch_error =
          absl::StrCat("Node `", op->getName().getStringRef().str(), "` ",
                       "contains a launch op which should have been removed "
                       "by outside compilation");
op->emitError() << launch_error;
LOG(ERROR) << launch_error;
return WalkResult::interrupt();
}
if (HasXlaOutsideCompilationMarker(*op)) {
std::string outside_compilation_error = absl::StrCat(
"Node `", op->getName().getStringRef().str(), "` ",
"has _xla_outside_compilation set which should have been removed by "
"outside compilation");
op->emitError() << outside_compilation_error;
LOG(ERROR) << outside_compilation_error;
return WalkResult::interrupt();
}
return WalkResult::advance();
});
if (walk_result.wasInterrupted()) {
signalPassFailure();
}
}
}
std::unique_ptr<mlir::OperationPass<func::FuncOp>>
CreateVerifyNoOutsideCompilationMarkersPass() {
return std::make_unique<VerifyNoOutsideCompilationMarkersPass>();
}
}
} | #include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tf2xla/transforms/test_utils.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFDevice {
using ::mlir::MLIRContext;
using ::mlir::ModuleOp;
using ::mlir::OwningOpRef;
using ::mlir::mhlo::test::GetMlirModuleFromString;
class VerifyNoOutsideCompilationMarkersPassTest : public ::testing::Test {
protected:
void CreateModule(const char* module_string) {
TF_ASSERT_OK_AND_ASSIGN(module_,
GetMlirModuleFromString(module_string, &context_));
pm_ = std::make_unique<mlir::PassManager>(&context_);
pm_->addNestedPass<func::FuncOp>(
CreateVerifyNoOutsideCompilationMarkersPass());
}
mlir::LogicalResult Run() { return pm_->run(module_.get()); }
private:
MLIRContext context_;
OwningOpRef<ModuleOp> module_;
std::unique_ptr<mlir::PassManager> pm_;
};
TEST_F(VerifyNoOutsideCompilationMarkersPassTest, PassesValidOps) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> tensor<1xi32> {
%0 = "tf.Const"() {value = dense<1000> : tensor<1xi32>} : () -> tensor<1xi32>
func.return %0 : tensor<1xi32>
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
}
TEST_F(VerifyNoOutsideCompilationMarkersPassTest,
FailsXlaOutsideCompilationMarkers) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
"tf.B"() {_xla_outside_compilation = "cluster1"} : () -> ()
func.return
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
}
TEST_F(VerifyNoOutsideCompilationMarkersPassTest,
FailsWithLaunchOpsInsideCluster) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
%0 = "tf_device.cluster"() ({
"tf_device.launch"() ({
"tf.B"() : () -> ()
tf_device.return
}) {device = "/job:worker/replica:0/task:0/device:CPU:0"} : () -> ()
tf_device.return
}) {cluster_attr = "cluster_attr"} : () -> tensor<*xi32>
func.return
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.failed());
}
TEST_F(VerifyNoOutsideCompilationMarkersPassTest,
PassesWithLaunchOpsOutsideCluster) {
static constexpr char kMlirModuleStr[] = R"(
module attributes {tf.versions = {bad_consumers = [], min_consumer = 0 : i32, producer = 268 : i32}} {
func.func @main() -> () {
"tf_device.launch"() ({
"tf.B"() : () -> ()
tf_device.return
}) {device = "/job:worker/replica:0/task:0/device:CPU:0"} : () -> ()
func.return
}
})";
CreateModule(kMlirModuleStr);
auto result = Run();
EXPECT_TRUE(result.succeeded());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/verify_no_outside_compilation_markers_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9e6a59cb-d456-47b0-8f26-afe5f12ed80f | cpp | tensorflow/tensorflow | lower_cluster_to_runtime_ops | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.cc | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops_test.cc | #include <memory>
#include <string>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/data_dumper_logger_config.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/error_logging.h"
#include "tsl/platform/errors.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
using mlir::LogicalResult;
using mlir::OpPassManager;
using mlir::PassManager;
using mlir::func::FuncOp;
using mlir::TF::StandardPipelineOptions;
void EnablePassIRPrinting(PassManager& pm, const std::string& dump_group_name,
llvm::StringRef module_name) {
pm.getContext()->disableMultithreading();
pm.enableIRPrinting(std::make_unique<::tensorflow::DataDumperLoggerConfig>(
[module_name, dump_group_name](const std::string& pass_tag_name,
mlir::Operation* op) {
return DEBUG_DATA_DUMPER()->GetDumpFilename(
module_name.str(), dump_group_name, pass_tag_name);
},
"",
true));
pm.enableTiming();
}
}
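// Adds the passes that lower tf_device.cluster ops to TPU runtime ops such as
// TPUCompile, including variable merging and runtime reformatting passes.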
void AddTPULowerClusterToRuntimeOpsPassPipeline(OpPassManager& pm,
llvm::StringRef module_name) {
pm.addPass(mlir::TFTPU::CreateTPURewritePass(module_name));
pm.addPass(mlir::createSymbolDCEPass());
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateReplicateInvariantOpHoistingPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateEmbeddingProgramKeyPass());
pm.addPass(mlir::TFTPU::CreateTPUMergeVariablesWithExecutePass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateExtractTPUCopyWithDynamicShapeOpPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUColocateCompositeResourceOps());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_tpu_variable_runtime_reformatting_pass) {
pm.addPass(mlir::TFTPU::CreateTPUVariableRuntimeReformattingPass());
}
}
void AddNonTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, llvm::StringRef module_name) {
pm.addPass(mlir::TFDevice::CreateXlaRewritePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<FuncOp>(mlir::createCSEPass());
pm.addPass(mlir::createSymbolDCEPass());
}
void CreateTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, const StandardPipelineOptions& options) {
AddTPULowerClusterToRuntimeOpsPassPipeline(pm, "");
}
void CreateNonTPULowerClusterToRuntimeOpsPassPipeline(
OpPassManager& pm, const StandardPipelineOptions& options) {
AddNonTPULowerClusterToRuntimeOpsPassPipeline(pm, "");
}
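// Logs a failed phase-one bridge status and records it in the streamz counter
// before returning it; OK statuses are returned unchanged.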
tensorflow::Status RecordIfErrorStatus(const std::string error_prefix,
std::string bridge_type,
tsl::DeviceType device_type,
absl::Status status) {
if (status.ok()) {
return status;
}
VLOG(2) << error_prefix << " " << status;
tensorflow::metrics::UpdateTfMlirBridgeFirstPhaseCounter(
bridge_type,
mlir::TF::kMlirPh1BridgeCounterV2,
device_type.type_string(),
false,
"failure");
std::string bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_TPU_BRIDGE";
tsl::OkOrSetErrorCounterPayload(
tensorflow::core::platform::ErrorSourceProto::MLIR_BRIDGE_PHASE_1,
status);
if (device_type != DeviceType(DEVICE_TPU_XLA_JIT)) {
bridge_subcomponent = "TFXLA_PHASE_ONE_MLIR_CPU/GPU_BRIDGE";
}
tsl::error_logging::Log(mlir::TF::kBridgeComponent, bridge_subcomponent,
status.ToString())
.IgnoreError();
return status;
}
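// Runs the TPU or CPU/GPU runtime-lowering pipeline on `module`, dumping the
// module before and after lowering when the relevant debug dumps are enabled.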
absl::Status RunLowerClusterToRuntimeOpsPassPipeline(
mlir::ModuleOp module, tsl::DeviceType xla_device_type,
llvm::StringRef module_name) {
PassManager runtime_lowering(module.getContext());
::tensorflow::applyTensorflowAndCLOptions(runtime_lowering);
if (xla_device_type == DeviceType(DEVICE_TPU_XLA_JIT)) {
AddTPULowerClusterToRuntimeOpsPassPipeline(runtime_lowering, module_name);
} else {
AddNonTPULowerClusterToRuntimeOpsPassPipeline(runtime_lowering,
module_name);
}
mlir::StatusScopedDiagnosticHandler diag_handler(
module.getContext(), false,
!VLOG_IS_ON(1));
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
"runtime_lowering_before"),
module, llvm::StringRef(), &runtime_lowering);
}
if (VLOG_IS_ON(2) || DEBUG_DATA_DUMPER()->ShouldDump(
module_name.str(), kDebugGroupRuntimeLowering)) {
EnablePassIRPrinting(runtime_lowering, kDebugGroupRuntimeLowering,
module_name);
}
LogicalResult result = runtime_lowering.run(module);
(void)result;
if (VLOG_IS_ON(1) ||
DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain)) {
::tensorflow::DumpMlirOpToFile(
DEBUG_DATA_DUMPER()->GetDumpFilename(module_name.str(), kDebugGroupMain,
"runtime_lowering_after"),
module, llvm::StringRef(), &runtime_lowering);
}
std::string bridge_type = xla_device_type == DeviceType(DEVICE_TPU_XLA_JIT)
? mlir::TF::kMlirPh1BridgeCounterReplicated
: mlir::TF::kMlirPh1BridgeCounterNonReplicated;
auto result_status = diag_handler.ConsumeStatus();
TF_RETURN_IF_ERROR(
RecordIfErrorStatus("lower_cluster_to_runtime",
bridge_type, xla_device_type, result_status));
return absl::OkStatus();
}
void RegisterTPULowerClusterToRuntimeOpsPassPipeline() {
static mlir::PassPipelineRegistration<StandardPipelineOptions> pipeline(
"tfrt-lower-cluster-to-runtime-ops-tpu",
"Run all the passes involved after the clustering transformations from "
"the TF2XLA Bridge. Takes as input a Module with tf_device.cluster ops "
"and outputs TFRT runtime ops such as TPUCompile. This pipeline is for "
"TPU.",
CreateTPULowerClusterToRuntimeOpsPassPipeline);
}
void RegisterNonTPULowerClusterToRuntimeOpsPassPipeline() {
static mlir::PassPipelineRegistration<StandardPipelineOptions> pipeline(
"tfrt-lower-cluster-to-runtime-ops-non-tpu",
"Run all the passes involved after the clustering transformations from "
"the TF2XLA Bridge. Takes as input a Module with tf_device.cluster ops "
"and outputs TFRT runtime ops such as XlaLaunch. This is for CPU/GPU",
CreateNonTPULowerClusterToRuntimeOpsPassPipeline);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Visitors.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/tsl/framework/device_type.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/tpu/tpu_defs.h"
#include "tensorflow/core/util/debug_data_dumper.h"
namespace tensorflow {
namespace tfrt_compiler {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
using mlir::func::FuncOp;
using ::tensorflow::monitoring::testing::CellReader;
using tsl::DeviceType;
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/");
}
static constexpr char kCompilationStreamz[] =
"/tensorflow/core/tf_mlir_bridge_first_phase_v2_count";
class LowerClusterToRuntimeOpsTest : public ::testing::Test {
public:
LowerClusterToRuntimeOpsTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
env_ = Env::Default();
test_group_name_ = "TestGroup";
test_dir_ = testing::TmpDir();
setenv("TF_DUMP_GRAPH_PREFIX", test_dir_.c_str(), 1);
}
absl::Status CreateMlirModule(std::string mlir_module_filename) {
std::string mlir_module_path = TestDataPath() + mlir_module_filename;
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
Env* env_;
std::string test_dir_;
std::string test_group_name_;
};
TEST_F(LowerClusterToRuntimeOpsTest, SanityCheck) {
TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsTPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsCPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_CPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, LowersClusterOpsGPU) {
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_GPU_XLA_JIT)));
FuncOp main = mlir_module_->lookupSymbol<FuncOp>("main");
ASSERT_TRUE(main);
bool has_cluster_op = false;
main.walk([&](mlir::tf_device::ClusterOp) {
has_cluster_op = true;
return mlir::WalkResult::interrupt();
});
EXPECT_FALSE(has_cluster_op);
}
TEST_F(LowerClusterToRuntimeOpsTest, ErrorsWithBadCluster) {
CellReader<int64_t> compilation_status(kCompilationStreamz);
TF_ASSERT_OK(CreateMlirModule("malformed_cluster.mlir"));
EXPECT_FALSE(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT))
.ok());
EXPECT_EQ(
compilation_status.Delta(mlir::TF::kMlirPh1BridgeCounterReplicated,
mlir::TF::kMlirPh1BridgeCounterV2, "XLA_TPU_JIT",
"fallback_disabled", "failure"),
1);
}
TEST_F(LowerClusterToRuntimeOpsTest, DumpsPipelinePasses) {
std::vector<std::string> files;
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::IsEmpty());
setenv("TF_DUMP_GRAPH_NAME_FILTER", "*", 1);
setenv("TF_DUMP_GRAPH_GROUPS", "main,runtime_lowering", 1);
DEBUG_DATA_DUMPER()->LoadEnvvars();
TF_ASSERT_OK(CreateMlirModule("basic_cluster.mlir"));
TF_EXPECT_OK(RunLowerClusterToRuntimeOpsPassPipeline(
*mlir_module_, DeviceType(DEVICE_TPU_XLA_JIT)));
TF_ASSERT_OK(env_->GetChildren(test_dir_, &files));
EXPECT_THAT(files, ::testing::SizeIs(15));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/lower_cluster_to_runtime_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
a5f1c7f7-4c8d-4a2d-b296-8e3453f03101 | cpp | tensorflow/tensorflow | tpu_metadata_utils | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc | tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils_test.cc | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <optional>
#include <string>
#include <utility>
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/FormatVariadic.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/attribute_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
namespace mlir {
namespace TFTPU {
namespace {
constexpr char kStepMarkerLocationAttr[] = "step_marker_location";
constexpr char kUseXlaSpmdAttr[] = "use_spmd_for_xla_partitioning";
constexpr char kBadStringArrayElementMsg[] =
"bad '{0}' attribute at index {1}, not a string";
constexpr char kBadArrayElementMsg[] =
"bad '{0}' attribute at index {1} with value '{2}': failed to parse to {3}";
constexpr char kBadArrayAttrLengthMsg[] =
"bad '{0}' attribute, expected array attribute of size {1}, got size {2}";
std::string CreateMissingAttributeMsg(llvm::StringRef attribute) {
return llvm::formatv("requires attribute '{0}'", attribute).str();
}
LogicalResult SetMetadataProtoStepMarkerLocation(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto step_marker_location =
op->getAttrOfType<StringAttr>(kStepMarkerLocationAttr);
if (!step_marker_location)
return op.emitOpError(CreateMissingAttributeMsg(kStepMarkerLocationAttr));
xla::DebugOptions::StepMarkerLocation location =
xla::DebugOptions::STEP_MARK_AT_ENTRY;
if (!step_marker_location.getValue().empty() &&
!xla::DebugOptions::StepMarkerLocation_Parse(
std::string(step_marker_location.getValue()), &location))
return op.emitOpError(llvm::formatv("bad '{0}' attribute with value '{1}'",
kStepMarkerLocationAttr,
step_marker_location.getValue()));
metadata->set_step_marker_location(location);
return success();
}
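// Parses a string sharding attribute into `sharding_ptr`, emitting an error on
// `op` if the attribute is not a string or cannot be decoded.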
LogicalResult SetOpSharding(Operation* op, Attribute attr, llvm::StringRef name,
int index, xla::OpSharding* sharding_ptr) {
auto sharding_attr = mlir::dyn_cast<StringAttr>(attr);
if (!sharding_attr)
return op->emitOpError(
llvm::formatv(kBadStringArrayElementMsg, name, index));
if (tensorflow::DecodeShardingAttribute(sharding_attr, *sharding_ptr)
.failed()) {
return op->emitOpError(llvm::formatv(kBadArrayElementMsg, name, index,
sharding_attr.getValue(),
"xla::OpSharding"));
}
return success();
}
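// Populates the metadata arg protos from the cluster_func operands: dtype,
// shape, kind (VARIABLE for resources, PARAMETER otherwise), sharding,
// cross-replica replication, and bounded dynamic dimension info.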
LogicalResult SetMetadataProtoArgs(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto input_shardings =
op->getAttrOfType<ArrayAttr>(tensorflow::kInputShardingAttr);
if (!input_shardings)
return op.emitOpError(
CreateMissingAttributeMsg(tensorflow::kInputShardingAttr));
if (input_shardings.size() != op.getNumOperands())
return op.emitOpError(
llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kInputShardingAttr,
op.getNumOperands(), input_shardings.size()));
mlir::StringAttr replication_attr_name = mlir::StringAttr::get(
op.getContext(), "mhlo.is_same_data_across_replicas");
auto dynamic_arg_idx = op->getAttrOfType<ArrayAttr>(TF::kDynamicArgIndexAttr);
llvm::SmallSet<int, 4> dynamic_arg_idx_set;
if (dynamic_arg_idx) {
for (auto idx : dynamic_arg_idx.getValue()) {
dynamic_arg_idx_set.insert(mlir::dyn_cast<IntegerAttr>(idx).getInt());
}
}
for (auto operand_type_and_idx : llvm::enumerate(op.getOperandTypes())) {
Type operand_type = operand_type_and_idx.value();
int index = operand_type_and_idx.index();
tensorflow::tpu::TPUCompileMetadataProto::Arg* arg = metadata->add_args();
tensorflow::DataType dtype;
tensorflow::Status status =
tensorflow::ConvertToDataType(operand_type, &dtype);
if (!status.ok())
return op.emitOpError(
llvm::formatv("failed to determine operand type at index {0}: {1}",
index, status.message()));
arg->set_dtype(dtype);
if (dtype == tensorflow::DT_RESOURCE)
arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::VARIABLE);
else
arg->set_kind(tensorflow::tpu::TPUCompileMetadataProto::Arg::PARAMETER);
*arg->mutable_shape() = tensorflow::TensorShapeProto();
if (auto ranked_tensor_type =
mlir::dyn_cast<RankedTensorType>(operand_type)) {
tensorflow::TensorShapeProto shape_proto;
ConvertToTensorShapeProto(ranked_tensor_type.getShape(), &shape_proto);
*arg->mutable_shape() = std::move(shape_proto);
} else {
arg->mutable_shape()->set_unknown_rank(true);
}
if (failed(SetOpSharding(op, input_shardings.getValue()[index],
tensorflow::kInputShardingAttr, index,
arg->mutable_sharding())))
return failure();
auto attr = op.getFuncOp().getArgAttrOfType<mlir::BoolAttr>(
index, replication_attr_name);
arg->set_is_same_data_across_replicas(attr != nullptr && attr.getValue());
arg->mutable_is_bounded_dynamic_dim()->Add(
dynamic_arg_idx_set.contains(index));
}
return success();
}
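// Populates the per-result shardings from the cluster_func output sharding
// attribute.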
LogicalResult SetMetadataProtoRetvals(
tf_device::ClusterFuncOp op,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
auto output_shardings =
op->getAttrOfType<ArrayAttr>(tensorflow::kOutputShardingAttr);
if (!output_shardings)
return op.emitOpError(
CreateMissingAttributeMsg(tensorflow::kOutputShardingAttr));
if (output_shardings.size() != op.getNumResults())
return op.emitOpError(
llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kOutputShardingAttr,
op.getNumResults(), output_shardings.size()));
for (auto output_sharding_and_idx : llvm::enumerate(output_shardings))
if (failed(SetOpSharding(op, output_sharding_and_idx.value(),
tensorflow::kOutputShardingAttr,
output_sharding_and_idx.index(),
metadata->add_retvals()->mutable_sharding())))
return failure();
return success();
}
}
LogicalResult SetMetadataProtoFromClusterFuncOp(
tf_device::ClusterFuncOp op, int num_replicas, int num_cores_per_replica,
std::optional<xla::DeviceAssignmentProto>&& xla_device_assignment,
tensorflow::tpu::TPUCompileMetadataProto* metadata) {
if (auto options_attr =
op->getAttrOfType<StringAttr>("tpu_compile_options_proto")) {
if (!metadata->mutable_compile_options()->ParseFromArray(
options_attr.data(), options_attr.size())) {
return failure();
}
}
metadata->set_num_replicas(num_replicas);
metadata->set_num_cores_per_replica(num_cores_per_replica);
if (failed(SetMetadataProtoStepMarkerLocation(op, metadata)))
return failure();
if (xla_device_assignment.has_value())
*metadata->mutable_device_assignment() =
std::move(xla_device_assignment.value());
auto use_spmd_attr = op->getAttrOfType<BoolAttr>(kUseXlaSpmdAttr);
if (!use_spmd_attr)
return op.emitOpError(CreateMissingAttributeMsg(kUseXlaSpmdAttr));
metadata->set_use_spmd_for_xla_partitioning(use_spmd_attr.getValue());
if (failed(SetMetadataProtoArgs(op, metadata))) return failure();
return SetMetadataProtoRetvals(op, metadata);
}
}
} | #include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.h"
#include <ostream>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/tpu/compile_metadata.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace mlir {
namespace TFTPU {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
class ProtoStringMatcher {
public:
explicit ProtoStringMatcher(const tsl::protobuf::Message& expected)
: expected_(expected.SerializeAsString()) {}
template <typename Message>
bool MatchAndExplain(const Message& p, testing::MatchResultListener*) const {
return p.SerializeAsString() == expected_;
}
void DescribeTo(::std::ostream* os) const { *os << expected_; }
void DescribeNegationTo(::std::ostream* os) const {
*os << "not equal to expected message: " << expected_;
}
private:
const std::string expected_;
};
inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto(
const tsl::protobuf::Message& x) {
return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x));
}
std::string TestDataPath() {
return tensorflow::GetDataDependencyFilepath(
"tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/testdata/");
}
class TpuMetadataUtilsTest : public ::testing::Test {
public:
TpuMetadataUtilsTest() {
mlir::RegisterCommonToolingDialects(registry_);
context_.appendDialectRegistry(registry_);
context_.loadAllAvailableDialects();
}
absl::StatusOr<std::vector<mlir::tf_device::ClusterFuncOp>> GetClusterFuncOps(
absl::string_view mlir_module_filename) {
TF_RETURN_IF_ERROR(CreateMlirModule(mlir_module_filename));
std::vector<mlir::tf_device::ClusterFuncOp> cluster_func_ops;
mlir_module_->walk([&](mlir::tf_device::ClusterFuncOp op) {
cluster_func_ops.push_back(op);
});
return cluster_func_ops;
}
private:
absl::Status CreateMlirModule(absl::string_view mlir_module_filename) {
std::string mlir_module_path =
absl::StrCat(TestDataPath(), mlir_module_filename);
mlir_module_ =
mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
if (!mlir_module_) {
return absl::Status(
absl::StatusCode::kNotFound,
absl::StrCat("Could not find MLIR module at ", mlir_module_path));
}
return absl::OkStatus();
}
DialectRegistry registry_;
MLIRContext context_;
OwningOpRef<mlir::ModuleOp> mlir_module_;
};
TEST_F(TpuMetadataUtilsTest, SingleDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
GetClusterFuncOps("basic_cluster.mlir"));
mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
cluster_func_op,
1, 1, {}, &compile_metadata)));
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
num_replicas: 1 num_cores_per_replica: 1
)pb",
&expected_compile_metadata));
EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
TEST_F(TpuMetadataUtilsTest, spmd) {
TF_ASSERT_OK_AND_ASSIGN(auto cluster_func_ops,
GetClusterFuncOps("spmd.mlir"));
mlir::tf_device::ClusterFuncOp cluster_func_op = cluster_func_ops.front();
tensorflow::tpu::TPUCompileMetadataProto compile_metadata;
ASSERT_TRUE(mlir::succeeded(SetMetadataProtoFromClusterFuncOp(
cluster_func_op,
1, 2, {}, &compile_metadata)));
tensorflow::tpu::TPUCompileMetadataProto expected_compile_metadata;
ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString(
R"pb(
args {
dtype: DT_FLOAT
shape { unknown_rank: true }
kind: PARAMETER
sharding {
type: OTHER
tile_assignment_dimensions: 2
tile_assignment_dimensions: 1
tile_assignment_devices: 0
tile_assignment_devices: 1
}
is_bounded_dynamic_dim: false
}
retvals { sharding {} }
num_replicas: 1
num_cores_per_replica: 2
use_spmd_for_xla_partitioning: true
compile_options {}
)pb",
&expected_compile_metadata));
EXPECT_THAT(compile_metadata, EqualsProto(expected_compile_metadata));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b145ff1e-2b2e-43d8-9b7d-e505ea1f4c60 | cpp | tensorflow/tensorflow | xla_platform_info | tensorflow/compiler/jit/xla_platform_info.cc | tensorflow/compiler/jit/xla_platform_info_test.cc | #include "tensorflow/compiler/jit/xla_platform_info.h"
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/device_executable_persistor.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_device_compiler_client.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/compiler.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
XlaDeviceCompiler* CreateXlaDeviceCompiler(
const XlaDeviceExecutablePersistor::Config& persistor_config,
DeviceType compilation_device_type, xla::LocalClient* local_client) {
return new XlaDeviceCompiler(
std::make_unique<XlaDeviceExecutablePersistor>(
std::move(persistor_config), compilation_device_type),
std::make_unique<XlaDeviceCompilerClient>(local_client));
}
PjRtDeviceCompiler* CreatePjRtDeviceCompiler(DeviceType compilation_device_type,
xla::PjRtClient* pjrt_client) {
std::string persistent_cache_directory =
GetPersistentCacheDirectory(compilation_device_type);
PjRtDeviceExecutablePersistor::Config persistor_config(
persistent_cache_directory,
GetMarkForCompilationPassFlags()->tf_xla_disable_strict_signature_checks,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_prefix,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_read_only);
return new PjRtDeviceCompiler(
std::make_unique<PjRtDeviceExecutablePersistor>(
std::move(persistor_config), compilation_device_type),
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client));
}
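// Returns the set of GPU ids listed in the session's visible_device_list, or
// std::nullopt when no restriction is configured.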
absl::StatusOr<std::optional<std::set<int>>> GetAllowedGpus(
FunctionLibraryRuntime* flr) {
std::optional<std::set<int>> gpu_ids = std::nullopt;
if (flr->config_proto()) {
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(gpu_ids, ParseVisibleDeviceList(allowed_gpus));
}
return gpu_ids;
}
Status GetCompilationDeviceTypeAndPjRtClient(
const XlaPlatformInfo& platform_info, FunctionLibraryRuntime* flr,
DeviceType* compilation_device_type, xla::PjRtClient** pjrt_client) {
DeviceType device_type = platform_info.device_type();
if (platform_info.xla_device_metadata()) {
VLOG(2) << "Building PjRtDeviceCompiler using "
"platform_info.xla_device_metadata().";
*compilation_device_type =
platform_info.xla_device_metadata()->jit_device_type();
TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type));
return absl::OkStatus();
}
if (platform_info.pjrt_device_metadata()) {
VLOG(2) << "Building PjRtDeviceCompiler using "
"platform_info.pjrt_device_metadata().";
*compilation_device_type =
platform_info.pjrt_device_metadata()->jit_device_type();
TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type));
return absl::OkStatus();
}
if (device_type == DEVICE_TPU) {
*compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type));
return absl::OkStatus();
}
VLOG(2) << "platform_info.xla_device_metadata not found and "
"platform_info.device_type() != DEVICE_TPU. Building "
"PjRtDeviceCompiler for non-XLA device.";
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(device_type.type(), ®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
device_type.type());
}
*compilation_device_type = DeviceType(registration->compilation_device_name);
TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr));
TF_ASSIGN_OR_RETURN(*pjrt_client,
GetOrCreatePjRtClient(device_type, allowed_gpus));
return absl::OkStatus();
}
}
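// Returns the directory for the persistent XLA compilation cache, or an empty
// string if `compilation_device_type` is not in the
// tf_xla_persistent_cache_device_types allowlist.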
std::string GetPersistentCacheDirectory(
const DeviceType& compilation_device_type) {
if (!GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types.empty() &&
!absl::c_any_of(absl::StrSplit(GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types,
','),
[&](absl::string_view device) {
return compilation_device_type == DeviceType(device);
})) {
return "";
}
return GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_directory;
}
absl::StatusOr<std::optional<std::set<int>>> ParseVisibleDeviceList(
absl::string_view visible_device_list) {
std::set<int> gpu_ids;
if (visible_device_list.empty()) {
return {{std::nullopt}};
}
const std::vector<string> visible_devices =
absl::StrSplit(visible_device_list, ',');
for (const string& platform_device_id_str : visible_devices) {
int32_t platform_device_id;
if (!absl::SimpleAtoi(platform_device_id_str, &platform_device_id)) {
return errors::InvalidArgument(
"Could not parse entry in 'visible_device_list': '",
platform_device_id_str,
"'. visible_device_list = ", visible_device_list);
}
gpu_ids.insert(platform_device_id);
}
return {{gpu_ids}};
}
absl::StatusOr<DeviceType> GetCompilationDeviceType(
const DeviceType& platform_device_type) {
DeviceType compilation_device_type = platform_device_type;
const XlaOpRegistry::DeviceRegistration* registration = nullptr;
if (!XlaOpRegistry::GetCompilationDevice(platform_device_type.type(),
®istration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_device_type.type());
}
compilation_device_type = DeviceType(registration->compilation_device_name);
return compilation_device_type;
}
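// Builds an XlaDeviceCompiler for `device`. Uses the client from the XLA
// device metadata when present; for DEVICE_TPU no local client is needed;
// otherwise a LocalClient is created for the device's platform.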
Status BuildXlaDeviceCompiler(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
DeviceType compilation_device_type,
XlaDeviceCompiler** xla_device_compiler) {
if (platform_info.platform_id() == nullptr &&
platform_info.device_type() == DEVICE_GPU) {
*xla_device_compiler = new XlaDeviceCompiler(nullptr,
nullptr);
return absl::OkStatus();
}
std::string persistent_cache_directory =
GetPersistentCacheDirectory(platform_info.device_type());
XlaDeviceExecutablePersistor::Config persistor_config(
persistent_cache_directory,
GetMarkForCompilationPassFlags()->tf_xla_disable_strict_signature_checks,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_prefix,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_read_only);
if (platform_info.xla_device_metadata()) {
*xla_device_compiler = CreateXlaDeviceCompiler(
persistor_config,
platform_info.xla_device_metadata()->jit_device_type(),
platform_info.xla_device_metadata()->client());
return absl::OkStatus();
}
if (platform_info.device_type() == DEVICE_TPU) {
*xla_device_compiler = CreateXlaDeviceCompiler(
persistor_config, DeviceType(DEVICE_TPU_XLA_JIT), nullptr);
return absl::OkStatus();
}
if (platform_info.platform_id() == nullptr) {
return errors::InvalidArgument("platform_id is null.");
}
auto platform =
se::PlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
absl::StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.value());
if (!compiler_for_platform.ok()) {
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.value()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.value());
if (device != nullptr) {
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
}
if (flr != nullptr) {
TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr));
client_options.set_allowed_devices(allowed_gpus);
}
TF_ASSIGN_OR_RETURN(
auto client, xla::ClientLibrary::GetOrCreateLocalClient(client_options));
*xla_device_compiler = CreateXlaDeviceCompiler(
persistor_config, compilation_device_type, client);
return absl::OkStatus();
}
Status GetOrCreatePjRtDeviceCompilerAndProfiler(
const XlaPlatformInfo& platform_info, ResourceMgr* rm,
FunctionLibraryRuntime* flr, PjRtDeviceCompiler** pjrt_device_compiler,
DeviceCompilationProfiler** profiler) {
const auto& device_type = platform_info.device_type();
const std::string& compiler_name =
GetPjRtDeviceCompilerResourceName(device_type);
const std::string& profiler_name =
GetPjRtDeviceCompilationProfilerResourceName(device_type);
bool deleted_old_device_compiler = false;
Status s = rm->Lookup<PjRtDeviceCompiler>(
rm->default_container(), compiler_name, pjrt_device_compiler);
if (s.ok() && device_type == DEVICE_TPU) {
auto* existing_pjrt_client = (*pjrt_device_compiler)->client();
TF_ASSIGN_OR_RETURN(auto* latest_pjrt_client, GetPjRtClient(device_type));
if (existing_pjrt_client != latest_pjrt_client) {
TF_RETURN_IF_ERROR(rm->Delete<PjRtDeviceCompiler>(rm->default_container(),
compiler_name));
TF_RETURN_IF_ERROR(rm->Delete<DeviceCompilationProfiler>(
rm->default_container(), profiler_name));
deleted_old_device_compiler = true;
}
}
if (!s.ok() || deleted_old_device_compiler) {
DeviceType compilation_device_type("");
xla::PjRtClient* pjrt_client = nullptr;
TF_RETURN_IF_ERROR(GetCompilationDeviceTypeAndPjRtClient(
platform_info, flr, &compilation_device_type, &pjrt_client));
TF_RETURN_IF_ERROR(rm->LookupOrCreate<PjRtDeviceCompiler>(
rm->default_container(), compiler_name, pjrt_device_compiler,
[&](PjRtDeviceCompiler** pjrt_device_compiler) {
*pjrt_device_compiler =
CreatePjRtDeviceCompiler(compilation_device_type, pjrt_client);
return absl::OkStatus();
}));
}
TF_RETURN_IF_ERROR(rm->LookupOrCreate<DeviceCompilationProfiler>(
rm->default_container(), profiler_name, profiler,
[](DeviceCompilationProfiler** profiler) {
*profiler = new DeviceCompilationProfiler();
return absl::OkStatus();
}));
return absl::OkStatus();
}
Status GetOrCreatePjRtDeviceCompilerAndProfiler(
const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
FunctionLibraryRuntime* flr,
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>**
pjrt_device_compiler,
DeviceCompilationProfiler** profiler) {
TF_ASSIGN_OR_RETURN(ResourceMgr * rm, GetResourceMgrForDeviceCompiler(
ctx, platform_info.device_type()));
return GetOrCreatePjRtDeviceCompilerAndProfiler(
platform_info, rm, flr, pjrt_device_compiler, profiler);
}
XlaPlatformInfo XlaPlatformInfoFromDevice(DeviceBase* device_base) {
se::Platform::Id platform_id = nullptr;
const XlaDevice::Metadata* xla_device_metadata = nullptr;
const PjRtBaseDevice::Metadata* pjrt_device_metadata = nullptr;
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
const std::string& device_type = device_base->device_type();
if (device_type == DEVICE_CPU) {
platform_id = se::host::kHostPlatformId;
} else if (device_type == DEVICE_GPU) {
auto device = static_cast<Device*>(device_base);
platform_id = device->tensorflow_accelerator_device_info()
->stream->parent()
->GetPlatform()
->id();
} else if (XlaDevice::GetMetadataFromDevice(device_base, &xla_device_metadata)
.ok()) {
platform_id = xla_device_metadata->platform()->id();
custom_allocator =
xla_device_metadata->client()->backend().shared_memory_allocator();
} else if (auto metadata = PjRtBaseDevice::GetMetadataFromDevice(device_base);
metadata.ok()) {
pjrt_device_metadata = *metadata;
}
return XlaPlatformInfo(DeviceType(device_type), platform_id,
xla_device_metadata, pjrt_device_metadata,
custom_allocator);
}
std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator(
DeviceBase* device, se::Stream* stream,
const XlaPlatformInfo& platform_info) {
if (platform_info.custom_allocator()) {
return platform_info.custom_allocator();
}
auto* alloc = device->GetAllocator({});
if (!stream) {
se::Platform* platform =
se::PlatformManager::PlatformWithId(platform_info.platform_id())
.value();
return std::make_shared<se::TfAllocatorAdapter>(alloc, platform);
}
return std::make_shared<se::TfAllocatorAdapter>(alloc, stream);
}
} | #include "tensorflow/compiler/jit/xla_platform_info.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "xla/pjrt/tfrt_cpu_pjrt_client.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
class XlaPlatformInfoTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = "";
}
DeviceSetup device_setup_;
};
class StubDevice : public DeviceBase {
public:
StubDevice() : DeviceBase(nullptr) {}
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceMetadata) {
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
const XlaDevice::Metadata* metadata = nullptr;
TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata));
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
TF_ASSERT_OK_AND_ASSIGN(
DeviceType compilation_device_type,
GetCompilationDeviceType(platform_info.device_type()));
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
platform_info, compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), metadata->jit_device_type());
EXPECT_EQ(xla_device_compiler->client(), metadata->client());
}
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceCacheEnabled) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_XLA_GPU;
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
const XlaDevice::Metadata* metadata = nullptr;
TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata));
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
TF_ASSERT_OK_AND_ASSIGN(
DeviceType compilation_device_type,
GetCompilationDeviceType(platform_info.device_type()));
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
platform_info, compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), metadata->jit_device_type());
EXPECT_EQ(xla_device_compiler->client(), metadata->client());
EXPECT_EQ(xla_device_compiler->persistor()->persistent_cache_directory(),
"/tmp/xla_cache");
}
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerNonXlaDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_GPU});
Device* device = device_setup_.GetDevice(DEVICE_GPU);
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
TF_ASSERT_OK_AND_ASSIGN(
DeviceType compilation_device_type,
GetCompilationDeviceType(platform_info.device_type()));
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
platform_info, compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), DeviceType(DEVICE_GPU_XLA_JIT));
EXPECT_TRUE(xla_device_compiler->client() != nullptr);
}
TEST_F(XlaPlatformInfoTest, GetOrCreatePjRtDeviceCompilerAndProfilerXlaDevice) {
DeviceType device_type = DeviceType(DEVICE_XLA_GPU);
device_setup_.AddDevicesAndSetUp({device_type.type()});
Device* device = device_setup_.GetDevice(device_type.type());
const XlaDevice::Metadata* metadata = nullptr;
TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata));
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
params.device = device;
  OpKernelContext ctx(&params, 0);
PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
DeviceCompilationProfiler* profiler = nullptr;
TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler(
ctx, platform_info, device_setup_.flr(), &pjrt_device_compiler,
&profiler));
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetOrCreatePjRtClient(device_type));
EXPECT_EQ(pjrt_device_compiler->device_type(), metadata->jit_device_type());
EXPECT_EQ(pjrt_device_compiler->client(), pjrt_client);
}
TEST_F(XlaPlatformInfoTest,
GetOrCreatePjRtDeviceCompilerAndProfilerGpuDeviceCacheEnabled) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_GPU_XLA_JIT;
device_setup_.AddDevicesAndSetUp({DEVICE_GPU});
Device* device = device_setup_.GetDevice(DEVICE_GPU);
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
params.device = device;
  OpKernelContext ctx(&params, 0);
PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
DeviceCompilationProfiler* profiler = nullptr;
TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler(
ctx, platform_info, device_setup_.flr(), &pjrt_device_compiler,
&profiler));
EXPECT_EQ(pjrt_device_compiler->persistor()->persistent_cache_directory(),
"/tmp/xla_cache");
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
}
#endif
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerTpuDevice) {
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
Device* device = nullptr;
  XlaPlatformInfo platform_info(DeviceType(DEVICE_TPU), /*platform_id=*/nullptr,
                                /*xla_device_metadata=*/nullptr,
                                /*pjrt_device_metadata=*/nullptr,
                                /*device_allocator=*/nullptr);
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info,
compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), compilation_device_type);
EXPECT_EQ(xla_device_compiler->client(), nullptr);
}
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerNoCompilationCache) {
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_XLA_GPU;
Device* device = nullptr;
  XlaPlatformInfo platform_info(DeviceType(DEVICE_TPU), /*platform_id=*/nullptr,
                                /*xla_device_metadata=*/nullptr,
                                /*pjrt_device_metadata=*/nullptr,
                                /*device_allocator=*/nullptr);
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info,
compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), compilation_device_type);
EXPECT_TRUE(
xla_device_compiler->persistor()->persistent_cache_directory().empty());
}
TEST_F(XlaPlatformInfoTest,
GetOrCreatePjRtDeviceCompilerAndProfilerTpuDeviceNoCompilationCache) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_GPU_XLA_JIT;
DeviceType device_type = DeviceType(DEVICE_TPU);
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
TF_CHECK_OK(SetPjRtClientInTFGlobalResourceManager(
device_type,
      xla::GetTfrtCpuClient(/*asynchronous=*/true, /*cpu_device_count=*/1)
.value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetOrCreatePjRtClient(device_type));
  XlaPlatformInfo platform_info(device_type, /*platform_id=*/nullptr,
                                /*xla_device_metadata=*/nullptr,
                                /*pjrt_device_metadata=*/nullptr,
                                /*device_allocator=*/nullptr);
OpKernelContext::Params params;
StubDevice stub_device;
params.device = &stub_device;
  OpKernelContext ctx(&params, 0);
PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
DeviceCompilationProfiler* profiler = nullptr;
TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler(
ctx, platform_info, nullptr, &pjrt_device_compiler, &profiler));
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
EXPECT_EQ(pjrt_device_compiler->device_type(), compilation_device_type);
EXPECT_EQ(pjrt_device_compiler->client(), pjrt_client);
EXPECT_TRUE(
pjrt_device_compiler->persistor()->persistent_cache_directory().empty());
}
TEST_F(XlaPlatformInfoTest, GetPersistentCacheDirectoryMultiple) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = "GPU,CPU";
DeviceType device_gpu = DeviceType(DEVICE_GPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_gpu), "/tmp/xla_cache");
DeviceType device_cpu = DeviceType(DEVICE_CPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_cpu), "/tmp/xla_cache");
DeviceType device_tpu = DeviceType(DEVICE_TPU);
EXPECT_TRUE(GetPersistentCacheDirectory(device_tpu).empty());
}
TEST_F(XlaPlatformInfoTest, GetPersistentCacheDirectoryNoDeviceTypes) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = "";
DeviceType device_gpu = DeviceType(DEVICE_GPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_gpu), "/tmp/xla_cache");
DeviceType device_cpu = DeviceType(DEVICE_CPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_cpu), "/tmp/xla_cache");
DeviceType device_tpu = DeviceType(DEVICE_TPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_tpu), "/tmp/xla_cache");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_platform_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_platform_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6500c3f4-05c8-4059-a2da-7525d9323fa0 | cpp | tensorflow/tensorflow | clone_constants_for_better_clustering | tensorflow/compiler/jit/clone_constants_for_better_clustering.cc | tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc | #include "tensorflow/compiler/jit/clone_constants_for_better_clustering.h"
#include <string>
#include "absl/algorithm/container.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
using tsl::StatusOr;
class CloneConstantsForBetterClusteringPassImpl {
public:
explicit CloneConstantsForBetterClusteringPassImpl(Graph* graph)
: graph_(graph), unique_name_counter_(0) {}
Status Run();
private:
Status CloneSmallConstantInputs(const absl::flat_hash_set<string>& name_set,
Node* n);
string GenerateUniqueName(const absl::flat_hash_set<string>& name_set,
absl::string_view prefix);
absl::StatusOr<Node*> CloneNode(const absl::flat_hash_set<string>& name_set,
Node* n);
Graph* graph_;
int unique_name_counter_;
};
string CloneConstantsForBetterClusteringPassImpl::GenerateUniqueName(
const absl::flat_hash_set<string>& name_set, absl::string_view prefix) {
string candidate;
do {
candidate = absl::StrCat(prefix, "/clone_", unique_name_counter_++);
} while (name_set.contains(candidate));
return candidate;
}
absl::StatusOr<Node*> CloneConstantsForBetterClusteringPassImpl::CloneNode(
const absl::flat_hash_set<string>& name_set, Node* n) {
NodeDef new_in_def = n->def();
new_in_def.clear_input();
new_in_def.set_name(GenerateUniqueName(name_set, new_in_def.name()));
TF_ASSIGN_OR_RETURN(Node * new_in, graph_->AddNode(new_in_def));
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
graph_->AddControlEdge(e->src(), new_in);
} else {
graph_->AddEdge(e->src(), e->src_output(), new_in, e->dst_input());
}
}
new_in->set_assigned_device_name(n->assigned_device_name());
return new_in;
}
namespace {
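// A constant is considered "small" if its total element count is below
// kSmallTensorThreshold; an unknown (negative) dimension size is an error.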
absl::StatusOr<bool> IsConstantSmall(Node* n) {
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "value", &proto));
int64_t total_elements = 1;
for (const auto& dim : proto->tensor_shape().dim()) {
if (dim.size() < 0) {
return errors::Internal("Unknown dimension size in constant tensor ",
n->name());
}
total_elements *= dim.size();
}
const int kSmallTensorThreshold = 16;
return total_elements < kSmallTensorThreshold;
}
absl::StatusOr<bool> IsSmallConstant(Node* n) {
if (!n->IsConstant()) {
return false;
}
return IsConstantSmall(n);
}
bool IsInPlaceOp(absl::string_view op_name) {
return op_name == "InplaceUpdate" || op_name == "InplaceAdd" ||
op_name == "InplaceSub";
}
}
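// Clones each small-constant input of `n` that has more than one outgoing
// edge, so that `n` gets its own copy of the constant; the clone inherits the
// original's incoming edges and assigned device.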
Status CloneConstantsForBetterClusteringPassImpl::CloneSmallConstantInputs(
const absl::flat_hash_set<string>& name_set, Node* n) {
std::vector<const Edge*> in_edges;
absl::c_copy(n->in_edges(), std::back_inserter(in_edges));
absl::c_stable_sort(in_edges, [](const Edge* e1, const Edge* e2) {
return e1->id() < e2->id();
});
for (const Edge* e : in_edges) {
Node* input = e->src();
TF_ASSIGN_OR_RETURN(bool is_small_constant, IsSmallConstant(input));
if (is_small_constant && input->out_edges().size() != 1) {
VLOG(2) << "Cloning small constant " << input->name();
TF_ASSIGN_OR_RETURN(Node* const input_cloned, CloneNode(name_set, input));
if (e->IsControlEdge()) {
graph_->AddControlEdge(input_cloned, e->dst());
} else {
int dst_input = e->dst_input();
TF_RET_CHECK(e->src_output() == 0)
<< "expected constant to have exactly one non-control output, but "
"found output index = "
<< e->src_output();
graph_->RemoveEdge(e);
graph_->AddEdge(input_cloned, 0, n, dst_input);
}
}
}
return absl::OkStatus();
}
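// Runs the rewrite over the whole graph. If any in-place op is present the
// graph is left unchanged; otherwise the small constant inputs of every node
// are cloned.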
Status CloneConstantsForBetterClusteringPassImpl::Run() {
absl::flat_hash_set<string> name_set;
absl::c_transform(graph_->nodes(), std::inserter(name_set, name_set.begin()),
[](Node* n) { return n->name(); });
std::vector<Node*> nodes;
for (Node* n : graph_->nodes()) {
if (IsInPlaceOp(n->type_string())) {
return absl::OkStatus();
}
nodes.push_back(n);
}
for (Node* n : nodes) {
TF_RETURN_IF_ERROR(CloneSmallConstantInputs(name_set, n));
}
return absl::OkStatus();
}
Status CloneConstantsForBetterClusteringPass::Run(
const GraphOptimizationPassOptions& options) {
if (GetGlobalJitLevelForGraph(options) == OptimizerOptions::OFF) {
return absl::OkStatus();
}
Graph* g = options.graph->get();
if (VLOG_IS_ON(1)) {
DumpGraphToFile("before_clone_constants_for_better_clustering", *g);
}
TF_RETURN_IF_ERROR(CloneConstantsForBetterClusteringPassImpl{g}.Run());
if (VLOG_IS_ON(1)) {
DumpGraphToFile("after_clone_constants_for_better_clustering", *g);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/clone_constants_for_better_clustering.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/node_matchers.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
using ::tensorflow::testing::FindNodeByName;
Status CloneConstantsForBetterClustering(const Scope& s,
std::unique_ptr<Graph>* result) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
SessionOptions session_options;
session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
GraphOptimizationPassOptions options;
options.graph = &graph;
options.session_options = &session_options;
GraphConstructorOptions opts;
opts.expect_device_spec = true;
TF_RETURN_IF_ERROR(s.ToGraph(graph.get(), opts));
CloneConstantsForBetterClusteringPass rewriter;
TF_RETURN_IF_ERROR(rewriter.Run(options));
*result = std::move(graph);
return absl::OkStatus();
}
const char* kCPU = "/job:localhost/replica:0/task:0/device:CPU:0";
const char* kGPU = "/job:localhost/replica:0/task:0/device:GPU:0";
TEST(CloneConstantsForBetterClusteringTest, ScalarConstantPlacedOnGpu) {
Scope root = Scope::NewRootScope().ExitOnError();
Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
Output in = ops::Placeholder(on_gpu.WithOpName("in"), DT_FLOAT);
Output c = ops::Const(on_gpu.WithOpName("const"), 1.0f, {});
Output add1 = ops::AddV2(on_gpu.WithOpName("add1"), in, c);
Output add2 = ops::AddV2(on_gpu.WithOpName("add2"), add1, c);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
OutputTensor add1_operand;
TF_ASSERT_OK(
FindNodeByName(result.get(), "add1")->input_tensor(1, &add1_operand));
OutputTensor add2_operand;
TF_ASSERT_OK(
FindNodeByName(result.get(), "add2")->input_tensor(1, &add2_operand));
EXPECT_NE(add1_operand.node, add2_operand.node);
}
TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnCpu) {
Scope root = Scope::NewRootScope().ExitOnError();
Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU);
Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT);
Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
Output perm = ops::Const(on_cpu.WithOpName("perm"), {3, 1, 2, 0});
{
Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm);
Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm);
}
std::unique_ptr<Graph> result;
TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
OutputTensor tr0_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm));
OutputTensor tr1_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm));
EXPECT_NE(tr0_perm.node, tr1_perm.node);
}
TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnGpu) {
Scope root = Scope::NewRootScope().ExitOnError();
Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT);
Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
Output perm = ops::Const(on_gpu.WithOpName("perm"), {3, 1, 2, 0});
{
Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm);
Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm);
}
std::unique_ptr<Graph> result;
TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
OutputTensor tr0_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm));
OutputTensor tr1_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm));
EXPECT_NE(tr0_perm.node, tr1_perm.node);
}
TEST(CloneConstantsForBetterClusteringTest, CloneSmallDeviceConstants) {
Scope root = Scope::NewRootScope().ExitOnError();
Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT);
Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
Output perm_f32 = ops::Const(on_gpu.WithOpName("perm"), {3.0, 1.0, 2.0, 0.0});
Output perm_int0 =
ops::Cast(on_gpu.WithOpName("perm_cast_0"), perm_f32, DT_INT32);
Output perm_int1 =
ops::Cast(on_gpu.WithOpName("perm_cast_1"), perm_f32, DT_INT32);
{
Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm_int0);
Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm_int1);
}
std::unique_ptr<Graph> result;
TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
OutputTensor tr0_perm;
TF_ASSERT_OK(
FindNodeByName(result.get(), "perm_cast_0")->input_tensor(0, &tr0_perm));
OutputTensor tr1_perm;
TF_ASSERT_OK(
FindNodeByName(result.get(), "perm_cast_1")->input_tensor(0, &tr1_perm));
EXPECT_NE(tr0_perm.node, tr1_perm.node);
}
TEST(CloneConstantsForBetterClusteringTest, DontCloneLargeConstants) {
Scope root = Scope::NewRootScope().ExitOnError();
Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU);
Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT);
Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
Output perm = ops::Const(
on_cpu.WithOpName("perm"),
{17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0});
{
Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm);
Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm);
}
std::unique_ptr<Graph> result;
TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
OutputTensor tr0_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm));
OutputTensor tr1_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm));
EXPECT_EQ(tr0_perm.node, tr1_perm.node);
}
TEST(CloneConstantsForBetterClusteringTest, InplaceOps) {
Scope root = Scope::NewRootScope().ExitOnError();
Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU);
Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU);
Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT);
Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
Output perm = ops::Const(on_cpu.WithOpName("perm"), {3, 1, 2, 0});
{
Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm);
Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm);
}
Output in_place_add =
ops::InplaceAdd(on_cpu.WithOpName("tr0"), perm,
ops::Placeholder(on_cpu.WithOpName("i"), DT_INT32), perm);
std::unique_ptr<Graph> result;
TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
OutputTensor tr0_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm));
OutputTensor tr1_perm;
TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm));
EXPECT_EQ(tr0_perm.node, tr1_perm.node);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/clone_constants_for_better_clustering.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9d124380-5a97-495c-86d2-35ce131e46d1 | cpp | tensorflow/tensorflow | xla_compile_util | tensorflow/compiler/jit/xla_compile_util.cc | tensorflow/compiler/jit/xla_compile_util_test.cc | #include "tensorflow/compiler/jit/xla_compile_util.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
namespace {
constexpr const char* kPjRtDeviceCompilerResourceName = "pjrt_device_compiler";
constexpr const char* kPjRtDeviceCompilationProfilerResourceName =
"pjrt_device_compilation_profiler";
}
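// Builds a graph containing a single instance of `node_def`, with one _Arg
// node wired to each input and one _Retval node per entry in `result_types`.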
absl::StatusOr<std::unique_ptr<Graph>> CreateSingleOpGraph(
const NodeDef& node_def, absl::Span<const XlaArgument> args,
absl::Span<const DataType> result_types) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSIGN_OR_RETURN(Node * main_node, graph->AddNode(node_def));
for (int64_t i = 0, end = args.size(); i < end; ++i) {
Node* node;
string arg_name = absl::StrCat("_arg", i);
Status status =
NodeBuilder(arg_name, FunctionLibraryDefinition::kArgOp)
.ControlInput(graph->source_node())
.Attr("T", args[i].kind == XlaArgument::kResource ? DT_RESOURCE
: args[i].type)
.Attr("index", i)
.Finalize(graph.get(), &node);
TF_RETURN_IF_ERROR(status);
graph->AddEdge(node, 0, main_node, i);
}
for (int64_t i = 0, end = result_types.size(); i < end; ++i) {
Node* node;
string retval_name = absl::StrCat("_retval", i);
Status status = NodeBuilder(retval_name, FunctionLibraryDefinition::kRetOp)
.Input(main_node, i)
.Attr("T", result_types[i])
.Attr("index", i)
.Finalize(graph.get(), &node);
TF_RETURN_IF_ERROR(status);
}
FixupSourceAndSinkEdges(graph.get());
return graph;
}
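// PjRt is used for single-device compilation when the device API rollout is
// enabled for this device type in any of XlaLaunch, XlaCompileOnDemand or
// XlaCompileAndRun.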
bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type) {
const auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
return rollout_config.IsEnabledInXlaLaunchForDevice(device_type) ||
rollout_config.IsEnabledInXlaCompileOnDemandForDevice(device_type) ||
rollout_config.IsEnabledInXlaCompileAndRunForDevice(device_type);
}
std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type) {
return absl::StrCat(kPjRtDeviceCompilerResourceName, "_",
device_type.type_string());
}
std::string GetPjRtDeviceCompilationProfilerResourceName(
const DeviceType& device_type) {
return absl::StrCat(kPjRtDeviceCompilationProfilerResourceName, "_",
device_type.type_string());
}
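// TPU compilation state lives in the process-wide TFRT resource manager; all
// other device types use the resource manager attached to the op context.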
absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler(
const OpKernelContext& ctx, const DeviceType& device_type) {
ResourceMgr* rm = nullptr;
if (device_type == DEVICE_TPU) {
rm = tfrt_global::GetTFGlobalResourceMgr();
} else {
rm = ctx.resource_manager();
}
if (!rm) {
return absl::InternalError("No resource manager found.");
}
return rm;
}
} | #include "tensorflow/compiler/jit/xla_compile_util.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
TEST_F(OpsTestBase, CreateSingleOpGraph) {
TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity")
.Input(FakeInput(DT_FLOAT))
.Attr("T", DT_FLOAT)
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<float>(TensorShape({1, 2}), {6.9, 4.2});
TF_EXPECT_OK(RunOpKernel());
XlaCompiler::SingleOpCompileArgument single_op_arg(*context_);
std::vector<XlaArgument> args(1);
args[0].kind = XlaArgument::kConstant;
args[0].type = DT_FLOAT;
args[0].shape = TensorShape({1, 2});
args[0].constant_value = GetInput(0);
args[0].initialized = true;
TF_ASSERT_OK_AND_ASSIGN(
auto graph,
CreateSingleOpGraph(*node_def(), args, single_op_arg.output_dtypes));
const auto& node_name_index = graph->BuildNodeNameIndex();
const Node* identity_node = node_name_index.at("identity_op");
EXPECT_EQ(identity_node->op_def().name(), "Identity");
EXPECT_EQ(identity_node->attrs().FindByString("T")->type(), DT_FLOAT);
EXPECT_EQ(identity_node->num_inputs(), 1);
const Node* identity_input_node = nullptr;
TF_EXPECT_OK(identity_node->input_node(0, &identity_input_node));
EXPECT_EQ(identity_input_node->name(), "_arg0");
const Node* arg_node = node_name_index.at("_arg0");
EXPECT_EQ(arg_node->op_def().name(), "_Arg");
EXPECT_EQ(arg_node->attrs().FindByString("T")->type(), DT_FLOAT);
const Node* retval_node = node_name_index.at("_retval0");
EXPECT_EQ(retval_node->op_def().name(), "_Retval");
EXPECT_EQ(retval_node->attrs().FindByString("T")->type(), DT_FLOAT);
EXPECT_EQ(identity_node->num_outputs(), 1);
EXPECT_EQ(retval_node->num_inputs(), 1);
const Node* retval_input_node = nullptr;
TF_EXPECT_OK(retval_node->input_node(0, &retval_input_node));
EXPECT_EQ(retval_input_node->name(), "identity_op");
}
TEST(XlaCompileUtilTest, PjRtXlaLaunchFlagTest) {
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
rollout_config.enabled_for_xla_launch_ = true;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_GPU));
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_CPU));
EXPECT_TRUE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.enabled_for_xla_launch_ = false;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
}
TEST(XlaCompileUtilTest, PjRtXlaCompileOnDemandFlagTest) {
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
rollout_config.enabled_for_compile_on_demand_ = true;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaCompileOnDemand(DeviceType(DEVICE_GPU));
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.AllowForDeviceInXlaCompileOnDemand(DeviceType(DEVICE_CPU));
EXPECT_TRUE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
rollout_config.enabled_for_compile_on_demand_ = false;
EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU)));
}
TEST(XlaCompileUtilTest, PjRtDeviceCompilerResourceName) {
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_TPU)),
"pjrt_device_compiler_TPU");
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_TPU_NODE)),
"pjrt_device_compiler_TPU");
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_CPU)),
"pjrt_device_compiler_CPU");
EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_GPU)),
"pjrt_device_compiler_GPU");
}
TEST(XlaCompileUtilTest, PjRtDeviceCompilationProfilerResourceName) {
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_TPU)),
"pjrt_device_compilation_profiler_TPU");
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_TPU_NODE)),
"pjrt_device_compilation_profiler_TPU");
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_CPU)),
"pjrt_device_compilation_profiler_CPU");
EXPECT_EQ(
GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_GPU)),
"pjrt_device_compilation_profiler_GPU");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compile_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compile_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c33600c5-a968-467c-a73d-776e13f851df | cpp | tensorflow/tensorflow | encapsulate_xla_computations_pass | tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc | tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc | #include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h"
#include <functional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
const char* const kXlaClusterOutput = "XlaClusterOutput";
bool IsCpuGpuCompile(const Graph* graph) {
for (Node* n : graph->nodes()) {
string name;
if (!TryGetNodeAttr(n->attrs(), kXlaClusterIdAttr, &name)) continue;
DeviceNameUtils::ParsedName parsed;
if (DeviceNameUtils::ParseFullName(n->requested_device(), &parsed)) {
if (parsed.type != DEVICE_CPU && parsed.type != DEVICE_GPU) {
return false;
}
}
}
return true;
}
bool is_guaranteed_constant(const Node& n) {
bool guaranteed_constant = false;
if (!TryGetNodeAttr(n.attrs(), "_is_guaranteed_constant",
&guaranteed_constant)) {
return false;
}
return guaranteed_constant;
}
Status GetIndexAttr(const Node& n, int num_args, int* index) {
TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", index));
if (*index < 0 || *index >= num_args) {
return errors::InvalidArgument("Invalid ", n.type_string(), " number ",
*index);
}
return absl::OkStatus();
}
DataType EdgeType(const Edge* edge) {
return edge->dst()->input_type(edge->dst_input());
}
void AddControlInputs(const Node& node, absl::flat_hash_set<Node*>* deps) {
for (const Edge* edge : node.in_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->src());
}
}
}
void AddControlOutputs(const Node& node, absl::flat_hash_set<Node*>* deps) {
for (const Edge* edge : node.out_edges()) {
if (edge->IsControlEdge()) {
deps->insert(edge->dst());
}
}
}
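// Canonicalizes an encapsulated subgraph: _Arg nodes are reordered so that
// non-resource arguments come first (each group sorted by name), _Retval nodes
// are sorted by name, the resulting permutations are reported back to the
// caller, and the call node is tagged with the cluster id, the index of the
// first variable argument, and a fingerprint of the subgraph.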
Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph_ptr,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation,
NodeDef* call_def) {
Graph* graph = graph_ptr->get();
const int num_args = input_permutation->size();
const int num_retvals = output_permutation->size();
std::vector<Node*> args;
std::vector<Node*> retvals;
args.reserve(num_args);
retvals.reserve(num_retvals);
for (Node* n : graph->nodes()) {
if (n->type_string() == "_Arg") {
if (is_guaranteed_constant(*n)) {
return errors::InvalidArgument(
"Guaranteed constants are not supported (", n->name(), ")");
}
args.push_back(n);
} else if (n->type_string() == "_Retval") {
retvals.push_back(n);
}
}
if (std::find(args.begin(), args.end(), nullptr) != args.end()) {
return errors::InvalidArgument("Missing or non-consecutive arguments");
}
std::sort(args.begin(), args.end(), [&](Node* a, Node* b) {
bool a_is_resource = (a->output_type(0) == DT_RESOURCE);
bool b_is_resource = (b->output_type(0) == DT_RESOURCE);
StringPiece a_name(a->name());
StringPiece b_name(b->name());
return std::tie(a_is_resource, a_name) < std::tie(b_is_resource, b_name);
});
std::sort(retvals.begin(), retvals.end(),
[](Node* a, Node* b) { return a->name() < b->name(); });
int variable_start_index = num_args;
for (int i = 0; i < num_args; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*args[i], num_args, &index));
if (args[i]->output_type(0) == DT_RESOURCE &&
variable_start_index == num_args) {
variable_start_index = i;
}
(*input_permutation)[index] = i;
args[i]->AddAttr("index", i);
}
VLOG(4) << "variable_start_index: " << variable_start_index;
for (int i = 0; i < num_retvals; ++i) {
int index;
TF_RETURN_IF_ERROR(GetIndexAttr(*retvals[i], num_retvals, &index));
(*output_permutation)[index] = i;
retvals[i]->AddAttr("index", i);
}
AddNodeAttr(kXlaClusterIdAttr, call_def->name(), call_def);
AddNodeAttr("_variable_start_index", variable_start_index, call_def);
TF_ASSIGN_OR_RETURN(uint64 fingerprint, FingerprintGraph(*graph));
VLOG(1) << "Subgraph fingerprint:" << fingerprint;
call_def->set_op(absl::StrCat(call_def->op(), "_", fingerprint));
return absl::OkStatus();
}
}
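// Verifies that every non-control edge leaving an XLA cluster feeds an
// XlaClusterOutput node, then outlines each cluster into a function via
// EncapsulateSubgraphsInFunctions.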
Status EncapsulateXlaComputationsPass::Encapsulate(
std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def) {
for (const Edge* e : (*graph)->edges()) {
if (!e->IsControlEdge() &&
e->src()->attrs().Find(kXlaClusterIdAttr) != nullptr &&
e->dst()->attrs().Find(kXlaClusterIdAttr) == nullptr &&
e->dst()->type_string() != kXlaClusterOutput) {
return errors::InvalidArgument(
"Undeclared output of XLA computation. Some common causes of this "
"error are: 1) variable initializers that depend on the XLA "
"computation; 2) gradient computations that depend on the XLA "
"computation, which can be mitigated by moving gradient computations "
"inside XLA computation. Offending edge: ",
e->src()->name(), ":", e->src_output(), " -> ", e->dst()->name(), ":",
e->dst_input());
}
}
auto output = std::make_unique<Graph>((*graph)->op_registry());
TF_RETURN_WITH_CONTEXT_IF_ERROR(
EncapsulateSubgraphsInFunctions(
kXlaClusterIdAttr, **graph, RewriteSubgraph,
          /*reuse_existing_functions=*/true, &output, flib_def),
"EncapsulateXlaComputationsPass failed");
graph->swap(output);
return absl::OkStatus();
}
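// Replaces every node selected by `is_xla_launch_node` with an XlaLaunch op,
// rewiring its data and control edges. When
// `add_edges_to_output_of_downstream_nodes` is true, the cluster's
// XlaClusterOutput consumers are removed as well and their successors are
// connected directly to the new XlaLaunch node.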
Status EncapsulateXlaComputationsPass::BuildXlaLaunchOps(
Graph* graph,
const std::function<absl::StatusOr<bool>(const Node&)>& is_xla_launch_node,
const std::function<absl::StatusOr<XlaFunctionInfo>(const Node&)>&
get_xla_function_info,
const bool add_edges_to_output_of_downstream_nodes) {
std::vector<Node*> launch_nodes;
for (Node* n : graph->nodes()) {
TF_ASSIGN_OR_RETURN(const bool is_xla_launch_node, is_xla_launch_node(*n));
if (is_xla_launch_node) launch_nodes.push_back(n);
}
for (Node* launch : launch_nodes) {
TF_ASSIGN_OR_RETURN(const XlaFunctionInfo xla_function_info,
get_xla_function_info(*launch));
std::vector<const Edge*> in_edges;
TF_RETURN_IF_ERROR(launch->input_edges(&in_edges));
const int num_inputs = in_edges.size();
const int variable_start_index = xla_function_info.variable_start_index;
const int num_variables = num_inputs - variable_start_index;
const int num_args = variable_start_index;
VLOG(4) << "Launch node '" << launch->name() << "'"
<< " input edges: " << in_edges.size() << " num_args: " << num_args
<< " num_variables: " << num_variables;
std::vector<Node*> nodes_to_remove = {launch};
std::vector<std::pair<Node*, int>> data_inputs(num_inputs);
absl::flat_hash_set<Node*> control_inputs;
DataTypeVector arg_types(num_args);
AddControlInputs(*launch, &control_inputs);
for (int i = 0; i < num_args; ++i) {
const Edge* edge = in_edges[i];
data_inputs[i] = {edge->src(), edge->src_output()};
arg_types[i] = EdgeType(edge);
}
for (int i = 0; i < num_variables; ++i) {
int pos = variable_start_index + i;
const Edge* edge = in_edges[pos];
data_inputs[pos] = {edge->src(), edge->src_output()};
}
const int num_outputs = launch->output_types().size();
absl::flat_hash_set<Node*> control_outputs;
std::vector<std::vector<std::pair<Node*, int>>> data_outputs(num_outputs);
const DataTypeVector& output_types(launch->output_types());
for (const Edge* le : launch->out_edges()) {
if (le->IsControlEdge()) {
control_outputs.insert(le->dst());
} else {
TF_RET_CHECK(le->src_output() < num_outputs);
Node* output_node = le->dst();
if (add_edges_to_output_of_downstream_nodes) {
TF_RET_CHECK(output_node->type_string() == kXlaClusterOutput)
<< le->DebugString();
nodes_to_remove.push_back(output_node);
for (const Edge* oe : output_node->out_edges()) {
TF_RET_CHECK(!oe->IsControlEdge());
data_outputs[le->src_output()].push_back(
{oe->dst(), oe->dst_input()});
}
AddControlOutputs(*output_node, &control_outputs);
} else {
data_outputs[le->src_output()].push_back(
{le->dst(), le->dst_input()});
}
}
}
NodeDef def;
def.set_name(launch->name());
MergeDebugInfo(NodeDebugInfo(launch->def()), &def);
VLOG(2) << "Replacing with XlaLaunch";
VLOG(2) << "Device is " << launch->requested_device();
def.set_op("XlaLaunch");
def.set_device(launch->requested_device());
AddNodeAttr("Tconstants", DataTypeVector{}, &def);
AddNodeAttr("Targs", arg_types, &def);
AddNodeAttr("Nresources", num_variables, &def);
AddNodeAttr("Tresults", output_types, &def);
NameAttrList function;
function.set_name(xla_function_info.function_name);
AddNodeAttr("function", function, &def);
for (Node* node : nodes_to_remove) {
VLOG(2) << "Deleting node " << node->DebugString();
control_inputs.erase(node);
control_outputs.erase(node);
graph->RemoveNode(node);
}
TF_ASSIGN_OR_RETURN(Node * xla_launch, graph->AddNode(def));
for (int i = 0, end = data_inputs.size(); i < end; ++i) {
graph->AddEdge(data_inputs[i].first, data_inputs[i].second, xla_launch,
i);
}
for (Node* n : control_inputs) {
graph->AddControlEdge(n, xla_launch);
}
for (int i = 0, end = data_outputs.size(); i < end; ++i) {
for (const auto& successor : data_outputs[i]) {
graph->AddEdge(xla_launch, i, successor.first, successor.second);
}
}
for (Node* n : control_outputs) {
graph->AddControlEdge(xla_launch, n);
}
}
return absl::OkStatus();
}
Status EncapsulateXlaComputationsPass::BuildXlaLaunchOps(
Graph* graph) {
const auto is_xla_launch_node = [](const Node& node) -> absl::StatusOr<bool> {
const string& name = GetNodeAttrString(node.attrs(), kXlaClusterIdAttr);
return !name.empty();
};
const auto get_xla_function_info =
[](const Node& node) -> absl::StatusOr<XlaFunctionInfo> {
XlaFunctionInfo result;
TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), "_variable_start_index",
&result.variable_start_index));
result.function_name = node.type_string();
return result;
};
return BuildXlaLaunchOps(graph, is_xla_launch_node, get_xla_function_info,
                           /*add_edges_to_output_of_downstream_nodes=*/true);
}
Status EncapsulateXlaComputationsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "EncapsulateXlaComputations(): "
<< DumpGraphToFile("encapsulate_xla_computations_before",
**options.graph, options.flib_def);
const char* additional_help =
IsCpuGpuCompile(options.graph->get())
? xla::status_macros::kPossibleAutoJitAlternative
: "";
TF_RETURN_WITH_CONTEXT_IF_ERROR(Encapsulate(options.graph, options.flib_def),
additional_help);
VLOG(1) << "EncapsulateXlaComputations() half-way: "
<< DumpGraphToFile("encapsulate_xla_computations_halfway",
**options.graph, options.flib_def);
TF_RETURN_WITH_CONTEXT_IF_ERROR(BuildXlaLaunchOps(options.graph->get()),
additional_help);
VLOG(1) << "EncapsulateXlaComputations() finished: "
<< DumpGraphToFile("encapsulate_xla_computations_after",
**options.graph, options.flib_def);
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h"
#include "tensorflow/compiler/tf2xla/test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/strings/proto_serialization.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
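// Builds the outer graph expected after encapsulation: placeholders feeding a
// "launch0" call node whose results flow through XlaClusterOutput nodes into
// Identity consumers.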
static std::unique_ptr<Graph> MakeOuterGraph(
const FunctionLibraryDefinition& flib_def, const string& function) {
Scope scope = Scope::NewRootScope().ExitOnError();
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib_def.ToProto()));
auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
NodeDef def;
TF_CHECK_OK(NodeDefBuilder("launch0", function, &flib_def)
.Input(a.node()->name(), 0, DT_INT32)
.Input(b.node()->name(), 0, DT_FLOAT)
.Input(c.node()->name(), 0, DT_INT32)
.Input(d.node()->name(), 0, DT_FLOAT)
.Input(u.node()->name(), 0, DT_RESOURCE)
.Input(v.node()->name(), 0, DT_RESOURCE)
.Input(w.node()->name(), 0, DT_RESOURCE)
.Device("/gpu:0")
.Attr(kXlaClusterIdAttr, "launch0")
.Attr("_variable_start_index", 4)
.Finalize(&def));
Status status;
Node* launch = scope.graph()->AddNode(def, &status);
TF_CHECK_OK(status);
TF_CHECK_OK(scope.DoShapeInference(launch));
scope.graph()->AddEdge(a.node(), 0, launch, 0);
scope.graph()->AddEdge(b.node(), 0, launch, 1);
scope.graph()->AddEdge(c.node(), 0, launch, 2);
scope.graph()->AddEdge(d.node(), 0, launch, 3);
scope.graph()->AddEdge(u.node(), 0, launch, 4);
scope.graph()->AddEdge(v.node(), 0, launch, 5);
scope.graph()->AddEdge(w.node(), 0, launch, 6);
auto out0 =
ops::XlaClusterOutput(scope.WithOpName("Out0"), Output(launch, 0));
auto out1 =
ops::XlaClusterOutput(scope.WithOpName("Out1"), Output(launch, 1));
auto out2 =
ops::XlaClusterOutput(scope.WithOpName("Out2"), Output(launch, 2));
auto out3 =
ops::XlaClusterOutput(scope.WithOpName("Out3"), Output(launch, 3));
auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), out0);
auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), out0);
auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), out0);
auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), out1);
auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), out2);
auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), out3);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(scope.ToGraph(graph.get()));
return graph;
}
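// Builds the function body expected inside the encapsulated computation:
// _Arg nodes (non-resource arguments first, resources last) feeding the
// clustered ops, followed by _Retval nodes.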
static std::unique_ptr<Graph> MakeBodyGraph() {
Scope scope = Scope::NewRootScope().ExitOnError();
auto arg0 = ops::_Arg(scope.WithOpName("a_0_arg"), DT_INT32, 0);
auto arg1 = ops::_Arg(scope.WithOpName("b_0_arg"), DT_FLOAT, 1);
auto arg2 = ops::_Arg(scope.WithOpName("c_0_arg"), DT_INT32, 2);
auto arg3 = ops::_Arg(scope.WithOpName("d_0_arg"), DT_FLOAT, 3);
auto arg4 = ops::_Arg(scope.WithOpName("u_0_arg"), DT_RESOURCE, 4);
auto arg5 = ops::_Arg(scope.WithOpName("v_0_arg"), DT_RESOURCE, 5);
auto arg6 = ops::_Arg(scope.WithOpName("w_0_arg"), DT_RESOURCE, 6);
auto add_attrs = [](Node* node) {
node->AddAttr(kXlaClusterIdAttr, "launch0");
node->set_requested_device("/gpu:0");
};
auto b_identity = ops::Identity(scope.WithOpName("B_identity"), arg1);
add_attrs(b_identity.node());
auto read_u = ops::ReadVariableOp(scope.WithOpName("ReadU"), arg4, DT_FLOAT);
add_attrs(read_u.node());
auto read_v = ops::ReadVariableOp(scope.WithOpName("ReadV"), arg5, DT_FLOAT);
add_attrs(read_v.node());
auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), arg6, DT_FLOAT);
add_attrs(read_w.node());
auto e = ops::Add(scope.WithOpName("E"), arg0, arg2);
add_attrs(e.node());
auto f = ops::Add(scope.WithOpName("F"), read_v, read_w);
add_attrs(f.node());
auto g = ops::Add(scope.WithOpName("G"), f, arg3);
add_attrs(g.node());
auto out0 = ops::_Retval(scope.WithOpName("b_identity_0_retval_RetVal"),
b_identity, 0);
auto out1 = ops::_Retval(scope.WithOpName("e_0_retval_RetVal"), e, 1);
auto out2 = ops::_Retval(scope.WithOpName("g_0_retval_RetVal"), g, 2);
auto out3 =
ops::_Retval(scope.WithOpName("readu_0_retval_RetVal"), read_u, 3);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_CHECK_OK(scope.ToGraph(graph.get()));
return graph;
}
TEST(EncapsulateXlaComputations, DeterministicEncapsulate) {
auto get_serialized_graph = [](bool control_input_reversed,
bool operand_reversed) -> string {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
std::unique_ptr<Graph> graph(new Graph(&flib_def));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto a0 = ops::Placeholder(scope.WithOpName("A0"), DT_INT32);
auto a1 = ops::Placeholder(scope.WithOpName("A1"), DT_INT32);
ops::Add e = operand_reversed ? ops::Add(scope.WithOpName("E"), a0, a1)
: ops::Add(scope.WithOpName("E"), a1, a0);
auto add_attrs = [](Node* node) {
node->AddAttr(kXlaClusterIdAttr, "launch0");
};
add_attrs(e.node());
TF_CHECK_OK(scope.ToGraph(graph.get()));
auto get_node_in_graph = [&graph](Node* node) {
return graph->FindNodeId(node->id());
};
if (!control_input_reversed) {
graph->AddControlEdge(get_node_in_graph(a0.node()),
get_node_in_graph(e.node()), true);
graph->AddControlEdge(get_node_in_graph(a1.node()),
get_node_in_graph(e.node()), true);
} else {
graph->AddControlEdge(get_node_in_graph(a1.node()),
get_node_in_graph(e.node()), true);
graph->AddControlEdge(get_node_in_graph(a0.node()),
get_node_in_graph(e.node()), true);
}
}
TF_CHECK_OK(EncapsulateXlaComputationsPass::Encapsulate(&graph, &flib_def));
return SerializeGraphDeterministic(*graph).value();
};
  EXPECT_EQ(get_serialized_graph(/*control_input_reversed=*/true,
                                 /*operand_reversed=*/false),
            get_serialized_graph(/*control_input_reversed=*/false,
                                 /*operand_reversed=*/false));
  EXPECT_NE(get_serialized_graph(/*control_input_reversed=*/false,
                                 /*operand_reversed=*/true),
            get_serialized_graph(/*control_input_reversed=*/false,
                                 /*operand_reversed=*/false));
}
TEST(EncapsulateXlaComputations, Encapsulate) {
FunctionLibraryDefinition flib_def(OpRegistry::Global(),
FunctionDefLibrary());
std::unique_ptr<Graph> graph(new Graph(&flib_def));
{
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
auto add_attrs = [](Node* node) {
node->AddAttr(kXlaClusterIdAttr, "launch0");
node->set_requested_device("/gpu:0");
};
auto b_identity = ops::Identity(scope.WithOpName("B_identity"), b);
add_attrs(b_identity.node());
auto read_u = ops::ReadVariableOp(scope.WithOpName("ReadU"), u, DT_FLOAT);
add_attrs(read_u.node());
auto read_v = ops::ReadVariableOp(scope.WithOpName("ReadV"), v, DT_FLOAT);
add_attrs(read_v.node());
auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), w, DT_FLOAT);
add_attrs(read_w.node());
auto e = ops::Add(scope.WithOpName("E"), a, c);
add_attrs(e.node());
auto f = ops::Add(scope.WithOpName("F"), read_v, read_w);
add_attrs(f.node());
auto g = ops::Add(scope.WithOpName("G"), f, d);
add_attrs(g.node());
auto out0 = ops::XlaClusterOutput(scope.WithOpName("Out0"), b_identity);
auto out1 = ops::XlaClusterOutput(scope.WithOpName("Out1"), e);
auto out2 = ops::XlaClusterOutput(scope.WithOpName("Out2"), g);
auto out3 = ops::XlaClusterOutput(scope.WithOpName("Out3"), read_u);
auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), out0);
auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), out0);
auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), out0);
auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), out1);
auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), out2);
auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), out3);
TF_ASSERT_OK(scope.ToGraph(graph.get()));
}
std::unique_ptr<Graph> graph_copy(new Graph(&flib_def));
CopyGraph(*graph, graph_copy.get());
TF_ASSERT_OK(EncapsulateXlaComputationsPass::Encapsulate(&graph, &flib_def));
std::unordered_map<string, Node*> index = graph->BuildNodeNameIndex();
string function = index.at("launch0")->type_string();
{
std::unique_ptr<Graph> outer = MakeOuterGraph(flib_def, function);
GraphDef expected_def;
outer->ToGraphDef(&expected_def);
GraphDef actual_def;
graph->ToGraphDef(&actual_def);
TF_EXPECT_GRAPH_EQ_INTERNAL(expected_def, actual_def);
}
{
std::unique_ptr<Graph> body = MakeBodyGraph();
GraphDef expected_body_def;
body->ToGraphDef(&expected_body_def);
InstantiationResultForTest result;
TF_EXPECT_OK(InstantiateFunctionForTest(function, flib_def, &result));
EXPECT_EQ((DataTypeVector{DT_INT32, DT_FLOAT, DT_INT32, DT_FLOAT,
DT_RESOURCE, DT_RESOURCE, DT_RESOURCE}),
result.arg_types);
EXPECT_EQ((DataTypeVector{DT_FLOAT, DT_INT32, DT_FLOAT, DT_FLOAT}),
result.ret_types);
TF_EXPECT_GRAPH_EQ(expected_body_def, result.gdef);
}
TF_ASSERT_OK(
EncapsulateXlaComputationsPass::Encapsulate(&graph_copy, &flib_def));
std::unordered_map<string, Node*> index_copy =
graph_copy->BuildNodeNameIndex();
string function_copy = index_copy.at("launch0")->type_string();
EXPECT_EQ(function, function_copy);
}
TEST(EncapsulateXlaComputations, BuildXlaLaunchOp) {
std::unique_ptr<Graph> body_graph = MakeBodyGraph();
FunctionDefLibrary flib;
TF_ASSERT_OK(GraphToFunctionDef(*body_graph, "launch0", flib.add_function()));
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
std::unique_ptr<Graph> graph = MakeOuterGraph(flib_def, "launch0");
TF_ASSERT_OK(EncapsulateXlaComputationsPass::BuildXlaLaunchOps(graph.get()));
Scope scope = Scope::DisabledShapeInferenceScope().ExitOnError();
TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib));
auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32);
auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT);
auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32);
auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT);
auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE);
auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE);
auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE);
NameAttrList function;
function.set_name("launch0");
auto launch = ops::XlaLaunch(
scope.WithOpName("launch0").WithDevice("/gpu:0"),
std::initializer_list<Input>{}, std::initializer_list<Input>{a, b, c, d},
std::initializer_list<Input>{u, v, w},
DataTypeVector{DT_FLOAT, DT_INT32, DT_FLOAT, DT_FLOAT}, function);
auto consumer0_a =
ops::Identity(scope.WithOpName("consumer0_a"), launch.results[0]);
auto consumer0_b =
ops::Identity(scope.WithOpName("consumer0_b"), launch.results[0]);
auto consumer0_c =
ops::Identity(scope.WithOpName("consumer0_c"), launch.results[0]);
auto consumer1 =
ops::Identity(scope.WithOpName("consumer1"), launch.results[1]);
auto consumer2 =
ops::Identity(scope.WithOpName("consumer2"), launch.results[2]);
auto consumer3 =
ops::Identity(scope.WithOpName("consumer3"), launch.results[3]);
GraphDef expected_def;
TF_ASSERT_OK(scope.ToGraphDef(&expected_def));
GraphDef actual_def;
graph->ToGraphDef(&actual_def);
TF_EXPECT_GRAPH_EQ(expected_def, actual_def);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
436d8490-15c6-4601-8e7a-e454d6599354 | cpp | tensorflow/tensorflow | xla_compiler_options_util | tensorflow/compiler/jit/xla_compiler_options_util.cc | tensorflow/compiler/jit/xla_compiler_options_util_test.cc | #include "tensorflow/compiler/jit/xla_compiler_options_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/tsl/framework/device_id_utils.h"
#include "tensorflow/core/framework/function.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
inline void LogOptions(const XlaCompiler::Options& options) {
VLOG(2) << "XlaCompiler::Options[device_type=" << options.device_type
<< ",device_ordinal=" << options.device_ordinal
<< ",client=" << options.client << ",flib_def=" << options.flib_def
<< ",graph_def_version=" << options.graph_def_version
<< ",options.shape_determination_fns.layout_preference_fn?="
<< (options.shape_determination_fns.layout_preference_fn != nullptr)
<< ",options.shape_determination_fns.shape_representation_fn?="
<< (options.shape_determination_fns.shape_representation_fn !=
nullptr)
<< ",allow_cpu_custom_calls=" << options.allow_cpu_custom_calls
<< ",populate_resource_manager=" << options.populate_resource_manager
<< ",alias_passthrough_params=" << options.alias_passthrough_params
<< ",detailed_logging=" << options.detailed_logging << "]";
}
}
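// Builds XlaCompiler::Options for the XLA (non-PjRt) device compiler: the
// client comes from the device compiler, the device ordinal from the stream,
// CPU custom calls are allowed only on the host platform, and
// passthrough-parameter aliasing is enabled only when there are no reference
// variables and the op is not running on an XLA device.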
XlaCompiler::Options GenerateCompilerOptions(
const XlaDeviceCompiler& xla_device_compiler,
const FunctionLibraryRuntime& function_library, DeviceBase* device,
se::Stream* stream, const XlaPlatformInfo& platform_info,
bool has_ref_vars) {
XlaCompiler::Options options;
options.client = static_cast<xla::LocalClient*>(xla_device_compiler.client());
if (stream != nullptr) {
options.device_ordinal = stream->parent()->device_ordinal();
}
options.device_type = xla_device_compiler.device_type();
options.flib_def = function_library.GetFunctionLibraryDefinition();
options.graph_def_version = function_library.graph_def_version();
options.allow_cpu_custom_calls =
(platform_info.platform_id() == se::host::kHostPlatformId);
options.device_allocator = GetAllocator(device, stream, platform_info);
if (platform_info.xla_device_metadata()) {
options.shape_determination_fns =
platform_info.xla_device_metadata()->default_shape_determination_fns();
}
options.alias_passthrough_params =
!has_ref_vars && !platform_info.is_on_xla_device();
LogOptions(options);
return options;
}
XlaCompiler::Options GenerateCompilerOptionsForTfrtTpu(
const XlaDeviceCompiler& xla_device_compiler,
const FunctionLibraryRuntime& function_library) {
XlaCompiler::Options options;
options.device_type = xla_device_compiler.device_type();
options.flib_def = function_library.GetFunctionLibraryDefinition();
options.graph_def_version = function_library.graph_def_version();
options.allow_cpu_custom_calls = false;
options.alias_passthrough_params = false;
return options;
}
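// The PjRt variants below derive the device ordinal from the platform device
// id when it can be determined, and take shape-determination functions from
// the XLA or PjRt device metadata when either is available.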
XlaCompiler::Options GenerateCompilerOptionsForPjRt(
const FunctionLibraryRuntime& function_library,
const DeviceBase* device_base, const XlaPlatformInfo& platform_info,
const DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>*
pjrt_device_compiler) {
return GenerateCompilerOptionsForPjRt(
function_library.GetFunctionLibraryDefinition(),
function_library.graph_def_version(), device_base, platform_info,
pjrt_device_compiler);
}
XlaCompiler::Options GenerateCompilerOptionsForPjRt(
const FunctionLibraryDefinition* function_library_def,
int graph_def_version, const DeviceBase* device_base,
const XlaPlatformInfo& platform_info,
const PjRtDeviceCompiler* pjrt_device_compiler) {
XlaCompiler::Options options;
absl::StatusOr<int> platform_device_id =
tsl::GetPlatformDeviceIdFromDeviceParsedName(
device_base->parsed_name(),
DeviceType(tensorflow::down_cast<const Device*>(device_base)
->device_type()));
if (platform_device_id.ok()) {
options.device_ordinal = *platform_device_id;
} else {
options.device_ordinal = device_base->parsed_name().id;
}
options.flib_def = function_library_def;
options.graph_def_version = graph_def_version;
if (const auto* metadata = platform_info.xla_device_metadata();
metadata != nullptr) {
options.device_type = metadata->jit_device_type();
options.shape_determination_fns =
metadata->default_shape_determination_fns();
} else if (const auto* metadata = platform_info.pjrt_device_metadata();
metadata != nullptr) {
options.device_type = metadata->jit_device_type();
options.shape_determination_fns =
metadata->default_shape_determination_fns();
} else if (pjrt_device_compiler != nullptr) {
options.device_type = pjrt_device_compiler->device_type();
}
options.allow_cpu_custom_calls = false;
options.alias_passthrough_params = false;
LogOptions(options);
return options;
}
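// Resource updates may alias their inputs only when the cluster has no
// reference variables and the caller permits aliasing.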
XlaCompiler::CompileOptions GenerateCompileOptions(
bool has_ref_vars, bool may_alias_resource_update) {
XlaCompiler::CompileOptions compile_options;
compile_options.is_entry_computation = true;
compile_options.always_return_tuple = false;
compile_options.alias_resource_update =
!has_ref_vars && may_alias_resource_update;
return compile_options;
}
} | #include "tensorflow/compiler/jit/xla_compiler_options_util.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/compiler/jit/xla_platform_info.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/client/client_library.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
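// Test helpers (this and CreatePjRtDeviceCompiler below) wrap a client in a
// device compiler with a default persistor config; the returned objects are
// reference counted and released in the tests via core::ScopedUnref.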
XlaDeviceCompiler* CreateXlaDeviceCompiler(DeviceType device_type,
xla::LocalClient* local_client) {
auto persistor = std::make_unique<XlaDeviceExecutablePersistor>(
XlaDeviceExecutablePersistor::Config(), device_type);
auto compiler_client =
std::make_unique<XlaDeviceCompilerClient>(local_client);
return new XlaDeviceCompiler(std::move(persistor),
std::move(compiler_client));
}
PjRtDeviceCompiler* CreatePjRtDeviceCompiler(DeviceType device_type,
xla::PjRtClient* pjrt_client) {
auto persistor = std::make_unique<PjRtDeviceExecutablePersistor>(
PjRtDeviceExecutablePersistor::Config(), device_type);
auto compiler_client =
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client);
return new PjRtDeviceCompiler(std::move(persistor),
std::move(compiler_client));
}
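// Shape-determination functions used by the device metadata in these tests:
// the representation fn returns an empty xla::Shape and the layout preference
// fn always reports kTpuPreferLinearLayout.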
std::vector<XlaShapeLayoutHelpers::ShapeDeterminationFns>
GetShapeDeterminationFns() {
XlaHelpers::ShapeRepresentationFn shape_representation_fn =
[](const TensorShape&, DataType, bool, XlaLayoutPreference) {
return xla::Shape();
};
XlaShapeLayoutHelpers::LayoutPreferenceFn layout_preference_fn =
[](const TensorShape&, DataType, std::optional<XlaArgument::Kind>) {
return tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout;
};
return {XlaShapeLayoutHelpers::ShapeDeterminationFns{
layout_preference_fn, shape_representation_fn}};
}
std::unique_ptr<XlaDevice::Metadata> CreateXlaDeviceMetadata(
DeviceType compilation_device_type) {
return std::make_unique<XlaDevice::Metadata>(
0, nullptr, compilation_device_type,
GetShapeDeterminationFns(), XlaDevice::PaddedShapeFn(),
false);
}
std::unique_ptr<PjRtBaseDevice::Metadata> CreatePjRtDeviceMetadata(
DeviceType compilation_device_type) {
return std::make_unique<PjRtBaseDevice::Metadata>(compilation_device_type,
GetShapeDeterminationFns());
}
class XlaCompilerOptionsTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
}
DeviceSetup device_setup_;
};
TEST_F(XlaCompilerOptionsTest, PjRtOptionsXlaDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
DeviceType compilation_device_type = DeviceType(DEVICE_GPU_XLA_JIT);
se::Platform::Id platform_id = nullptr;
auto xla_device_metadata = CreateXlaDeviceMetadata(compilation_device_type);
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
XlaPlatformInfo platform_info(
compilation_device_type, platform_id, xla_device_metadata.get(),
nullptr, custom_allocator);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*device_setup_.flr(), device, platform_info,
nullptr);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_EQ(options.device_ordinal, 0);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout));
EXPECT_EQ(shape, xla::Shape());
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout);
}
TEST_F(XlaCompilerOptionsTest, PjRtOptionsPjRtBaseDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_CPU});
Device* device = device_setup_.GetDevice(DEVICE_CPU);
DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
auto pjrt_device_metadata = CreatePjRtDeviceMetadata(compilation_device_type);
XlaPlatformInfo platform_info(
compilation_device_type, nullptr,
nullptr,
pjrt_device_metadata.get(),
nullptr);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*device_setup_.flr(), device, platform_info,
nullptr);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_EQ(options.device_ordinal, 0);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout));
EXPECT_EQ(shape, xla::Shape());
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout);
}
TEST_F(XlaCompilerOptionsTest, PjRtOptionsNonXlaDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_CPU});
Device* device = device_setup_.GetDevice(DEVICE_CPU);
DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
XlaPlatformInfo platform_info(compilation_device_type,
nullptr,
nullptr,
nullptr,
nullptr);
auto pjrt_device_compiler =
CreatePjRtDeviceCompiler(compilation_device_type, nullptr);
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
XlaCompiler::Options options = GenerateCompilerOptionsForPjRt(
*device_setup_.flr(), device, platform_info, pjrt_device_compiler);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_EQ(options.device_ordinal, 0);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kNoPreference));
xla::ShapeProto shape_proto;
shape_proto.set_element_type(xla::PrimitiveType::F32);
shape_proto.mutable_layout();
EXPECT_EQ(shape, xla::Shape(shape_proto));
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kNoPreference);
}
TEST_F(XlaCompilerOptionsTest, XlaOptions) {
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
DeviceType device_type = DeviceType(DEVICE_XLA_GPU);
DeviceType compilation_device_type = DeviceType(DEVICE_GPU_XLA_JIT);
auto xla_device_compiler =
CreateXlaDeviceCompiler(compilation_device_type, client);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
se::Platform::Id platform_id = se::host::kHostPlatformId;
auto xla_device_metadata = CreateXlaDeviceMetadata(compilation_device_type);
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
XlaPlatformInfo platform_info(
device_type, platform_id, xla_device_metadata.get(),
nullptr, custom_allocator);
XlaCompiler::Options options =
GenerateCompilerOptions(*xla_device_compiler, *device_setup_.flr(),
device, nullptr, platform_info, false);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_TRUE(options.allow_cpu_custom_calls);
EXPECT_NE(options.device_allocator, nullptr);
EXPECT_FALSE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout));
EXPECT_EQ(shape, xla::Shape());
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout);
}
TEST_F(XlaCompilerOptionsTest, XlaOptionsHasRefVarsNoXlaDeviceMetadata) {
device_setup_.AddDevicesAndSetUp({DEVICE_CPU});
Device* device = device_setup_.GetDevice(DEVICE_CPU);
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
DeviceType device_type = DeviceType(DEVICE_CPU);
DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT);
auto xla_device_compiler =
CreateXlaDeviceCompiler(compilation_device_type, client);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
se::Platform::Id platform_id = se::host::kHostPlatformId;
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
XlaPlatformInfo platform_info(
device_type, platform_id, nullptr,
nullptr, custom_allocator);
XlaCompiler::Options options =
GenerateCompilerOptions(*xla_device_compiler, *device_setup_.flr(),
device, nullptr, platform_info, false);
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_TRUE(options.allow_cpu_custom_calls);
EXPECT_NE(options.device_allocator, nullptr);
EXPECT_TRUE(options.alias_passthrough_params);
TF_ASSERT_OK_AND_ASSIGN(
auto shape, options.shape_determination_fns.shape_representation_fn(
TensorShape(), DT_FLOAT, false,
tensorflow::XlaLayoutPreference::kNoPreference));
xla::ShapeProto shape_proto;
shape_proto.set_element_type(xla::PrimitiveType::F32);
shape_proto.mutable_layout();
EXPECT_EQ(shape, xla::Shape(shape_proto));
EXPECT_EQ(options.shape_determination_fns.layout_preference_fn(
TensorShape(), DT_FLOAT, std::nullopt),
tensorflow::XlaLayoutPreference::kNoPreference);
}
TEST_F(XlaCompilerOptionsTest, TfRtTpuOptions) {
device_setup_.AddDevicesAndSetUp({DEVICE_TPU_NODE});
xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
auto xla_device_compiler =
CreateXlaDeviceCompiler(compilation_device_type, client);
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
XlaCompiler::Options options = GenerateCompilerOptionsForTfrtTpu(
*xla_device_compiler, *device_setup_.flr());
EXPECT_EQ(options.device_type, compilation_device_type);
EXPECT_NE(options.flib_def, nullptr);
EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION);
EXPECT_FALSE(options.allow_cpu_custom_calls);
EXPECT_FALSE(options.alias_passthrough_params);
}
TEST_F(XlaCompilerOptionsTest, GenerateCompileOptions) {
XlaCompiler::CompileOptions option1 = GenerateCompileOptions(
false, false);
EXPECT_TRUE(option1.is_entry_computation);
EXPECT_FALSE(option1.always_return_tuple);
EXPECT_FALSE(option1.alias_resource_update);
XlaCompiler::CompileOptions option2 = GenerateCompileOptions(
false, true);
EXPECT_TRUE(option2.alias_resource_update);
XlaCompiler::CompileOptions option3 = GenerateCompileOptions(
true, false);
EXPECT_FALSE(option3.alias_resource_update);
XlaCompiler::CompileOptions option4 = GenerateCompileOptions(
true, true);
EXPECT_FALSE(option4.alias_resource_update);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compiler_options_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_compiler_options_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
642de5a1-8571-42ec-86b3-0c039e9b3c62 | cpp | tensorflow/tensorflow | device_compilation_profiler | tensorflow/compiler/jit/device_compilation_profiler.cc | tensorflow/compiler/jit/device_compilation_profiler_test.cc | #include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/compiler/jit/xla_activity_listener.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tsl/platform/mutex.h"
namespace tensorflow {
namespace {
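// Heuristic for marking a cluster megamorphic: it has been compiled more than
// kCompileThreshold (10) times while averaging fewer than
// kMinExecutionsPerCompile (50) executions per compilation.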
bool ShouldBeMegamorphic(int64_t compile_count, int64_t execution_count) {
const int64_t kCompileThreshold = 10;
const int64_t kMinExecutionsPerCompile = 50;
return compile_count > kCompileThreshold &&
execution_count < kMinExecutionsPerCompile * compile_count;
}
void RegisterExecutionForCluster(
const NameAttrList& function,
DeviceCompilationProfiler::ClusterCompileStats* stats) {
++stats->execution_count;
if (!stats->is_megamorphic &&
ShouldBeMegamorphic(stats->compile_count, stats->execution_count)) {
VLOG(1) << "Marking " << function.name()
<< " as megamorphic, compile_count=" << stats->compile_count
<< " execution_count=" << stats->execution_count;
stats->is_megamorphic = true;
}
}
constexpr int64_t kDefaultCompilationThreshold = 2;
constexpr int64_t kMaxNumOngoingCompilations = kNumAsyncDeviceCompilerThreads;
}
DeviceCompilationProfiler::~DeviceCompilationProfiler() {
mutex_lock lock(mu_);
cluster_compile_stats_.clear();
}
absl::StatusOr<DeviceCompilationProfiler::ClusterCompileStats>
DeviceCompilationProfiler::GetCompileStats(const NameAttrList& function) const {
mutex_lock lock(mu_);
if (auto it = cluster_compile_stats_.find(function.name());
it != cluster_compile_stats_.end()) {
return it->second;
}
return errors::NotFound("Couldn't find compilation stats for cluster: ",
function.name());
}
void DeviceCompilationProfiler::RegisterExecution(
const NameAttrList& function) {
mutex_lock lock(mu_);
auto it =
cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{})
.first;
RegisterExecutionForCluster(function, &it->second);
}
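// Records one compilation of `function`: updates the global XLA compilation
// time metric, bumps the per-cluster compile count and cumulative compile
// time, and broadcasts an XlaJitCompilationActivity to listeners.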
Status DeviceCompilationProfiler::RegisterCompilation(
const NameAttrList& function, int64_t compile_time_us,
bool used_persistent_cache) {
metrics::UpdateXlaCompilationTime(compile_time_us);
const std::string& function_name = function.name();
mutex_lock lock(mu_);
auto it =
cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{})
.first;
const uint64 compile_time_s = compile_time_us / 1.0e6;
it->second.compile_count++;
it->second.cumulative_compile_time_us += compile_time_us;
VLOG(1) << "Compiled " << function_name << " " << it->second.compile_count
<< " times, compile time: " << compile_time_us
<< " us, cumulative: " << it->second.cumulative_compile_time_us
<< " us ("
<< tensorflow::strings::HumanReadableElapsedTime(compile_time_s)
<< " / "
<< tensorflow::strings::HumanReadableElapsedTime(
it->second.cumulative_compile_time_us / 1.0e6)
<< ")";
XlaJitCompilationActivity jit_compilation_activity;
jit_compilation_activity.set_cluster_name(function_name);
jit_compilation_activity.set_compile_count(it->second.compile_count);
jit_compilation_activity.set_compile_time_us(compile_time_us);
jit_compilation_activity.set_cumulative_compile_time_us(
it->second.cumulative_compile_time_us);
jit_compilation_activity.set_used_persistent_cache(used_persistent_cache);
return BroadcastXlaActivity(std::move(jit_compilation_activity));
}
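// Decides whether a cluster should be compiled now. kStrict compiles
// unconditionally; otherwise megamorphic clusters are never compiled, a
// cluster's first execution always compiles, kAsync bails out when too many
// compilations are already in flight, and beyond that the current request
// count must reach the mode's threshold (kDefaultCompilationThreshold for
// kLazy, 0 for kAsync).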
bool DeviceCompilationProfiler::ShouldCompileCluster(
const NameAttrList& function, DeviceCompileMode compile_mode,
int64_t current_request_count) {
std::optional<int64_t> compile_threshold;
if (compile_mode == DeviceCompileMode::kLazy) {
compile_threshold = kDefaultCompilationThreshold;
} else if (compile_mode == DeviceCompileMode::kAsync) {
compile_threshold = 0;
}
if (compile_mode == DeviceCompileMode::kStrict) {
return true;
}
mutex_lock lock(mu_);
auto [it, cluster_not_found] =
cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{});
if (cluster_not_found) {
RegisterExecutionForCluster(function, &it->second);
}
if (it->second.is_megamorphic) {
BroadcastOptimizationRemark(XlaOptimizationRemark::MEGAMORPHIC_FUNCTION,
function.name())
.IgnoreError();
VLOG(2) << "Not compiling cluster " << function.name()
<< " because it is megamorphic.";
return false;
}
if (it->second.execution_count == 1) {
return true;
}
if (compile_mode == DeviceCompileMode::kAsync) {
if (num_ongoing_compilations_ >= kMaxNumOngoingCompilations) {
VLOG(2) << "Not asynchronously compiling cluster " << function.name()
<< " because of too many ongoing compilations.";
return false;
}
}
bool reached_compile_threshold = current_request_count >= *compile_threshold;
if (!reached_compile_threshold) {
VLOG(2) << "Not compiling cluster " << function.name()
<< " because it has not reached compile threshold; threshold is "
<< *compile_threshold << " execution count "
<< current_request_count << ".";
}
return reached_compile_threshold;
}
void DeviceCompilationProfiler::IncrementOngoingAsyncCompilations() {
mutex_lock lock(mu_);
num_ongoing_compilations_++;
}
void DeviceCompilationProfiler::DecrementOngoingAsyncCompilations() {
mutex_lock lock(mu_);
num_ongoing_compilations_--;
}
int64_t DeviceCompilationProfiler::GetNumOngoingAsyncCompilations() const {
mutex_lock lock(mu_);
return num_ongoing_compilations_;
}
std::string DeviceCompilationProfiler::DebugString() const {
std::string debug_string =
"DeviceCompilationProfiler {\ncluster_compile_stats: {\n";
{
mutex_lock lock(mu_);
for (const auto& [key, stats] : cluster_compile_stats_) {
absl::StrAppend(&debug_string, key, ": ", stats.DebugString(), "\n");
}
}
absl::StrAppend(&debug_string, "}\nnum_ongoing_compilations=",
GetNumOngoingAsyncCompilations(), "\n}\n");
return debug_string;
}
} | #include "tensorflow/compiler/jit/device_compilation_profiler.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/tests/device_compiler_test_helper.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/core/framework/attr_value.pb.h"
namespace tensorflow {
namespace {
TEST(DeviceCompilationProfilerTest, RegisterExecution) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
for (int i = 0; i < 5; ++i) {
profiler->RegisterExecution(function);
}
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
EXPECT_EQ(stats.execution_count, 5);
}
TEST(DeviceCompilationProfilerTest, RegisterCompilation) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
auto listener = std::make_unique<JitCompilationListener>();
auto listener_ptr = listener.get();
RegisterXlaActivityListener(std::move(listener));
NameAttrList function;
function.set_name("TestFunc");
std::vector<XlaJitCompilationActivity> expected_activities;
for (int i = 0; i < 5; ++i) {
EXPECT_TRUE(profiler->RegisterCompilation(function, 4, false).ok());
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
XlaJitCompilationActivity expected_activity;
expected_activity.set_cluster_name(function.name());
expected_activity.set_compile_count(stats.compile_count);
expected_activity.set_compile_time_us(4);
expected_activity.set_cumulative_compile_time_us(
stats.cumulative_compile_time_us);
expected_activity.set_used_persistent_cache(false);
expected_activities.push_back(expected_activity);
}
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
EXPECT_EQ(stats.compile_count, 5);
EXPECT_EQ(stats.cumulative_compile_time_us, 5 * 4);
const auto& actual_activities = listener_ptr->GetListenerHistory();
EXPECT_EQ(actual_activities.size(), expected_activities.size());
for (size_t i = 0; i < actual_activities.size(); ++i) {
EXPECT_EQ(actual_activities[i].SerializeAsString(),
expected_activities[i].SerializeAsString());
}
}
TEST(DeviceCompilationProfilerTest, OngoingAsyncCompilations) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
for (int i = 0; i < 5; ++i) {
profiler->IncrementOngoingAsyncCompilations();
}
EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 5);
for (int i = 0; i < 5; ++i) {
profiler->DecrementOngoingAsyncCompilations();
}
EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0);
for (int i = 0; i < 5; ++i) {
profiler->IncrementOngoingAsyncCompilations();
profiler->DecrementOngoingAsyncCompilations();
}
EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0);
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterNotFound) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterFirstExecution) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
profiler->RegisterExecution(function);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterMegamorphic) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
const int64_t kCompileThreshold = 10;
const int64_t kMinExecutionsPerCompile = 50;
for (int i = 0; i < kCompileThreshold + 1; ++i) {
EXPECT_TRUE(profiler->RegisterCompilation(function, 1, false).ok());
}
profiler->RegisterExecution(function);
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function));
EXPECT_TRUE(stats.is_megamorphic);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));
for (int i = 0; i < kCompileThreshold * kMinExecutionsPerCompile + 1; ++i) {
profiler->RegisterExecution(function);
}
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
TF_ASSERT_OK_AND_ASSIGN(stats, profiler->GetCompileStats(function));
EXPECT_TRUE(stats.is_megamorphic);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterAsync) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
const int64_t kMaxNumOngoingCompilations = 10;
for (int i = 0; i < kMaxNumOngoingCompilations; ++i) {
profiler->IncrementOngoingAsyncCompilations();
}
profiler->RegisterExecution(function);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
profiler->RegisterExecution(function);
EXPECT_FALSE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
profiler->DecrementOngoingAsyncCompilations();
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0));
}
TEST(DeviceCompilationProfilerTest, ShouldCompileClusterLazy) {
DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler();
core::ScopedUnref profiler_ref(profiler);
NameAttrList function;
function.set_name("TestFunc");
constexpr int64_t kDefaultCompilationThreshold = 2;
profiler->RegisterExecution(function);
EXPECT_TRUE(
profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0));
profiler->RegisterExecution(function);
for (int current_request_count = 0;
current_request_count < kDefaultCompilationThreshold;
++current_request_count) {
EXPECT_FALSE(profiler->ShouldCompileCluster(
function, DeviceCompileMode::kLazy, current_request_count));
}
EXPECT_TRUE(profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy,
kDefaultCompilationThreshold));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_profiler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0f85722-0479-4365-86f1-257131d04adf | cpp | tensorflow/tensorflow | deadness_analysis | tensorflow/compiler/jit/deadness_analysis.cc | tensorflow/compiler/jit/deadness_analysis_test.cc | #include "tensorflow/compiler/jit/deadness_analysis.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/deadness_analysis_internal.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
namespace {
using tsl::StatusOr;
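// Base class for deadness predicates over graph edges. Concrete kinds: And,
// Or, Not, AndRecurrence (loop predicates of the form {start,&,step}), Symbol
// (a boolean tensor) and IntSymbol (an int32 tensor compared against a value).
// Instances are created and interned by PredicateFactory, so structurally
// identical predicates can be compared by pointer.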
class Predicate {
public:
enum class Kind { kAnd, kOr, kNot, kAndRecurrence, kSymbol, kIntSymbol };
virtual string ToString() const = 0;
int64_t id() const { return id_; }
virtual absl::Span<Predicate* const> GetOperands() const = 0;
virtual Kind kind() const = 0;
virtual ~Predicate() {}
template <typename FunctionTy>
static void Visit(Predicate* p, const FunctionTy& func);
protected:
explicit Predicate(int64_t id) : id_(id) {}
private:
const int64_t id_;
Predicate(const Predicate&) = delete;
void operator=(const Predicate&) = delete;
};
class AndPredicate : public Predicate {
public:
explicit AndPredicate(int64_t id, std::vector<Predicate*> operands)
: Predicate(id), operands_(std::move(operands)) {}
string ToString() const override {
if (operands().empty()) {
return "#true";
}
std::vector<string> operands_str;
std::transform(operands().begin(), operands().end(),
std::back_inserter(operands_str),
[](Predicate* pred) { return pred->ToString(); });
return absl::StrCat("(", absl::StrJoin(operands_str, " & "), ")");
}
Kind kind() const override { return Kind::kAnd; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
absl::Span<Predicate* const> operands() const { return operands_; }
private:
std::vector<Predicate*> operands_;
};
class OrPredicate : public Predicate {
public:
explicit OrPredicate(int64_t id, std::vector<Predicate*> operands)
: Predicate(id), operands_(std::move(operands)) {}
string ToString() const override {
if (operands().empty()) {
return "#false";
}
std::vector<string> operands_str;
std::transform(operands().begin(), operands().end(),
std::back_inserter(operands_str),
[](Predicate* pred) { return pred->ToString(); });
return absl::StrCat("(", absl::StrJoin(operands_str, " | "), ")");
}
Kind kind() const override { return Kind::kOr; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
absl::Span<Predicate* const> operands() const { return operands_; }
private:
std::vector<Predicate*> operands_;
};
class NotPredicate : public Predicate {
public:
explicit NotPredicate(int64_t id, Predicate* operand)
: Predicate(id), operands_({operand}) {}
string ToString() const override {
return absl::StrCat("~", operand()->ToString());
}
Kind kind() const override { return Kind::kNot; }
Predicate* operand() const { return operands_[0]; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
private:
std::array<Predicate*, 1> operands_;
};
class AndRecurrencePredicate : public Predicate {
public:
explicit AndRecurrencePredicate(int64_t id, Predicate* start, Predicate* step,
std::vector<string> frame)
: Predicate(id), operands_({start, step}), frame_(std::move(frame)) {}
Predicate* start() const { return operands_[0]; }
Predicate* step() const { return operands_[1]; }
absl::Span<const string> frame() const { return frame_; }
string ToString() const override {
return absl::StrCat("{", start()->ToString(), ",&,", step()->ToString(),
"}<", absl::StrJoin(frame(), ";"), ">");
}
Kind kind() const override { return Kind::kAndRecurrence; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
private:
std::array<Predicate*, 2> operands_;
std::vector<string> frame_;
};
class SymbolPredicate : public Predicate {
public:
explicit SymbolPredicate(int64_t id, TensorId tensor_id, bool must_be_true)
: Predicate(id),
tensor_id_(std::move(tensor_id)),
must_be_true_(must_be_true) {}
string ToString() const override {
return must_be_true() ? absl::StrCat("*", tensor_id_.ToString())
: tensor_id_.ToString();
}
Kind kind() const override { return Kind::kSymbol; }
absl::Span<Predicate* const> GetOperands() const override { return {}; }
TensorId tensor_id() const { return tensor_id_; }
bool must_be_true() const { return must_be_true_; }
private:
TensorId tensor_id_;
bool must_be_true_;
};
class IntSymbolPredicate : public Predicate {
public:
explicit IntSymbolPredicate(int64_t id, TensorId tensor_id,
std::optional<int> must_have_value)
: Predicate(id),
tensor_id_(std::move(tensor_id)),
must_have_value_(must_have_value) {}
string ToString() const override {
return must_have_value().has_value()
? absl::StrCat(tensor_id_.ToString(), "=", *must_have_value_)
: tensor_id_.ToString();
}
Kind kind() const override { return Kind::kIntSymbol; }
absl::Span<Predicate* const> GetOperands() const override { return {}; }
TensorId tensor_id() const { return tensor_id_; }
const std::optional<int>& must_have_value() const { return must_have_value_; }
private:
TensorId tensor_id_;
std::optional<int> must_have_value_;
};
template <typename FunctionTy>
void Predicate::Visit(Predicate* p, const FunctionTy& func) {
absl::flat_hash_set<Predicate*> visited;
std::vector<Predicate*> stack;
stack.push_back(p);
visited.insert(p);
while (!stack.empty()) {
Predicate* current = stack.back();
stack.pop_back();
bool done = func(current);
if (done) {
return;
}
for (Predicate* op : current->GetOperands()) {
if (visited.insert(op).second) {
stack.push_back(op);
}
}
}
}
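// Builds and interns Predicate objects so that equivalent predicates share a
// single instance. Not/And/Or construction also performs local simplification
// (double negation, De Morgan, and the rewrites in MakeAndOrImpl), with a
// small stack-depth cap to bound the cost of recursive simplification.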
class PredicateFactory {
public:
Predicate* MakeAndPredicate(absl::Span<Predicate* const> operands) {
return MakeAndOrImpl(operands, true);
}
Predicate* MakeOrPredicate(absl::Span<Predicate* const> operands) {
return MakeAndOrImpl(operands, false);
}
Predicate* MakeNotPredicate(Predicate* pred) {
auto it = make_not_predicate_cache_.find(pred);
if (it != make_not_predicate_cache_.end()) {
return it->second;
}
Predicate* result = MakeNotPredicateImpl(pred);
bool insert_successful =
make_not_predicate_cache_.insert({pred, result}).second;
(void)insert_successful;
DCHECK(insert_successful);
return result;
}
Predicate* MakeAndRecurrencePredicate(Predicate* start, Predicate* step,
std::vector<string> frame) {
SignatureForAndRec signature(start, step, std::move(frame));
auto it = interned_and_rec_instances_.find(signature);
if (it != interned_and_rec_instances_.end()) {
return it->second.get();
}
std::unique_ptr<Predicate> new_pred = Make<AndRecurrencePredicate>(
std::get<0>(signature), std::get<1>(signature), std::get<2>(signature));
Predicate* new_pred_ptr = new_pred.get();
bool inserted =
interned_and_rec_instances_.emplace(signature, std::move(new_pred))
.second;
(void)inserted;
DCHECK(inserted);
return new_pred_ptr;
}
Status MakeSymbolPredicate(Node* node, int output_idx, bool must_be_true,
Predicate** predicate) {
TensorId tensor_id(node->name(), output_idx);
bool is_boolean_tensor =
BaseType(node->output_type(tensor_id.index())) == DT_BOOL;
TF_RET_CHECK(!must_be_true || is_boolean_tensor);
if (node->type_string() == "Const" && must_be_true) {
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "value", &proto));
Tensor tensor(proto->dtype());
TF_RET_CHECK(tensor.FromProto(*proto));
*predicate = tensor.scalar<bool>()() ? MakeTrue() : MakeFalse();
return absl::OkStatus();
}
SignatureForSymbol signature = {tensor_id, must_be_true};
auto it = interned_symbol_instances_.find(signature);
if (it == interned_symbol_instances_.end()) {
std::unique_ptr<Predicate> new_pred =
Make<SymbolPredicate>(tensor_id, must_be_true);
Predicate* new_pred_ptr = new_pred.get();
interned_symbol_instances_.emplace(std::move(signature),
std::move(new_pred));
*predicate = new_pred_ptr;
} else {
*predicate = it->second.get();
}
return absl::OkStatus();
}
Status MakeSymbolPredicate(Node* node, int output_idx,
std::optional<int> must_have_value,
Predicate** predicate) {
TensorId tensor_id(node->name(), output_idx);
TF_RET_CHECK(BaseType(node->output_type(tensor_id.index())) == DT_INT32);
if (must_have_value.has_value() && node->type_string() == "Const") {
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "value", &proto));
Tensor tensor(proto->dtype());
TF_RET_CHECK(tensor.FromProto(*proto));
*predicate = tensor.scalar<int32>()() == *must_have_value ? MakeTrue()
: MakeFalse();
return absl::OkStatus();
}
SignatureForIntSymbol signature = {tensor_id, must_have_value};
auto it = interned_int_symbol_instances_.find(signature);
if (it == interned_int_symbol_instances_.end()) {
std::unique_ptr<Predicate> new_pred =
Make<IntSymbolPredicate>(tensor_id, must_have_value);
Predicate* new_pred_ptr = new_pred.get();
interned_int_symbol_instances_.emplace(std::move(signature),
std::move(new_pred));
*predicate = new_pred_ptr;
} else {
*predicate = it->second.get();
}
return absl::OkStatus();
}
Predicate* MakeTrue() { return MakeAndPredicate({}); }
Predicate* MakeFalse() { return MakeOrPredicate({}); }
~PredicateFactory() {
DCHECK_EQ(stack_depth_, 0) << "Unnested IncrementStackDepth?";
}
private:
Predicate* MakeNotPredicateImpl(Predicate* pred) {
IncrementStackDepth stack_frame(this);
if (!stack_frame.HasOverflowed()) {
if (Predicate* simplified = SimplifyUsingDeMorgan(pred)) {
return simplified;
}
if (auto* not_pred = dynamic_cast<NotPredicate*>(pred)) {
return not_pred->operand();
}
}
SignatureForNot signature = pred;
auto it = interned_not_instances_.find(signature);
if (it == interned_not_instances_.end()) {
std::unique_ptr<Predicate> new_pred = Make<NotPredicate>(pred);
Predicate* new_pred_ptr = new_pred.get();
interned_not_instances_.emplace(signature, std::move(new_pred));
return new_pred_ptr;
} else {
return it->second.get();
}
}
Predicate* SimplifyUsingDeMorgan(Predicate* pred) {
Predicate::Kind kind = pred->kind();
if (kind == Predicate::Kind::kAnd || kind == Predicate::Kind::kOr) {
std::vector<Predicate*> new_operands;
absl::c_transform(pred->GetOperands(), std::back_inserter(new_operands),
[&](Predicate* p) { return MakeNotPredicate(p); });
return kind == Predicate::Kind::kOr ? MakeAndPredicate(new_operands)
: MakeOrPredicate(new_operands);
}
return nullptr;
}
template <typename PredicateT, typename... Args>
std::unique_ptr<Predicate> Make(Args&&... args) {
return std::unique_ptr<PredicateT>(
new PredicateT(id_counter_++, std::forward<Args>(args)...));
}
Predicate* MakeAndOrImpl(absl::Span<Predicate* const> operands, bool is_and);
Predicate* MakeInternedAndOr(std::vector<Predicate*> simplified_ops,
Predicate::Kind pred_kind);
using SignatureForAndOr =
std::pair<Predicate::Kind, absl::Span<Predicate* const>>;
using SignatureForNot = Predicate*;
using SignatureForAndRec =
std::tuple<Predicate*, Predicate*, std::vector<string>>;
using SignatureForSymbol = std::pair<SafeTensorId, bool>;
using SignatureForIntSymbol = std::pair<SafeTensorId, std::optional<int32>>;
struct HashSignatureForAndOr {
size_t operator()(const SignatureForAndOr& signature) const {
size_t hash = ::tensorflow::hash<Predicate::Kind>()(signature.first);
for (Predicate* p : signature.second) {
hash = Hash64Combine(hash, ::tensorflow::hash<Predicate*>()(p));
}
return hash;
}
};
struct HashSignatureForSymbol {
size_t operator()(const SignatureForSymbol& signature) const {
return Hash64Combine(SafeTensorId::Hasher()(signature.first),
::tensorflow::hash<bool>()(signature.second));
}
};
struct HashSignatureForIntSymbol {
size_t operator()(const SignatureForIntSymbol& signature) const {
return Hash64Combine(
SafeTensorId::Hasher()(signature.first),
Hash64Combine(
::tensorflow::hash<bool>()(signature.second.has_value()),
::tensorflow::hash<int32>()(
signature.second.has_value() ? *signature.second : 0)));
}
};
class IncrementStackDepth {
public:
explicit IncrementStackDepth(PredicateFactory* parent) : parent_(parent) {
parent_->stack_depth_++;
}
bool HasOverflowed() const {
const int kMaxStackDepth = 8;
return parent_->stack_depth_ >= kMaxStackDepth;
}
~IncrementStackDepth() { parent_->stack_depth_--; }
private:
PredicateFactory* parent_;
};
absl::flat_hash_map<Predicate*, Predicate*> make_not_predicate_cache_;
absl::flat_hash_map<SignatureForAndOr, std::unique_ptr<Predicate>,
HashSignatureForAndOr>
interned_and_or_instances_;
absl::flat_hash_map<SignatureForNot, std::unique_ptr<Predicate>>
interned_not_instances_;
absl::flat_hash_map<SignatureForAndRec, std::unique_ptr<Predicate>>
interned_and_rec_instances_;
absl::flat_hash_map<SignatureForSymbol, std::unique_ptr<Predicate>,
HashSignatureForSymbol>
interned_symbol_instances_;
absl::flat_hash_map<SignatureForIntSymbol, std::unique_ptr<Predicate>,
HashSignatureForIntSymbol>
interned_int_symbol_instances_;
int64_t id_counter_ = 0;
int stack_depth_ = 0;
};
Predicate* PredicateFactory::MakeInternedAndOr(
std::vector<Predicate*> simplified_ops, Predicate::Kind pred_kind) {
std::stable_sort(
simplified_ops.begin(), simplified_ops.end(),
[](Predicate* a, Predicate* b) { return a->id() < b->id(); });
auto it = interned_and_or_instances_.find({pred_kind, simplified_ops});
if (it != interned_and_or_instances_.end()) {
return it->second.get();
}
simplified_ops.shrink_to_fit();
absl::Span<Predicate* const> operands_slice = simplified_ops;
std::unique_ptr<Predicate> new_pred =
pred_kind == Predicate::Kind::kAnd
? Make<AndPredicate>(std::move(simplified_ops))
: Make<OrPredicate>(std::move(simplified_ops));
Predicate* new_pred_ptr = new_pred.get();
interned_and_or_instances_.emplace(
SignatureForAndOr(pred_kind, operands_slice), std::move(new_pred));
return new_pred_ptr;
}
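// Core conjunction/disjunction builder: duplicates are dropped, nested
// operands of the same kind are flattened, an operand together with its
// negation collapses the whole expression to false/true, a conjunction that
// contains the negation of an AndRecurrence's step collapses that recurrence
// to its start, and operands common to all inner terms of the opposite kind
// are factored out before the result is interned.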
Predicate* PredicateFactory::MakeAndOrImpl(
absl::Span<Predicate* const> operands, bool is_and) {
Predicate::Kind pred_kind =
is_and ? Predicate::Kind::kAnd : Predicate::Kind::kOr;
IncrementStackDepth stack_frame(this);
if (stack_frame.HasOverflowed()) {
return MakeInternedAndOr(
std::vector<Predicate*>(operands.begin(), operands.end()), pred_kind);
}
Predicate::Kind other_pred_kind =
is_and ? Predicate::Kind::kOr : Predicate::Kind::kAnd;
absl::flat_hash_set<Predicate*> simplified_ops_set;
std::vector<Predicate*> simplified_ops;
for (Predicate* op : operands) {
if (!simplified_ops_set.insert(op).second) {
continue;
}
if (op->kind() == pred_kind) {
for (Predicate* subop : op->GetOperands()) {
if (simplified_ops_set.insert(subop).second) {
simplified_ops.push_back(subop);
}
}
} else {
simplified_ops.push_back(op);
}
}
if (simplified_ops.size() == 1) {
return simplified_ops[0];
}
absl::flat_hash_set<Predicate*> negated_ops;
for (Predicate* op : simplified_ops) {
if (negated_ops.count(op)) {
return is_and ? MakeFalse() : MakeTrue();
}
Predicate* negated_op = MakeNotPredicate(op);
if (negated_op->kind() == pred_kind) {
if (absl::c_all_of(negated_op->GetOperands(), [&](Predicate* p) {
return simplified_ops_set.contains(p);
})) {
return is_and ? MakeFalse() : MakeTrue();
}
}
negated_ops.insert(negated_op);
}
if (is_and) {
absl::flat_hash_set<Predicate*> to_remove;
std::vector<Predicate*> to_add;
for (Predicate* op : simplified_ops) {
if (op->kind() == Predicate::Kind::kAndRecurrence) {
auto* and_rec = static_cast<AndRecurrencePredicate*>(op);
if (negated_ops.contains(and_rec->step())) {
to_remove.insert(and_rec);
to_remove.insert(MakeNotPredicate(and_rec->step()));
to_add.push_back(and_rec->start());
}
}
}
auto it = simplified_ops.begin();
while (it != simplified_ops.end()) {
if (to_remove.contains(*it)) {
it = simplified_ops.erase(it);
} else {
++it;
}
}
simplified_ops.insert(simplified_ops.end(), to_add.begin(), to_add.end());
}
std::vector<Predicate*> common_inner_operands;
absl::flat_hash_set<Predicate*> common_inner_operands_set;
for (Predicate* op : simplified_ops) {
if (op->kind() != other_pred_kind) {
common_inner_operands.clear();
break;
}
if (common_inner_operands.empty()) {
common_inner_operands.insert(common_inner_operands.end(),
op->GetOperands().begin(),
op->GetOperands().end());
} else {
common_inner_operands.clear();
absl::c_copy_if(op->GetOperands(),
std::back_inserter(common_inner_operands),
[&](Predicate* sub_op) {
return common_inner_operands_set.count(sub_op) == 1;
});
}
if (common_inner_operands.empty()) break;
common_inner_operands_set.clear();
common_inner_operands_set.insert(common_inner_operands.begin(),
common_inner_operands.end());
}
if (common_inner_operands.empty()) {
return MakeInternedAndOr(std::move(simplified_ops), pred_kind);
}
std::vector<Predicate*> factored_ops;
for (Predicate* op : simplified_ops) {
std::vector<Predicate*> new_sub_op_ops;
absl::c_copy_if(op->GetOperands(), std::back_inserter(new_sub_op_ops),
[&](Predicate* sub_op) {
return std::find(common_inner_operands.begin(),
common_inner_operands.end(),
sub_op) == common_inner_operands.end();
});
factored_ops.push_back(MakeAndOrImpl(new_sub_op_ops, !is_and));
}
Predicate* new_inner_op = MakeAndOrImpl(factored_ops, is_and);
std::vector<Predicate*> outer_ops;
outer_ops.push_back(new_inner_op);
outer_ops.insert(outer_ops.end(), common_inner_operands.begin(),
common_inner_operands.end());
return MakeAndOrImpl(outer_ops, !is_and);
}
class DeadnessAnalysisImpl : public DeadnessAnalysis {
public:
explicit DeadnessAnalysisImpl(const Graph* graph)
: graph_(*graph), vlog_(VLOG_IS_ON(2)) {}
Status Populate(bool enable_optimistic);
Status PopulateFrame(absl::Span<Node* const> topo, bool use_optimistic_mode,
bool* success);
absl::StatusOr<DeadnessAnalysis::DeadnessPredicate> GetPredicateFor(
Node* n, int oidx) const override;
void Print() const override;
absl::flat_hash_map<TensorId, string, TensorId::Hasher> PredicateMapAsString()
const;
private:
enum class EdgeKind { kDataAndControl, kDataOnly, kControlOnly };
Status GetInputPreds(Node* n, EdgeKind edge_kind,
std::vector<Predicate*>* result);
void SetPredicate(Node* n, int output_idx, Predicate* pred,
std::vector<bool>* should_revisit) {
auto insert_result =
predicate_map_.insert({TensorId(n->name(), output_idx), pred});
if (!insert_result.second && insert_result.first->second != pred) {
VLOG(4) << "For " << n->name() << ":" << output_idx << " from "
<< insert_result.first->second->ToString() << " "
<< insert_result.first->second << " to " << pred->ToString()
<< " " << pred;
insert_result.first->second = pred;
if (should_revisit != nullptr) {
for (const Edge* e : n->out_edges()) {
(*should_revisit)[e->dst()->id()] = true;
}
}
}
}
void SetPredicate(Node* n, absl::Span<const int> output_idxs, Predicate* pred,
std::vector<bool>* should_revisit) {
for (int output_idx : output_idxs) {
SetPredicate(n, output_idx, pred, should_revisit);
}
}
Status HandleSwitch(Node* n, std::vector<bool>* should_revisit);
Status HandleMerge(Node* n, std::vector<bool>* should_revisit,
bool use_optimistic_mode);
Status HandleRecv(Node* n, std::vector<bool>* should_revisit);
Status HandleGeneric(Node* n, std::vector<bool>* should_revisit);
Status HandleNode(Node* n, std::vector<bool>* should_revisit,
bool use_optimistic_mode = false);
Status GetFrameBasedTopologicalOrder(std::vector<Node*>* order);
bool IsRootEnter(const Node* n) const {
return IsEnter(n) && control_flow_info_[n->id()].parent_frame->IsSource();
}
bool IsRootExit(const Node* n) const {
return IsExit(n) && control_flow_info_[n->id()].parent_frame->IsSource();
}
const Graph& graph_;
absl::flat_hash_map<TensorId, Predicate*, TensorId::Hasher> predicate_map_;
PredicateFactory predicate_factory_;
std::vector<ControlFlowInfo> control_flow_info_;
bool vlog_;
absl::flat_hash_map<absl::string_view, Node*> frame_to_merge_node_;
};
TensorId InputEdgeToTensorId(const Edge* e) {
return TensorId(e->src()->name(), e->src_output());
}
Status DeadnessAnalysisImpl::GetInputPreds(
Node* n, DeadnessAnalysisImpl::EdgeKind edge_kind,
std::vector<Predicate*>* result) {
result->clear();
for (const Edge* in_edge : n->in_edges()) {
bool should_process =
edge_kind == EdgeKind::kDataAndControl ||
(in_edge->IsControlEdge() && edge_kind == EdgeKind::kControlOnly) ||
(!in_edge->IsControlEdge() && edge_kind == EdgeKind::kDataOnly);
if (should_process) {
auto it = predicate_map_.find(InputEdgeToTensorId(in_edge));
if (it == predicate_map_.end()) {
xla::GraphCycles graph_cycles;
TF_RETURN_IF_ERROR(
CreateCycleDetectionGraph(&graph_, &graph_cycles).status());
return errors::Internal("Could not find input ", in_edge->DebugString(),
" to ", n->name(),
" when visiting the graph in post-order. Most "
"likely indicates a bug in deadness analysis.");
}
result->push_back(it->second);
}
}
return absl::OkStatus();
}
Status DeadnessAnalysisImpl::HandleSwitch(Node* n,
std::vector<bool>* should_revisit) {
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataAndControl, &input_preds));
const Edge* pred_edge;
TF_RETURN_IF_ERROR(n->input_edge(1, &pred_edge));
if (n->type_string() != "_SwitchN") {
Predicate* true_switch;
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
pred_edge->src(), pred_edge->src_output(),
        /*must_be_true=*/true, &true_switch));
Predicate* false_switch = predicate_factory_.MakeNotPredicate(true_switch);
input_preds.push_back(false_switch);
SetPredicate(n, 0, predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
input_preds.pop_back();
input_preds.push_back(true_switch);
SetPredicate(n, 1, predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
input_preds.pop_back();
} else {
Predicate* branch_pred;
for (int i = 0; i < n->num_outputs() - 1; i++) {
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
pred_edge->src(), pred_edge->src_output(),
std::optional<int32>(i), &branch_pred));
input_preds.push_back(branch_pred);
SetPredicate(n, i, predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
input_preds.pop_back();
input_preds.push_back(predicate_factory_.MakeNotPredicate(branch_pred));
}
SetPredicate(n, n->num_outputs() - 1,
predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
}
SetPredicate(n, Graph::kControlSlot,
predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
return absl::OkStatus();
}
namespace {
Status CreateMultipleNextIterationInputsError(Node* merge) {
std::vector<string> backedges;
for (const Edge* backedge : merge->in_edges()) {
if (backedge->src()->IsNextIteration()) {
backedges.push_back(absl::StrCat(" ", SummarizeNode(*backedge->src())));
}
}
return errors::InvalidArgument(
"Multiple NextIteration inputs to merge node ",
FormatNodeForError(*merge), ": \n", absl::StrJoin(backedges, "\n"),
"\nMerge nodes can have at most one incoming NextIteration edge.");
}
Status FindUniqueBackedge(Node* merge, const Edge** result) {
*result = nullptr;
CHECK(merge->IsMerge());
for (const Edge* e : merge->in_edges()) {
if (e->src()->IsNextIteration()) {
if (*result != nullptr) {
return CreateMultipleNextIterationInputsError(merge);
}
*result = e;
}
}
return absl::OkStatus();
}
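// Given the symbolic predicate assigned to a Merge and the predicate carried
// by its back-edge, tries to peel the Merge symbol out of the back-edge
// conjunction; the remaining conjuncts form the recurrence's step predicate.
// Returns nullptr when the back-edge predicate is not a conjunction that
// mentions the symbol only at its top level.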
Predicate* DeduceStepPredicate(PredicateFactory* predicate_factory,
Predicate* symbolic_predicate,
Predicate* backedge_predicate) {
CHECK(dynamic_cast<SymbolPredicate*>(symbolic_predicate));
if (backedge_predicate->kind() != Predicate::Kind::kAnd) {
return nullptr;
}
std::vector<Predicate*> and_ops;
absl::Span<Predicate* const> recurrent_pred_ops =
backedge_predicate->GetOperands();
bool found_sym = false;
for (Predicate* and_op : recurrent_pred_ops) {
if (and_op == symbolic_predicate) {
found_sym = true;
continue;
}
bool found_sym_as_inner_operand = false;
auto has_self_as_inner_operand = [&](Predicate* p) {
if (p == symbolic_predicate) {
found_sym_as_inner_operand = true;
return true;
}
return false;
};
Predicate::Visit(and_op, has_self_as_inner_operand);
if (found_sym_as_inner_operand) {
return nullptr;
}
and_ops.push_back(and_op);
}
return found_sym ? predicate_factory->MakeAndPredicate(and_ops) : nullptr;
}
Status GetFullFrame(const Node* n, absl::Span<const ControlFlowInfo> cfi_infos,
std::vector<string>* frame) {
int depth = 0;
for (const ControlFlowInfo* cfi_iter = &cfi_infos[n->id()]; !n->IsSource();
n = cfi_iter->parent_frame, cfi_iter = &cfi_infos[n->id()]) {
frame->push_back(cfi_iter->frame_name);
if (depth++ > 5000) {
return errors::Internal(
"Frame of depth > 5000: Probably malformed graph or a bug in "
"BuildControlFlowInfo");
}
}
return absl::OkStatus();
}
Status GetRootFrame(const Node* n, absl::Span<const ControlFlowInfo> cfi_infos,
absl::string_view* frame) {
int depth = 0;
const ControlFlowInfo* cfi_iter = &cfi_infos[n->id()];
while (!cfi_iter->parent_frame->IsSource()) {
n = cfi_iter->parent_frame;
cfi_iter = &cfi_infos[n->id()];
if (depth++ > 5000) {
return errors::Internal(
"Frame of depth > 5000: Probably malformed graph or a bug in "
"BuildControlFlowInfo");
}
}
*frame = cfi_iter->frame_name;
return absl::OkStatus();
}
}
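// Assigns predicates to Merge outputs. On the first visit, a Merge whose
// back-edge has not been processed yet gets a fresh symbol (in optimistic mode
// one shared symbol per frame); a Merge with no pending back-edge gets the
// disjunction of its data inputs. On revisit, if the stored predicate is still
// a symbol and a step predicate can be deduced from the unique back-edge, it
// is replaced by the AndRecurrence {start,&,step}.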
Status DeadnessAnalysisImpl::HandleMerge(Node* n,
std::vector<bool>* should_revisit,
bool use_optimistic_mode) {
bool has_unvisited_backedge = false;
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() && e->src()->IsNextIteration()) {
has_unvisited_backedge |= !predicate_map_.count(InputEdgeToTensorId(e));
}
}
auto it = predicate_map_.find(TensorId(n->name(), 0));
if (it == predicate_map_.end()) {
if (has_unvisited_backedge) {
Predicate* input_data_pred;
if (use_optimistic_mode) {
absl::string_view frame_name = control_flow_info_[n->id()].frame_name;
auto insert_result = frame_to_merge_node_.insert({frame_name, n});
Node* representative = insert_result.first->second;
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
          representative, /*output_idx=*/0, /*must_be_true=*/false,
&input_data_pred));
} else {
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
            n, /*output_idx=*/0, /*must_be_true=*/false, &input_data_pred));
}
SetPredicate(n, {0, 1, Graph::kControlSlot}, input_data_pred,
should_revisit);
return absl::OkStatus();
}
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataOnly, &input_preds));
Predicate* input_data_pred =
predicate_factory_.MakeOrPredicate(input_preds);
SetPredicate(n, {0, 1, Graph::kControlSlot}, input_data_pred,
should_revisit);
return absl::OkStatus();
}
if (it->second->kind() == Predicate::Kind::kSymbol) {
const Edge* unique_backedge;
TF_RETURN_IF_ERROR(FindUniqueBackedge(n, &unique_backedge));
if (unique_backedge) {
if (Predicate* step = DeduceStepPredicate(
&predicate_factory_, it->second,
predicate_map_[InputEdgeToTensorId(unique_backedge)])) {
std::vector<Predicate*> non_recurrent_inputs;
for (const Edge* e : n->in_edges()) {
if (e != unique_backedge) {
non_recurrent_inputs.push_back(
predicate_map_[InputEdgeToTensorId(e)]);
}
}
Predicate* start =
predicate_factory_.MakeOrPredicate(non_recurrent_inputs);
std::vector<string> frame;
TF_RETURN_IF_ERROR(GetFullFrame(n, control_flow_info_, &frame));
Predicate* and_rec = predicate_factory_.MakeAndRecurrencePredicate(
start, step, std::move(frame));
SetPredicate(n, {0, 1, Graph::kControlSlot}, and_rec, should_revisit);
return absl::OkStatus();
}
}
}
return absl::OkStatus();
}
Status DeadnessAnalysisImpl::HandleRecv(Node* n,
std::vector<bool>* should_revisit) {
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataAndControl, &input_preds));
Predicate* signal_is_alive;
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
      n, /*output_idx=*/0, /*must_be_true=*/false, &signal_is_alive));
input_preds.push_back(signal_is_alive);
SetPredicate(n, {0, Graph::kControlSlot},
predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
return absl::OkStatus();
}
Status DeadnessAnalysisImpl::HandleGeneric(Node* n,
std::vector<bool>* should_revisit) {
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataAndControl, &input_preds));
Predicate* pred = predicate_factory_.MakeAndPredicate(input_preds);
for (int output_idx = 0; output_idx < n->num_outputs(); output_idx++) {
SetPredicate(n, output_idx, pred, should_revisit);
}
SetPredicate(n, Graph::kControlSlot, pred, should_revisit);
return absl::OkStatus();
}
Status DeadnessAnalysisImpl::HandleNode(Node* n,
std::vector<bool>* should_revisit,
bool use_optimistic_mode) {
if (n->IsSwitch()) {
TF_RETURN_IF_ERROR(HandleSwitch(n, should_revisit));
} else if (n->IsMerge()) {
TF_RETURN_IF_ERROR(HandleMerge(n, should_revisit, use_optimistic_mode));
} else if (n->IsControlTrigger()) {
SetPredicate(n, Graph::kControlSlot, predicate_factory_.MakeTrue(),
nullptr);
} else if (n->IsRecv() || n->IsHostRecv()) {
TF_RETURN_IF_ERROR(HandleRecv(n, should_revisit));
} else if (n->IsNextIteration()) {
TF_RETURN_IF_ERROR(HandleGeneric(n, should_revisit));
} else {
TF_RETURN_IF_ERROR(HandleGeneric(n, should_revisit));
}
return absl::OkStatus();
}
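// Computes a topological order in which every loop frame occupies one
// contiguous span: back edges (NextIteration->Merge) are not counted as
// dependencies, root-frame Enters are released only once all Enters of that
// frame are ready, and Exits only once all of the frame's Exits are ready, so
// each while-loop body can be analyzed as a unit.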
Status DeadnessAnalysisImpl::GetFrameBasedTopologicalOrder(
std::vector<Node*>* order) {
absl::flat_hash_map<absl::string_view, size_t> num_enters_for_frame;
absl::flat_hash_map<absl::string_view, size_t> num_exits_for_frame;
std::vector<size_t> num_ready_inputs(graph_.num_node_ids(), 0);
Node* src_node = graph_.source_node();
for (const auto* node : graph_.op_nodes()) {
const ControlFlowInfo& cf = control_flow_info_[node->id()];
if (IsRootEnter(node)) {
++num_enters_for_frame[cf.frame_name];
} else if (IsRootExit(node)) {
++num_exits_for_frame[cf.frame_name];
}
if (IsMerge(node)) {
for (const Edge* e : node->in_edges()) {
if (IsNextIteration(e->src())) {
++num_ready_inputs[node->id()];
}
}
}
}
std::deque<Node*> ready;
ready.push_back(src_node);
absl::flat_hash_map<absl::string_view, std::vector<Node*>>
ready_enters_per_frame;
std::vector<Node*> ready_exits;
while (!ready.empty()) {
Node* curr_node = ready.front();
ready.pop_front();
VLOG(4) << "Visiting " << curr_node->name();
order->push_back(curr_node);
for (const Edge* out_edge : curr_node->out_edges()) {
Node* out = out_edge->dst();
int out_id = out->id();
if (IsNextIteration(curr_node) && IsMerge(out)) {
continue;
}
++num_ready_inputs[out->id()];
if (!out->IsOp()) continue;
if (num_ready_inputs[out->id()] != out->in_edges().size()) continue;
absl::string_view frame_name = control_flow_info_[out_id].frame_name;
if (IsRootEnter(out)) {
ready_enters_per_frame[frame_name].push_back(out);
} else if (IsRootExit(out)) {
ready_exits.push_back(out);
} else {
ready.push_back(out);
}
}
if (ready.empty()) {
if (!ready_exits.empty()) {
absl::string_view frame_name =
control_flow_info_[ready_exits.front()->id()].frame_name;
CHECK_EQ(ready_exits.size(), num_exits_for_frame[frame_name]);
ready.insert(ready.end(), ready_exits.begin(), ready_exits.end());
ready_exits.clear();
} else {
for (auto iter = ready_enters_per_frame.begin();
iter != ready_enters_per_frame.end(); ++iter) {
absl::string_view frame_name = iter->first;
const std::vector<Node*>& ready_enters = iter->second;
if (ready_enters.size() == num_enters_for_frame[frame_name]) {
ready.insert(ready.end(), ready_enters.begin(), ready_enters.end());
ready_enters_per_frame.erase(iter);
break;
}
}
}
}
}
if (!ready_enters_per_frame.empty() || !ready_exits.empty()) {
return errors::InvalidArgument(
"Some enters/exits have never been visited in the traversal."
" Most probably the input graph is malformed.");
}
return absl::OkStatus();
}
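// Top-level driver: builds control-flow info, computes the frame-based order,
// then populates predicates frame by frame. When enabled, each loop frame is
// first tried in optimistic mode (one shared symbol per frame); if that pass
// does not converge to AndRecurrence predicates, the frame is redone in the
// pessimistic per-Merge mode.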
Status DeadnessAnalysisImpl::Populate(bool enable_optimistic) {
std::vector<string> unreachable_nodes;
TF_RETURN_IF_ERROR(
BuildControlFlowInfo(&graph_, &control_flow_info_, &unreachable_nodes));
if (!unreachable_nodes.empty()) {
if (unreachable_nodes.size() > 5) {
unreachable_nodes.erase(unreachable_nodes.begin() + 5,
unreachable_nodes.end());
}
return errors::InvalidArgument(
"Found unreachable nodes, most likely source and sink nodes not "
"connected: ",
absl::StrJoin(unreachable_nodes, ", "));
}
std::vector<Node*> topo;
TF_RETURN_IF_ERROR(GetFrameBasedTopologicalOrder(&topo));
size_t frame_start = 0;
while (frame_start < topo.size()) {
absl::string_view cur_frame_name;
TF_RETURN_IF_ERROR(
GetRootFrame(topo[frame_start], control_flow_info_, &cur_frame_name));
size_t frame_end = frame_start;
for (size_t i = frame_start + 1; i < topo.size(); ++i) {
absl::string_view i_frame_name;
TF_RETURN_IF_ERROR(
GetRootFrame(topo[i], control_flow_info_, &i_frame_name));
if (i_frame_name == cur_frame_name) {
frame_end = i;
} else {
break;
}
}
absl::Span<Node*> sub_topo(topo.data() + frame_start,
frame_end - frame_start + 1);
frame_start = frame_end + 1;
bool success = false;
if (enable_optimistic && !cur_frame_name.empty()) {
      TF_RETURN_IF_ERROR(
          PopulateFrame(sub_topo, /*use_optimistic_mode=*/true, &success));
}
if (!success) {
      TF_RETURN_IF_ERROR(
          PopulateFrame(sub_topo, /*use_optimistic_mode=*/false, nullptr));
}
VLOG(2) << "Done populating frame " << cur_frame_name << " using the "
<< (success ? "optimistic" : "pessimistic") << " mode.";
}
return absl::OkStatus();
}
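// Computes predicates for every node of a single frame, given in topological
// order. Nodes are visited once, then Merge nodes fed by a NextIteration node
// are revisited so the back-edge contribution is folded in. In the optimistic
// mode the result is only kept if every Merge with a unique back-edge maps to
// an AndRecurrence predicate and all such Merges in the frame agree on it;
// otherwise the partial predicates are erased and *success is set to false so
// the caller can fall back to the pessimistic mode.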
Status DeadnessAnalysisImpl::PopulateFrame(absl::Span<Node* const> topo,
bool use_optimistic_mode,
bool* success) {
  CHECK((use_optimistic_mode && success != nullptr) ||
        (!use_optimistic_mode && success == nullptr));
std::vector<bool> should_revisit;
should_revisit.resize(graph_.num_node_ids());
for (Node* n : topo) {
VLOG(4) << "Visiting " << n->name();
    TF_RETURN_IF_ERROR(
        HandleNode(n, /*should_revisit=*/nullptr, use_optimistic_mode));
if (n->IsNextIteration()) {
for (const Edge* e : n->out_edges()) {
if (e->dst()->IsMerge()) {
should_revisit[e->dst()->id()] = true;
}
}
}
}
for (Node* n : topo) {
if (should_revisit[n->id()]) {
VLOG(4) << "Revisiting " << n->name();
TF_RETURN_IF_ERROR(HandleNode(n, &should_revisit));
}
}
if (use_optimistic_mode) {
bool is_converged = true;
absl::flat_hash_map<absl::string_view, Predicate*> frame_to_pred;
for (Node* n : topo) {
if (!n->IsMerge()) {
continue;
}
const Edge* e;
TF_RETURN_IF_ERROR(FindUniqueBackedge(n, &e));
if (e == nullptr) {
continue;
}
Node* merge = n;
absl::string_view frame_name = control_flow_info_[merge->id()].frame_name;
auto it = predicate_map_.find(TensorId(merge->name(), 0));
Predicate* merge_pred = it->second;
if (merge_pred->kind() != Predicate::Kind::kAndRecurrence) {
is_converged = false;
VLOG(2) << "Running the optimistic mode on frame " << frame_name
<< " does not converge because node " << merge->name()
<< " cannot be mapped into the AndRecurrence form.";
break;
}
auto insert_result = frame_to_pred.insert({frame_name, merge_pred});
if (!insert_result.second) {
Predicate* curr_andrec = merge_pred;
Predicate* prev_andrec = insert_result.first->second;
if (curr_andrec != prev_andrec) {
is_converged = false;
VLOG(2) << "Running the optimistic mode on frame " << frame_name
<< " does not converge. Seeing different Merge predicates: \n"
<< curr_andrec->ToString() << " and \n"
<< prev_andrec->ToString();
break;
}
}
}
if (!is_converged) {
for (Node* n : topo) {
for (int oid = 0; oid < n->num_outputs(); ++oid) {
predicate_map_.erase(TensorId(n->name(), oid));
}
predicate_map_.erase(TensorId(n->name(), Graph::kControlSlot));
}
}
if (success != nullptr) {
*success = is_converged;
}
}
return absl::OkStatus();
}
absl::StatusOr<DeadnessAnalysis::DeadnessPredicate>
DeadnessAnalysisImpl::GetPredicateFor(Node* n, int oidx) const {
auto it = predicate_map_.find(TensorId(n->name(), oidx));
TF_RET_CHECK(it != predicate_map_.end())
<< "could not find " << TensorId(n->name(), oidx).ToString()
<< " in predicate map";
return MakeDeadnessPredicate(it->second);
}
void DeadnessAnalysisImpl::Print() const {
std::vector<TensorId> tensor_ids;
tensor_ids.reserve(predicate_map_.size());
for (const auto& kv_pair : predicate_map_) {
tensor_ids.push_back(kv_pair.first);
}
std::sort(tensor_ids.begin(), tensor_ids.end());
for (TensorId tensor_id : tensor_ids) {
auto it = predicate_map_.find(tensor_id);
CHECK(it != predicate_map_.end()) << tensor_id.ToString();
VLOG(2) << tensor_id.ToString() << " -> " << it->second->ToString();
}
}
}
DeadnessAnalysis::~DeadnessAnalysis() {}
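// Runs the analysis over `graph` with the optimistic mode enabled and hands
// the populated implementation back through `result`.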
Status DeadnessAnalysis::Run(
const Graph& graph, std::unique_ptr<DeadnessAnalysis>* result) {
std::unique_ptr<DeadnessAnalysisImpl> analysis(
new DeadnessAnalysisImpl(&graph));
TF_RETURN_IF_ERROR(analysis->Populate(true));
if (VLOG_IS_ON(2)) {
analysis->Print();
}
*result = std::move(analysis);
return absl::OkStatus();
}
absl::flat_hash_map<TensorId, string, TensorId::Hasher>
DeadnessAnalysisImpl::PredicateMapAsString() const {
absl::flat_hash_map<TensorId, string, TensorId::Hasher> result;
for (const auto& kv_pair : predicate_map_) {
CHECK(result.insert({kv_pair.first, kv_pair.second->ToString()}).second);
}
return result;
}
namespace deadness_analysis_internal {
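// Helper used by the tests below: runs the analysis end to end and exposes
// the string form of the computed predicate map.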
Status ComputePredicates(const Graph& graph, PredicateMapTy* out_predicate_map,
bool enable_optimistic) {
DeadnessAnalysisImpl impl(&graph);
TF_RETURN_IF_ERROR(impl.Populate(enable_optimistic));
*out_predicate_map = impl.PredicateMapAsString();
return absl::OkStatus();
}
}
string DeadnessAnalysis::DebugString(DeadnessPredicate predicate) const {
return static_cast<Predicate*>(predicate.pred_)->ToString();
}
} | #include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/deadness_analysis_internal.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
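// Returns true if the analysis assigned different deadness predicates to at
// least two inputs of `n`.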
absl::StatusOr<bool> HasInputsWithMismatchingDeadness(
const DeadnessAnalysis& deadness_analysis, const Node& n) {
std::optional<DeadnessAnalysis::DeadnessPredicate> pred;
for (const Edge* edge : n.in_edges()) {
TF_ASSIGN_OR_RETURN(
DeadnessAnalysis::DeadnessPredicate this_pred,
deadness_analysis.GetPredicateFor(edge->src(), edge->src_output()));
if (pred && *pred != this_pred) {
return true;
}
pred = this_pred;
}
return false;
}
using deadness_analysis_internal::ComputePredicates;
using deadness_analysis_internal::PredicateMapTy;
Status AnalyzeDeadness(Graph* graph,
std::unique_ptr<DeadnessAnalysis>* result) {
FixupSourceAndSinkEdges(graph);
return DeadnessAnalysis::Run(*graph, result);
}
ops::Switch CreateSwitch(const Scope& root, const string& prefix) {
Output value = ops::Placeholder(root.WithOpName(prefix + "/value"), DT_FLOAT);
Output predicate =
ops::Placeholder(root.WithOpName(prefix + "/pred"), DT_BOOL);
return ops::Switch(root.WithOpName(prefix + "/switch"), value, predicate);
}
TensorId ControlOutputFor(const Output& o) {
return {o.node()->name(), Graph::kControlSlot};
}
void VLogGraphIfAsked(const Graph& graph) {
if (VLOG_IS_ON(3)) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
string serialized;
::tensorflow::protobuf::TextFormat::PrintToString(graph_def, &serialized);
LOG(INFO) << serialized;
}
}
struct InductionVarInfo {
Output induction_var;
Output loop_cond;
};
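// Builds the graph for a counted loop used throughout these tests: an Enter
// feeds a Merge, the Merge output is compared with a constant final value via
// Less, and the result drives a Switch. The false branch exits the loop; the
// true branch is incremented by one and fed back through NextIteration. The
// back-edge is wired up afterwards with UpdateEdge because the Merge has to
// exist before the NextIteration node can be created.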
InductionVarInfo CreateInductionVariable(const Scope& root,
const string& prefix,
const string& frame_name,
const Output& initial_value) {
Output enter_initial_value = ops::internal::Enter(
root.WithOpName(prefix + "/enter"), initial_value, frame_name);
ops::Merge iv(root.WithOpName(prefix + "/iv"),
{enter_initial_value, enter_initial_value});
Output increment_by = ops::Const(root.WithOpName(prefix + "/incr"), 1);
Output final_value = ops::Const(root.WithOpName(prefix + "/final"), 10);
Output loop_cond_expr =
ops::Less(root.WithOpName(prefix + "/cond"), iv.output, final_value);
ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output,
loop_cond_expr);
ops::internal::Exit exit(root.WithOpName(prefix + "/exit"),
latch.output_false);
Output iv_next = ops::Add(root.WithOpName(prefix + "/ivnext"),
latch.output_true, increment_by);
Output next_iteration =
ops::NextIteration(root.WithOpName(prefix + "/next_iteration"), iv_next);
CHECK(root.graph()
->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1)
.ok());
root.graph()->AddControlEdge(iv.output.node(), increment_by.node());
root.graph()->AddControlEdge(iv.output.node(), final_value.node());
return {iv.output, loop_cond_expr};
}
InductionVarInfo CreateInductionVariable(const Scope& root,
const string& prefix,
const string& frame_name,
int32_t init) {
return CreateInductionVariable(
root, prefix, frame_name,
ops::Const(root.WithOpName(prefix + "/init"), init));
}
struct DependentInductionVar {
Output induction_var;
ops::Switch latch;
};
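// Builds a loop-invariant value living in the same frame as an existing loop:
// Enter -> Merge -> Switch(loop_cond), with the Switch's true output routed
// back through NextIteration and its false output through Exit. The carried
// value never changes across iterations.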
DependentInductionVar CreateDependentLoopInvariantValue(
const Scope& root, const string& prefix, const string& frame_name,
const Output& loop_cond, const Output& value) {
Output enter_value = ops::internal::Enter(root.WithOpName(prefix + "/enter"),
value, frame_name);
ops::Merge iv(root.WithOpName(prefix + "/iv"), {enter_value, enter_value});
ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output, loop_cond);
ops::internal::Exit exit(root.WithOpName(prefix + "/exit"),
latch.output_false);
Output next_iteration = ops::NextIteration(
root.WithOpName(prefix + "/next_iteration"), latch.output_true);
CHECK(root.graph()
->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1)
.ok());
return {iv.output, latch};
}
DependentInductionVar CreateDependentLoopInvariantValue(
const Scope& root, const string& prefix, const string& frame_name,
const Output& loop_cond, int32_t value) {
return CreateDependentLoopInvariantValue(
root, prefix, frame_name, loop_cond,
ops::Const(root.WithOpName(prefix + "/init"), value));
}
TEST(DeadnessAnalysisTest, BasicPositive) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output add =
ops::Add(root.WithOpName("add"), sw.output_true, sw.output_false);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, BasicNegative) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("b"), DT_FLOAT);
Output add = ops::Add(root.WithOpName("add"), a, b);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndIsCommutative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
Output a0 =
ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false);
Output a1 =
ops::Add(root.WithOpName("a1"), sw_1.output_false, sw_0.output_false);
Output b0 =
ops::Add(root.WithOpName("b0"), sw_0.output_false, sw_1.output_true);
Output b1 =
ops::Add(root.WithOpName("b1"), sw_1.output_true, sw_0.output_false);
Output live0 = ops::Add(root.WithOpName("live0"), a0, a1);
Output live1 = ops::Add(root.WithOpName("live1"), b0, b1);
Output halfdead0 = ops::Add(root.WithOpName("halfdead0"), a0, b0);
Output halfdead1 = ops::Add(root.WithOpName("halfdead1"), a1, b1);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
bool has_inputs_with_mismatching_deadness;
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live1.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead1.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndIsAssociative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
Output a0 =
ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false);
Output a1 = ops::Add(root.WithOpName("a1"), a0, sw_2.output_false);
Output b0 =
ops::Add(root.WithOpName("b0"), sw_1.output_false, sw_2.output_false);
Output b1 = ops::Add(root.WithOpName("b1"), sw_0.output_false, b0);
Output add = ops::Add(root.WithOpName("add"), a1, b1);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, OrIsCommutative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
ops::Merge m1(root.WithOpName("m1"), {sw_1.output_false, sw_0.output_false});
ops::Merge m2(root.WithOpName("m2"), {sw_0.output_false, sw_1.output_true});
ops::Merge m3(root.WithOpName("m3"), {sw_1.output_true, sw_0.output_false});
Output live0 = ops::Add(root.WithOpName("live0"), m0.output, m1.output);
Output live1 = ops::Add(root.WithOpName("live1"), m2.output, m3.output);
Output halfdead0 =
ops::Add(root.WithOpName("halfdead0"), m0.output, m2.output);
Output halfdead1 =
ops::Add(root.WithOpName("halfdead1"), m1.output, m3.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
bool has_inputs_with_mismatching_deadness;
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live1.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead1.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, OrIsAssociative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
ops::Merge m1(root.WithOpName("m1"), {m0.output, sw_2.output_false});
ops::Merge m2(root.WithOpName("m2"), {sw_1.output_false, sw_2.output_false});
ops::Merge m3(root.WithOpName("m3"), {sw_0.output_false, m2.output});
Output add = ops::Add(root.WithOpName("add"), m1.output, m3.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndOfOr) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Switch sw_3 = CreateSwitch(root, "3");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
ops::Merge m1(root.WithOpName("m1"), {sw_2.output_false, sw_3.output_false});
Output add0 = ops::Add(root.WithOpName("add0"), m0.output, m1.output);
Output add1 = ops::Add(root.WithOpName("add1"), m0.output, m1.output);
Output add2 = ops::Add(root.WithOpName("add2"), add0, add1);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add2.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, OrOfAnd) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Switch sw_3 = CreateSwitch(root, "3");
Output add0 =
ops::Add(root.WithOpName("add0"), sw_0.output_false, sw_1.output_false);
Output add1 =
ops::Add(root.WithOpName("add1"), sw_2.output_false, sw_3.output_false);
ops::Merge m0(root.WithOpName("m0"), {add0, add1});
ops::Merge m1(root.WithOpName("m1"), {add0, add1});
Output add2 = ops::Add(root.WithOpName("add2"), m0.output, m1.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add2.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndOrDistributiveSimplified) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "A");
ops::Switch sw_1 = CreateSwitch(root, "B");
Output add0 =
ops::Add(root.WithOpName("and0"), sw_0.output_false, sw_1.output_true);
Output add1 =
ops::Add(root.WithOpName("and1"), sw_0.output_false, sw_1.output_false);
ops::Merge or2(root.WithOpName("or2"), {add0, add1});
Output add3 =
ops::Add(root.WithOpName("and3"), or2.output, sw_0.output_false);
ops::Merge or4(root.WithOpName("or4"), {add3, sw_0.output_true});
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(or4.output)], "#true");
}
TEST(DeadnessAnalysisTest, AndOrDistributive) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
Output add0 = ops::Add(root.WithOpName("add0"), m0.output, sw_2.output_false);
Output add1 =
ops::Add(root.WithOpName("add1"), sw_0.output_false, sw_2.output_false);
Output add2 =
ops::Add(root.WithOpName("add2"), sw_1.output_false, sw_2.output_false);
ops::Merge m1(root.WithOpName("m1"), {add1, add2});
Output add3 = ops::Add(root.WithOpName("add3"), add0, m1.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add3.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, Ternary) {
Scope root = Scope::NewRootScope().ExitOnError();
Output predicate = ops::Placeholder(root.WithOpName("predicate"), DT_BOOL);
Output true_value = ops::Placeholder(root.WithOpName("true_value"), DT_FLOAT);
Output false_value =
ops::Placeholder(root.WithOpName("false_value"), DT_FLOAT);
ops::Switch predicated_true(root.WithOpName("predicated_true"), true_value,
predicate);
  ops::Switch predicated_false(root.WithOpName("predicated_false"),
                               false_value, predicate);
ops::Merge merge(root.WithOpName("ternary"), {predicated_true.output_true,
predicated_false.output_false});
Output addend = ops::Placeholder(root.WithOpName("addend"), DT_FLOAT);
Output add = ops::Add(root.WithOpName("add"), merge.output, addend);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, Recv) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv_a = ops::_Recv(root.WithOpName("recv_a"), DT_FLOAT, "tensor_a",
"sender", 0, "receiver");
Output recv_b = ops::_Recv(root.WithOpName("recv_b"), DT_FLOAT, "tensor_b",
"sender", 0, "receiver");
Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, HostRecv) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv_a = ops::_HostRecv(root.WithOpName("recv_a"), DT_FLOAT,
"tensor_a", "sender", 0, "receiver");
Output recv_b = ops::_HostRecv(root.WithOpName("recv_b"), DT_FLOAT,
"tensor_b", "sender", 0, "receiver");
Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, Loop) {
Scope root = Scope::NewRootScope().ExitOnError();
Output iv0 = CreateInductionVariable(root, "iv0", "fr0", 0).induction_var;
Output iv1 = CreateInductionVariable(root, "iv1", "fr0", 0).induction_var;
Output iv2 = CreateInductionVariable(root, "iv2", "fr0", 1).induction_var;
Output add0 = ops::Add(root.WithOpName("add0"), iv0, iv1);
Output add1 = ops::Add(root.WithOpName("add1"), iv1, iv2);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
bool has_inputs_with_mismatching_deadness;
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add1.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(iv0)],
"{#true,&,*iv0/cond:0}<fr0>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv1)],
"{#true,&,*iv1/cond:0}<fr0>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv2)],
"{#true,&,*iv2/cond:0}<fr0>");
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
"({#true,&,*iv0/cond:0}<fr0> & {#true,&,*iv1/cond:0}<fr0>)");
EXPECT_EQ(predicate_map[ControlOutputFor(add1)],
"({#true,&,*iv1/cond:0}<fr0> & {#true,&,*iv2/cond:0}<fr0>)");
}
}
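// Two loop-invariant values driven by the same loop condition should be seen
// as control-equivalent: the optimistic mode collapses their predicates to
// the induction variable's AndRecurrence, while the pessimistic mode keeps
// the more conservative per-value recurrences.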
TEST(DeadnessAnalysisTest, ControlEquivalentLoopBodies) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv = CreateInductionVariable(root, "iv0", "loop", 0);
Output dependent_iv0 =
CreateDependentLoopInvariantValue(root, "div0", "loop", iv.loop_cond, 0)
.induction_var;
Output dependent_iv1 =
CreateDependentLoopInvariantValue(root, "div1", "loop", iv.loop_cond, 0)
.induction_var;
Output add0 = ops::Add(root.WithOpName("add0"), dependent_iv0, dependent_iv1);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/true));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv0)],
predicate_map[ControlOutputFor(iv.induction_var)]);
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv1)],
predicate_map[ControlOutputFor(iv.induction_var)]);
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
predicate_map[ControlOutputFor(iv.induction_var)]);
}
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/false));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv0)],
"{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv1)],
"{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
"{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>");
}
}
TEST(DeadnessAnalysisTest, LoopInvariantPredicateOnBackedge) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv = CreateInductionVariable(root, "iv0", "frame", 0);
DependentInductionVar dependent_iv =
CreateDependentLoopInvariantValue(root, "div0", "frame", iv.loop_cond, 0);
FixupSourceAndSinkEdges(root.graph());
TF_ASSERT_OK(root.graph()->UpdateEdge(
iv.induction_var.node(), 0, dependent_iv.latch.output_true.node(), 0));
VLogGraphIfAsked(*root.graph());
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/true));
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv.induction_var)],
"{#true,&,*iv0/cond:0}<frame>");
}
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/false));
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv.induction_var)],
"div0/iv:0");
}
}
TEST(DeadnessAnalysisTest, ControlEquivalentNestedLoopBodies) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv_outer =
CreateInductionVariable(root, "iv_outer", "outer_loop", 0);
Output enter_constant_outer_loop = ops::internal::Enter(
root.WithOpName("constant_enter_outer_loop"),
ops::Const(root.WithOpName("constant"), 5), "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
ops::Switch inner_value(root.WithOpName("outer_is_live"),
enter_constant_outer_loop, iv_outer.loop_cond);
InductionVarInfo iv_inner = CreateInductionVariable(
root, "iv_inner", "inner_loop", inner_value.output_true);
Output dependent_outer_iv0 =
CreateDependentLoopInvariantValue(root, "dependent_outer_iv0",
"outer_loop", iv_outer.loop_cond, 0)
.induction_var;
Output dependent_outer_iv1 =
CreateDependentLoopInvariantValue(root, "dependent_outer_iv1",
"outer_loop", iv_outer.loop_cond, 0)
.induction_var;
Output dependent_inner_iv0 = CreateDependentLoopInvariantValue(
root, "dependent_inner_iv0", "inner_loop",
iv_inner.loop_cond, dependent_outer_iv0)
.induction_var;
Output dependent_inner_iv1 = CreateDependentLoopInvariantValue(
root, "dependent_inner_iv1", "inner_loop",
iv_inner.loop_cond, dependent_outer_iv1)
.induction_var;
Output add0 = ops::Add(root.WithOpName("add0"), dependent_inner_iv0,
dependent_inner_iv1);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/true));
EXPECT_EQ(predicate_map[ControlOutputFor(iv_outer.induction_var)],
"{#true,&,*iv_outer/cond:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv_inner.induction_var)],
"{(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv0)],
"{{#true,&,(iv_outer/iv:0 & "
"*iv_outer/cond:0)}<outer_loop>,&,(*iv_inner/cond:0 & "
"iv_inner/iv:0)}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv1)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
}
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/false));
EXPECT_EQ(predicate_map[ControlOutputFor(iv_outer.induction_var)],
"{#true,&,*iv_outer/cond:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv_inner.induction_var)],
"{(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv0)],
"{{#true,&,(iv_outer/iv:0 & "
"*iv_outer/cond:0)}<outer_loop>,&,(iv_inner/iv:0 & "
"*iv_inner/cond:0)}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv1)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
}
}
TEST(DeadnessAnalysisTest, ControlNonEquivalentNestedLoopBodies) {
Scope root = Scope::NewRootScope().ExitOnError();
std::array<Output, 2> outer_iv;
std::array<Output, 2> inner_iv;
for (int i : {0, 1}) {
InductionVarInfo iv_outer =
CreateInductionVariable(root, "iv_outer", "outer_loop", 0);
Output enter_constant_outer_loop = ops::internal::Enter(
root.WithOpName("constant_enter_outer_loop"),
ops::Const(root.WithOpName("constant"), 5), "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
ops::Switch inner_value(root.WithOpName("outer_is_live"),
enter_constant_outer_loop, iv_outer.loop_cond);
InductionVarInfo iv_inner = CreateInductionVariable(
root, "iv_inner", "inner_loop", inner_value.output_true);
outer_iv[i] = iv_outer.induction_var;
inner_iv[i] = iv_inner.induction_var;
}
Output add0 = ops::Add(root.WithOpName("add0"), inner_iv[0], inner_iv[1]);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(outer_iv[0])],
"{#true,&,*iv_outer/cond:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(inner_iv[0])],
"{(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(outer_iv[1])],
"{#true,&,*iv_outer/cond_1:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(inner_iv[1])],
"{(*iv_outer/cond_1:0 & "
"{#true,&,*iv_outer/cond_1:0}<outer_loop>),&,*iv_inner/"
"cond_1:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
"({(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop> & {(*iv_outer/cond_1:0 & "
"{#true,&,*iv_outer/cond_1:0}<outer_loop>),&,*iv_inner/"
"cond_1:0}<inner_loop;outer_loop>)");
}
}
TEST(DeadnessAnalysisTest, NestedLoopBodiesWithACapture) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv_outer =
CreateInductionVariable(root, "iv_outer", "outer_loop", 0);
Output enter_constant_outer_loop = ops::internal::Enter(
root.WithOpName("constant_enter_outer_loop"),
ops::Const(root.WithOpName("constant"), 5), "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
ops::Switch inner_value(root.WithOpName("outer_is_live"),
enter_constant_outer_loop, iv_outer.loop_cond);
InductionVarInfo iv_inner = CreateInductionVariable(
root, "iv_inner", "inner_loop", inner_value.output_true);
DependentInductionVar div0_outer = CreateDependentLoopInvariantValue(
root, "div0_outer", "outer_loop", iv_outer.loop_cond, 0);
DependentInductionVar div1_outer = CreateDependentLoopInvariantValue(
root, "div1_outer", "outer_loop", iv_outer.loop_cond, 0);
DependentInductionVar div0_inner = CreateDependentLoopInvariantValue(
root, "div0_inner", "inner_loop", iv_inner.loop_cond,
div0_outer.induction_var);
DependentInductionVar div1_inner = CreateDependentLoopInvariantValue(
root, "div1_inner", "inner_loop", iv_inner.loop_cond,
div1_outer.induction_var);
Output captured = ops::_Recv(root.WithOpName("captured"), DT_INT32,
"tensor_a", "sender", 0, "receiver");
Output capture_enter_outer = ops::internal::Enter(
root.WithOpName("capture_enter_outer"), captured, "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
Output capture_enter_inner = ops::internal::Enter(
root.WithOpName("capture_enter_inner"), capture_enter_outer, "inner_loop",
ops::internal::Enter::Attrs().IsConstant(true));
Output mul0 = ops::Mul(root.WithOpName("mul0"), div1_inner.induction_var,
capture_enter_inner);
TF_ASSERT_OK(root.graph()->UpdateEdge(
mul0.node(), 0, div1_inner.latch.output_true.node(), 0));
Output add0 = ops::Add(root.WithOpName("add0"), div0_inner.induction_var,
div1_inner.induction_var);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
}
TEST(DeadnessAnalysisTest, CyclicRecurrence) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv = CreateInductionVariable(root, "iv0", "loop", 0);
DependentInductionVar div0 =
CreateDependentLoopInvariantValue(root, "div0", "loop", iv.loop_cond, 0);
DependentInductionVar div1 =
CreateDependentLoopInvariantValue(root, "div1", "loop", iv.loop_cond, 0);
FixupSourceAndSinkEdges(root.graph());
TF_ASSERT_OK(root.graph()->UpdateEdge(div1.induction_var.node(), 0,
div0.latch.output_true.node(), 0));
TF_ASSERT_OK(root.graph()->UpdateEdge(div0.induction_var.node(), 0,
div1.latch.output_true.node(), 0));
VLogGraphIfAsked(*root.graph());
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/true));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(div0.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(div1.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
TensorId switch_false_out = {div1.latch.output_false.node()->name(),
div1.latch.output_false.index()};
EXPECT_EQ(predicate_map[switch_false_out], "(#true)");
}
{
PredicateMapTy predicate_map;
    TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
                                   /*enable_optimistic=*/false));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(div0.induction_var)], "div0/iv:0");
EXPECT_EQ(predicate_map[ControlOutputFor(div1.induction_var)], "div1/iv:0");
}
}
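// Two structurally identical loops placed in different frames must not share
// predicates: the frame name is part of the AndRecurrence, so the Exit and
// NextIteration nodes of frame_0 and frame_1 end up with distinct, non-empty
// predicates.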
TEST(DeadnessAnalysisTest, AndRecurrenceNeedsFrameName) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv_0 = CreateInductionVariable(root, "iv_0", "frame_0", 10);
InductionVarInfo iv_1 = CreateInductionVariable(root, "iv_1", "frame_1", 9);
Output init = CreateSwitch(root, "init").output_true;
Output step = CreateSwitch(root, "step").output_true;
std::array<Output, 2> exits;
std::array<Output, 2> next_iterations;
for (int i : {0, 1}) {
Output init_enter = ops::internal::Enter(
root.WithOpName(absl::StrCat("init_enter_frame_", i)), init,
absl::StrCat("frame_", i),
ops::internal::Enter::Attrs().IsConstant(true));
Output step_enter = ops::internal::Enter(
root.WithOpName(absl::StrCat("step_enter_frame_", i)), step,
absl::StrCat("frame_", i),
ops::internal::Enter::Attrs().IsConstant(true));
ops::Merge iv(root.WithOpName(absl::StrCat("expr_", i)),
{init_enter, init_enter});
Output add = ops::Add(root.WithOpName(absl::StrCat("add_", i)), iv.output,
step_enter);
next_iterations[i] = ops::NextIteration(
root.WithOpName(absl::StrCat("expr_", i, "_next_iteration")), add);
EXPECT_TRUE(
root.graph()
->UpdateEdge(next_iterations[i].node(), 0, iv.output.node(), 1)
.ok());
exits[i] = ops::internal::Exit(root.WithOpName(absl::StrCat("exit_", i)),
iv.output);
}
FixupSourceAndSinkEdges(root.graph());
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_NE(predicate_map[ControlOutputFor(exits[0])],
predicate_map[ControlOutputFor(exits[1])]);
EXPECT_NE(predicate_map[ControlOutputFor(exits[0])], "");
EXPECT_NE(predicate_map[ControlOutputFor(exits[1])], "");
EXPECT_NE(predicate_map[ControlOutputFor(next_iterations[0])],
predicate_map[ControlOutputFor(next_iterations[1])]);
EXPECT_NE(predicate_map[ControlOutputFor(next_iterations[0])], "");
EXPECT_NE(predicate_map[ControlOutputFor(next_iterations[1])], "");
}
}
TEST(DeadnessAnalysisTest, ControlInputs) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
Output const0 = ops::Const(root.WithOpName("const0"), 1);
Output const1 = ops::Const(root.WithOpName("const1"), 2);
Output add = ops::Add(root.WithOpName("add"), const0, const1);
root.graph()->AddControlEdge(id0.node(), const0.node());
root.graph()->AddControlEdge(id1.node(), const1.node());
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, ControlTrigger) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
ops::ControlTrigger ctrl_trigger0(root.WithOpName("ctrl_trigger0"));
ops::ControlTrigger ctrl_trigger1(root.WithOpName("ctrl_trigger1"));
Output const0 = ops::Const(root.WithOpName("const0"), 1);
Output const1 = ops::Const(root.WithOpName("const1"), 2);
Output add = ops::Add(root.WithOpName("add"), const0, const1);
root.graph()->AddControlEdge(id0.node(), ctrl_trigger0.operation.node());
root.graph()->AddControlEdge(ctrl_trigger0.operation.node(), const0.node());
root.graph()->AddControlEdge(id1.node(), ctrl_trigger1.operation.node());
root.graph()->AddControlEdge(ctrl_trigger1.operation.node(), const1.node());
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, ControlInputsToMerge) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
Output constant = ops::Const(root.WithOpName("constant"), 5);
  ops::Merge m0(root.WithOpName("m0"), {constant});
  ops::Merge m1(root.WithOpName("m1"), {constant});
Output add = ops::Add(root.WithOpName("add"), m0.output, m1.output);
root.graph()->AddControlEdge(id0.node(), m0.output.node());
root.graph()->AddControlEdge(id1.node(), m1.output.node());
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, RecvVsSwitch) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv = ops::_Recv(root.WithOpName("recv"), DT_BOOL, "tensor", "sender",
0, "receiver");
Output value = ops::Placeholder(root.WithOpName("value"), DT_BOOL);
ops::Switch sw(root.WithOpName("switch"), value, recv);
Output logical_and =
ops::LogicalAnd(root.WithOpName("and"), recv, sw.output_true);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *logical_and.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, RecvVsSwitchText) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv = ops::_Recv(root.WithOpName("recv"), DT_BOOL, "tensor", "sender",
0, "receiver");
Output value = ops::Placeholder(root.WithOpName("value"), DT_BOOL);
ops::Switch sw(root.WithOpName("switch"), value, recv);
Output logical_and =
ops::LogicalAnd(root.WithOpName("and"), recv, sw.output_true);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
TensorId logical_and_output_0 = {logical_and.node()->name(),
Graph::kControlSlot};
EXPECT_EQ(predicate_map[logical_and_output_0], "(recv:0 & *recv:0)");
}
TEST(DeadnessAnalysisTest, DeMorgan) {
Scope root = Scope::NewRootScope().ExitOnError();
Output cond_0 = ops::Placeholder(root.WithOpName("cond_0"), DT_BOOL);
Output cond_1 = ops::Placeholder(root.WithOpName("cond_1"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw_0(root.WithOpName("switch_0"), value, cond_0);
ops::Switch sw_1(root.WithOpName("switch_1"), value, cond_1);
Output and_0_1 =
ops::Add(root.WithOpName("and_0_1"), sw_0.output_true, sw_1.output_true);
Output or_not0_not1 = ops::Merge(root.WithOpName("or_not0_not1"),
{sw_0.output_false, sw_1.output_false})
.output;
Output should_always_be_dead =
ops::Add(root.WithOpName("should_always_be_dead"), and_0_1, or_not0_not1);
Output should_always_be_alive =
ops::Merge(root.WithOpName("should_always_be_alive"),
{and_0_1, or_not0_not1})
.output;
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(should_always_be_dead)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(should_always_be_alive)], "#true");
}
TEST(DeadnessAnalysisTest, ConstantTrueSwitchCondition) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_true = ops::Const(root.WithOpName("const_true"), true);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), value, constant_true);
Output id_false = ops::Identity(root.WithOpName("id_false"), sw.output_false);
Output id_true = ops::Identity(root.WithOpName("id_true"), sw.output_true);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_false)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_true)], "#true");
}
TEST(DeadnessAnalysisTest, ConstantFalseSwitchCondition) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_false = ops::Const(root.WithOpName("const_false"), false);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), value, constant_false);
Output id_false = ops::Identity(root.WithOpName("id_false"), sw.output_false);
Output id_true = ops::Identity(root.WithOpName("id_true"), sw.output_true);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_false)], "#true");
EXPECT_EQ(predicate_map[ControlOutputFor(id_true)], "#false");
}
TEST(DeadnessAnalysisTest, RefBoolSwitchCondition) {
Scope root = Scope::NewRootScope().ExitOnError();
Output condition_ref_var =
ops::Variable(root.WithOpName("cond_ref"), TensorShape({}), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), value, condition_ref_var);
Output id_false = ops::Identity(root.WithOpName("id_false"), sw.output_false);
Output id_true = ops::Identity(root.WithOpName("id_true"), sw.output_true);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_false)], "~*cond_ref:0");
EXPECT_EQ(predicate_map[ControlOutputFor(id_true)], "*cond_ref:0");
}
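// Builds a _SwitchN node directly with NodeBuilder and appends each of its
// outputs to `outputs`.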
void CreateSwitchN(const Scope& scope, Input data, Input output_index,
int64_t num_outs, OutputList* outputs) {
if (!scope.ok()) return;
auto _data = ops::AsNodeOut(scope, data);
if (!scope.ok()) return;
auto _output_index = ops::AsNodeOut(scope, output_index);
if (!scope.ok()) return;
Node* ret;
const auto unique_name = scope.GetUniqueNameForOp("_SwitchN");
auto builder = NodeBuilder(unique_name, "_SwitchN")
.Input(_data)
.Input(_output_index)
.Attr("num_outs", num_outs);
scope.UpdateBuilder(&builder);
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
if (!scope.ok()) return;
scope.UpdateStatus(scope.DoShapeInference(ret));
for (int32_t i = 0; i < ret->num_outputs(); ++i) {
outputs->push_back(Output(ret, i));
}
}
TEST(DeadnessAnalysisTest, Constant1_SwitchN_2Branches_DoesNotFail) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_1 = ops::Const(root.WithOpName("const_1"), 1);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
OutputList outputs;
CreateSwitchN(root.WithOpName("switchn"), value, constant_1, 2, &outputs);
Output id_0 = ops::Identity(root.WithOpName("id_0"), outputs[0]);
Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "#true");
}
TEST(DeadnessAnalysisTest, Constant7_SwitchN_3Branches) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_7 = ops::Const(root.WithOpName("const_7"), 7);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
OutputList outputs;
CreateSwitchN(root.WithOpName("switchn"), value, constant_7, 3, &outputs);
Output id_0 = ops::Identity(root.WithOpName("id_0"), outputs[0]);
Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
Output id_2 = ops::Identity(root.WithOpName("id_2"), outputs[2]);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_2)], "#true");
}
TEST(DeadnessAnalysisTest, RefInt_SwitchN_3Branches) {
Scope root = Scope::NewRootScope().ExitOnError();
Output condition_ref_var =
ops::Variable(root.WithOpName("bidx"), TensorShape({}), DT_INT32);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
OutputList outputs;
CreateSwitchN(root.WithOpName("switchn"), value, condition_ref_var, 3,
&outputs);
Output id_0 = ops::Identity(root.WithOpName("id_0"), outputs[0]);
Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
Output id_2 = ops::Identity(root.WithOpName("id_2"), outputs[2]);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "bidx:0=0");
EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "(~bidx:0=0 & bidx:0=1)");
EXPECT_EQ(predicate_map[ControlOutputFor(id_2)], "(~bidx:0=0 & ~bidx:0=1)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/deadness_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/deadness_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
256c9467-813a-47bb-bf44-f7c554ce6472 | cpp | tensorflow/tensorflow | encapsulate_subgraphs_pass | tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include <functional>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
#include "tensorflow/compiler/jit/shape_inference_helpers.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/shape_refiner.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/device_name_utils.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
const char* const kXlaCompiledKernelAttr = "_XlaCompiledKernel";
const char* const kXlaNumConstantArgsAttr = "_XlaNumConstantArgs";
const char* const kXlaNumResourceArgsAttr = "_XlaNumResourceArgs";
const char* const kXlaHostTransferSequencerAttr =
"_xla_host_transfer_sequencer";
const char* const kXlaHasReferenceVarsAttr = "_XlaHasReferenceVars";
namespace {
bool AreAllParentsGuaranteedConst(
const Node& n,
const absl::flat_hash_set<const Node*>& runtime_const_nodes) {
if (n.type_string() == "GuaranteeConst") {
return true;
}
bool all_parents_const = true;
bool atleast_one_non_control_edge = false;
for (const Edge* in : n.in_edges()) {
atleast_one_non_control_edge =
atleast_one_non_control_edge || !in->IsControlEdge();
if (!in->IsControlEdge() && runtime_const_nodes.count(in->src()) == 0) {
all_parents_const = false;
break;
}
}
return all_parents_const && atleast_one_non_control_edge;
}
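// Determines which producers of subgraph arguments are guaranteed constants
// (GuaranteeConst nodes, or nodes all of whose data inputs are themselves
// guaranteed constants) and tags the corresponding _Arg nodes with the
// _is_guaranteed_constant attribute.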
void MarkGuaranteedConstants(
const Graph& graph,
const std::vector<std::pair<const Node*, Node*>>& src_arg_pairs) {
absl::flat_hash_set<const Node*> guaranteed_const_nodes;
std::vector<const Node*> srcs;
srcs.reserve(src_arg_pairs.size());
for (const auto& src_arg : src_arg_pairs) {
srcs.push_back(src_arg.first);
}
ReverseDFSFrom(
graph, srcs, nullptr,
[&guaranteed_const_nodes](const Node* n) {
if (AreAllParentsGuaranteedConst(*n, guaranteed_const_nodes)) {
guaranteed_const_nodes.insert(n);
}
});
for (auto& src_arg : src_arg_pairs) {
if (guaranteed_const_nodes.count(src_arg.first) != 0) {
VLOG(1) << "Guaranteed const found: " << src_arg.first->DebugString();
src_arg.second->AddAttr("_is_guaranteed_constant", true);
}
}
}
struct OutputInputTensorPairHasher {
uint64 operator()(std::pair<OutputTensor, InputTensor> const& s) const {
return Hash64Combine(OutputTensor::Hash()(s.first),
InputTensor::Hash()(s.second));
}
};
static const char* const kArgOp = "_Arg";
static const char* const kRetValOp = "_Retval";
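// Rewrites a graph by pulling each cluster (the nodes sharing a value of the
// group attribute) out into its own function: SplitIntoSubgraphs copies the
// clustered nodes into per-subgraph graphs, BuildFunctionDefs converts each
// subgraph into a FunctionDef in the library, and BuildOutputGraph emits a
// graph in which every cluster is replaced by a single call node.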
class Encapsulator {
public:
Encapsulator(string group_attribute, Graph const* graph_in)
: group_attribute_(std::move(group_attribute)), graph_in_(graph_in) {}
Status SplitIntoSubgraphs(FunctionLibraryDefinition* library);
Status BuildFunctionDefs(const RewriteSubgraphFn& rewrite_subgraph_fn,
bool reuse_existing_functions,
FunctionLibraryDefinition* library);
Status BuildOutputGraph(Graph* graph_out, FunctionLibraryDefinition* library);
private:
class Subgraph {
public:
Node* MakeNodeImage(const Graph* graph_in, Node* node);
Graph* GetGraph() const;
Status BuildFunctionDef(const string& name_in,
const RewriteSubgraphFn& rewrite_subgraph_fn,
bool reuse_existing_functions,
FunctionLibraryDefinition* library);
Status AddFunctionCallNode(
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out);
Node* GetCallNode() const;
int GetArgIndexForEdge(const Edge* edge) const;
int GetResultIndexForEdge(const Edge* edge) const;
Status RecordArg(const Edge* edge,
const absl::flat_hash_map<const Node*, Node*>& node_images,
std::vector<std::pair<const Node*, Node*>>* src_arg_pairs);
Status RecordControlResult(
const Edge* edge,
const absl::flat_hash_map<const Node*, Node*>& node_images);
Status RecordResult(
const Edge* edge,
const absl::flat_hash_map<const Node*, Node*>& node_images);
Status MakeSequencingNode(const string& subgraph_name, Graph* graph_out);
void ConnectSequencerToCallNode(Graph* graph_out);
Status ReplaceFunctionDef(FunctionLibraryDefinition* library);
private:
std::unique_ptr<Graph> graph_;
string device_;
NodeDef call_node_def_;
string function_def_name_;
Node* host_compute_key_placeholder_ = nullptr;
Node* call_node_;
absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash> args_by_src_;
absl::flat_hash_map<InputTensor, int, InputTensor::Hash> args_by_dst_;
std::vector<Node*> args_;
absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash> results_;
absl::flat_hash_set<string> control_output_nodes_;
Node* sequencer_ = nullptr;
};
Status GetFunctionNameAttr(Node const* node, string* attr) const;
Status CopySubgraphEdges(
const absl::flat_hash_map<const Node*, Node*>& node_images,
std::vector<std::pair<const Node*, Node*>>* src_arg_pairs);
Status CopySubgraphNodes(
absl::flat_hash_map<const Node*, Node*>* node_images);
Status CopyNodesToOutputGraph(
Graph* graph_out, absl::flat_hash_map<const Node*, Node*>* node_images);
Status AddFunctionCallNodes(
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out);
Status FindOutputImageOfEdgeSrc(
const string& src_func_id, const string& dst_func_id,
const absl::flat_hash_map<const Node*, Node*>& node_images,
const Node* original_src_node, Node** src_image);
int FindOutputSlotOfEdgeSrc(const string& src_func_id,
const string& dst_func_id,
const Edge* edge);
Status FindOutputImageOfEdgeDst(
const string& src_func_id, const string& dst_func_id,
const absl::flat_hash_map<const Node*, Node*>& node_images,
const Node* original_dst_node, Node** dst_image);
int FindOutputSlotOfEdgeDst(const string& src_func_id,
const string& dst_func_id,
const Edge* edge);
Status CopyEdgeToOutputGraph(
const Edge* edge, const string& src_func_id, const string& dst_func_id,
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out,
absl::flat_hash_set<std::pair<OutputTensor, InputTensor>,
OutputInputTensorPairHasher>* edges_added);
Status AddEdgesToOutputGraph(
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out);
Status MakePrunedGraphCopyAndInline(
const Graph& graph, const std::vector<Node*>& sink_nodes,
std::unique_ptr<Graph>* pruned_graph,
absl::flat_hash_map<const Node*, Node*>* node_images,
FunctionLibraryDefinition* library);
const string group_attribute_;
const Graph* graph_in_;
absl::flat_hash_map<string, Subgraph> subgraphs_;
Encapsulator(const Encapsulator&) = delete;
void operator=(const Encapsulator&) = delete;
};
namespace {
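// Topologically sorts `clusters` so that every cluster appears after all of
// its ancestors, using an iterative depth-first search seeded from clusters
// that have no successors.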
void TopologicalClusterSort(
const absl::flat_hash_set<string>& clusters,
const absl::flat_hash_set<string>& has_successors,
const absl::flat_hash_map<string, absl::flat_hash_set<string>>& ancestors,
std::vector<string>* sorted) {
sorted->clear();
struct Work {
string cluster;
bool leave;
};
std::set<string> visited;
std::vector<Work> stack;
for (const auto& cluster : clusters) {
if (has_successors.find(cluster) == has_successors.end()) {
stack.push_back({cluster, false});
}
}
while (!stack.empty()) {
const Work item = stack.back();
stack.pop_back();
if (item.leave) {
sorted->push_back(item.cluster);
continue;
}
if (visited.find(item.cluster) != visited.end()) continue;
visited.insert(item.cluster);
stack.push_back({item.cluster, true});
const auto& iter = ancestors.find(item.cluster);
if (iter != ancestors.end()) {
for (const auto& ancestor : iter->second) {
stack.push_back({ancestor, false});
}
}
}
CHECK(sorted->size() == clusters.size());
}
}
Node* Encapsulator::Subgraph::GetCallNode() const { return call_node_; }
int Encapsulator::Subgraph::GetArgIndexForEdge(const Edge* edge) const {
return args_by_dst_.at(InputTensor(edge->dst(), edge->dst_input()));
}
int Encapsulator::Subgraph::GetResultIndexForEdge(const Edge* edge) const {
return results_.at(OutputTensor(edge->src(), edge->src_output()));
}
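// Lazily creates the subgraph's Graph, records the device of the first node
// copied into it, and returns a copy of `node` owned by the subgraph.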
Node* Encapsulator::Subgraph::MakeNodeImage(const Graph* graph_in, Node* node) {
if (!graph_) {
graph_.reset(new Graph(graph_in->op_registry()));
graph_->set_versions(graph_in->versions());
}
if (device_.empty()) {
device_ = node->assigned_device_name().empty()
? node->requested_device()
: node->assigned_device_name();
}
return graph_->CopyNode(node);
}
Graph* Encapsulator::Subgraph::GetGraph() const { return graph_.get(); }
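// Creates an _Arg node for the tensor feeding `edge` if one does not already
// exist, records the (source tensor -> arg index) and (destination tensor ->
// arg index) mappings, and wires the _Arg to the destination node's image
// inside the subgraph.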
Status Encapsulator::Subgraph::RecordArg(
const Edge* edge,
const absl::flat_hash_map<const Node*, Node*>& node_images,
std::vector<std::pair<const Node*, Node*>>* src_arg_pairs) {
Node* src_node = edge->src();
int src_slot = edge->src_output();
absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash>::iterator iter;
bool inserted;
std::tie(iter, inserted) = args_by_src_.emplace(
OutputTensor(src_node, src_slot), args_by_src_.size());
int arg_index = iter->second;
if (inserted) {
NodeDef arg_def;
NodeDefBuilder builder(
absl::StrCat(src_node->name(), "_", src_slot, "_arg"), kArgOp,
NodeDebugInfo(src_node->def()));
DataType dtype = edge->dst()->input_type(edge->dst_input());
builder.Attr("T", dtype);
builder.Attr("index", arg_index);
Status s = builder.Finalize(&arg_def);
if (!s.ok()) return s;
TF_ASSIGN_OR_RETURN(Node * arg, graph_->AddNode(arg_def));
src_arg_pairs->push_back({src_node, arg});
args_.push_back(arg);
}
Node* dst_node = edge->dst();
Node* dst_image = node_images.at(dst_node);
int dst_slot = edge->dst_input();
args_by_dst_[InputTensor(dst_node, dst_slot)] = arg_index;
graph_->AddEdge(args_[arg_index], 0, dst_image, dst_slot);
return absl::OkStatus();
}
Status Encapsulator::Subgraph::RecordControlResult(
const Edge* edge,
const absl::flat_hash_map<const Node*, Node*>& node_images) {
Node* src_node = edge->src();
Node* src_image = node_images.at(src_node);
control_output_nodes_.insert(src_image->name());
return absl::OkStatus();
}
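// Creates a _Retval node for the source tensor of `edge` if one does not
// already exist, records its result index, and feeds it from the source
// node's image inside the subgraph.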
Status Encapsulator::Subgraph::RecordResult(
const Edge* edge,
const absl::flat_hash_map<const Node*, Node*>& node_images) {
Node* src_node = edge->src();
Node* src_image = node_images.at(src_node);
int src_slot = edge->src_output();
absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash>::iterator iter;
bool inserted;
std::tie(iter, inserted) =
results_.emplace(OutputTensor(src_node, src_slot), results_.size());
int ret_index = iter->second;
if (inserted) {
NodeDef ret_def;
NodeDefBuilder builder(
absl::StrCat(src_node->name(), "_", src_slot, "_retval"), kRetValOp,
NodeDebugInfo(src_node->def()));
DataType dtype = src_node->output_type(src_slot);
builder.Attr("T", dtype);
builder.Attr("index", ret_index);
builder.Input(src_image->name(), src_slot, dtype);
Status s = builder.Finalize(&ret_def);
if (!s.ok()) return s;
TF_ASSIGN_OR_RETURN(Node * ret, graph_->AddNode(ret_def));
graph_->AddEdge(src_image, src_slot, ret, 0);
}
return absl::OkStatus();
}
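// Lazily creates a NoOp node tagged with kXlaHostTransferSequencerAttr on the
// subgraph's device; ConnectSequencerToCallNode later adds a control edge
// from it to the function call node.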
Status Encapsulator::Subgraph::MakeSequencingNode(const string& subgraph_name,
Graph* graph_out) {
if (sequencer_ == nullptr) {
NodeDef seq_def;
NodeDefBuilder builder(absl::StrCat(subgraph_name, "_sequencer"), "NoOp");
builder.Attr(kXlaHostTransferSequencerAttr, subgraph_name);
builder.Device(device_);
Status s = builder.Finalize(&seq_def);
if (!s.ok()) return s;
TF_ASSIGN_OR_RETURN(sequencer_, graph_out->AddNode(seq_def));
}
return absl::OkStatus();
}
void Encapsulator::Subgraph::ConnectSequencerToCallNode(Graph* graph_out) {
if (sequencer_ != nullptr) {
VLOG(2) << "ConnectSequencerToCallNode";
    graph_out->AddControlEdge(sequencer_, call_node_,
                              /*allow_duplicates=*/true);
}
}
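// Lowers the subgraph to a FunctionDef named `name_in` and adds it to
// `library`, replacing an existing definition if it differs. If
// `rewrite_subgraph_fn` is supplied it may rewrite the graph, rename the
// function, and permute argument/result indices; the permutations are applied
// to args_by_src_, args_by_dst_ and results_ so later edge lookups stay
// consistent.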
Status Encapsulator::Subgraph::BuildFunctionDef(
const string& name_in, const RewriteSubgraphFn& rewrite_subgraph_fn,
bool reuse_existing_functions, FunctionLibraryDefinition* library) {
string name = name_in;
call_node_def_.set_op(name);
call_node_def_.set_name(name);
call_node_def_.set_device(device_);
if (rewrite_subgraph_fn) {
std::vector<OutputTensor> arg_source_tensors(args_by_src_.size());
for (const auto& arg : args_by_src_) {
arg_source_tensors.at(arg.second) = arg.first;
}
std::vector<int> input_permutation(args_by_src_.size());
std::iota(input_permutation.begin(), input_permutation.end(), 0);
std::vector<int> output_permutation(results_.size());
std::iota(output_permutation.begin(), output_permutation.end(), 0);
TF_RETURN_IF_ERROR(
rewrite_subgraph_fn(arg_source_tensors, &graph_, &input_permutation,
&output_permutation, &call_node_def_));
if (input_permutation.size() != args_by_src_.size()) {
return errors::InvalidArgument("Input permutation has incorrect size.");
}
if (output_permutation.size() != results_.size()) {
return errors::InvalidArgument("Output permutation has incorrect size.");
}
for (auto& arg : args_by_src_) {
arg.second = input_permutation[arg.second];
}
for (auto& arg : args_by_dst_) {
arg.second = input_permutation[arg.second];
}
for (auto& result : results_) {
result.second = output_permutation[result.second];
}
name = call_node_def_.op();
}
function_def_name_ = name;
FunctionDef fdef;
auto lookup = [this](const Node* node) -> std::optional<string> {
if (control_output_nodes_.contains(node->name())) {
return std::make_optional(node->name());
}
return std::nullopt;
};
std::vector<ControlFlowInfo> dummy;
TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph_.get(), &dummy));
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph_, name, lookup, &fdef));
if (VLOG_IS_ON(1)) {
VLOG(2) << "Build function def " << name;
DumpGraphToFile(absl::StrCat("encapsulate_fdef_graph_", name), *graph_,
library);
DumpFunctionDefToFile(absl::StrCat("encapsulate_fdef_", name), fdef);
}
const FunctionDef* original_fdef = library->Find(name);
if (!reuse_existing_functions || original_fdef == nullptr) {
TF_RETURN_IF_ERROR(library->AddFunctionDef(fdef));
} else if (!FunctionDefsEqual(*original_fdef, fdef)) {
TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef));
}
return absl::OkStatus();
}
Status Encapsulator::Subgraph::ReplaceFunctionDef(
FunctionLibraryDefinition* library) {
const string& name = function_def_name_;
FunctionDef fdef;
TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph_, name, &fdef));
if (VLOG_IS_ON(1)) {
VLOG(2) << "Replace function def " << name;
DumpGraphToFile(absl::StrCat("replace_encapsulate_fdef_graph_", name),
*graph_, library);
DumpFunctionDefToFile(absl::StrCat("replace_encapsulate_fdef_", name),
fdef);
}
TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef));
return absl::OkStatus();
}
Status Encapsulator::Subgraph::AddFunctionCallNode(
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out) {
TF_ASSIGN_OR_RETURN(call_node_, graph_out->AddNode(call_node_def_));
call_node_->set_assigned_device_name(device_);
return absl::OkStatus();
}
Status Encapsulator::GetFunctionNameAttr(Node const* node, string* attr) const {
AttrSlice attrs = node->attrs();
attr->clear();
for (const auto& node_attr : attrs) {
if (node_attr.first == group_attribute_) {
TF_RETURN_IF_ERROR(AttrValueHasType(node_attr.second, "string"));
*attr = node_attr.second.s();
break;
}
}
return absl::OkStatus();
}
bool IsInSubgraph(const string& func_id) { return !func_id.empty(); }
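// Copies every node carrying the group attribute into the subgraph named by
// that attribute, clearing the attribute on the copy and recording the
// original -> image mapping.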
Status Encapsulator::CopySubgraphNodes(
absl::flat_hash_map<const Node*, Node*>* node_images) {
for (Node* node : graph_in_->op_nodes()) {
string func_id;
TF_RETURN_IF_ERROR(GetFunctionNameAttr(node, &func_id));
if (!IsInSubgraph(func_id)) continue;
Subgraph& subgraph = subgraphs_[func_id];
Node* image = subgraph.MakeNodeImage(graph_in_, node);
image->ClearAttr(group_attribute_);
(*node_images)[node] = image;
}
return absl::OkStatus();
}
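// Copies edges into the subgraphs. Edges with both endpoints in the same
// subgraph are copied directly; edges leaving a subgraph are recorded as
// results (_Retval), edges entering a subgraph are recorded as arguments
// (_Arg), and ref-typed tensors crossing a subgraph boundary are rejected.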
Status Encapsulator::CopySubgraphEdges(
const absl::flat_hash_map<const Node*, Node*>& node_images,
std::vector<std::pair<const Node*, Node*>>* src_arg_pairs) {
for (const Edge* edge : graph_in_->edges()) {
string src_func_id;
TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->src(), &src_func_id));
string dst_func_id;
TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->dst(), &dst_func_id));
Node* src_image = gtl::FindWithDefault(node_images, edge->src(), nullptr);
Node* dst_image = gtl::FindWithDefault(node_images, edge->dst(), nullptr);
if (IsInSubgraph(src_func_id) && IsInSubgraph(dst_func_id) &&
src_func_id == dst_func_id) {
Graph* g = subgraphs_[src_func_id].GetGraph();
if (edge->IsControlEdge()) {
        g->AddControlEdge(src_image, dst_image,
                          /*allow_duplicates=*/true);
} else {
g->AddEdge(src_image, edge->src_output(), dst_image, edge->dst_input());
}
continue;
}
if (IsInSubgraph(src_func_id)) {
if (!edge->IsControlEdge()) {
DataType dtype = edge->src()->output_type(edge->src_output());
if (IsRefType(dtype)) {
return errors::InvalidArgument(
"Ref Tensors (e.g., Variables) are not supported as results: "
"tensor ",
edge->src()->name(), ":", edge->src_output());
}
}
Subgraph& src_subgraph = subgraphs_[src_func_id];
if (edge->IsControlEdge()) {
TF_RETURN_IF_ERROR(src_subgraph.RecordControlResult(edge, node_images));
} else {
TF_RETURN_IF_ERROR(src_subgraph.RecordResult(edge, node_images));
}
}
if (IsInSubgraph(dst_func_id)) {
if (!edge->IsControlEdge()) {
DataType dtype = edge->dst()->input_type(edge->dst_input());
if (IsRefType(dtype)) {
return errors::InvalidArgument(
"Ref Tensors (e.g., Variables) are not supported as args: "
"tensor ",
edge->src()->name(), ":", edge->src_output());
}
}
Subgraph& dst_subgraph = subgraphs_[dst_func_id];
if (!edge->IsControlEdge()) {
TF_RETURN_IF_ERROR(
dst_subgraph.RecordArg(edge, node_images, src_arg_pairs));
}
}
}
return absl::OkStatus();
}
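// Splits graph_in_ into per-cluster subgraphs: copies nodes and edges, marks
// guaranteed-constant arguments, and fixes up source/sink edges in each
// subgraph.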
Status Encapsulator::SplitIntoSubgraphs(FunctionLibraryDefinition* library) {
Status s;
absl::flat_hash_map<const Node*, Node*> node_images;
std::vector<std::pair<const Node*, Node*>> src_arg_pairs;
TF_RETURN_IF_ERROR(CopySubgraphNodes(&node_images));
TF_RETURN_IF_ERROR(CopySubgraphEdges(node_images, &src_arg_pairs));
MarkGuaranteedConstants(*graph_in_, src_arg_pairs);
for (auto& entry : subgraphs_) {
Subgraph& subgraph = entry.second;
FixupSourceAndSinkEdges(subgraph.GetGraph());
}
if (VLOG_IS_ON(1)) {
for (auto& entry : subgraphs_) {
DumpGraphToFile(
absl::StrCat("encapsulate_subgraphs_subgraph_", entry.first),
*entry.second.GetGraph(), library);
}
}
return s;
}
Status Encapsulator::BuildFunctionDefs(
const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions,
FunctionLibraryDefinition* library) {
for (auto& subgraph_entry : subgraphs_) {
string name = subgraph_entry.first;
Subgraph& subgraph = subgraph_entry.second;
TF_RETURN_IF_ERROR(subgraph.BuildFunctionDef(
name, rewrite_subgraph_fn, reuse_existing_functions, library));
}
return absl::OkStatus();
}
Status Encapsulator::CopyNodesToOutputGraph(
Graph* graph_out, absl::flat_hash_map<const Node*, Node*>* node_images) {
for (Node* node : graph_in_->op_nodes()) {
string func_id;
TF_RETURN_IF_ERROR(GetFunctionNameAttr(node, &func_id));
if (IsInSubgraph(func_id)) continue;
Node* image = graph_out->CopyNode(node);
(*node_images)[node] = image;
}
(*node_images)[graph_in_->source_node()] = graph_out->source_node();
(*node_images)[graph_in_->sink_node()] = graph_out->sink_node();
return absl::OkStatus();
}
Status Encapsulator::AddFunctionCallNodes(
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out) {
for (auto& subgraph_entry : subgraphs_) {
TF_RETURN_IF_ERROR(
subgraph_entry.second.AddFunctionCallNode(node_images, graph_out));
}
return absl::OkStatus();
}
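// The next four helpers map an edge endpoint of the input graph to its
// counterpart in the output graph: the subgraph's function call node (and the
// corresponding result/arg index) if the endpoint was encapsulated, or the
// copied node (and the original slot) otherwise.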
Status Encapsulator::FindOutputImageOfEdgeSrc(
const string& src_func_id, const string& dst_func_id,
const absl::flat_hash_map<const Node*, Node*>& node_images,
const Node* original_src_node, Node** src_image) {
if (IsInSubgraph(src_func_id)) {
*src_image = subgraphs_.at(src_func_id).GetCallNode();
} else {
*src_image = node_images.at(original_src_node);
}
return absl::OkStatus();
}
int Encapsulator::FindOutputSlotOfEdgeSrc(const string& src_func_id,
const string& dst_func_id,
const Edge* edge) {
if (IsInSubgraph(src_func_id)) {
const Subgraph& src_subgraph = subgraphs_.at(src_func_id);
return src_subgraph.GetResultIndexForEdge(edge);
} else {
return edge->src_output();
}
}
Status Encapsulator::FindOutputImageOfEdgeDst(
const string& src_func_id, const string& dst_func_id,
const absl::flat_hash_map<const Node*, Node*>& node_images,
const Node* original_dst_node, Node** dst_image) {
if (IsInSubgraph(dst_func_id)) {
*dst_image = subgraphs_.at(dst_func_id).GetCallNode();
} else {
*dst_image = node_images.at(original_dst_node);
}
return absl::OkStatus();
}
int Encapsulator::FindOutputSlotOfEdgeDst(const string& src_func_id,
const string& dst_func_id,
const Edge* edge) {
if (IsInSubgraph(dst_func_id)) {
const Subgraph& dst_subgraph = subgraphs_.at(dst_func_id);
return dst_subgraph.GetArgIndexForEdge(edge);
} else {
return edge->dst_input();
}
}
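// Copies `edge` into the output graph, redirecting encapsulated endpoints to
// their function call nodes and de-duplicating edges via `edges_added`.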
Status Encapsulator::CopyEdgeToOutputGraph(
const Edge* edge, const string& src_func_id, const string& dst_func_id,
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out,
absl::flat_hash_set<std::pair<OutputTensor, InputTensor>,
OutputInputTensorPairHasher>* edges_added) {
Node* src_image;
TF_RETURN_IF_ERROR(FindOutputImageOfEdgeSrc(
src_func_id, dst_func_id, node_images, edge->src(), &src_image));
Node* dst_image;
TF_RETURN_IF_ERROR(FindOutputImageOfEdgeDst(
src_func_id, dst_func_id, node_images, edge->dst(), &dst_image));
if (edge->IsControlEdge()) {
if (edges_added
->emplace(OutputTensor(src_image, -1), InputTensor(dst_image, -1))
.second) {
      graph_out->AddControlEdge(src_image, dst_image,
                                /*allow_duplicates=*/true);
}
return absl::OkStatus();
}
int src_output = FindOutputSlotOfEdgeSrc(src_func_id, dst_func_id, edge);
int dst_input = FindOutputSlotOfEdgeDst(src_func_id, dst_func_id, edge);
if (edges_added
->emplace(OutputTensor(src_image, src_output),
InputTensor(dst_image, dst_input))
.second) {
graph_out->AddEdge(src_image, src_output, dst_image, dst_input);
}
return absl::OkStatus();
}
Status Encapsulator::AddEdgesToOutputGraph(
const absl::flat_hash_map<const Node*, Node*>& node_images,
Graph* graph_out) {
absl::flat_hash_set<std::pair<OutputTensor, InputTensor>,
OutputInputTensorPairHasher>
edges_added;
for (const Edge* edge : graph_in_->edges()) {
string src_func_id;
TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->src(), &src_func_id));
string dst_func_id;
TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->dst(), &dst_func_id));
if (IsInSubgraph(src_func_id) && IsInSubgraph(dst_func_id) &&
src_func_id == dst_func_id) {
continue;
}
TF_RETURN_IF_ERROR(CopyEdgeToOutputGraph(
edge, src_func_id, dst_func_id, node_images, graph_out, &edges_added));
}
for (auto& subgraph_entry : subgraphs_) {
Subgraph& subgraph = subgraph_entry.second;
subgraph.ConnectSequencerToCallNode(graph_out);
}
return absl::OkStatus();
}
namespace {
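// Builds a Const node with the given shape and src_node's output dtype to
// stand in for src_node's output, then wraps it in constant Enter nodes that
// match src_node's control-flow frames.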
Node* AddDummyShapedNode(const Node* src_node, int src_port,
const std::vector<ControlFlowInfo>& control_flow_info,
const TensorShapeProto& shape, Graph* graph_out) {
DataType data_type = src_node->output_type(src_port);
TensorProto dummy_proto;
dummy_proto.set_dtype(data_type);
*dummy_proto.mutable_tensor_shape() = shape;
GraphDefBuilder::Options options(graph_out, nullptr);
NodeBuilder node_builder(options.GetNameForOp("KnownShape"), "Const",
options.op_registry());
node_builder.Attr("dtype", data_type).Attr("value", dummy_proto);
Node* node = options.FinalizeBuilder(&node_builder);
while (!control_flow_info[src_node->id()].frame_name.empty()) {
NodeDebugInfo debug_info(*src_node);
NodeBuilder enter_builder(options.GetNameForOp("Enter"), "Enter",
options.op_registry(), &debug_info);
enter_builder.Attr("frame_name",
control_flow_info[src_node->id()].frame_name);
enter_builder.Attr("is_constant", true);
enter_builder.Input(node, 0);
Node* enter_node = options.FinalizeBuilder(&enter_builder);
node = enter_node;
src_node = control_flow_info[src_node->id()].parent_frame;
}
return node;
}
}
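// Makes a copy of `graph` pruned to the nodes from which `sink_nodes` are
// reachable (copying the edges among them), then inlines every function call
// node found in the copy using `library`.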
Status Encapsulator::MakePrunedGraphCopyAndInline(
const Graph& graph, const std::vector<Node*>& sink_nodes,
std::unique_ptr<Graph>* pruned_graph,
absl::flat_hash_map<const Node*, Node*>* node_images,
FunctionLibraryDefinition* library) {
pruned_graph->reset(new Graph(library));
(*pruned_graph)->set_versions(graph.versions());
ReverseDFSFrom(graph, sink_nodes,
nullptr,
[&](Node* n) {
if (!n->IsSource()) {
Node* copied = (*pruned_graph)->CopyNode(n);
node_images->emplace(n, copied);
}
});
for (auto entry : *node_images) {
const Node* orig = entry.first;
Node* image = entry.second;
for (const Edge* out_edge : orig->out_edges()) {
auto iter = node_images->find(out_edge->dst());
if (iter != node_images->end()) {
(*pruned_graph)
->AddEdge(image, out_edge->src_output(), iter->second,
out_edge->dst_input());
}
}
}
std::vector<Node*> function_nodes;
for (auto node : (*pruned_graph)->nodes()) {
const OpRegistrationData* op_reg_data;
TF_RETURN_IF_ERROR(library->LookUp(node->type_string(), &op_reg_data));
if (op_reg_data->is_function_op) {
function_nodes.push_back(node);
}
}
for (auto node : function_nodes) {
VLOG(2) << "Inlining function " << node->name();
const FunctionDef* fdef = library->Find(node->type_string());
if (fdef == nullptr) {
return errors::Internal("Failed to find function ", node->type_string(),
" in function library.");
}
std::unique_ptr<FunctionBody> fbody;
TF_RETURN_IF_ERROR(
FunctionDefToBodyHelper(*fdef, node->attrs(), library, &fbody));
InlineFunctionBodyOptions inline_opts;
TF_RETURN_IF_ERROR(InlineFunctionBody(*library, pruned_graph->get(), node,
fbody.get(), inline_opts));
}
return absl::OkStatus();
}
Status Encapsulator::BuildOutputGraph(Graph* graph_out,
FunctionLibraryDefinition* library) {
absl::flat_hash_map<const Node*, Node*> node_images;
TF_RETURN_IF_ERROR(CopyNodesToOutputGraph(graph_out, &node_images));
TF_RETURN_IF_ERROR(AddFunctionCallNodes(node_images, graph_out));
TF_RETURN_IF_ERROR(AddEdgesToOutputGraph(node_images, graph_out));
return absl::OkStatus();
}
}
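// Splits `graph_in` into subgraphs keyed by `group_attribute`, lowers each
// subgraph to a FunctionDef in `library`, and rebuilds the surrounding graph
// in `graph_out` with function call nodes in place of the encapsulated
// clusters.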
Status EncapsulateSubgraphsInFunctions(
string group_attribute, const Graph& graph_in,
const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions,
std::unique_ptr<Graph>* graph_out, FunctionLibraryDefinition* library) {
Encapsulator encapsulator(std::move(group_attribute),
&graph_in);
TF_RETURN_IF_ERROR(encapsulator.SplitIntoSubgraphs(library));
TF_RETURN_IF_ERROR(encapsulator.BuildFunctionDefs(
rewrite_subgraph_fn, reuse_existing_functions, library));
std::unique_ptr<Graph> out(new Graph(library));
out->set_versions(graph_in.versions());
TF_RETURN_IF_ERROR(encapsulator.BuildOutputGraph(out.get(), library));
*graph_out = std::move(out);
return absl::OkStatus();
}
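// Reads the output type of every _Arg node in `graph` into `types`, indexed
// by the node's "index" attribute.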
static Status GetArgTypes(const Graph& graph, DataTypeVector* types) {
for (Node* n : graph.op_nodes()) {
if (n->type_string() == kArgOp) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
const int num_types = types->size();
if (index < 0 || index >= num_types) {
return errors::InvalidArgument("Invalid argument number");
}
(*types)[index] = n->output_type(0);
}
}
return absl::OkStatus();
}
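// Rewrites each _Arg node's "index" attribute according to `permutation`.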
static Status RenumberArguments(Graph* graph,
const std::vector<int>& permutation) {
for (Node* n : graph->op_nodes()) {
if (n->type_string() == kArgOp) {
int index;
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index));
const int permutation_size = permutation.size();
if (index < 0 || index >= permutation_size) {
return errors::InvalidArgument("Invalid argument number");
}
n->AddAttr("index", permutation[index]);
}
}
return absl::OkStatus();
}
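// Pass entry point. Skips graphs that already contain TPUExecute ops, builds
// a CPU FunctionLibraryRuntime for constant folding and const analysis,
// encapsulates kXlaClusterAttr clusters (reordering each cluster's arguments
// so constants come first, then ordinary args, then resources, and annotating
// the call node with the constant/resource counts), and finally marks every
// node with kXlaHasReferenceVarsAttr.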
Status EncapsulateSubgraphsPass::Run(
const GraphOptimizationPassOptions& options) {
VLOG(1) << "EncapsulateSubgraphsPass::Run";
if (VLOG_IS_ON(1)) {
DumpGraphToFile("encapsulate_subgraphs_before", **options.graph,
options.flib_def);
}
for (Node* n : (*options.graph)->nodes()) {
if (n->type_string() == "TPUExecute" ||
n->type_string() == "TPUExecuteAndUpdateVariables") {
return absl::OkStatus();
}
}
std::unique_ptr<Graph> graph_out;
FunctionLibraryDefinition* const library = options.flib_def;
SessionOptions session_options;
auto* device_count = session_options.config.mutable_device_count();
device_count->insert({"CPU", 1});
std::vector<std::unique_ptr<Device>> devices;
DeviceFactory* cpu_factory = DeviceFactory::GetFactory("CPU");
if (!cpu_factory) {
return errors::NotFound(
"CPU Factory not registered. Can't run EncapsulateSubgraphsPass");
}
TF_RETURN_IF_ERROR(cpu_factory->CreateDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
if (devices.empty()) {
return errors::NotFound(
"Failed to create a CPU device for EncapsulateSubgraphsPass");
}
std::unique_ptr<DeviceMgr> device_mgr =
std::make_unique<StaticDeviceMgr>(std::move(devices));
const auto* config = &options.session_options->config;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(
device_mgr.get(), options.session_options->env,
config, TF_GRAPH_DEF_VERSION, library,
config->graph_options().optimizer_options()));
FunctionLibraryRuntime* flr =
pflr->GetFLR("/job:localhost/replica:0/task:0/device:CPU:0");
if (flr == nullptr) {
return errors::Internal(
"Failed to create and retrieve function library runtime to run "
"constant folding");
}
auto rewrite_subgraph =
[flr](const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* subgraph,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation, NodeDef* node) {
bool disable_constant_folding =
GetBuildXlaOpsPassFlags()->tf_xla_disable_constant_folding;
auto cf_consider_fn = [disable_constant_folding](const Node* n) {
if (disable_constant_folding) return false;
for (const auto& output_arg : n->op_def().output_arg()) {
if (output_arg.type() == DT_VARIANT) {
return false;
}
}
return true;
};
GraphOptimizer::Options graph_optimizer_options;
graph_optimizer_options.cf_consider_fn = cf_consider_fn;
OptimizeGraph(flr, subgraph, graph_optimizer_options);
const int num_args = input_permutation->size();
std::vector<bool> const_args(num_args);
TF_RETURN_IF_ERROR(
BackwardsConstAnalysis(**subgraph, &const_args,
nullptr, flr));
DataTypeVector arg_types(num_args);
TF_RETURN_IF_ERROR(GetArgTypes(**subgraph, &arg_types));
const int num_consts =
std::count(const_args.begin(), const_args.end(), true);
const int num_resources =
std::count(arg_types.begin(), arg_types.end(), DT_RESOURCE);
const int num_nonconsts = num_args - num_resources - num_consts;
if (num_nonconsts < 0) {
return errors::Internal("num_nonconsts should be >= 0, was ",
num_nonconsts);
}
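        // Compute the input permutation: compile-time constants first, then
        // ordinary arguments, then DT_RESOURCE arguments.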
int const_pos = 0;
int arg_pos = num_consts;
int resource_pos = num_consts + num_nonconsts;
for (int i = 0; i < num_args; ++i) {
if (const_args[i]) {
if (arg_types[i] == DT_RESOURCE) {
return errors::Internal(
"Resource arguments cannot be constant (argument ", i, ")");
}
(*input_permutation)[i] = const_pos;
++const_pos;
} else if (arg_types[i] == DT_RESOURCE) {
(*input_permutation)[i] = resource_pos;
++resource_pos;
} else {
(*input_permutation)[i] = arg_pos;
++arg_pos;
}
}
TF_RETURN_IF_ERROR(
RenumberArguments(subgraph->get(), *input_permutation));
AddNodeAttr(kXlaCompiledKernelAttr, true, node);
AddNodeAttr(kXlaNumConstantArgsAttr, num_consts, node);
AddNodeAttr(kXlaNumResourceArgsAttr, num_resources, node);
return absl::OkStatus();
};
TF_RETURN_WITH_CONTEXT_IF_ERROR(
EncapsulateSubgraphsInFunctions(
kXlaClusterAttr, **options.graph, rewrite_subgraph,
          /*reuse_existing_functions=*/false, &graph_out, library),
"EncapsulateSubgraphsPass failed");
if (VLOG_IS_ON(1)) {
DumpGraphToFile("encapsulate_subgraphs_after", *graph_out,
options.flib_def);
}
*options.graph = std::move(graph_out);
TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Node*> ref_related_nodes,
GetNodesRelatedToRefVariables(**options.graph, flr));
for (Node* node : (*options.graph)->nodes()) {
bool has_ref_vars = ref_related_nodes.contains(node);
node->AddAttr(kXlaHasReferenceVarsAttr, has_ref_vars);
VLOG(3) << "Has ref vars = " << has_ref_vars
<< ", node: " << node->def().DebugString();
}
return absl::OkStatus();
}
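// Returns true iff `node` carries kXlaCompiledKernelAttr set to true.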
bool IsXlaCompiledKernel(const Node& node) {
bool is_compiled = false;
bool has_compilation_attr =
TryGetNodeAttr(node.attrs(), kXlaCompiledKernelAttr, &is_compiled) &&
is_compiled;
return has_compilation_attr ? is_compiled : false;
}
}
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include <memory>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/encapsulate_util.h"
#include "tensorflow/compiler/jit/extract_outside_compilation_pass.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/compiler/tf2xla/side_effect_util.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/equal_graph_def.h"
namespace tensorflow {
namespace {
const char* const kXlaHostTransferSequencerAttr =
"_xla_host_transfer_sequencer";
Status AddGraphDefToFunctionLibrary(const GraphDefBuilder& graphdef_builder,
const string& name_suffix,
FunctionDefLibrary* library) {
GraphDef graphdef;
TF_RETURN_IF_ERROR(graphdef_builder.ToGraphDef(&graphdef));
std::unique_ptr<Graph> graph =
std::unique_ptr<Graph>(new Graph(OpRegistry::Global()));
GraphConstructorOptions opts;
opts.allow_internal_ops = true;
TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, graphdef, graph.get()));
FunctionDef* fdef = library->add_function();
TF_RETURN_IF_ERROR(GraphToFunctionDef(
*graph,
absl::StrCat("_outside_compilation_shape_inference_", name_suffix),
fdef));
return absl::OkStatus();
}
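// Compares two protobuf maps entry by entry using `compare`; on mismatch,
// writes a human-readable description of the first difference into `diff`.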
template <class Tkey, class Tvalue>
bool EqualProtoMap(const ::tensorflow::protobuf::Map<Tkey, Tvalue>& a,
const ::tensorflow::protobuf::Map<Tkey, Tvalue>& b,
const std::function<string(const Tkey&)>& key_to_string,
const std::function<string(const Tvalue&)>& value_to_string,
const std::function<bool(const Tkey&, const Tvalue&,
const Tvalue&)>& compare,
const string& map_name, string* diff) {
for (const auto& elt_a : a) {
const auto iter = b.find(elt_a.first);
if (iter == b.end()) {
if (diff) {
*diff = absl::StrCat(map_name, " expected: contains element with key '",
key_to_string(elt_a.first),
"' got: map has no such element");
}
return false;
}
if (!compare(elt_a.first, elt_a.second, iter->second)) {
if (diff) {
*diff = absl::StrCat(map_name, " expected: element with key '",
key_to_string(elt_a.first), "' has value '",
value_to_string(elt_a.second), "' got: '",
value_to_string(iter->second), "'");
}
return false;
}
}
for (const auto& elt_b : b) {
const auto iter = a.find(elt_b.first);
if (iter == a.end()) {
if (diff) {
*diff = absl::StrCat(map_name, " got: contains element with key '",
key_to_string(elt_b.first),
"' expected: map has no such element");
}
return false;
}
}
return true;
}
bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b,
const string& diff_preamble, string* diff) {
if (a.op() != b.op()) {
if (diff) {
*diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
", expected op '", a.op(), "' got '", b.op());
}
return false;
}
if (a.device() != b.device()) {
if (diff) {
*diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
", expected device '", a.device(), "' got '",
b.device());
}
return false;
}
if (a.input_size() != b.input_size()) {
if (diff) {
*diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
", expected ", a.input_size(), " inputs got ",
b.input_size(), " expected:\n", a.DebugString(),
"\ngot:\n", b.DebugString());
}
return false;
}
std::unordered_set<string> control_input_a;
std::unordered_set<string> control_input_b;
for (int i = 0; i < a.input_size(); ++i) {
if (absl::StartsWith(a.input(i), "^")) {
if (!absl::StartsWith(b.input(i), "^")) {
if (diff) {
*diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
" input ", i, ", expected control input ",
a.input(i), " got ", b.input(i), " expected:\n",
a.DebugString(), "\ngot:\n", b.DebugString());
}
return false;
}
control_input_a.insert(a.input(i));
control_input_b.insert(b.input(i));
} else if (a.input(i) != b.input(i)) {
if (diff) {
*diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
" input ", i, ", expected ", a.input(i), " got ",
b.input(i), " expected:\n", a.DebugString(),
"\ngot:\n", b.DebugString());
}
return false;
}
}
if (control_input_a != control_input_b) {
if (diff) {
*diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(),
" control inputs differ expected:\n",
a.DebugString(), "\ngot:\n", b.DebugString());
}
return false;
}
return EqualProtoMap<string, AttrValue>(
a.attr(), b.attr(), [](const string& s) { return s; },
[](const AttrValue& v) { return v.DebugString(); },
[](const string& key, const AttrValue& av, const AttrValue& bv) {
if (key == "ancestors") {
std::unordered_set<string> a_set(av.list().s().begin(),
av.list().s().end());
std::unordered_set<string> b_set(bv.list().s().begin(),
bv.list().s().end());
return a_set == b_set;
} else {
return av.DebugString() == bv.DebugString();
}
},
absl::StrCat(diff_preamble, " attr mismatch for node ", a.name()), diff);
}
bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b,
string* diff) {
if (a.signature().DebugString() != b.signature().DebugString()) {
if (diff) {
*diff =
absl::StrCat("Signature mismatch for function ", a.signature().name(),
", expected:\n", a.signature().DebugString(), "\ngot:\n",
b.signature().DebugString());
}
return false;
}
if (!EqualProtoMap<string, AttrValue>(
a.attr(), b.attr(), [](const string& s) { return s; },
[](const AttrValue& v) { return v.DebugString(); },
[](const string& key, const AttrValue& av, const AttrValue& bv) {
return av.DebugString() == bv.DebugString();
},
absl::StrCat("attr mismatch for function ", a.signature().name()),
diff)) {
return false;
}
if (!EqualProtoMap<string, string>(
a.ret(), b.ret(), [](const string& s) { return s; },
[](const string& s) { return s; },
[](const string& key, const string& av, const string& bv) {
return av == bv;
},
absl::StrCat("ret mismatch for function ", a.signature().name()),
diff)) {
return false;
}
for (int i = 0; i < a.node_def_size(); ++i) {
bool found = false;
for (int j = 0; j < b.node_def_size(); ++j) {
if (a.node_def(i).name() == b.node_def(j).name()) {
if (!EqualFunctionNodeDef(
a.node_def(i), b.node_def(j),
absl::StrCat("Function ", a.signature().name()), diff)) {
return false;
}
found = true;
break;
}
}
if (!found) {
if (diff) {
*diff = absl::StrCat("Function ", a.signature().name(),
", expected: has node '", a.node_def(i).name(),
"' got: no node of that name");
}
return false;
}
}
for (int i = 0; i < b.node_def_size(); ++i) {
bool found = false;
for (int j = 0; j < a.node_def_size(); ++j) {
if (b.node_def(i).name() == a.node_def(j).name()) {
found = true;
break;
}
}
if (!found) {
if (diff) {
*diff = absl::StrCat("Function ", a.signature().name(),
", got: has node '", b.node_def(i).name(),
"' expected: no node of that name");
}
return false;
}
}
return true;
}
bool EqualFunctionDefLibrary(const FunctionDefLibrary& expected,
const FunctionDefLibrary& actual, string* diff) {
std::unordered_map<string, const FunctionDef*> actual_index;
for (const FunctionDef& function : actual.function()) {
actual_index[function.signature().name()] = &function;
}
for (const FunctionDef& expected_function : expected.function()) {
auto it = actual_index.find(expected_function.signature().name());
if (it == actual_index.end()) {
if (diff) {
*diff = absl::StrCat("Did not find expected function '",
expected_function.signature().name(), "'");
}
return false;
}
if (!EqualFunctionDef(expected_function, *it->second, diff)) return false;
actual_index.erase(it);
}
if (!actual_index.empty()) {
if (diff != nullptr) {
*diff =
absl::StrCat("Found unexpected function '",
actual_index.begin()->second->signature().name(), "'");
}
return false;
}
return true;
}
#define TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(expected, actual) \
do { \
string diff; \
EXPECT_TRUE(EqualFunctionDefLibrary(expected, actual, &diff)) \
<< diff << "\nActual: " << actual.DebugString(); \
} while (false)
REGISTER_OP("InputTest")
.Output("o: float")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
REGISTER_OP("InputTestShaped")
.Output("o: float")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
});
REGISTER_OP("UnaryTest")
.Input("a: float")
.Output("o: float")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
::tensorflow::shape_inference::ShapeHandle o;
TF_RETURN_IF_ERROR(c->Merge(c->UnknownShape(), c->input(0), &o));
c->set_output(0, o);
return absl::OkStatus();
});
REGISTER_OP("BinaryTest")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
::tensorflow::shape_inference::ShapeHandle o;
TF_RETURN_IF_ERROR(c->Merge(c->UnknownShape(), c->input(0), &o));
c->set_output(0, o);
return absl::OkStatus();
});
REGISTER_OP("BinaryTest2")
.Input("a: float")
.Input("b: float")
.Output("o: float")
.SetShapeFn(::tensorflow::shape_inference::UnknownShape);
REGISTER_OP("AddNLikeTest")
.Input("inputs: N * T")
.Output("sum: T")
.Attr("N: int >= 1")
.Attr("T: numbertype")
.SetIsCommutative()
.SetIsAggregate();
Node* Sequencer(const GraphDefBuilder::Options& opts,
const string& call_node_name) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp("NoOp"), "NoOp",
opts.op_registry());
return opts.WithAttr(kXlaHostTransferSequencerAttr, call_node_name)
.FinalizeBuilder(&node_builder);
}
Node* Input(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("InputTest", opts);
}
Node* InputShaped(const GraphDefBuilder::Options& opts) {
return ops::SourceOp("InputTestShaped", opts);
}
Node* KnownShapeBase(DataType dtype, absl::Span<const int> shape,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp("Const"), "Const",
opts.op_registry());
TensorProto value;
value.set_dtype(dtype);
for (int dim : shape) {
value.mutable_tensor_shape()->add_dim()->set_size(dim);
}
return opts.WithAttr("value", value)
.WithAttr("dtype", dtype)
.FinalizeBuilder(&node_builder);
}
Node* KnownShape(absl::Span<const int> shape,
const GraphDefBuilder::Options& opts) {
return KnownShapeBase(DT_FLOAT, shape, opts);
}
Node* KeyPlaceholderShape(const GraphDefBuilder::Options& opts) {
return KnownShapeBase(DT_STRING, {2}, opts);
}
Node* KeyPlaceholder(const string& call_node,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(absl::StrCat(call_node, "_key_placeholder"),
"Placeholder", opts.op_registry());
TensorShapeProto shape;
shape.add_dim()->set_size(2);
return opts.WithAttr("shape", shape)
.WithAttr("dtype", DT_STRING)
.WithAttr("_host_compute_call_node", call_node)
.FinalizeBuilder(&node_builder);
}
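// Builds a _XlaRecvAtHost node for outside compilation cluster `oc_cluster`
// of `cluster`, keyed by the corresponding host compute channel name.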
Node* RecvAtHost(ops::NodeOut key_input, const string& cluster,
const string& new_func_name, const string& oc_cluster,
absl::Span<const DataType> dtypes,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
string key = absl::StrCat("host_compute_channel_", cluster, "_",
new_func_name, "_", oc_cluster);
string name = absl::StrCat("outside_compilation_", cluster, "_",
new_func_name, "_", oc_cluster, "_recv");
NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaRecvAtHost"),
"_XlaRecvAtHost", opts.op_registry());
node_builder.Input(std::move(key_input));
return opts.WithAttr("Toutputs", dtypes)
.WithAttr("key", key)
.WithAttr("device_ordinal", 0)
.WithAttr("_encapsulate", cluster)
.WithAttr("_outside", oc_cluster)
.FinalizeBuilder(&node_builder);
}
Node* SendFromHost(ops::NodeOut key_input, const string& cluster,
const string& new_func_name, const string& oc_cluster,
const std::vector<ops::NodeOut>& inputs,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
string key = absl::StrCat("host_compute_channel_", cluster, "_",
new_func_name, "_", oc_cluster);
string name = absl::StrCat("outside_compilation_", cluster, "_",
new_func_name, "_", oc_cluster, "_send");
NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaSendFromHost"),
"_XlaSendFromHost", opts.op_registry());
node_builder.Input(inputs);
node_builder.Input(std::move(key_input));
std::vector<DataType> dtypes;
for (const auto& node : inputs) {
dtypes.push_back(node.dt);
}
return opts.WithAttr("Tinputs", dtypes)
.WithAttr("key", key)
.WithAttr("device_ordinal", 0)
.WithAttr("_encapsulate", cluster)
.WithAttr("_outside", oc_cluster)
.FinalizeBuilder(&node_builder);
}
Node* Unary(ops::NodeOut a, const GraphDefBuilder::Options& opts) {
return ops::UnaryOp("UnaryTest", std::move(a), opts);
}
Node* Binary(ops::NodeOut a, ops::NodeOut b,
const GraphDefBuilder::Options& opts) {
return ops::BinaryOp("BinaryTest", std::move(a), std::move(b), opts);
}
Node* BinaryUnknownShape(ops::NodeOut a, ops::NodeOut b,
const GraphDefBuilder::Options& opts) {
return ops::BinaryOp("BinaryTest2", std::move(a), std::move(b), opts);
}
Node* AddNLike(const std::vector<ops::NodeOut>& inputs,
const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp("AddN"), "AddNLikeTest",
opts.op_registry());
node_builder.Input(inputs);
return opts.FinalizeBuilder(&node_builder);
}
Node* ArgOp(int index, DataType type, const GraphDefBuilder::Options& opts) {
return ops::SourceOp("_Arg",
opts.WithAttr("T", type).WithAttr("index", index));
}
Node* RetOp(int index, ops::NodeOut a, const GraphDefBuilder::Options& opts) {
if (opts.HaveError()) return nullptr;
NodeBuilder node_builder(opts.GetNameForOp("Retval"), "_Retval",
opts.op_registry());
node_builder.Input(std::move(a)).Attr("index", index);
return opts.FinalizeBuilder(&node_builder);
}
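// Test helper: converts `graphdef` to a Graph, runs static shape inference,
// encapsulates "_encapsulate" clusters into functions, extracts outside
// compilation for `encapsulated_functions`, and writes the rewritten graph
// and function library back into the arguments.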
Status Encapsulate(GraphDef* graphdef, FunctionDefLibrary* library,
const std::vector<string>& encapsulated_functions) {
Status s;
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), *library));
GraphConstructorOptions options;
options.allow_internal_ops = true;
std::unique_ptr<Graph> graph(new Graph(lib_def.get()));
s = ConvertGraphDefToGraph(options, *graphdef, graph.get());
if (!s.ok()) return s;
s = PerformStaticShapeInferenceBeforeEncapsulation(graph.get());
if (!s.ok()) return s;
SessionOptions session_options;
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::AddDevices(
session_options, "/job:localhost/replica:0/task:0", &devices));
OptimizerOptions opts;
auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
device_mgr.get(), Env::Default(), nullptr,
TF_GRAPH_DEF_VERSION, lib_def.get(), opts,
nullptr, nullptr);
auto flr = pflr->GetFLR("/job:localhost/replica:0/task:0/cpu:0");
std::unique_ptr<Graph> graph_out;
s = EncapsulateSubgraphsInFunctions("_encapsulate", *graph,
{},
false,
&graph_out, lib_def.get());
if (!s.ok()) return s;
std::unordered_map<string, XlaClusterInfo> clusters;
for (const auto& func : encapsulated_functions) {
    Node* xla_computation_node = nullptr;
for (Node* n : graph_out->nodes()) {
if (n->name() == func) {
xla_computation_node = n;
}
}
if (!xla_computation_node) {
return errors::Internal("Cannot find node ", func);
}
NameAttrList func_name_attrs;
func_name_attrs.set_name(func);
clusters.emplace(func,
XlaClusterInfo{func, func_name_attrs, xla_computation_node,
std::map<string, int>{}});
}
bool modified;
s = ExtractOutsideCompilation("_encapsulate", "_outside", clusters,
graph_out.get(), flr, lib_def.get(), &modified);
if (!s.ok()) return s;
GraphDef graphdef_out;
graph_out->ToGraphDef(&graphdef_out);
graphdef->Swap(&graphdef_out);
*library = lib_def->ToProto();
for (FunctionDef& fdef : *library->mutable_function()) {
for (NodeDef& node_def : *fdef.mutable_node_def()) {
node_def.mutable_attr()->erase("_xla_inferred_shapes");
}
}
return s;
}
Status Encapsulate(GraphDef* graphdef, FunctionDefLibrary* library) {
std::vector<string> encapsulated_functions;
return Encapsulate(graphdef, library, encapsulated_functions);
}
TEST(EncapsulateSubgraphsTest, NoFunctions) {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = Input(builder.opts().WithName("A"));
Node* b = Input(builder.opts().WithName("B"));
Node* c = Unary(a, builder.opts().WithName("C"));
Binary(b, c, builder.opts().WithName("D"));
GraphDef graphdef_in;
FunctionDefLibrary library_in;
TF_EXPECT_OK(builder.ToGraphDef(&graphdef_in));
*library_in.add_function() = test::function::XTimesTwo();
GraphDef graphdef_out = graphdef_in;
FunctionDefLibrary library_out = library_in;
TF_EXPECT_OK(Encapsulate(&graphdef_out, &library_out));
TF_EXPECT_GRAPH_EQ(graphdef_in, graphdef_out);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_in, library_out);
}
TEST(EncapsulateSubgraphsTest, OneFunction) {
FunctionDefLibrary library;
GraphDef graphdef;
{
*library.add_function() = test::function::XTimesTwo();
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d = Binary(b, c,
b1.opts().WithName("c").WithControlInput(c).WithAttr(
"_encapsulate", "F1"));
Binary(a, d, b1.opts().WithName("E"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
TF_EXPECT_OK(Encapsulate(&graphdef, &library));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
*library_expected.add_function() = test::function::XTimesTwo();
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"}, {"c_0_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"c"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}, {"C"}},
},
{{"c_0_retval", "c:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
NodeBuilder node_builder("F1", "F1", lib_def.get());
node_builder.Input(a).Input(b);
Node* call = b2.opts().FinalizeBuilder(&node_builder);
Binary(a, call, b2.opts().WithName("E"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
TEST(EncapsulateSubgraphsTest, TwoFunctions) {
FunctionDefLibrary library;
GraphDef graphdef;
{
*library.add_function() = test::function::XTimesTwo();
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* control = Input(b1.opts().WithName("Control"));
Node* c =
Unary(a, b1.opts().WithName("C").WithControlInput(control).WithAttr(
"_encapsulate", "F1"));
Node* d = Binary(b, c,
b1.opts().WithName("D").WithControlInput(control).WithAttr(
"_encapsulate", "F2"));
Binary(a, d, b1.opts().WithName("E"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
TF_EXPECT_OK(Encapsulate(&graphdef, &library));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
*library_expected.add_function() = test::function::XTimesTwo();
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float"}, {"c_0_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
},
{{"c_0_retval", "C:o:0"}});
*library_expected.add_function() = FunctionDefHelper::Create(
"F2", {"b_0_arg:float", "c_0_arg:float"}, {"d_0_retval:float"}, {},
{
{{"D"}, "BinaryTest", {"b_0_arg", "c_0_arg"}},
},
{{"d_0_retval", "D:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* control = Input(b2.opts().WithName("Control"));
NodeBuilder nb("F1", "F1", lib_def.get());
nb.Input(a).ControlInput(control);
Node* call1 = b2.opts().FinalizeBuilder(&nb);
NodeBuilder nb2("F2", "F2", lib_def.get());
nb2.Input(b).Input(call1).ControlInput(control);
Node* call2 = b2.opts().FinalizeBuilder(&nb2);
Binary(a, call2, b2.opts().WithName("E"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
std::vector<string> GraphNodes(const Graph& graph) {
std::vector<string> nodes;
for (const auto& node : graph.nodes()) {
if (!node->IsSource() && !node->IsSink()) {
nodes.push_back(node->name());
}
}
std::sort(nodes.begin(), nodes.end());
return nodes;
}
std::vector<std::pair<string, string>> GraphEdges(const Graph& graph) {
std::vector<std::pair<string, string>> edges;
for (const Edge* edge : graph.edges()) {
if (edge->src()->IsSource() || edge->dst()->IsSink()) continue;
edges.emplace_back(
absl::StrCat(edge->src()->name(), ":", edge->src_output()),
absl::StrCat(edge->dst()->name(), ":", edge->dst_input()));
}
std::sort(edges.begin(), edges.end());
return edges;
}
TEST(EncapsulateSubgraphsTest, InputDeduplication) {
Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
"/job:localhost/replica:0/task:0/cpu:0");
auto x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT);
auto add1 = ops::Add(root.WithOpName("add1"), x, x);
add1.node()->AddAttr("_cluster", "cluster1");
auto add2 = ops::Add(root.WithOpName("add2"), add1, add1);
add2.node()->AddAttr("_cluster", "cluster2");
auto out = ops::Mul(root.WithOpName("mul"), add1, add2);
Graph graph_before_encapsulation(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph_before_encapsulation));
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(EncapsulateSubgraphsInFunctions(
"_cluster", graph_before_encapsulation,
      /*rewrite_subgraph_fn=*/{},
      /*reuse_existing_functions=*/false, &graph, &library));
std::vector<string> expected_nodes = {"cluster1", "cluster2", "mul", "x"};
EXPECT_EQ(expected_nodes, GraphNodes(*graph));
std::vector<std::pair<string, string>> expected_edges = {
{"cluster1:0", "cluster2:0"},
{"cluster1:0", "mul:0"},
{"cluster2:0", "mul:1"},
{"x:0", "cluster1:0"}};
EXPECT_EQ(expected_edges, GraphEdges(*graph));
}
const Node* FindNodeByName(const Graph& graph, const string& name) {
for (const Node* node : graph.nodes()) {
if (node->name() == name) return node;
}
return nullptr;
}
bool HasGuaranteeConstAttr(const Node& n) {
bool is_guaranteed_constant = false;
if (!GetNodeAttr(n.attrs(), "_is_guaranteed_constant",
&is_guaranteed_constant)
.ok()) {
return false;
}
return is_guaranteed_constant;
}
TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Simple) {
Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
"/job:localhost/replica:0/task:0/cpu:0");
auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT);
auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT);
auto const_guarantee_x2 =
ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2);
auto const_guarantee_x1 =
ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1);
auto add1 =
ops::Add(root.WithOpName("add1"), const_guarantee_x1, const_guarantee_x2);
add1.node()->AddAttr("_encapsulate", "encapsulate1");
Graph graph_before(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph_before));
std::unique_ptr<Graph> graph_after;
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
int guaranteed_consts = 0;
TF_ASSERT_OK(EncapsulateSubgraphsInFunctions(
"_encapsulate", graph_before,
[&guaranteed_consts](const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph_ptr,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation,
NodeDef* call_def) {
Graph* graph = graph_ptr->get();
for (const Node* n : graph->nodes()) {
if (n->type_string() == "_Arg" &&
absl::StartsWith(n->name(), "const")) {
++guaranteed_consts;
EXPECT_TRUE(HasGuaranteeConstAttr(*n));
} else {
EXPECT_FALSE(HasGuaranteeConstAttr(*n));
}
}
return absl::OkStatus();
},
      /*reuse_existing_functions=*/false, &graph_after, &library));
EXPECT_EQ(2, guaranteed_consts);
}
TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Add) {
Scope root = Scope::NewRootScope().ExitOnError().WithDevice(
"/job:localhost/replica:0/task:0/cpu:0");
auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT);
auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT);
auto const_guarantee_x1 =
ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1);
auto const_guarantee_x2 =
ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2);
auto const_guarantee_add1 = ops::Add(root.WithOpName("const_guarantee_add1"),
const_guarantee_x1, const_guarantee_x2);
auto add2 = ops::Add(root.WithOpName("add2"), const_guarantee_x1, x2);
auto mul1 = ops::Mul(root.WithOpName("mul1"), const_guarantee_add1, add2);
mul1.node()->AddAttr("_encapsulate", "encapsulate1");
Graph graph_before(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(&graph_before));
std::unique_ptr<Graph> graph_after;
FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary());
int guaranteed_consts = 0;
TF_ASSERT_OK(EncapsulateSubgraphsInFunctions(
"_encapsulate", graph_before,
[&guaranteed_consts](const std::vector<OutputTensor>& arg_source_tensors,
std::unique_ptr<Graph>* graph_ptr,
std::vector<int>* input_permutation,
std::vector<int>* output_permutation,
NodeDef* call_def) {
Graph* graph = graph_ptr->get();
for (const Node* n : graph->nodes()) {
if (n->type_string() == "_Arg" &&
absl::StartsWith(n->name(), "const")) {
++guaranteed_consts;
EXPECT_TRUE(HasGuaranteeConstAttr(*n));
} else {
EXPECT_FALSE(HasGuaranteeConstAttr(*n));
}
}
return absl::OkStatus();
},
      /*reuse_existing_functions=*/false, &graph_after, &library));
EXPECT_EQ(1, guaranteed_consts);
}
TEST(EncapsulateSubgraphsTest, OneFunctionOneOutside) {
FunctionDefLibrary library;
GraphDef graphdef;
{
*library.add_function() = test::function::XTimesTwo();
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d = Binary(b, c,
b1.opts().WithName("c").WithControlInput(c).WithAttr(
"_encapsulate", "F1"));
Node* e = Binary(c, d,
b1.opts()
.WithName("E")
.WithControlInputs({b, d})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Binary(c, e,
b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Binary(a, f, b1.opts().WithName("G").WithControlInput(e));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape.opts());
Node* recv = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv, 0), ops::NodeOut(recv, 1),
shape.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = test::function::XTimesTwo();
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"c"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}, {"C"}},
{{"F"},
"BinaryTest",
{"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "c:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"c"}},
},
{{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv, 0), ops::NodeOut(recv, 1),
b2.opts()
.WithName("E")
.WithControlInputs({recv})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithControlInput(e).WithAttr(
kXlaHasHostTransferAttrName, true));
Node* s = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv, send}),
"F1");
NodeBuilder node_builder("F1", "F1", lib_def.get());
node_builder.Input(a).Input(b);
Node* call =
b2.opts().WithControlInputs({s, b}).FinalizeBuilder(&node_builder);
Binary(a, call, b2.opts().WithName("G").WithControlInputs({call}));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
TEST(EncapsulateSubgraphsTest, OneFunctionTwoOutside) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Binary(c, d,
b1.opts()
.WithName("E")
.WithControlInputs({b, d})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Binary(c, e,
b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Node* g = Binary(e, f,
b1.opts()
.WithName("G")
.WithControlInputs({e, f})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* h = Binary(d, e,
b1.opts()
.WithName("H")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* i = Unary(h, b1.opts().WithName("I").WithAttr("_encapsulate", "F1"));
Binary(g, i, b1.opts().WithName("J"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv, 0), ops::NodeOut(recv, 1),
shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
{
GraphDefBuilder shape2(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape2.opts());
Node* recv1 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
shape2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* recv2 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT, DT_FLOAT},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Binary(e, ops::NodeOut(recv2, 0),
shape2.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* h = Binary(ops::NodeOut(recv2, 1), e,
shape2.opts()
.WithName("H")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g, h},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape2, "F1_F1_O2", &library_expected));
}
NameAttrList shape_inference_graph1, shape_inference_graph2;
shape_inference_graph1.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
shape_inference_graph2.set_name(
"_outside_compilation_shape_inference_F1_F1_O2");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"g_0_retval_retval:float", "i_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}},
{{"I"},
"UnaryTest",
{"outside_compilation_O2_host_compute:outputs:1"}},
{{"F"},
"BinaryTest",
{"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"F:o:0", "D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O2"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph2},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O2"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node",
"outside_compilation_O1_host_compute"})},
{"_xla_original_oc_node_name",
"outside_compilation_O2_host_compute"}},
{"F", "outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph1},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"D"}},
},
{{"g_0_retval_retval", "outside_compilation_O2_host_compute:outputs:0"},
{"i_0_retval_retval", "I:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
b2.opts()
.WithName("E")
.WithControlInputs({recv1})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithControlInput(e).WithAttr(
kXlaHasHostTransferAttrName, true));
Node* recv2 = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Binary(e, ops::NodeOut(recv2, 0),
b2.opts()
.WithName("G")
.WithControlInputs({recv2, e})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* h = Binary(ops::NodeOut(recv2, 1), e,
b2.opts()
.WithName("H")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
Node* send2 =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g, h},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* s = Sequencer(b2.opts()
.WithName("F1_sequencer")
.WithControlInputs({recv1, send1, recv2, send2}),
"F1");
NodeBuilder node_builder("F1", "F1", lib_def.get());
node_builder.Input(a).Input(b);
Node* call =
b2.opts().WithControlInputs({s, b}).FinalizeBuilder(&node_builder);
Binary(ops::NodeOut(call, 0), ops::NodeOut(call, 1),
b2.opts().WithName("J"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
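// Two compiled functions (F1, F2), each with one outside_compilation cluster.
// The inputs have statically known shapes, so the expected XlaHostCompute ops
// carry a populated `shapes` attribute and an empty shape_inference_graph.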
TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutside) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = InputShaped(b1.opts().WithName("A"));
Node* b = InputShaped(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Binary(c, d,
b1.opts()
.WithName("E")
.WithControlInputs({b, d})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Binary(c, e,
b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Node* g = Binary(e, f,
b1.opts().WithName("G").WithControlInputs({e, f}).WithAttr(
"_encapsulate", "F2"));
Node* h = Binary(d, g,
b1.opts()
.WithName("H")
.WithAttr("_encapsulate", "F2")
.WithAttr("_outside", "O1"));
Node* i =
Binary(f, h, b1.opts().WithName("I").WithAttr("_encapsulate", "F2"));
Binary(g, i, b1.opts().WithName("J"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1", "F2"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
TensorShapeProto shape_proto_expected;
shape_proto_expected.add_dim()->set_size(2);
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "f_0_retval_retval:float",
"d_0_retval_retval:float"},
{},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"},
"BinaryTest",
{"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes",
absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"D"}},
},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"d_0_retval_retval", "D:o:0"},
{"f_0_retval_retval", "F:o:0"}});
*library_expected.add_function() = FunctionDefHelper::Create(
"F2", {"e_0_arg:float", "f_0_arg:float", "d_0_arg:float"},
{"g_0_retval_retval:float", "i_0_retval_retval:float"}, {},
{
{{"G"}, "BinaryTest", {"e_0_arg", "f_0_arg"}},
{{"I"},
"BinaryTest",
{"f_0_arg", "outside_compilation_O1_host_compute:outputs:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"d_0_arg", "G:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F2_F2_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes",
absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"g_0_retval_retval", "G:o:0"}, {"i_0_retval_retval", "I:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = InputShaped(b2.opts().WithName("A"));
Node* b = InputShaped(b2.opts().WithName("B"));
Node* key_constant1 =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(
ops::NodeOut(key_constant1, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
b2.opts()
.WithName("E")
.WithControlInputs({recv1})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 =
SendFromHost(ops::NodeOut(key_constant1, 0), "F1", "F1", "O1", {e},
b2.opts().WithControlInput(e).WithAttr(
kXlaHasHostTransferAttrName, true));
Node* s1 = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b);
Node* call1 =
b2.opts().WithControlInputs({s1, b}).FinalizeBuilder(&node_builder1);
Node* key_constant2 =
KeyPlaceholder("F2", b2.opts().WithName("F2_key_placeholder"));
Node* recv2 = RecvAtHost(
ops::NodeOut(key_constant2, 0), "F2", "F2", "O1", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* h = Binary(recv2, ops::NodeOut(recv2, 1),
b2.opts()
.WithName("H")
.WithAttr("_encapsulate", "F2")
.WithAttr("_outside", "O1"));
Node* send2 =
SendFromHost(ops::NodeOut(key_constant2, 0), "F2", "F2", "O1", {h},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* s2 = Sequencer(
b2.opts().WithName("F2_sequencer").WithControlInputs({recv2, send2}),
"F2");
NodeBuilder node_builder2("F2", "F2", lib_def.get());
node_builder2.Input(call1)
.Input(ops::NodeOut(call1, 1))
.Input(ops::NodeOut(call1, 2));
Node* call2 = b2.opts()
.WithControlInputs({s2, call1})
.FinalizeBuilder(&node_builder2);
Binary(call2, ops::NodeOut(call2, 1), b2.opts().WithName("J"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
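// Variant with two compiled functions where F2's outside_compilation cluster
// depends only on top-level graph inputs, so F1 and F2 are rewritten with
// independent host-side receive/send/sequencer chains.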
TEST(EncapsulateSubgraphsTest, TwoFunctionsTwoOutsideDependencyFromOutside) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = InputShaped(b1.opts().WithName("A"));
Node* b = InputShaped(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Binary(c, d,
b1.opts()
.WithName("E")
.WithControlInputs({b, d})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Binary(c, e,
b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Node* g =
Binary(a, b, b1.opts().WithName("G").WithAttr("_encapsulate", "F2"));
Node* h = Unary(g, b1.opts()
.WithName("H")
.WithAttr("_encapsulate", "F2")
.WithAttr("_outside", "O1"));
Node* i = Unary(h, b1.opts().WithName("I").WithAttr("_encapsulate", "F2"));
Binary(f, i, b1.opts().WithName("J"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1", "F2"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
TensorShapeProto shape_proto_expected;
shape_proto_expected.add_dim()->set_size(2);
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"},
"BinaryTest",
{"C:o:0", "outside_compilation_O1_host_compute:outputs:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"C:o:0", "D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes",
absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"D"}},
},
{{"f_0_retval_retval", "F:o:0"}});
*library_expected.add_function() = FunctionDefHelper::Create(
"F2", {"a_0_arg:float", "b_0_arg:float"}, {"i_0_retval_retval:float"}, {},
{
{{"G"}, "BinaryTest", {"a_0_arg", "b_0_arg"}},
{{"I"},
"UnaryTest",
{"outside_compilation_O1_host_compute:outputs:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"G:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F2_F2_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes",
absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"i_0_retval_retval", "I:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = InputShaped(b2.opts().WithName("A"));
Node* b = InputShaped(b2.opts().WithName("B"));
Node* key_constant1 =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(ops::NodeOut(key_constant1, 0), "F1", "F1", "O1",
{DT_FLOAT, DT_FLOAT}, b2.opts());
Node* e = Binary(ops::NodeOut(recv1, 0), ops::NodeOut(recv1, 1),
b2.opts()
.WithName("E")
.WithControlInputs({recv1})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 = SendFromHost(ops::NodeOut(key_constant1, 0), "F1", "F1", "O1",
{e}, b2.opts().WithControlInput(e));
Node* s1 = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b);
Node* call1 =
b2.opts().WithControlInputs({s1, b}).FinalizeBuilder(&node_builder1);
Node* key_constant2 =
KeyPlaceholder("F2", b2.opts().WithName("F2_key_placeholder"));
Node* recv2 = RecvAtHost(ops::NodeOut(key_constant2, 0), "F2", "F2", "O1",
{DT_FLOAT}, b2.opts());
Node* h = Unary(recv2, b2.opts()
.WithName("H")
.WithAttr("_encapsulate", "F2")
.WithAttr("_outside", "O1"));
Node* send2 = SendFromHost(ops::NodeOut(key_constant2, 0), "F2", "F2", "O1",
{h}, b2.opts());
Node* s2 = Sequencer(
b2.opts().WithName("F2_sequencer").WithControlInputs({recv2, send2}),
"F2");
NodeBuilder node_builder2("F2", "F2", lib_def.get());
node_builder2.Input(a).Input(b);
Node* call2 =
b2.opts().WithControlInputs({s2}).FinalizeBuilder(&node_builder2);
Binary(call1, call2, b2.opts().WithName("J"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
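// The outside_compilation node's only input is a function argument (no inputs
// produced by compiled nodes). The argument has a known shape, so the expected
// XlaHostCompute carries static `shapes` instead of a shape-inference graph.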
TEST(EncapsulateSubgraphsTest, OutsideCompilationNoInputs) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = InputShaped(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(a, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f =
Binary(d, e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Unary(f, b1.opts().WithName("G"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
TensorShapeProto shape_proto_expected;
shape_proto_expected.add_dim()->set_size(2);
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"},
"BinaryTest",
{"D:o:0", "outside_compilation_O1_host_compute:outputs:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"a_0_arg"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes",
absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = InputShaped(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{DT_FLOAT}, b2.opts());
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{e}, b2.opts());
Node* s1 = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({send1, recv1}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b);
Node* call1 =
b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
Unary(call1, b2.opts().WithName("G"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
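// Like the previous test, but the outside_compilation node also has a control
// input from compiled node D; in the rewritten host graph that control edge
// comes from the _XlaRecvAtHost node instead.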
TEST(EncapsulateSubgraphsTest, OutsideCompilationControlInput) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = InputShaped(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(a, b1.opts()
.WithName("E")
.WithControlInput(d)
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f =
Binary(d, e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Unary(f, b1.opts().WithName("G"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
TensorShapeProto shape_proto_expected;
shape_proto_expected.add_dim()->set_size(2);
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"}, {"f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"},
"BinaryTest",
{"D:o:0", "outside_compilation_O1_host_compute:outputs:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"a_0_arg"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes",
absl::Span<const TensorShapeProto>({shape_proto_expected})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"D"}},
},
{{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = InputShaped(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{DT_FLOAT}, b2.opts());
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithControlInput(recv1)
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{e}, b2.opts());
Node* s1 = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b);
Node* call1 =
b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
Unary(call1, b2.opts().WithName("G"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
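// The outside_compilation result is consumed only outside the compiled
// function and its shape is not statically known, so a shape-inference graph
// is registered for the XlaHostCompute op instead of static shapes.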
TEST(EncapsulateSubgraphsTest, OutsideCompilationNoOutputs) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(d, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Unary(d, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Binary(e, f, b1.opts().WithName("G"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv1 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"}, "UnaryTest", {"D:o:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{DT_FLOAT}, b2.opts());
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{e}, b2.opts());
Node* s1 = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b);
Node* call1 =
b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("G"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
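// A control edge from the outside_compilation node E back into compiled node F
// becomes a control dependency of F on the XlaHostCompute op, and the host-side
// _XlaSendFromHost node gets a control input from E.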
TEST(EncapsulateSubgraphsTest, OutsideCompilationControlOutput) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(d, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Unary(d, b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Binary(e, f, b1.opts().WithName("G"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv1 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"},
"UnaryTest",
{"D:o:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 = RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{DT_FLOAT}, b2.opts());
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 = SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1",
{e}, b2.opts().WithControlInput(e));
Node* s1 = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv1, send1}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b);
Node* call1 =
b2.opts().WithControlInput(s1).FinalizeBuilder(&node_builder1);
Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("G"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
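// Control dependency between two outside_compilation clusters (E in O1 -> G in
// O2): O2's expected XlaHostCompute lists O1 in _xla_token_input_nodes and in
// its control dependencies.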
TEST(EncapsulateSubgraphsTest,
OutsideCompilationClusterDependencyNoSrcCluster) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(a, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Unary(d, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Node* g = Unary(f, b1.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2")
.WithControlInput(e));
Node* h = Unary(g, b1.opts().WithName("H").WithAttr("_encapsulate", "F1"));
Binary(e, h, b1.opts().WithName("I"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv1 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(ops::NodeOut(recv1, 0), shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
{
GraphDefBuilder shape2(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape2.opts());
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Unary(ops::NodeOut(recv2, 0), shape2.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g},
shape2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape2, "F1_F1_O2", &library_expected));
}
NameAttrList shape_inference_graph1;
shape_inference_graph1.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
NameAttrList shape_inference_graph2;
shape_inference_graph2.set_name(
"_outside_compilation_shape_inference_F1_F1_O2");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "h_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"}, "UnaryTest", {"D:o:0"}},
{{"H"},
"UnaryTest",
{"outside_compilation_O2_host_compute:outputs:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"a_0_arg"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph1},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"F:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O2"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph2},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O2"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node",
"outside_compilation_O1_host_compute"})},
{"_xla_original_oc_node_name",
"outside_compilation_O2_host_compute"}},
{"outside_compilation_O1_host_compute"}},
},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"h_0_retval_retval", "H:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send1 =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Unary(recv2, b2.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2")
.WithControlInput(e));
Node* send2 =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {g},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* s1 = Sequencer(b2.opts()
.WithName("F1_sequencer")
.WithControlInputs({recv1, send1, recv2, send2}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b).ControlInput(s1);
Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("I"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
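// The dependent cluster O2 produces no value consumed by the compiled graph,
// so its XlaHostCompute has empty Toutputs and the host graph has no
// _XlaSendFromHost for it; the token dependency on O1 is still recorded.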
TEST(EncapsulateSubgraphsTest,
OutsideCompilationClusterDependencyNoDstCluster) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(d, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Unary(e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Unary(a, b1.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2")
.WithControlInput(e));
Node* h = Unary(f, b1.opts().WithName("H").WithAttr("_encapsulate", "F1"));
Binary(e, h, b1.opts().WithName("I"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(ops::NodeOut(recv2, 0), shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "h_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"},
"UnaryTest",
{"outside_compilation_O1_host_compute:outputs:0"}},
{{"H"}, "UnaryTest", {"F:o:0"}},
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"a_0_arg"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O2"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O2"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node",
"outside_compilation_O1_host_compute"})},
{"_xla_original_oc_node_name",
"outside_compilation_O2_host_compute"}},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"h_0_retval_retval", "H:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Unary(recv2, b2.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2")
.WithControlInput(e));
Node* s1 = Sequencer(b2.opts()
.WithName("F1_sequencer")
.WithControlInputs({recv1, recv2, send}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b).ControlInput(s1);
Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("I"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
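// Three outside_compilation clusters chained by control edges (E -> G -> I);
// O3's expected XlaHostCompute lists both O1 and O2 in _xla_token_input_nodes
// and depends on both host compute ops.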
TEST(EncapsulateSubgraphsTest, OutsideCompilationClusterDependency) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(d, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Unary(e, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Node* g = Unary(d, b1.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2")
.WithControlInput(e));
Node* h = Unary(f, b1.opts().WithName("H").WithAttr("_encapsulate", "F1"));
Binary(d, e,
b1.opts()
.WithName("I")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O3")
.WithControlInput(g));
Binary(e, h, b1.opts().WithName("J"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(ops::NodeOut(recv2, 0), shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "h_0_retval_retval:float"}, {},
{{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"}, "UnaryTest", {"outside_compilation_O1_host_compute:outputs:0"}},
{{"H"}, "UnaryTest", {"F:o:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
{{"outside_compilation_O2_host_compute"},
"XlaHostCompute",
{"D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O2"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O2"},
{"_xla_token_input_nodes",
absl::Span<const string>(
{"_xla_token_arg_node", "outside_compilation_O1_host_compute"})},
{"_xla_original_oc_node_name", "outside_compilation_O2_host_compute"}},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O3_host_compute"},
"XlaHostCompute",
{"D:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O3"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", NameAttrList()},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O3"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node",
"outside_compilation_O1_host_compute",
"outside_compilation_O2_host_compute"})},
{"_xla_original_oc_node_name", "outside_compilation_O3_host_compute"}},
{"outside_compilation_O1_host_compute",
"outside_compilation_O2_host_compute"}}},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"h_0_retval_retval", "H:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv1 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(recv1, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O2", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* g = Unary(recv2, b2.opts()
.WithName("G")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O2")
.WithControlInput(e));
Node* recv3 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O3", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Binary(recv3, e,
b2.opts()
.WithName("I")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O3")
.WithControlInput(g));
Node* s1 = Sequencer(b2.opts()
.WithName("F1_sequencer")
.WithControlInputs({recv1, send, recv2, recv3}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b).ControlInput(s1);
Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("J"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
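// The outside_compilation node exchanges no data with the compiled nodes: its
// input is a function argument and its result is only used outside the
// function. It is still wired through XlaHostCompute and the host sequencer.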
TEST(EncapsulateSubgraphsTest, OutsideCompilationNoInputsOrOutputs) {
FunctionDefLibrary library;
GraphDef graphdef;
{
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = Input(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1"));
Node* d =
Binary(b, c, b1.opts().WithName("D").WithAttr("_encapsulate", "F1"));
Node* e = Unary(a, b1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Unary(d, b1.opts().WithName("F").WithAttr("_encapsulate", "F1"));
Binary(e, f, b1.opts().WithName("G"));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape1(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape1.opts());
Node* recv2 =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(ops::NodeOut(recv2, 0), shape1.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape1.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape1, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"a_0_arg:float", "b_0_arg:float"},
{"e_0_retval_retval:float", "f_0_retval_retval:float"}, {},
{
{{"C"}, "UnaryTest", {"a_0_arg"}},
{{"D"}, "BinaryTest", {"b_0_arg", "C:o:0"}},
{{"F"}, "UnaryTest", {"D:o:0"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"a_0_arg"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const TensorShapeProto>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}}},
},
{{"e_0_retval_retval", "outside_compilation_O1_host_compute:outputs:0"},
{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = Input(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv =
RecvAtHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = Unary(recv, b2.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* s = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv, send}),
"F1");
NodeBuilder node_builder1("F1", "F1", lib_def.get());
node_builder1.Input(a).Input(b).ControlInput(s);
Node* call1 = b2.opts().FinalizeBuilder(&node_builder1);
Binary(call1, ops::NodeOut(call1, 1), b2.opts().WithName("G"));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
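// The outside_compilation op (BinaryUnknownShape) has an output of unknown
// shape, so an _outside_compilation_shape_inference_F1_F1_O1 function is added
// to the expected library and referenced by the XlaHostCompute op.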
TEST(EncapsulateSubgraphsTest, OutsideCompilationShapeInference) {
FunctionDefLibrary library;
GraphDef graphdef;
{
*library.add_function() = test::function::XTimesTwo();
GraphDefBuilder b1(GraphDefBuilder::kFailImmediately);
Node* a = InputShaped(b1.opts().WithName("A"));
Node* b = Input(b1.opts().WithName("B"));
Node* c = Unary(a, b1.opts().WithName("C"));
Node* d = Unary(b, b1.opts().WithName("c").WithControlInput(c).WithAttr(
"_encapsulate", "F1"));
Node* e = BinaryUnknownShape(c, d,
b1.opts()
.WithName("E")
.WithControlInputs({b, d})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* f = Binary(c, e,
b1.opts().WithName("F").WithControlInput(e).WithAttr(
"_encapsulate", "F1"));
Binary(a, f, b1.opts().WithName("G").WithControlInput(e));
TF_EXPECT_OK(b1.ToGraphDef(&graphdef));
}
std::vector<string> encapsulated_functions{"F1"};
TF_EXPECT_OK(Encapsulate(&graphdef, &library, encapsulated_functions));
FunctionDefLibrary library_expected;
GraphDef graphdef_expected;
{
GraphDefBuilder shape(GraphDefBuilder::kFailImmediately);
Node* key_constant = KeyPlaceholder("F1", shape.opts());
Node* recv = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = BinaryUnknownShape(recv, ops::NodeOut(recv, 1),
shape.opts()
.WithName("E")
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
shape.opts().WithAttr(kXlaHasHostTransferAttrName, true));
TF_EXPECT_OK(
AddGraphDefToFunctionLibrary(shape, "F1_F1_O1", &library_expected));
}
NameAttrList shape_inference_graph;
shape_inference_graph.set_name(
"_outside_compilation_shape_inference_F1_F1_O1");
*library_expected.add_function() = test::function::XTimesTwo();
*library_expected.add_function() = FunctionDefHelper::Create(
"F1", {"b_0_arg:float", "c_0_arg:float"}, {"f_0_retval_retval:float"}, {},
{
{{"c"}, "UnaryTest", {"b_0_arg"}, {}, {}},
{{"F"},
"BinaryTest",
{"c_0_arg", "outside_compilation_O1_host_compute:outputs:0"},
{},
{"outside_compilation_O1_host_compute"}},
{{"outside_compilation_O1_host_compute"},
"XlaHostCompute",
{"c_0_arg", "c:o:0"},
{{"Tinputs", absl::Span<const DataType>({DT_FLOAT, DT_FLOAT})},
{"Toutputs", absl::Span<const DataType>({DT_FLOAT})},
{"ancestors", absl::Span<const string>({})},
{"key", "host_compute_channel_F1_F1_O1"},
{"send_key", ""},
{"recv_key", ""},
{"shape_inference_graph", shape_inference_graph},
{"tpu_core", 0},
{"cost_estimate_ns", 1000000},
{"shapes", absl::Span<const DataType>({})},
{"_outside_compilation_subgraph", "O1"},
{"_xla_token_input_nodes",
absl::Span<const string>({"_xla_token_arg_node"})},
{"_xla_original_oc_node_name",
"outside_compilation_O1_host_compute"}},
{"c"}},
},
{{"f_0_retval_retval", "F:o:0"}});
{
std::unique_ptr<FunctionLibraryDefinition> lib_def(
new FunctionLibraryDefinition(OpRegistry::Global(), library_expected));
GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get());
Node* a = InputShaped(b2.opts().WithName("A"));
Node* b = Input(b2.opts().WithName("B"));
Node* c = Unary(a, b2.opts().WithName("C"));
Node* key_constant =
KeyPlaceholder("F1", b2.opts().WithName("F1_key_placeholder"));
Node* recv = RecvAtHost(
ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {DT_FLOAT, DT_FLOAT},
b2.opts().WithAttr(kXlaHasHostTransferAttrName, true));
Node* e = BinaryUnknownShape(recv, ops::NodeOut(recv, 1),
b2.opts()
.WithName("E")
.WithControlInputs({recv})
.WithAttr("_encapsulate", "F1")
.WithAttr("_outside", "O1"));
Node* send =
SendFromHost(ops::NodeOut(key_constant, 0), "F1", "F1", "O1", {e},
b2.opts().WithControlInput(e).WithAttr(
kXlaHasHostTransferAttrName, true));
Node* s = Sequencer(
b2.opts().WithName("F1_sequencer").WithControlInputs({recv, send}),
"F1");
NodeBuilder node_builder("F1", "F1", lib_def.get());
node_builder.Input(b).Input(c);
Node* call =
b2.opts().WithControlInputs({s, b, c}).FinalizeBuilder(&node_builder);
Binary(a, call, b2.opts().WithName("G").WithControlInputs({call}));
TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected));
}
TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef);
TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library);
}
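// Helper that builds a small graph which reads a ref-typed Variable (via
// Identity) and feeds the value through a couple of ops.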
void CreateSubgraphTouchingRefVar(const Scope& s) {
Output variable =
ops::Variable(s.WithOpName("variable"), PartialTensorShape{}, DT_FLOAT);
Output read = ops::Identity(s.WithOpName("read_ref_var"), variable);
Output neg = ops::Negate(s.WithOpName("negate_ref"), read);
Output add = ops::Add(s.WithOpName("add_ref"), neg, neg);
Output constant =
ops::Const(s.WithOpName("constant_ref"), Input::Initializer(0.0));
s.graph()->AddControlEdge(constant.node(), variable.node());
}
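// After EncapsulateSubgraphsPass runs on a graph that touches a ref variable,
// every node other than _SOURCE and _SINK should carry
// kXlaHasReferenceVarsAttr = true.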
TEST(EncapsulateSubgraphsTest, RefVariablesMarked) {
Scope root = Scope::NewRootScope().ExitOnError();
CreateSubgraphTouchingRefVar(root);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(graph.get()));
GraphOptimizationPassWrapper wrapper;
GraphOptimizationPassOptions options =
wrapper.CreateGraphOptimizationPassOptions(&graph);
EncapsulateSubgraphsPass pass;
TF_ASSERT_OK(pass.Run(options));
for (const Node* node : graph->nodes()) {
bool has_ref_var;
TF_ASSERT_OK(
GetNodeAttr(node->attrs(), kXlaHasReferenceVarsAttr, &has_ref_var));
EXPECT_TRUE(node->IsSink() || node->IsSource() || has_ref_var)
<< "All nodes apart from source and sink can access reference variable";
}
}
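// Helper that builds a graph of plain value ops with no reference variables.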
void CreateSubgraphNotTouchingRefVar(const Scope& s) {
Output constant =
ops::Const(s.WithOpName("constant_normal"), Input::Initializer(0.0));
Output neg = ops::Negate(s.WithOpName("negate_normal"), constant);
Output add = ops::Add(s.WithOpName("add_normal"), neg, neg);
}
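// With no reference variables anywhere in the graph, kXlaHasReferenceVarsAttr
// should be false on every node after the pass runs.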
TEST(EncapsulateSubgraphsTest, NoRefVarsNoAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
CreateSubgraphNotTouchingRefVar(root);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(graph.get()));
GraphOptimizationPassWrapper wrapper;
GraphOptimizationPassOptions options =
wrapper.CreateGraphOptimizationPassOptions(&graph);
EncapsulateSubgraphsPass pass;
TF_ASSERT_OK(pass.Run(options));
for (const Node* node : graph->nodes()) {
bool has_ref_var;
TF_ASSERT_OK(
GetNodeAttr(node->attrs(), kXlaHasReferenceVarsAttr, &has_ref_var));
EXPECT_FALSE(has_ref_var) << "The graph does not have reference variables";
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1b81dd1f-cf15-4132-82de-311b16082ad9 | cpp | tensorflow/tensorflow | resource_operation_safety_analysis | tensorflow/compiler/jit/resource_operation_safety_analysis.cc | tensorflow/compiler/jit/resource_operation_safety_analysis_test.cc | #include "tensorflow/compiler/jit/resource_operation_safety_analysis.h"
#include "absl/container/flat_hash_set.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "absl/types/optional.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
namespace {
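// Returns the resource operation kind of `n` (read, write, or read-write), or
// nullopt if `n` does not touch resource variables.  Nodes matched by
// `resource_ops_to_ignore` are skipped, and nodes that may call into the
// function library are conservatively treated as read-write.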
Status XlaResourceOpKindForNode(
const Node& n, const FunctionLibraryDefinition* flib_def,
const std::function<Status(const Node&, bool*)>& resource_ops_to_ignore,
std::optional<XlaResourceOpKind>* out_resource_op_kind) {
bool should_ignore = false;
if (resource_ops_to_ignore) {
TF_RETURN_IF_ERROR(resource_ops_to_ignore(n, &should_ignore));
}
if (should_ignore) {
*out_resource_op_kind = std::nullopt;
return absl::OkStatus();
}
const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(n.type_string());
if (op_info) {
*out_resource_op_kind = op_info->kind();
return absl::OkStatus();
}
if (MayCallFunction(n, flib_def)) {
*out_resource_op_kind = XlaResourceOpKind::kReadWrite;
} else {
*out_resource_op_kind = std::nullopt;
}
return absl::OkStatus();
}
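// An ordering between two resource operations is safe iff the first is a pure
// read or the second is a pure write; any other pairing (a write or modify
// followed by a read or modify) is unsafe.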
bool IsEdgeSafe(XlaResourceOpKind from, XlaResourceOpKind to) {
return from == XlaResourceOpKind::kRead || to == XlaResourceOpKind::kWrite;
}
using ResourceOp = std::pair<int, XlaResourceOpKind>;
string ResourceOpToString(const ResourceOp& resource_op) {
return absl::StrCat(
resource_op.first, ": ",
XlaResourceOpInfo::XlaResourceOpKindToString(resource_op.second));
}
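// A set of (node id, resource op kind) pairs with copy-on-write sharing:
// Add(const ResourceOpSet&) aliases the other set's storage when possible
// (freezing it against further mutation), and a private copy is materialized
// only on the first element-wise insertion.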
class ResourceOpSet {
private:
using Impl = absl::flat_hash_set<ResourceOp>;
public:
ResourceOpSet() = default;
void Add(const ResourceOpSet& other) {
CHECK(!frozen_);
if (other.impl_ == impl_) {
other.frozen_ = true;
return;
}
if (!impl_) {
other.frozen_ = true;
impl_ = other.impl_;
return;
}
for (ResourceOp resource_op : other) {
Add(resource_op);
}
}
void Add(const ResourceOp& resource_op) {
CHECK(!frozen_);
if (!IsCopy() && Contains(resource_op)) {
return;
}
EnsureIsCopied();
impl_->insert(resource_op);
}
Impl::const_iterator begin() const {
return impl_ ? impl_->begin() : GetEmptyImpl()->begin();
}
Impl::const_iterator end() const {
return impl_ ? impl_->end() : GetEmptyImpl()->end();
}
bool Contains(const ResourceOp& resource_op) const {
return impl_ != nullptr && impl_->count(resource_op);
}
private:
bool IsCopy() const { return storage_ != nullptr; }
void EnsureIsCopied() {
if (storage_ == nullptr) {
storage_ = std::make_unique<Impl>();
for (ResourceOp op : *this) {
storage_->insert(op);
}
impl_ = storage_.get();
}
}
static Impl* GetEmptyImpl() {
static Impl* empty_impl = new Impl;
return empty_impl;
}
Impl* impl_ = nullptr;
std::unique_ptr<Impl> storage_;
mutable bool frozen_ = false;
ResourceOpSet(const ResourceOpSet&) = delete;
void operator=(const ResourceOpSet&) = delete;
};
string ResourceOpSetToString(const ResourceOpSet& resource_op_set) {
std::vector<string> elements_debug_string;
std::transform(resource_op_set.begin(), resource_op_set.end(),
std::back_inserter(elements_debug_string), ResourceOpToString);
return absl::StrCat("{", absl::StrJoin(elements_debug_string, ","), "}");
}
string NodeToString(const Node& n, XlaResourceOpKind resource_op_kind) {
return absl::StrCat(
"[", n.name(), ": ", n.type_string(), "(",
XlaResourceOpInfo::XlaResourceOpKindToString(resource_op_kind), ")", "]");
}
}
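// Propagates, in reverse post order and ignoring NextIteration back edges, the
// set of resource operations reaching each node, and emits every
// (write-or-modify, read-or-modify) pair whose relative order auto-clustering
// must not change.  Pure reads are never added to the propagated sets since no
// unsafe ordering can start at a read.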
Status ComputeIncompatibleResourceOperationPairs(
const Graph& g, const FunctionLibraryDefinition* flib_def,
const std::function<Status(const Node&, bool*)>& resource_ops_to_ignore,
std::vector<std::pair<int, int>>* result) {
CHECK(result->empty());
std::vector<Node*> rpo;
GetReversePostOrder(g, &rpo, NodeComparatorName(),
[](const Edge& edge) {
return !edge.src()->IsNextIteration();
});
auto resource_op_set_for_node =
std::make_unique<ResourceOpSet[]>(g.num_node_ids());
const bool vlog = VLOG_IS_ON(2);
for (Node* n : rpo) {
std::optional<XlaResourceOpKind> op_kind;
TF_RETURN_IF_ERROR(XlaResourceOpKindForNode(
*n, flib_def, resource_ops_to_ignore, &op_kind));
ResourceOpSet* resource_op_set = &resource_op_set_for_node[n->id()];
for (const Edge* e : n->in_edges()) {
if (n->IsMerge() && e->src()->IsNextIteration()) {
continue;
}
const ResourceOpSet& incoming_op_set =
resource_op_set_for_node[e->src()->id()];
resource_op_set->Add(incoming_op_set);
}
if (op_kind) {
for (ResourceOp incoming_op : *resource_op_set) {
if (IsEdgeSafe(incoming_op.second, *op_kind)) {
continue;
}
if (vlog) {
VLOG(2) << "Unsafe edge: "
<< NodeToString(*g.FindNodeId(incoming_op.first),
incoming_op.second)
<< " -> " << NodeToString(*n, *op_kind);
}
result->push_back({incoming_op.first, n->id()});
}
if (op_kind != XlaResourceOpKind::kRead) {
resource_op_set->Add({n->id(), *op_kind});
}
}
if (vlog) {
VLOG(3) << n->name() << " -> " << ResourceOpSetToString(*resource_op_set);
}
}
std::sort(result->begin(), result->end());
CHECK(std::unique(result->begin(), result->end()) == result->end());
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/resource_operation_safety_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
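// Graph-building helpers: MakeRead creates a VarHandleOp + ReadVariableOp
// pair, MakeWrite an AssignVariableOp fed by a constant, MakeModify an
// AssignAddVariableOp (a read-modify-write), and MakeNeutral a plain Const
// with no resource semantics.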
Node* MakeRead(const Scope& scope, const string& id) {
Output var_handle =
ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({}));
Output read =
ops::ReadVariableOp(scope.WithOpName("Read" + id), var_handle, DT_FLOAT);
return read.node();
}
Node* MakeWrite(const Scope& scope, const string& id) {
Output var_handle =
ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({}));
Output value_to_write =
ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f);
ops::AssignVariableOp assign_op(scope.WithOpName("Assignee" + id), var_handle,
value_to_write);
return assign_op.operation.node();
}
Node* MakeModify(const Scope& scope, const string& id) {
Output var_handle =
ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({}));
Output value_to_write = ops::Const(scope.WithOpName("Increment" + id), 1.0f);
ops::AssignAddVariableOp assign_add_op(scope.WithOpName("Increment" + id),
var_handle, value_to_write);
return assign_add_op.operation.node();
}
Node* MakeNeutral(const Scope& scope, const string& id) {
return ops::Const(scope.WithOpName("Const" + id), 42.0f).node();
}
Status ComputeIncompatiblePairs(Graph* g,
std::vector<std::pair<int, int>>* result) {
FixupSourceAndSinkEdges(g);
return ComputeIncompatibleResourceOperationPairs(*g, &g->flib_def(), {},
result);
}
TEST(ResourceOperationSafetyAnalysisTest, WriteRead) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(write, read);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> write_read_pair = {write->id(), read->id()};
EXPECT_EQ(incompatible_pairs[0], write_read_pair);
}
TEST(ResourceOperationSafetyAnalysisTest, ReadWrite) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(read, write);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, ReadWriteNoEdges) {
Scope root = Scope::NewRootScope().ExitOnError();
MakeRead(root, "R");
MakeWrite(root, "W");
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, ReadModify) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* modify = MakeModify(root, "M");
root.graph()->AddControlEdge(read, modify);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, ModifyRead) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* modify = MakeModify(root, "M");
root.graph()->AddControlEdge(modify, read);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> modify_read_pair = {modify->id(), read->id()};
EXPECT_EQ(incompatible_pairs[0], modify_read_pair);
}
TEST(ResourceOperationSafetyAnalysisTest, ModifyWrite) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* modify = MakeModify(root, "M");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(modify, write);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, WriteModify) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* modify = MakeModify(root, "M");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(write, modify);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> write_modify_pair = {write->id(), modify->id()};
EXPECT_EQ(incompatible_pairs[0], write_modify_pair);
}
TEST(ResourceOperationSafetyAnalysisTest, ReadModifyWrite) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* modify = MakeModify(root, "M");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(read, modify);
root.graph()->AddControlEdge(modify, write);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, WriteModifyRead) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* modify = MakeModify(root, "M");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(write, modify);
root.graph()->AddControlEdge(modify, read);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 3);
std::pair<int, int> write_modify_pair = {write->id(), modify->id()};
std::pair<int, int> modify_read_pair = {modify->id(), read->id()};
std::pair<int, int> write_read_pair = {write->id(), read->id()};
EXPECT_EQ(incompatible_pairs[0], modify_read_pair);
EXPECT_EQ(incompatible_pairs[1], write_read_pair);
EXPECT_EQ(incompatible_pairs[2], write_modify_pair);
}
TEST(ResourceOperationSafetyAnalysisTest, WriteReadModify) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* modify = MakeModify(root, "M");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(write, read);
root.graph()->AddControlEdge(read, modify);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 2);
std::pair<int, int> write_modify_pair = {write->id(), modify->id()};
std::pair<int, int> write_read_pair = {write->id(), read->id()};
EXPECT_EQ(incompatible_pairs[0], write_read_pair);
EXPECT_EQ(incompatible_pairs[1], write_modify_pair);
}
FunctionDefLibrary CreateFunctionDefLibWithConstFunction(const string& name) {
FunctionDefLibrary flib_def;
FunctionDef func = FunctionDefHelper::Create(
name, {}, {"out: float"},
{}, {FunctionDefHelper::Const("one", 1.0f)},
{{"out", "out:output:0"}});
*flib_def.add_function() = std::move(func);
return flib_def;
}
Node* MakeCall(Graph* graph, const string& callee_name, const string& node_name,
Status* status) {
NodeDef call_node;
call_node.set_name(node_name);
call_node.set_op(callee_name);
return graph->AddNode(call_node, status);
}
TEST(ResourceOperationSafetyAnalysisTest, CallRead) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary flib_def =
CreateFunctionDefLibWithConstFunction("Const_func");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
Node* read = MakeRead(root, "R");
Status status;
Node* call = MakeCall(root.graph(), "Const_func", "C", &status);
TF_ASSERT_OK(status);
root.graph()->AddControlEdge(call, read);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> call_read_edge = {call->id(), read->id()};
EXPECT_EQ(incompatible_pairs[0], call_read_edge);
}
TEST(ResourceOperationSafetyAnalysisTest, ReadCall) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary flib_def =
CreateFunctionDefLibWithConstFunction("Const_func");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
Node* read = MakeRead(root, "R");
Status status;
Node* call = MakeCall(root.graph(), "Const_func", "C", &status);
TF_ASSERT_OK(status);
root.graph()->AddControlEdge(read, call);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, CallWrite) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary flib_def =
CreateFunctionDefLibWithConstFunction("Const_func");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
Node* write = MakeWrite(root, "W");
Status status;
Node* call = MakeCall(root.graph(), "Const_func", "C", &status);
TF_ASSERT_OK(status);
root.graph()->AddControlEdge(call, write);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
EXPECT_EQ(incompatible_pairs.size(), 0);
}
TEST(ResourceOperationSafetyAnalysisTest, WriteCall) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary flib_def =
CreateFunctionDefLibWithConstFunction("Const_func");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
Node* write = MakeWrite(root, "W");
Status status;
Node* call = MakeCall(root.graph(), "Const_func", "C", &status);
TF_ASSERT_OK(status);
root.graph()->AddControlEdge(write, call);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> write_call_edge = {write->id(), call->id()};
EXPECT_EQ(incompatible_pairs[0], write_call_edge);
}
TEST(ResourceOperationSafetyAnalysisTest, SymbolicGradientRead) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary flib_def =
CreateFunctionDefLibWithConstFunction("Const_func");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
Node* read = MakeRead(root, "R");
NameAttrList fn;
fn.set_name("Const_func");
Node* symbolic_gradient =
ops::SymbolicGradient(root, {ops::Const(root, 1.0f)},
{DT_FLOAT}, fn)
.output[0]
.node();
root.graph()->AddControlEdge(symbolic_gradient, read);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> symbolic_gradient_read_edge = {symbolic_gradient->id(),
read->id()};
EXPECT_EQ(incompatible_pairs[0], symbolic_gradient_read_edge);
}
TEST(ResourceOperationSafetyAnalysisTest, WriteSymbolicGradient) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary flib_def =
CreateFunctionDefLibWithConstFunction("Const_func");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
Node* write = MakeWrite(root, "W");
NameAttrList fn;
fn.set_name("Const_func");
Node* symbolic_gradient =
ops::SymbolicGradient(root, {ops::Const(root, 1.0f)},
{DT_FLOAT}, fn)
.output[0]
.node();
root.graph()->AddControlEdge(write, symbolic_gradient);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> write_symbolic_gradient_edge = {write->id(),
symbolic_gradient->id()};
EXPECT_EQ(incompatible_pairs[0], write_symbolic_gradient_edge);
}
TEST(ResourceOperationSafetyAnalysisTest, ChainOfOps) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* write_0 = MakeWrite(root, "W0");
Node* neutral_0 = MakeNeutral(root, "N0");
Node* read_0 = MakeRead(root, "R0");
Node* write_1 = MakeWrite(root, "W1");
Node* neutral_1 = MakeNeutral(root, "N1");
Node* read_1 = MakeRead(root, "R1");
root.graph()->AddControlEdge(write_0, neutral_0);
root.graph()->AddControlEdge(neutral_0, read_0);
root.graph()->AddControlEdge(read_0, write_1);
root.graph()->AddControlEdge(write_1, neutral_1);
root.graph()->AddControlEdge(neutral_1, read_1);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 3);
std::pair<int, int> write_0_read_0_pair = {write_0->id(), read_0->id()};
std::pair<int, int> write_0_read_1_pair = {write_0->id(), read_1->id()};
std::pair<int, int> write_1_read_1_pair = {write_1->id(), read_1->id()};
EXPECT_EQ(incompatible_pairs[0], write_0_read_0_pair);
EXPECT_EQ(incompatible_pairs[1], write_0_read_1_pair);
EXPECT_EQ(incompatible_pairs[2], write_1_read_1_pair);
}
TEST(ResourceOperationSafetyAnalysisTest, DagOfOps) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* write_0 = MakeWrite(root, "W0");
Node* write_1 = MakeWrite(root, "W1");
Node* neutral = MakeNeutral(root, "N");
Node* read_0 = MakeRead(root, "R0");
Node* read_1 = MakeRead(root, "R1");
root.graph()->AddControlEdge(write_0, neutral);
root.graph()->AddControlEdge(write_1, neutral);
root.graph()->AddControlEdge(neutral, read_0);
root.graph()->AddControlEdge(neutral, read_1);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 4);
std::pair<int, int> write_0_read_0_pair = {write_0->id(), read_0->id()};
std::pair<int, int> write_0_read_1_pair = {write_0->id(), read_1->id()};
std::pair<int, int> write_1_read_0_pair = {write_1->id(), read_0->id()};
std::pair<int, int> write_1_read_1_pair = {write_1->id(), read_1->id()};
EXPECT_EQ(incompatible_pairs[0], write_0_read_0_pair);
EXPECT_EQ(incompatible_pairs[1], write_0_read_1_pair);
EXPECT_EQ(incompatible_pairs[2], write_1_read_0_pair);
EXPECT_EQ(incompatible_pairs[3], write_1_read_1_pair);
}
TEST(ResourceOperationSafetyAnalysisTest, DagOfOpsWithRepeatedPaths) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* write_0 = MakeWrite(root, "W0");
Node* write_1 = MakeWrite(root, "W1");
Node* neutral = MakeNeutral(root, "N");
Node* read_0 = MakeRead(root, "R0");
Node* read_1 = MakeRead(root, "R1");
root.graph()->AddControlEdge(write_0, neutral);
root.graph()->AddControlEdge(write_1, neutral);
root.graph()->AddControlEdge(neutral, read_0);
root.graph()->AddControlEdge(neutral, read_1);
root.graph()->AddControlEdge(write_1, read_1);
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 4);
std::pair<int, int> write_0_read_0_pair = {write_0->id(), read_0->id()};
std::pair<int, int> write_0_read_1_pair = {write_0->id(), read_1->id()};
std::pair<int, int> write_1_read_0_pair = {write_1->id(), read_0->id()};
std::pair<int, int> write_1_read_1_pair = {write_1->id(), read_1->id()};
EXPECT_EQ(incompatible_pairs[0], write_0_read_0_pair);
EXPECT_EQ(incompatible_pairs[1], write_0_read_1_pair);
EXPECT_EQ(incompatible_pairs[2], write_1_read_0_pair);
EXPECT_EQ(incompatible_pairs[3], write_1_read_1_pair);
}
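// Builds a bare while-loop skeleton (Enter/Merge/Switch/NextIteration/Exit)
// and threads a write -> read chain through the loop body; the single
// write/read incompatibility must still be reported despite the back edge.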
TEST(ResourceOperationSafetyAnalysisTest, Loop) {
Scope root = Scope::NewRootScope().ExitOnError();
Output init_value = ops::Placeholder(root.WithOpName("init"), DT_FLOAT);
Output loop_cond = ops::Placeholder(root.WithOpName("init"), DT_BOOL);
Output enter_value =
ops::internal::Enter(root.WithOpName("enter"), init_value, "fr");
ops::Merge iv(root.WithOpName("iv"), {enter_value, enter_value});
ops::Switch latch(root.WithOpName("latch"), iv.output, loop_cond);
ops::internal::Exit exit(root.WithOpName("exit"), iv.output);
Output next_iteration =
ops::NextIteration(root.WithOpName("next_iteration"), latch.output_true);
TF_ASSERT_OK(
root.graph()->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1));
Node* write = MakeWrite(root, "W");
Node* read = MakeRead(root, "R");
root.graph()->AddControlEdge(iv.output.node(), write);
root.graph()->AddControlEdge(write, read);
root.graph()->AddControlEdge(read, next_iteration.node());
std::vector<std::pair<int, int>> incompatible_pairs;
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs));
ASSERT_EQ(incompatible_pairs.size(), 1);
std::pair<int, int> write_read_pair = {write->id(), read->id()};
EXPECT_EQ(incompatible_pairs[0], write_read_pair);
}
bool IsResourceArgDef(const OpDef::ArgDef& arg_def) {
return arg_def.type() == DT_RESOURCE;
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/resource_operation_safety_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/resource_operation_safety_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3de9abf4-899d-48ad-b144-0361983111d1 | cpp | tensorflow/tensorflow | mark_for_compilation_pass | tensorflow/compiler/jit/mark_for_compilation_pass.cc | tensorflow/compiler/jit/mark_for_compilation_pass_test.cc | #include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
#include <algorithm>
#include <atomic>
#include <deque>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/jit/compilability_check_util.h"
#include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/resource_operation_safety_analysis.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/resource_operation_table.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/service/graphcycles/graphcycles.h"
#include "xla/union_find.h"
#include "xla/util.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/gtl/flatmap.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
using DeadnessPredicate = DeadnessAnalysis::DeadnessPredicate;
using jit::DeviceId;
using jit::DeviceSet;
const char* kXlaAlreadyClustered = "_XlaAlreadyClustered";
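// Implements auto-clustering: find compilable candidate nodes, start each in
// its own cluster, greedily contract edges of the cycle-detection graph while
// the merge stays legal, and finally tag every node of each surviving cluster
// with the kXlaClusterAttr attribute.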
class MarkForCompilationPassImpl {
public:
struct DebugOptions {
bool ignore_deadness_checks;
bool ignore_resource_variable_checks;
bool ignore_xla_compile_attr;
bool deterministic_cluster_names;
int max_cluster_size;
int min_cluster_size;
std::atomic<int64_t>* fuel;
bool dump_graphs;
};
MarkForCompilationPassImpl(DebugOptions debug_options, Graph* graph,
FunctionLibraryDefinition* flib_def, Env* env,
OptimizerOptions::GlobalJitLevel global_jit_level,
bool cpu_global_jit,
std::string cluster_name_prefix)
: debug_options_(debug_options),
graph_(graph),
graph_fingerprint_(0),
flib_def_(flib_def),
env_(env),
global_jit_level_(global_jit_level),
cpu_global_jit_(cpu_global_jit),
cluster_name_prefix_(cluster_name_prefix) {}
Status Run();
private:
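  // A group of candidate nodes currently slated to be compiled together.
  // Tracks the cluster's size, device set, deadness predicate, XLA scope,
  // _XlaCompile bit and contained resource-variable operations;
  // cycles_graph_node_id() names its representative node in the
  // cycle-detection graph.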
class Cluster {
public:
Cluster(int tf_graph_node_id, int effective_cluster_size,
bool has_functional_control_flow, DeviceSet devices,
std::optional<DeviceId> resource_op_device,
std::optional<int> resource_var_operation_node_id,
std::optional<DeadnessPredicate> deadness_predicate,
bool is_xla_compile_attr_true, std::optional<string> xla_scope)
: cycles_graph_node_id_(tf_graph_node_id),
effective_cluster_size_(effective_cluster_size),
has_functional_control_flow_(has_functional_control_flow),
devices_(std::move(devices)),
resource_op_device_(resource_op_device),
deadness_predicate_(deadness_predicate),
is_xla_compile_attr_true_(is_xla_compile_attr_true),
xla_scope_(std::move(xla_scope)) {
if (resource_var_operation_node_id.has_value()) {
resource_var_operation_node_ids_.push_back(
*resource_var_operation_node_id);
}
}
void Merge(Cluster* other);
int GetIdOfOnlyNode() const {
DCHECK_EQ(cluster_size(), 1);
return cycles_graph_node_id();
}
int cluster_size() const { return cluster_size_; }
int cycles_graph_node_id() const { return cycles_graph_node_id_; }
void set_cycles_graph_node_id(int cycles_graph_node_id) {
cycles_graph_node_id_ = cycles_graph_node_id;
}
int effective_cluster_size() const { return effective_cluster_size_; }
bool has_functional_control_flow() const {
return has_functional_control_flow_;
}
const DeviceSet& devices() const { return devices_; }
const std::optional<DeviceId>& resource_op_device() const {
return resource_op_device_;
}
const std::optional<DeadnessPredicate>& deadness_predicate() const {
return deadness_predicate_;
}
bool is_xla_compile_attr_true() const { return is_xla_compile_attr_true_; }
const std::optional<string>& xla_scope() const { return xla_scope_; }
absl::Span<const int> resource_var_operation_node_ids() const {
return resource_var_operation_node_ids_;
}
string DebugString(const Graph& graph) const {
Node* node = graph.FindNodeId(cycles_graph_node_id());
if (!node) {
return absl::StrCat("NULL NODE IN #", cycles_graph_node_id());
}
if (cluster_size() == 1) {
return absl::StrCat("<", node->name(), " #", cycles_graph_node_id(),
">");
}
return absl::StrCat("<", node->name(), " + ", cluster_size() - 1,
" others #", cycles_graph_node_id(), ">");
}
private:
int cluster_size_ = 1;
int cycles_graph_node_id_;
int effective_cluster_size_;
bool has_functional_control_flow_;
DeviceSet devices_;
std::optional<DeviceId> resource_op_device_;
std::optional<DeadnessPredicate> deadness_predicate_;
bool is_xla_compile_attr_true_;
std::optional<string> xla_scope_;
std::vector<int> resource_var_operation_node_ids_;
Cluster(const Cluster&) = delete;
void operator=(const Cluster&) = delete;
};
Node* GetOnlyNodeIn(const Cluster& cluster);
bool IsSinkLike(const Cluster& cluster);
bool IsScalarIntegerResourceOperation(const Cluster& cluster);
absl::StatusOr<bool> Initialize();
template <typename FnTy>
absl::StatusOr<bool> ForEachEdgeInPostOrder(FnTy fn);
Status RunEdgeContractionLoop();
Status DeclusterNodes();
Status CreateClusters();
Status DumpDebugInfo();
bool IsCompilationCandidate(Node* n) const {
return compilation_candidates_.find(n) != compilation_candidates_.end();
}
absl::StatusOr<bool> TryToContractEdge(Cluster* from, Cluster* to);
Status FindCompilationCandidates();
bool CompilationDisallowedByXlaCompileAttr(Node* node);
Status BuildInitialClusterSet();
absl::StatusOr<bool> ShouldCompileClusterImpl(const Cluster& cluster);
absl::StatusOr<bool> ShouldCompileCluster(const Cluster& cluster);
absl::StatusOr<bool> ClusteringWillIntroduceInterDeviceDependency(
const Cluster& from, const Cluster& to);
bool ShouldCompile(bool is_xla_compile_attr_true,
const DeviceType& device_type,
XlaOpRegistry::AutoclusteringPolicy policy) {
return is_xla_compile_attr_true ||
policy == XlaOpRegistry::AutoclusteringPolicy::kAlways ||
(policy == XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally &&
global_jit_level_ != OptimizerOptions::OFF) ||
(device_type.type_string() == DEVICE_CPU &&
policy ==
XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested &&
cpu_global_jit_);
}
absl::StatusOr<bool> AreDevicesCompatible(const Cluster& cluster_a,
const Cluster& cluster_b);
void DumpPostClusteringGraphs();
void VLogClusteringSummary();
Cluster* MakeNewCluster(int cycles_graph_node_id, int effective_cluster_size,
bool has_functional_control_flow,
const DeviceSet& device_set,
std::optional<DeviceId> resource_op_device,
std::optional<int> resource_var_operation_node_id,
std::optional<DeadnessPredicate> deadness_predicate,
bool is_xla_compile_attr_true,
std::optional<string> xla_scope) {
cluster_storage_.push_back(std::make_unique<Cluster>(
cycles_graph_node_id, effective_cluster_size,
has_functional_control_flow, device_set, resource_op_device,
resource_var_operation_node_id, deadness_predicate,
is_xla_compile_attr_true, xla_scope));
return cluster_storage_.back().get();
}
std::optional<string> GetXlaScope(Node* n);
Cluster* GetClusterForNode(Node* n) {
return cluster_for_node_[n->id()].Get();
}
Cluster* GetClusterForCyclesGraphNode(int node_id) {
if (node_id >= graph_->num_node_ids() ||
graph_->FindNodeId(node_id) == nullptr) {
return nullptr;
}
Cluster* cluster = cluster_for_node_[node_id].Get();
if (cluster) {
DCHECK_EQ(cluster->cycles_graph_node_id(), node_id);
}
return cluster;
}
bool LogNotContractableAndReturnFalse(Cluster* from, Cluster* to,
absl::string_view reason);
std::vector<int> FindAlternatePathForDebugging(int from, int to);
string DebugStringForCyclesGraphNode(int node_id, bool* found_unclustered);
string DescribePotentialCycle(int from, int to);
bool MergeClusters(Cluster* cluster_from, Cluster* cluster_to) {
int from = cluster_from->cycles_graph_node_id();
int to = cluster_to->cycles_graph_node_id();
auto optional_merged_node = cycles_graph_.ContractEdge(from, to);
if (!optional_merged_node.has_value()) {
VLOG(3) << "Could not contract " << cluster_from->DebugString(*graph_)
<< " -> " << cluster_to->DebugString(*graph_)
<< " because contracting the edge would create a cycle via "
<< DescribePotentialCycle(from, to) << ".";
return false;
}
cluster_from->Merge(cluster_to);
cluster_from->set_cycles_graph_node_id(optional_merged_node.value());
cluster_for_node_[from].Merge(&cluster_for_node_[to]);
return true;
}
string EdgeContractionFailureMsg(Cluster* from, Cluster* to,
absl::string_view reason) {
return absl::StrCat("Could not contract ", from->DebugString(*graph_),
" -> ", to->DebugString(*graph_), " because ", reason,
".");
}
DebugOptions debug_options_;
Graph* graph_;
uint64 graph_fingerprint_;
FunctionLibraryDefinition* flib_def_;
Env* env_;
OptimizerOptions::GlobalJitLevel global_jit_level_;
bool cpu_global_jit_;
const std::string cluster_name_prefix_;
absl::flat_hash_map<const Cluster*, bool> should_compile_cluster_cache_;
jit::DeviceInfoCache device_info_cache_;
bool initialized_ = false;
bool edges_contracted_ = false;
bool clusters_created_ = false;
std::vector<std::unique_ptr<Cluster>> cluster_storage_;
std::vector<xla::UnionFind<Cluster*>> cluster_for_node_;
absl::flat_hash_set<const Node*> declustered_nodes_;
xla::GraphCycles cycles_graph_;
OrderedNodeSet compilation_candidates_;
std::unique_ptr<DeadnessAnalysis> deadness_analysis_;
int64_t iteration_count_ = 0;
absl::flat_hash_set<std::pair<int, int>> unsafe_resource_deps_;
};
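// Debug-only helper: reconstructs a path from `from` to `to` in the
// cycle-detection graph that avoids the direct edge, preferring unclustered
// nodes, so DescribePotentialCycle can explain why contracting an edge would
// have created a cycle.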
std::vector<int> MarkForCompilationPassImpl::FindAlternatePathForDebugging(
int from, int to) {
std::vector<int> rpo = cycles_graph_.AllNodesInPostOrder();
absl::c_reverse(rpo);
absl::flat_hash_map<int, int> best_pred_for_node;
best_pred_for_node[from] = -1;
int rpo_index = 0, current_rpo_node;
do {
current_rpo_node = rpo[rpo_index++];
std::optional<int> some_pred, preferred_pred;
for (int pred : cycles_graph_.Predecessors(current_rpo_node)) {
if (!best_pred_for_node.contains(pred)) {
continue;
}
if (current_rpo_node == to && pred == from) {
continue;
}
some_pred = pred;
if (GetClusterForCyclesGraphNode(pred) == nullptr) {
preferred_pred = pred;
}
}
if (some_pred || preferred_pred) {
best_pred_for_node[current_rpo_node] =
preferred_pred.has_value() ? *preferred_pred : *some_pred;
}
} while (current_rpo_node != to);
auto get_best_pred = [&](int n) {
auto it = best_pred_for_node.find(n);
CHECK(it != best_pred_for_node.end());
return it->second;
};
std::vector<int> path;
int current_path_node = get_best_pred(to);
while (current_path_node != from) {
path.push_back(current_path_node);
current_path_node = get_best_pred(current_path_node);
}
absl::c_reverse(path);
return path;
}
string MarkForCompilationPassImpl::DebugStringForCyclesGraphNode(
int cycles_graph_node_id, bool* found_unclustered) {
Cluster* cluster = GetClusterForCyclesGraphNode(cycles_graph_node_id);
if (cluster) {
return cluster->DebugString(*graph_);
}
*found_unclustered = true;
if (cycles_graph_node_id >= graph_->num_node_ids()) {
return absl::StrCat("<oob #", cycles_graph_node_id, ">");
}
Node* node = graph_->FindNodeId(cycles_graph_node_id);
if (!node) {
return absl::StrCat("<bad #", cycles_graph_node_id, ">");
}
return node->name();
}
string MarkForCompilationPassImpl::DescribePotentialCycle(int from, int to) {
std::vector<string> path_str;
bool found_unclustered = false;
absl::c_transform(FindAlternatePathForDebugging(from, to),
std::back_inserter(path_str), [&](int node_id) {
return DebugStringForCyclesGraphNode(node_id,
&found_unclustered);
});
return absl::StrCat(!found_unclustered ? "(all clusters) " : "", "[",
absl::StrJoin(path_str, ","), "]");
}
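// Folds `other` into this cluster: node counts are summed, device sets and
// resource-variable op ids are unioned, the control-flow and _XlaCompile bits
// are ORed, and this cluster's XLA scope and resource-op device win when both
// clusters define one.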
void MarkForCompilationPassImpl::Cluster::Merge(Cluster* other) {
cluster_size_ += other->cluster_size_;
effective_cluster_size_ += other->effective_cluster_size_;
has_functional_control_flow_ |= other->has_functional_control_flow_;
devices_.UnionWith(other->devices_);
DCHECK(!(resource_op_device_.has_value() &&
other->resource_op_device_.has_value()) ||
*resource_op_device_ == *other->resource_op_device_)
<< "AreDevicesCompatible should have returned false otherwise!";
if (!resource_op_device_.has_value()) {
resource_op_device_ = other->resource_op_device_;
}
is_xla_compile_attr_true_ |= other->is_xla_compile_attr_true_;
if (!xla_scope_.has_value()) {
xla_scope_ = std::move(other->xla_scope_);
}
resource_var_operation_node_ids_.reserve(
resource_var_operation_node_ids_.size() +
other->resource_var_operation_node_ids_.size());
absl::c_copy(other->resource_var_operation_node_ids_,
std::back_inserter(resource_var_operation_node_ids_));
other->resource_var_operation_node_ids_.clear();
}
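// A resource operation is exempted from the ordering analysis when its
// assigned device has no registered XLA compilation device, or when that
// registration allows clustering resource-variable ops unsafely; ops without
// an assigned device are never exempted.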
Status IgnoreResourceOpForSafetyAnalysis(
jit::DeviceInfoCache* device_info_cache, const Node& n, bool* ignore) {
if (n.assigned_device_name().empty()) {
*ignore = false;
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(
const XlaOpRegistry::DeviceRegistration* registration,
device_info_cache->GetCompilationDevice(n.assigned_device_name()));
if (!registration) {
*ignore = true;
} else {
*ignore = registration->cluster_resource_variable_ops_unsafely;
}
return absl::OkStatus();
}
absl::StatusOr<bool> MarkForCompilationPassImpl::Initialize() {
TF_RET_CHECK(!initialized_ && !edges_contracted_ && !clusters_created_);
initialized_ = true;
TF_RETURN_IF_ERROR(FindCompilationCandidates());
if (compilation_candidates_.empty()) {
VLOG(2) << "No compilable candidates";
return false;
}
TF_ASSIGN_OR_RETURN(bool cycle_detection_graph_ok,
CreateCycleDetectionGraph(graph_, &cycles_graph_));
if (!cycle_detection_graph_ok) {
VLOG(2) << "Could not form cycle detection graph";
return false;
}
if (!debug_options_.ignore_deadness_checks) {
XLA_SCOPED_LOGGING_TIMER_LEVEL("DeadnessAnalysis", 1);
TF_RETURN_IF_ERROR(DeadnessAnalysis::Run(*graph_, &deadness_analysis_));
}
if (debug_options_.deterministic_cluster_names) {
TF_ASSIGN_OR_RETURN(graph_fingerprint_, FingerprintGraph(*graph_));
}
TF_RETURN_IF_ERROR(BuildInitialClusterSet());
return true;
}
template <typename FnTy>
absl::StatusOr<bool> MarkForCompilationPassImpl::ForEachEdgeInPostOrder(
FnTy fn) {
bool changed = false;
for (int32_t node : cycles_graph_.AllNodesInPostOrder()) {
Cluster* cluster_from = GetClusterForCyclesGraphNode(node);
if (!cluster_from) {
continue;
}
std::vector<int32> successors_copy =
cycles_graph_.SuccessorsCopy(cluster_from->cycles_graph_node_id());
for (int to : successors_copy) {
iteration_count_++;
Cluster* cluster_to = GetClusterForCyclesGraphNode(to);
if (!cluster_to) {
continue;
}
TF_ASSIGN_OR_RETURN(bool contracted_edge, fn(cluster_from, cluster_to));
changed |= contracted_edge;
}
}
return changed;
}
Node* MarkForCompilationPassImpl::GetOnlyNodeIn(const Cluster& cluster) {
return cluster.cluster_size() == 1
? graph_->FindNodeId(cluster.GetIdOfOnlyNode())
: nullptr;
}
bool MarkForCompilationPassImpl::IsSinkLike(const Cluster& cluster) {
if (Node* n = GetOnlyNodeIn(cluster)) {
return n->type_string() == "NoOp" && n->out_edges().size() == 1 &&
(*n->out_edges().begin())->dst()->IsSink();
}
return false;
}
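// True for a singleton cluster holding an AssignAddVariableOp or
// AssignSubVariableOp whose update is a scalar integer constant; phase 1 of
// edge contraction declines to grow clusters out of such nodes.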
bool MarkForCompilationPassImpl::IsScalarIntegerResourceOperation(
const Cluster& cluster) {
Node* n = GetOnlyNodeIn(cluster);
if (!n) {
return false;
}
if (n->type_string() != "AssignAddVariableOp" &&
n->type_string() != "AssignSubVariableOp") {
return false;
}
DataType dtype;
if (!TryGetNodeAttr(n->def(), "dtype", &dtype) || !DataTypeIsInteger(dtype)) {
return false;
}
Node* const_input = nullptr;
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() && e->src()->IsConstant()) {
const_input = e->src();
break;
}
}
if (!const_input) {
return false;
}
const TensorProto* proto = nullptr;
if (!TryGetNodeAttr(const_input->def(), "value", &proto)) {
return false;
}
return TensorShapeUtils::IsScalar(proto->tensor_shape());
}
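// Edge contraction proceeds in phases: phase 0 only merges edges whose
// destination is a lone shape-consumer op, phase 1 merges everything except
// edges into sink-like NoOps and edges out of scalar-integer resource updates,
// and phase 2 merges whatever remains.  A final sweep checks that no further
// contraction is possible.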
Status MarkForCompilationPassImpl::RunEdgeContractionLoop() {
TF_RET_CHECK(initialized_ && !edges_contracted_ && !clusters_created_);
edges_contracted_ = true;
VLOG(4) << "Running phase 0";
TF_RETURN_IF_ERROR(
ForEachEdgeInPostOrder([&](Cluster* from,
Cluster* to) -> absl::StatusOr<bool> {
Node* n = GetOnlyNodeIn(*to);
bool is_shape_consumer_op = n && IsShapeConsumerOp(*n);
if (!is_shape_consumer_op) {
return false;
}
return TryToContractEdge(from, to);
}).status());
VLOG(4) << "Running phase 1";
TF_RETURN_IF_ERROR(
ForEachEdgeInPostOrder([&](Cluster* from,
Cluster* to) -> absl::StatusOr<bool> {
if (IsSinkLike(*to)) {
return false;
}
if (IsScalarIntegerResourceOperation(*from)) {
return false;
}
return TryToContractEdge(from, to);
}).status());
VLOG(4) << "Running phase 2";
TF_RETURN_IF_ERROR(ForEachEdgeInPostOrder([&](Cluster* from, Cluster* to) {
return TryToContractEdge(from, to);
}).status());
VLOG(2) << "Checking idempotence";
TF_ASSIGN_OR_RETURN(bool changed,
ForEachEdgeInPostOrder([&](Cluster* from, Cluster* to) {
return TryToContractEdge(from, to);
}));
TF_RET_CHECK(!changed);
return absl::OkStatus();
}
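// Removes a clustered "Fill" node from its cluster when every one of its
// consumers belongs to a different cluster (or is unclustered).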
Status MarkForCompilationPassImpl::DeclusterNodes() {
for (Node* n : compilation_candidates_) {
Cluster* cluster = GetClusterForNode(n);
if (cluster == nullptr) {
continue;
}
if (n->op_def().name() == "Fill" &&
n->out_nodes().begin() != n->out_nodes().end() &&
absl::c_all_of(n->out_nodes(), [&](Node* user) {
return GetClusterForNode(user) != cluster;
})) {
declustered_nodes_.insert(n);
}
}
return absl::OkStatus();
}
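// Process-wide, mutex-guarded counters of cluster sequence numbers, keyed by
// graph fingerprint, used to give generated cluster names unique suffixes.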
class ClusterSequenceNumberGenerator {
public:
void Reset() {
mutex_lock lock(mu_);
sequence_numbers_.clear();
}
int64 GetNext(uint64 key) {
mutex_lock lock(mu_);
return sequence_numbers_[key]++;
}
static ClusterSequenceNumberGenerator& Global() {
static ClusterSequenceNumberGenerator* gen =
new ClusterSequenceNumberGenerator;
return *gen;
}
private:
mutex mu_;
absl::flat_hash_map<uint64, int64> sequence_numbers_;
};
int64_t GetNextClusterSequenceNumber(uint64 fingerprint) {
return ClusterSequenceNumberGenerator::Global().GetNext(fingerprint);
}
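// Writes the kXlaClusterAttr / kXlaAlreadyClustered attributes.  A node is
// tagged only if its cluster passes ShouldCompileCluster, it was not
// declustered, and the cluster reaches the minimum size, contains functional
// control flow, or carries a forced-compilation (_XlaCompile) attribute.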
Status MarkForCompilationPassImpl::CreateClusters() {
TF_RET_CHECK(initialized_ && edges_contracted_ && !clusters_created_);
clusters_created_ = true;
std::unordered_map<int, string> cluster_names;
if (debug_options_.dump_graphs) {
DumpGraphToFile("before_mark_for_compilation", *graph_, flib_def_);
}
for (Node* n : compilation_candidates_) {
Cluster* cluster = GetClusterForNode(n);
TF_ASSIGN_OR_RETURN(bool should_compile_cluster,
ShouldCompileCluster(*cluster));
if (!should_compile_cluster || declustered_nodes_.contains(n)) {
continue;
}
if (cluster->effective_cluster_size() >= debug_options_.min_cluster_size ||
cluster->has_functional_control_flow() ||
cluster->is_xla_compile_attr_true()) {
string& name = cluster_names[cluster->cycles_graph_node_id()];
if (name.empty()) {
if (!cluster_name_prefix_.empty()) {
name = absl::StrCat(cluster_name_prefix_, "_");
} else {
name = "cluster_";
}
if (debug_options_.deterministic_cluster_names) {
absl::StrAppend(&name, graph_fingerprint_, "_");
}
absl::StrAppend(&name,
GetNextClusterSequenceNumber(graph_fingerprint_));
}
n->AddAttr(kXlaClusterAttr, name);
n->AddAttr(kXlaAlreadyClustered, true);
VLOG(3) << "Assigning node " << n->name() << " to cluster " << name;
}
}
return absl::OkStatus();
}
Status MarkForCompilationPassImpl::DumpDebugInfo() {
TF_RET_CHECK(initialized_ && edges_contracted_ && clusters_created_);
if (debug_options_.dump_graphs) {
DumpPostClusteringGraphs();
}
VLogClusteringSummary();
return absl::OkStatus();
}
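// Rejects merging `from` into `to` if any predecessor cluster of `to` runs on
// a device incompatible with either endpoint, since the merged cluster would
// then depend on data produced on another device.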
absl::StatusOr<bool>
MarkForCompilationPassImpl::ClusteringWillIntroduceInterDeviceDependency(
const Cluster& cluster_from, const Cluster& cluster_to) {
for (const auto& in_id :
cycles_graph_.Predecessors(cluster_to.cycles_graph_node_id())) {
const Cluster* cluster_in = GetClusterForCyclesGraphNode(in_id);
if (cluster_in) {
TF_ASSIGN_OR_RETURN(bool devices_compatible,
AreDevicesCompatible(cluster_to, *cluster_in));
if (!devices_compatible) {
return true;
}
TF_ASSIGN_OR_RETURN(devices_compatible,
AreDevicesCompatible(cluster_from, *cluster_in));
if (!devices_compatible) {
return true;
}
}
}
return false;
}
std::optional<string> MarkForCompilationPassImpl::GetXlaScope(Node* node) {
if (global_jit_level_ != OptimizerOptions::OFF) {
const string& scope =
GetNodeAttrString(node->attrs(), kXlaInternalScopeAttr);
if (!scope.empty()) {
return scope;
}
} else {
const string& scope = GetNodeAttrString(node->attrs(), kXlaScopeAttr);
if (!scope.empty()) {
return scope;
}
}
return std::nullopt;
}
static bool GetNodeOrFuncAttr(Node* node, FunctionLibraryDefinition* flib_def,
const char* attr_name) {
bool out = false;
bool attr_value;
if (TryGetNodeAttr(node->attrs(), attr_name, &attr_value)) {
out |= attr_value;
}
if (flib_def->GetAttr(*node, attr_name, &attr_value).ok()) {
out |= attr_value;
}
return out;
}
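// Runs the resource-operation safety analysis and then creates a singleton
// cluster for every compilation candidate, capturing its device, deadness
// predicate, XLA scope, resource-op information and whether _XlaCompile /
// _XlaMustCompile forces compilation.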
Status MarkForCompilationPassImpl::BuildInitialClusterSet() {
auto ignore_resource_ops = [&](const Node& n, bool* ignore) {
return IgnoreResourceOpForSafetyAnalysis(&device_info_cache_, n, ignore);
};
std::vector<std::pair<int, int>> unsafe_resource_deps_vect;
TF_RETURN_IF_ERROR(ComputeIncompatibleResourceOperationPairs(
*graph_, flib_def_, ignore_resource_ops, &unsafe_resource_deps_vect));
absl::c_copy(
unsafe_resource_deps_vect,
std::inserter(unsafe_resource_deps_, unsafe_resource_deps_.begin()));
cluster_for_node_.resize(graph_->num_node_ids());
for (Node* node : graph_->nodes()) {
if (!IsCompilationCandidate(node)) {
cluster_for_node_[node->id()].Get() = nullptr;
continue;
}
int effective_cluster_size =
(node->IsIdentity() || node->IsConstant()) ? 0 : 1;
bool has_functional_control_flow = node->IsWhileNode() || node->IsIfNode();
std::optional<DeadnessPredicate> deadness_predicate;
if (deadness_analysis_) {
TF_ASSIGN_OR_RETURN(
deadness_predicate,
deadness_analysis_->GetPredicateFor(node, Graph::kControlSlot));
}
const string& device_name_str = !node->assigned_device_name().empty()
? node->assigned_device_name()
: node->requested_device();
TF_ASSIGN_OR_RETURN(DeviceId device,
device_info_cache_.GetIdFor(device_name_str));
bool is_resource_op = HasResourceInputOrOutput(*node);
std::optional<DeviceId> resource_op_device;
if (is_resource_op) {
resource_op_device = device;
}
std::optional<int> resource_var_operation_node_id;
if (is_resource_op || MayCallFunction(*node, flib_def_)) {
resource_var_operation_node_id = node->id();
}
bool is_xla_compile_attr_true =
GetNodeOrFuncAttr(node, flib_def_, kXlaCompileAttr) ||
(global_jit_level_ != OptimizerOptions::OFF &&
GetNodeOrFuncAttr(node, flib_def_, kXlaMustCompileAttr));
DeviceSet devices;
devices.Insert(device);
Cluster* new_cluster = MakeNewCluster(
node->id(),
effective_cluster_size,
has_functional_control_flow, devices,
resource_op_device, resource_var_operation_node_id, deadness_predicate,
is_xla_compile_attr_true,
GetXlaScope(node));
cluster_for_node_[node->id()].Get() = new_cluster;
}
return absl::OkStatus();
}
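// Matches an Identity fed from output 1 (the true branch) of a Switch whose
// predicate is a LoopCond and that control-drives at least one Const;
// clustering such a node can couple the loop-condition computation to the
// loop body, as noted in the rejection message below.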
absl::StatusOr<bool> IsIdentityDrivingConstsInLoop(Node* node) {
if (!node->IsIdentity()) {
return false;
}
auto it = absl::c_find_if(node->in_edges(), [](const Edge* e) {
return e->src()->IsSwitch() && e->src_output() == 1;
});
if (it == node->in_edges().end()) {
return false;
}
const Node* switch_node = (*it)->src();
const Node* maybe_loop_cond;
TF_RETURN_IF_ERROR(switch_node->input_node(1, &maybe_loop_cond));
if (!maybe_loop_cond->IsLoopCond()) {
return false;
}
bool driving_any_consts =
absl::c_any_of(node->out_edges(), [](const Edge* e) {
return e->dst()->IsConstant() && e->IsControlEdge();
});
if (!driving_any_consts) {
return false;
}
return true;
}
absl::flat_hash_set<string> CreateClusterExcludeList() {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
absl::flat_hash_set<string> excludelist;
for (auto s : absl::StrSplit(flags->tf_xla_cluster_exclude_ops, ',')) {
if (!s.empty()) {
excludelist.insert(string(s));
}
}
if (VLOG_IS_ON(2) && !excludelist.empty()) {
std::vector<string> vexcludelist(excludelist.begin(), excludelist.end());
absl::c_sort(vexcludelist);
VLOG(2) << "XLA clustering will exclude following TF operations from auto "
"clustering: "
<< absl::StrJoin(vexcludelist, " ");
}
return excludelist;
}
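// Parses --tf_xla_ops_to_cluster: entries may be single op names, named
// groups from GetAllowlistTable(), or FUSIBLE, which expands to every group.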
absl::flat_hash_set<string> GetOrCreateAllowlist() {
absl::flat_hash_map<string, std::vector<string>>* allowlist_table =
tensorflow::GetAllowlistTable();
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
absl::flat_hash_set<string> allowlist;
for (auto s : absl::StrSplit(flags->tf_xla_ops_to_cluster, ',')) {
if (s == "FUSIBLE") {
for (auto pair : *allowlist_table) {
allowlist.insert(pair.second.begin(), pair.second.end());
}
} else if (allowlist_table->contains(s)) {
auto v = allowlist_table->at(s);
allowlist.insert(v.begin(), v.end());
} else if (!s.empty()) {
allowlist.insert(string(s));
}
}
if (VLOG_IS_ON(2) && !allowlist.empty()) {
std::vector<string> vallowlist(allowlist.begin(), allowlist.end());
absl::c_sort(vallowlist);
VLOG(2) << "XLA clustering will only consider the following TF operations: "
<< absl::StrJoin(vallowlist, " ");
}
return allowlist;
}
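// Selects the nodes eligible for clustering: the node's device must have a
// JIT registration whose autoclustering policy (or an explicit _XlaCompile
// attribute) enables compilation, the node must pass the
// RecursiveCompilabilityChecker, and string constants, must-be-constant
// stateful ops, ops excluded by the allow/exclude lists, and loop-condition
// identities driving constants are skipped.  Every accepted node spends one
// unit of clustering fuel.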
Status MarkForCompilationPassImpl::FindCompilationCandidates() {
OptimizerOptions opts;
std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
new ProcessFunctionLibraryRuntime(nullptr, env_, nullptr,
TF_GRAPH_DEF_VERSION, flib_def_, opts));
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
std::vector<bool> compile_time_const_nodes(graph_->num_node_ids(), false);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(
*graph_, nullptr,
&compile_time_const_nodes, lib_runtime));
std::vector<Node*> sorted_nodes;
for (Node* node : graph_->op_nodes()) {
sorted_nodes.push_back(node);
}
std::sort(sorted_nodes.begin(), sorted_nodes.end(), NodeComparatorID());
if (*debug_options_.fuel >= std::numeric_limits<int64_t>::max() / 2) {
VLOG(2) << "Starting fuel: infinity";
} else {
VLOG(2) << "Starting fuel: " << *debug_options_.fuel;
}
VLOG(2) << "sorted_nodes.size() = " << sorted_nodes.size();
auto allowlist = GetOrCreateAllowlist();
std::vector<string> vall_ops = XlaOpRegistry::GetAllRegisteredOps();
absl::flat_hash_set<string> all_ops(vall_ops.begin(), vall_ops.end());
for (const auto& s : allowlist) {
if (!all_ops.contains(s)) {
return errors::InvalidArgument(
"The operation '", s,
"' passed to --tf_xla_ops_to_cluster is not supported by XLA.");
}
}
auto cluster_exclude_op_list = CreateClusterExcludeList();
bool allow_where_op = true;
for (const auto& s : cluster_exclude_op_list) {
if (s == "Where") {
allow_where_op = false;
} else {
return errors::InvalidArgument(
"The operation '", s,
"' passed to --tf_xla_cluster_exclude_ops is not supported by "
"XLA.");
}
}
for (Node* node : sorted_nodes) {
if (*debug_options_.fuel <= 0) {
VLOG(1)
<< "Hit fuel limit; not marking any remaining ops as clusterable.";
break;
}
TF_ASSIGN_OR_RETURN(
const DeviceType& device_type,
device_info_cache_.GetDeviceTypeFor(node->assigned_device_name()));
VLOG(4) << "Device type for " << node->name() << ": "
<< device_type.type_string();
if (CompilationDisallowedByXlaCompileAttr(node)) {
VLOG(2) << "Not clustering " << node->name()
<< ": disallowed by _XlaCompile attribute";
continue;
}
const XlaOpRegistry::DeviceRegistration* registration;
if (!XlaOpRegistry::GetCompilationDevice(device_type.type(),
®istration)) {
VLOG(2) << "Rejecting " << node->name()
<< ": could not find JIT device for " << device_type.type();
continue;
}
bool is_xla_compile_attr_true =
GetNodeOrFuncAttr(node, flib_def_, kXlaCompileAttr) ||
(global_jit_level_ != OptimizerOptions::OFF &&
GetNodeOrFuncAttr(node, flib_def_, kXlaMustCompileAttr));
auto policy = registration->autoclustering_policy;
if (!ShouldCompile(is_xla_compile_attr_true, device_type, policy)) {
continue;
}
RecursiveCompilabilityChecker::OperationFilter filter =
CreateOperationFilter(*registration);
filter.require_always_compilable = true;
filter.allow_string_consts = false;
filter.allow_collective_reduce_v2 = false;
filter.allow_unique_op = false;
filter.allow_where_op = allow_where_op;
RecursiveCompilabilityChecker checker(
filter, DeviceType{registration->compilation_device_name});
if (!checker.IsCompilableNode(*node, lib_runtime)) {
continue;
}
if (node->type_string() == "Const") {
const AttrValue* attr = node->attrs().Find("dtype");
if (attr != nullptr && attr->type() == DT_STRING) {
continue;
}
}
if (!allowlist.empty() && !allowlist.contains(node->def().op())) {
VLOG(1) << "Rejecting TF operation " << node->def().op()
<< " as it is not listed in --tf_xla_ops_to_cluster.";
continue;
}
if (compile_time_const_nodes[node->id()]) {
const OpDef* op_def;
TF_RETURN_IF_ERROR(
graph_->op_registry()->LookUpOpDef(node->type_string(), &op_def));
if (op_def->is_stateful()) {
const XlaResourceOpInfo* op_info =
GetResourceOpInfoForOp(node->type_string());
bool is_tensor_array_or_stack_op =
op_info && op_info->resource_kind() != XlaResourceKind::kVariable;
if (!is_tensor_array_or_stack_op) {
VLOG(2) << "Isolating " << node->name()
<< ": must-be-constant stateful op";
continue;
}
}
}
TF_ASSIGN_OR_RETURN(bool is_identity_driving_consts_in_loop,
IsIdentityDrivingConstsInLoop(node));
if (is_identity_driving_consts_in_loop) {
VLOG(2) << "Rejecting " << node->name()
<< ": including it can create dependencies between while loop "
"condition and body computations with runtime overhead.";
continue;
}
compilation_candidates_.insert(node);
--(*debug_options_.fuel);
}
VLOG(2) << "compilation_candidates_.size() = "
<< compilation_candidates_.size();
return absl::OkStatus();
}
bool MarkForCompilationPassImpl::CompilationDisallowedByXlaCompileAttr(
Node* node) {
if (debug_options_.ignore_xla_compile_attr) {
return false;
}
bool compile = false;
Status status = GetNodeAttr(node->attrs(), kXlaCompileAttr, &compile);
if (status.ok()) {
if (!compile) {
VLOG(2) << "Rejecting " << node->name() << ": kXlaCompileAttr("
<< kXlaCompileAttr << ") is false.";
}
return !compile;
}
status = flib_def_->GetAttr(*node, kXlaCompileAttr, &compile);
if (status.ok()) {
if (!compile) {
VLOG(2) << "Rejecting " << node->name() << ": kXlaCompileAttr("
<< kXlaCompileAttr << ") on callee is false.";
}
return !compile;
}
return false;
}
bool MarkForCompilationPassImpl::LogNotContractableAndReturnFalse(
Cluster* from, Cluster* to, absl::string_view reason) {
VLOG(3) << EdgeContractionFailureMsg(from, to, reason);
return false;
}
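// An edge is contracted only if both clusters agree on deadness predicate,
// devices and XLA scope, the combined size stays within max_cluster_size, no
// cross-device dependency is introduced, and (unless resource checks are
// disabled) no unsafe resource-variable ordering ends up inside one cluster.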
absl::StatusOr<bool> MarkForCompilationPassImpl::TryToContractEdge(
Cluster* from, Cluster* to) {
DCHECK(from->deadness_predicate().has_value() ==
to->deadness_predicate().has_value());
if (from->deadness_predicate() != to->deadness_predicate()) {
VLOG(3) << EdgeContractionFailureMsg(
from, to,
absl::StrCat(
"the two nodes have mismatching deadness: ",
deadness_analysis_->DebugString(*from->deadness_predicate()),
" and ",
deadness_analysis_->DebugString(*to->deadness_predicate())));
return false;
}
TF_ASSIGN_OR_RETURN(bool devices_compatible,
AreDevicesCompatible(*from, *to));
if (!devices_compatible) {
return LogNotContractableAndReturnFalse(
from, to, "the two nodes have incompatible devices");
}
if (from->xla_scope().has_value() && to->xla_scope().has_value() &&
*from->xla_scope() != *to->xla_scope()) {
return LogNotContractableAndReturnFalse(
from, to, "the two nodes have mismatching XLA scopes");
}
if (from->cluster_size() + to->cluster_size() >
debug_options_.max_cluster_size) {
return LogNotContractableAndReturnFalse(
from, to, "the new cluster will be larger than the max cluster size");
}
TF_ASSIGN_OR_RETURN(bool will_introduce_cross_device_dependency,
ClusteringWillIntroduceInterDeviceDependency(*from, *to));
if (will_introduce_cross_device_dependency) {
return LogNotContractableAndReturnFalse(
from, to, "the new cluster will introduce a cross device dependency");
}
if (!debug_options_.ignore_resource_variable_checks) {
for (int resource_var_from : from->resource_var_operation_node_ids()) {
for (int resource_var_to : to->resource_var_operation_node_ids()) {
if (unsafe_resource_deps_.contains(
{resource_var_from, resource_var_to})) {
return LogNotContractableAndReturnFalse(
from, to,
"the new cluster would break resource variable semantics");
}
}
}
}
return MergeClusters(from, to);
}
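// Overall flow: Initialize (find candidates, build the cycle-detection graph,
// run deadness analysis, build initial clusters), contract edges, decluster
// Fill nodes, emit cluster attributes, then dump debug information.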
Status MarkForCompilationPassImpl::Run() {
XlaOpRegistry::RegisterCompilationKernels();
XLA_SCOPED_LOGGING_TIMER_LEVEL("MarkForCompilationPassImpl::Run", 1);
TF_ASSIGN_OR_RETURN(bool initialized, Initialize());
if (!initialized) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(RunEdgeContractionLoop());
TF_RETURN_IF_ERROR(DeclusterNodes());
TF_RETURN_IF_ERROR(CreateClusters());
TF_RETURN_IF_ERROR(DumpDebugInfo());
return absl::OkStatus();
}
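// Dumps the clustered graph, plus a copy in which every node name is prefixed
// with its cluster (or "unclustered"/"varhandle") so clusters are easy to spot
// in the rendered graph.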
void MarkForCompilationPassImpl::DumpPostClusteringGraphs() {
DumpGraphToFile("mark_for_compilation", *graph_, flib_def_);
Graph new_graph(graph_->op_registry());
CopyGraph(*graph_, &new_graph);
for (Node* n : new_graph.nodes()) {
if (std::optional<absl::string_view> cluster_name =
GetXlaClusterForNode(*n)) {
n->set_name(absl::StrCat(*cluster_name, "/", n->name()));
} else if (n->type_string() == "VarHandleOp") {
n->set_name(absl::StrCat("varhandle/", n->name()));
} else {
n->set_name(absl::StrCat("unclustered/", n->name()));
}
}
DumpGraphToFile("mark_for_compilation_annotated", new_graph, flib_def_);
}
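// Formats "numerator / denominator (percent)" for the clustering summaries.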
string RatioToString(int numerator, int denominator) {
return absl::StrFormat("%d / %d (%.2f%%)", numerator, denominator,
(100.0 * numerator) / denominator);
}
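// Prints a clustering summary at VLOG level 2, per-op histograms at level 3,
// and the edges that cross cluster boundaries at level 4.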
void MarkForCompilationPassImpl::VLogClusteringSummary() {
if (!VLOG_IS_ON(2)) {
return;
}
XlaAutoClusteringSummary auto_clustering_info =
GetXlaAutoClusteringSummary(*graph_);
VLOG(2) << "*** Clustering info for graph of size " << graph_->num_nodes();
VLOG(2) << " Built " << auto_clustering_info.clusters_size()
<< " clusters, size "
<< RatioToString(auto_clustering_info.clustered_node_count(),
graph_->num_nodes());
for (const XlaAutoClusteringSummary::Cluster& cluster :
auto_clustering_info.clusters()) {
absl::string_view cluster_name = cluster.name();
int size = cluster.size();
VLOG(2) << " " << cluster_name << " "
<< RatioToString(size, graph_->num_nodes());
for (const XlaAutoClusteringSummary::OpAndCount& op_count :
cluster.op_histogram()) {
VLOG(3) << " " << op_count.op() << ": " << op_count.count()
<< " instances";
}
}
if (!auto_clustering_info.unclustered_op_histogram().empty()) {
VLOG(2) << " Unclustered nodes: "
<< RatioToString(auto_clustering_info.unclustered_node_count(),
graph_->num_nodes());
for (const XlaAutoClusteringSummary::OpAndCount& op_count :
auto_clustering_info.unclustered_op_histogram()) {
VLOG(3) << " " << op_count.op() << ": " << op_count.count()
<< " instances";
}
}
struct EdgeInfo {
absl::string_view node_name;
std::optional<absl::string_view> cluster_name;
absl::string_view GetClusterName() const {
return cluster_name ? *cluster_name : "[none]";
}
std::pair<absl::string_view, std::optional<absl::string_view>> AsPair()
const {
return {node_name, cluster_name};
}
bool operator<(const EdgeInfo& other) const {
return AsPair() < other.AsPair();
}
};
using EdgeInfoMap = std::map<absl::string_view, std::map<EdgeInfo, int64_t>>;
EdgeInfoMap incoming_edge_infos;
EdgeInfoMap outgoing_edge_infos;
std::set<absl::string_view> cluster_names_to_print;
for (const Edge* e : graph_->edges()) {
const Node* from = e->src();
std::optional<absl::string_view> from_cluster_name =
GetXlaClusterForNode(*from);
const Node* to = e->dst();
std::optional<absl::string_view> to_cluster_name =
GetXlaClusterForNode(*to);
if (to_cluster_name == from_cluster_name) {
continue;
}
if (to_cluster_name) {
incoming_edge_infos[*to_cluster_name]
[EdgeInfo{from->name(), from_cluster_name}]++;
cluster_names_to_print.insert(*to_cluster_name);
}
if (from_cluster_name) {
outgoing_edge_infos[*from_cluster_name][{to->name(), to_cluster_name}]++;
cluster_names_to_print.insert(*from_cluster_name);
}
}
VLOG(4) << "*** Inter-Cluster edges:";
if (cluster_names_to_print.empty()) {
VLOG(4) << " [none]";
}
auto print_edge_info_set_for_cluster = [&](absl::string_view cluster_name,
const EdgeInfoMap& edge_info_map,
absl::string_view desc) {
auto it = edge_info_map.find(cluster_name);
if (it != edge_info_map.end()) {
VLOG(4) << " " << it->second.size() << " " << desc << " edges";
for (const auto& edge_info_count_pair : it->second) {
VLOG(4) << " " << edge_info_count_pair.first.GetClusterName() << " "
<< edge_info_count_pair.first.node_name << " # "
<< edge_info_count_pair.second;
}
} else {
VLOG(4) << " No " << desc << " edges.";
}
};
for (absl::string_view cluster_name : cluster_names_to_print) {
VLOG(4) << " ** Cluster " << cluster_name;
print_edge_info_set_for_cluster(cluster_name, incoming_edge_infos,
"incoming");
print_edge_info_set_for_cluster(cluster_name, outgoing_edge_infos,
"outgoing");
}
}
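// Two clusters are device-compatible if XLA can pick a single device for the
// union of their devices and neither cluster has resource ops pinned to a
// different device.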
absl::StatusOr<bool> MarkForCompilationPassImpl::AreDevicesCompatible(
const Cluster& cluster_a, const Cluster& cluster_b) {
DeviceSet devices = cluster_a.devices();
devices.UnionWith(cluster_b.devices());
TF_ASSIGN_OR_RETURN(
std::optional<jit::DeviceId> maybe_chosen_device,
MaybePickDeviceForXla(device_info_cache_, devices,
false));
if (!maybe_chosen_device.has_value()) {
return false;
}
jit::DeviceId chosen_device = *maybe_chosen_device;
auto resource_op_device_ok = [&](std::optional<DeviceId> resource_op_device) {
return !resource_op_device.has_value() ||
*resource_op_device == chosen_device;
};
return resource_op_device_ok(cluster_a.resource_op_device()) &&
resource_op_device_ok(cluster_b.resource_op_device());
}
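// Decides whether a finished cluster should actually be compiled, based on the
// chosen device's autoclustering policy; emits a one-time warning when an
// XLA:CPU cluster is skipped despite global JIT being enabled.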
absl::StatusOr<bool> MarkForCompilationPassImpl::ShouldCompileClusterImpl(
const Cluster& cluster) {
TF_ASSIGN_OR_RETURN(DeviceId chosen_device,
PickDeviceForXla(device_info_cache_, cluster.devices(),
false));
const DeviceType& device_type =
device_info_cache_.GetDeviceTypeFor(chosen_device);
const XlaOpRegistry::DeviceRegistration* registration =
device_info_cache_.GetCompilationDevice(chosen_device);
TF_RET_CHECK(registration)
<< "chosen device = " << device_info_cache_.GetNameFor(chosen_device)
<< "; device type = " << device_type.type() << "; devices ("
<< device_info_cache_.DebugString(cluster.devices());
auto policy = registration->autoclustering_policy;
bool should_compile =
ShouldCompile(cluster.is_xla_compile_attr_true(), device_type, policy);
if (!should_compile && device_type.type_string() == DEVICE_CPU &&
global_jit_level_ > OptimizerOptions::OFF) {
static absl::once_flag once;
absl::call_once(once, [] {
LOG(WARNING) << R"((One-time warning): Not using XLA:CPU for cluster.
If you want XLA:CPU, do one of the following:
- set the TF_XLA_FLAGS to include "--tf_xla_cpu_global_jit", or
- set cpu_global_jit to true on this session's OptimizerOptions, or
- use experimental_jit_scope, or
- use tf.function(jit_compile=True).
To confirm that XLA is active, pass --vmodule=xla_compilation_cache=1 (as a
proper command-line flag, not via TF_XLA_FLAGS).)";
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
if (flags->tf_xla_cpu_global_jit) {
LOG(WARNING)
<< "(Although the tf_xla_cpu_global_jit flag is currently enabled, "
"perhaps it wasn't enabled at process startup?)";
}
});
}
VLOG(3) << (should_compile ? "Compiling" : "Not compiling")
<< " cluster with device "
<< device_info_cache_.GetNameFor(chosen_device);
return should_compile;
}
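// Memoizing wrapper around ShouldCompileClusterImpl.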
absl::StatusOr<bool> MarkForCompilationPassImpl::ShouldCompileCluster(
const Cluster& cluster) {
auto it = should_compile_cluster_cache_.find(&cluster);
if (it != should_compile_cluster_cache_.end()) {
return it->second;
}
TF_ASSIGN_OR_RETURN(bool should_compile, ShouldCompileClusterImpl(cluster));
should_compile_cluster_cache_.insert({&cluster, should_compile});
return should_compile;
}
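// Shared entry point for Run and RunForTest: skips graphs that were already
// clustered or that contain TPUExecute ops, then runs the implementation.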
Status MarkForCompilation(
const GraphOptimizationPassOptions& options,
const MarkForCompilationPassImpl::DebugOptions& debug_options) {
Graph* graph = options.graph->get();
FunctionLibraryDefinition* flib_def = options.flib_def;
FixupSourceAndSinkEdges(graph);
for (Node* n : graph->nodes()) {
if (n->attrs().Find(kXlaAlreadyClustered)) {
return absl::OkStatus();
}
if (n->type_string() == "TPUExecute" ||
n->type_string() == "TPUExecuteAndUpdateVariables") {
return absl::OkStatus();
}
}
return MarkForCompilationPassImpl{
debug_options,
graph,
flib_def,
options.session_options != nullptr ? options.session_options->env
: Env::Default(),
GetGlobalJitLevelForGraph(options),
options.session_options->config.graph_options()
.optimizer_options()
.cpu_global_jit(),
options.session_options != nullptr
? options.session_options->config.experimental()
.session_metadata()
.name()
: ""}
.Run();
}
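// The clustering fuel lives in a process-wide atomic so that repeated
// invocations of the pass share a single budget; it is initialized from the
// flag value on first use.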
std::atomic<int64_t>* GetPointerToFuel(int64_t initial_value) {
static std::atomic<int64_t>* fuel = [&]() {
std::atomic<int64_t>* fuel = new std::atomic<int64_t>;
*fuel = initial_value;
return fuel;
}();
return fuel;
}
}
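// Public pass entry point: reads the clustering flags into DebugOptions and
// delegates to MarkForCompilation.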
Status MarkForCompilationPass::Run(
const GraphOptimizationPassOptions& options) {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
MarkForCompilationPassImpl::DebugOptions debug_options;
debug_options.ignore_deadness_checks =
flags->tf_xla_disable_deadness_safety_checks_for_debugging;
debug_options.ignore_resource_variable_checks =
flags->tf_xla_disable_resource_variable_safety_checks_for_debugging;
debug_options.ignore_xla_compile_attr = false;
debug_options.deterministic_cluster_names =
flags->tf_xla_deterministic_cluster_names;
debug_options.max_cluster_size = flags->tf_xla_max_cluster_size;
debug_options.min_cluster_size = flags->tf_xla_min_cluster_size;
debug_options.fuel = GetPointerToFuel(flags->tf_xla_clustering_fuel);
debug_options.dump_graphs = flags->tf_xla_clustering_debug;
return MarkForCompilation(options, debug_options);
}
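// Test-only entry point that can disable deadness analysis, force
// deterministic cluster names, and ignores the _XlaCompile attribute.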
Status MarkForCompilationPass::RunForTest(
const GraphOptimizationPassOptions& options, bool disable_deadness_analysis,
bool deterministic_cluster_names) {
MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags();
MarkForCompilationPassImpl::DebugOptions debug_options;
debug_options.ignore_deadness_checks = disable_deadness_analysis;
debug_options.ignore_resource_variable_checks =
flags->tf_xla_disable_resource_variable_safety_checks_for_debugging;
debug_options.ignore_xla_compile_attr = true;
debug_options.deterministic_cluster_names = deterministic_cluster_names;
debug_options.max_cluster_size = flags->tf_xla_max_cluster_size;
debug_options.min_cluster_size = flags->tf_xla_min_cluster_size;
debug_options.fuel = GetPointerToFuel(flags->tf_xla_clustering_fuel);
debug_options.dump_graphs = flags->tf_xla_clustering_debug;
return MarkForCompilation(options, debug_options);
}
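// Maps allowlist category names (e.g. "PW" for pointwise ops, "RED" for
// reductions) to the TF ops in each category; presumably consumed when
// expanding shorthand group names in the ops-to-cluster flag.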
absl::flat_hash_map<string, std::vector<string>>* GetAllowlistTable() {
static absl::flat_hash_map<string, std::vector<string>>* result =
new absl::flat_hash_map<string, std::vector<string>>{
{"PW",
{"ComplexAbs", "Angle", "Conj", "Abs", "Acos", "Acosh", "Asin",
"Atan", "Atanh", "Ceil", "Cos", "Cosh", "Sin", "Exp", "Expm1",
"Floor", "IsFinite", "IsInf", "IsNan", "Inv", "Reciprocal", "Log",
"Log1p", "Invert", "LogicalNot", "Ndtri", "Neg", "Rint", "Round",
"Rsqrt", "Sigmoid", "Sign", "Sinh", "Softplus", "Softsign", "Sqrt",
"Square", "Tan", "Tanh", "Real", "Imag", "Erf", "Erfc", "Erfinv",
"Lgamma", "Digamma",
"Add", "AddV2", "Sub", "Mul", "Div", "Atan2", "Complex", "DivNoNan",
"MulNoNan", "FloorDiv", "Xlogy", "Xlog1py", "Xdivy", "FloorMod",
"BitwiseAnd", "BitwiseOr", "BitwiseXor", "LeftShift", "RightShift",
"LogicalAnd", "LogicalOr", "Mod", "Maximum", "Minimum", "RealDiv",
"ReciprocalGrad", "RsqrtGrad", "SqrtGrad", "TruncateDiv",
"TruncateMod", "Equal", "NotEqual", "Greater", "GreaterEqual",
"Less", "LessEqual", "SigmoidGrad", "SoftplusGrad", "SoftsignGrad",
"TanhGrad", "Pow", "SquaredDifference", "ApproximateEqual",
"AddN", "Bitcast", "Cast", "ClipByValue", "Const", "Empty",
"Identity", "IdentityN", "Relu", "Relu6", "ReluGrad", "Relu6Grad",
"LeakyReluGrad", "Elu", "EluGrad", "Selu", "SeluGrad", "Select",
"SelectV2", "Transpose", "ConjugateTranspose",
"_UnaryOpsComposition", "CollectiveReduceV2",
"CollectiveAssignGroupV2",
"PlaceholderWithDefault", "PreventGradient", "StopGradient",
"Snapshot", "_EagerConst"}},
{"RED",
{"All", "Any", "Min", "Max", "Mean", "Prod", "Sum"}},
{"PWRED",
{"ArgMax", "ArgMin", "DiagPart", "Softmax",
"SparseSoftmaxCrossEntropyWithLogits", "LogSoftmax"}},
{"REDUCEWINDOW",
{"ArgMax", "ArgMin", "DiagPart", "Softmax",
"SparseSoftmaxCrossEntropyWithLogits", "LogSoftmax"}},
{"REDUCEWINDOWPW", {"BiasAddGrad", "LRN", "LRNGrad"}},
{"BN",
{"FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3",
"_FusedBatchNormEx", "FusedBatchNormGrad", "FusedBatchNormGradV2",
"FusedBatchNormGradV3"}},
{"Conv", {"_FusedConv2D"}},
{"SORT", {"TopKV2"}},
{"MISC",
{"ApproxTopK", "BroadcastTo", "ExpandDims", "Fill", "NoOp",
"Range", "Rank", "Reshape", "Shape", "ShapeN", "Size", "Squeeze",
"Transpose", "ZerosLike", "OnesLike", "BiasAdd" ,
"BroadcastArgs", "BroadcastGradientArgs", "OneHot", "Concat", "ConcatV2",
"ConcatOffset", "Const", "MirrorPad", "MirrorPadGrad", "Pack", "Pad",
"PadV2", "Reverse", "ReverseV2", "ReverseSequence", "Slice", "Split",
"SplitV", "StridedSlice", "StridedSliceGrad",
"ResourceStridedSliceAssign", "Tile", "Transpose", "InvertPermutation",
"Unpack", "DeviceIndex", "TensorStridedSliceUpdate", "XlaConcatND",
"XlaSplitND",
}}};
return result;
}
namespace testing {
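// Resets the global cluster-name counter so tests see deterministic names.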
void ResetClusterSequenceNumber() {
ClusterSequenceNumberGenerator::Global().Reset();
}
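// Test-only list of ops known to be supported by XLA; likely used to detect
// unexpected changes to the auto-clustering allowlist.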
absl::flat_hash_set<string> GetKnownXLAAllowlistOp() {
absl::flat_hash_set<string> result{
"AdjustContrastv2",
"AdjustHue",
"AdjustSaturation",
"Asinh",
"Assert",
"AssignAddVariableOp",
"AssignSubVariableOp",
"AssignVariableOp",
"AssignVariableXlaConcatND",
"AvgPool",
"AvgPool3D",
"AvgPool3DGrad",
"AvgPoolGrad",
"BatchMatMul",
"BatchMatMulV2",
"BatchMatMulV3",
"BatchToSpace",
"BatchToSpaceND",
"BesselI0e",
"BesselI1e",
"Betainc",
"BiasAddV1",
"Bincount",
"Bucketize",
"Case",
"CheckNumerics",
"Cholesky",
"ControlTrigger",
"Conv",
"Conv2D",
"Conv2DBackpropFilter",
"Conv2DBackpropInput",
"Conv3D",
"Conv3DBackpropFilterV2",
"Conv3DBackpropInputV2",
"Cross",
"Cumprod",
"Cumsum",
"CumulativeLogsumexp",
"DenseBincount",
"DataFormatDimMap",
"DataFormatVecPermute",
"DepthToSpace",
"DepthwiseConv2dNative",
"DepthwiseConv2dNativeBackpropFilter",
"DepthwiseConv2dNativeBackpropInput",
"Dequantize",
"Diag",
"DynamicInfeedEnqueueTupleOp",
"DynamicInfeedDequeueTupleOp",
"DynamicStitch",
"DynamicPartition",
"Einsum",
"EmptyTensorList",
"EnsureShape",
"ExtractImagePatches",
"Igamma",
"IgammaGradA",
"RandomGammaGrad",
"Igammac",
"FFT",
"FFT2D",
"FFT3D",
"FakeParam",
"FakeQuantWithMinMaxArgs",
"FakeQuantWithMinMaxArgsGradient",
"FakeQuantWithMinMaxVars",
"FakeQuantWithMinMaxVarsGradient",
"FakeQuantWithMinMaxVarsPerChannel",
"FakeQuantWithMinMaxVarsPerChannelGradient",
"Gather",
"GatherNd",
"GatherV2",
"HSVToRGB",
"IFFT",
"IFFT2D",
"IFFT3D",
"IRFFT",
"IRFFT2D",
"IRFFT3D",
"If",
"InTopKV2",
"L2Loss",
"LeakyRelu",
"LinSpace",
"ListDiff",
"LogMatrixDeterminant",
"LowerBound",
"MatMul",
"MatrixBandPart",
"MatrixDiag",
"MatrixDiagPart",
"MatrixDiagPartV2",
"MatrixDiagPartV3",
"MatrixDiagV2",
"MatrixDiagV3",
"MatrixInverse",
"MatrixSetDiag",
"MatrixSetDiagV2",
"MatrixSetDiagV3",
"MatrixSolve",
"MatrixTriangularSolve",
"MaxPool",
"MaxPool3D",
"MaxPool3DGrad",
"MaxPool3DGradGrad",
"MaxPoolGrad",
"MaxPoolGradGrad",
"MaxPoolGradGradV2",
"MaxPoolGradV2",
"MaxPoolV2",
"Multinomial",
"NextAfter",
"NonMaxSuppressionV3",
"NonMaxSuppressionV4",
"ParallelDynamicStitch",
"ParameterizedTruncatedNormal",
"PartitionedCall",
"Polygamma",
"PopulationCount",
"Qr",
"QuantizeAndDequantizeV2",
"QuantizeAndDequantizeV3",
"QuantizeAndDequantizeV4",
"RFFT",
"RFFT2D",
"RFFT3D",
"RGBToHSV",
"RandomShuffle",
"RandomStandardNormal",
"RandomUniform",
"RandomUniformInt",
"ReadVariableOp",
"ReadVariableXlaSplitND",
"ResizeBilinear",
"ResizeBilinearGrad",
"ResizeNearestNeighbor",
"ResourceApplyAdaMax",
"ResourceApplyAdadelta",
"ResourceApplyAdagrad",
"ResourceApplyAdagradDA",
"ResourceApplyAdagradV2",
"ResourceApplyAdam",
"ResourceApplyAddSign",
"ResourceApplyCenteredRMSProp",
"ResourceApplyFtrl",
"ResourceApplyFtrlV2",
"ResourceApplyGradientDescent",
"ResourceApplyKerasMomentum",
"ResourceApplyMomentum",
"ResourceApplyPowerSign",
"ResourceApplyProximalAdagrad",
"ResourceApplyProximalGradientDescent",
"ResourceApplyRMSProp",
"ResourceGather",
"ResourceScatterAdd",
"ResourceScatterDiv",
"ResourceScatterMax",
"ResourceScatterMin",
"ResourceScatterMul",
"ResourceScatterNdAdd",
"ResourceScatterNdSub",
"ResourceScatterNdUpdate",
"ResourceScatterSub",
"ResourceScatterUpdate",
"RngReadAndSkip",
"RngSkip",
"Roll",
"ScatterNd",
"SegmentSumV2",
"SegmentProdV2",
"SegmentMinV2",
"SegmentMaxV2",
"SelfAdjointEigV2",
"SoftmaxCrossEntropyWithLogits",
"SpaceToBatch",
"SpaceToBatchND",
"SpaceToDepth",
"SparseMatMul",
"SparseToDense",
"StackCloseV2",
"StackPopV2",
"StackPushV2",
"StackV2",
"StatefulPartitionedCall",
"StatefulStandardNormalV2",
"StatefulTruncatedNormal",
"StatefulUniform",
"StatefulUniformFullInt",
"StatefulUniformInt",
"StatelessCase",
"StatelessIf",
"StatelessMultinomial",
"StatelessParameterizedTruncatedNormal",
"StatelessRandomGetAlg",
"StatelessRandomGetKeyCounter",
"StatelessRandomGetKeyCounterAlg",
"StatelessRandomNormal",
"StatelessRandomNormalV2",
"StatelessRandomUniform",
"StatelessRandomUniformV2",
"StatelessRandomUniformInt",
"StatelessRandomUniformIntV2",
"StatelessRandomUniformFullInt",
"StatelessRandomUniformFullIntV2",
"StatelessTruncatedNormal",
"StatelessTruncatedNormalV2",
"StatelessWhile",
"StochasticCastToInt",
"Svd",
"SymbolicGradient",
"TensorArrayCloseV3",
"TensorArrayConcatV3",
"TensorArrayGatherV3",
"TensorArrayGradV3",
"TensorArrayReadV3",
"TensorArrayScatterV3",
"TensorArraySizeV3",
"TensorArraySplitV3",
"TensorArrayV3",
"TensorArrayWriteV3",
"TensorListConcatV2",
"TensorListElementShape",
"TensorListFromTensor",
"TensorListGather",
"TensorListGetItem",
"TensorListLength",
"TensorListPopBack",
"TensorListPushBack",
"TensorListReserve",
"TensorListSetItem",
"TensorListSplit",
"TensorListStack",
"TensorScatterAdd",
"TensorScatterMax",
"TensorScatterMin",
"TensorScatterSub",
"TensorScatterUpdate",
"ToBool",
"TridiagonalSolve",
"TridiagonalMatMul",
"TruncatedNormal",
"UniformDequantize",
"UniformQuantize",
"UniformQuantizedAdd",
"UniformQuantizedClipByValue",
"UniformQuantizedConvolution",
"UniformQuantizedDot",
"UniformRequantize",
"Unique",
"UniqueV2",
"UpperBound",
"UnsortedSegmentMax",
"UnsortedSegmentMin",
"UnsortedSegmentProd",
"UnsortedSegmentSum",
"VarIsInitializedOp",
"VariableShape",
"Where",
"While",
"XlaAllReduce",
"XlaBroadcastHelper",
"XlaCallModule",
"XlaConcatND",
"XlaConv",
"XlaConvV2",
"XlaCustomCall",
"XlaCustomCallV2",
"XlaDequantize",
"XlaDot",
"XlaDotV2",
"XlaDynamicSlice",
"XlaDynamicUpdateSlice",
"XlaEinsum",
"XlaGather",
"XlaIf",
"XlaKeyValueSort",
"XlaOptimizationBarrier",
"XlaPad",
"XlaRecv",
"XlaReduce",
"XlaReducePrecision",
"XlaReduceScatter",
"XlaReduceWindow",
"XlaRemoveDynamicDimensionSize",
"XlaReplicaId",
"XlaRngBitGenerator",
"XlaScatter",
"XlaSelectAndScatter",
"XlaSelfAdjointEig",
"XlaSend",
"XlaSetBound",
"XlaSetDynamicDimensionSize",
"XlaSharding",
"XlaSort",
"XlaSplitND",
"XlaSpmdFullToShardShape",
"XlaSpmdShardToFullShape",
"XlaSvd",
"XlaVariadicReduce",
"XlaVariadicReduceV2",
"XlaVariadicSort",
"XlaWhile",
"Zeta",
"_Arg",
"_ArrayToList",
"_ListToArray",
"_Retval"};
return result;
}
}
}
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h"
#include <algorithm>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/mark_for_compilation_pass_test_helper.h"
#include "tensorflow/compiler/jit/node_matchers.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/test.h"
using ::tensorflow::testing::FindNodeByName;
namespace tensorflow {
namespace {
static bool Initialized = [] {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
return true;
}();
REGISTER_OP("UncompilableNullary").Output("o: float");
REGISTER_OP("UncompilableUnary").Input("a: float").Output("o: float");
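// Returns a map from node name to assigned cluster; nodes without a cluster
// attribute are omitted.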
std::unordered_map<string, string> GetClusters(const Graph& graph) {
std::unordered_map<string, string> ids;
for (Node* node : graph.nodes()) {
string cluster;
if (TryGetNodeAttr(node->attrs(), kXlaClusterAttr, &cluster)) {
CHECK(!cluster.empty());
ids[node->name()] = cluster;
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "Clusters:";
for (const auto& p : ids) {
VLOG(2) << " " << p.first << " -> " << p.second;
}
}
return ids;
}
std::set<string> GetClusterNames(const Graph& graph) {
std::set<string> names;
for (Node* node : graph.nodes()) {
string cluster;
if (TryGetNodeAttr(node->attrs(), kXlaClusterAttr, &cluster)) {
CHECK(!cluster.empty());
names.insert(cluster);
}
}
return names;
}
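// Groups clustered node names (sorted) by cluster, optionally also returning
// the sorted list of cluster names.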
absl::flat_hash_map<string, std::vector<string>> GetClusterSets(
const Graph& g, std::vector<string>* cluster_names = nullptr) {
CHECK(cluster_names == nullptr || cluster_names->empty());
absl::flat_hash_map<string, std::vector<string>> cluster_sets;
for (const auto& p : GetClusters(g)) {
cluster_sets[p.second].push_back(p.first);
}
for (auto& p : cluster_sets) {
if (cluster_names != nullptr) {
cluster_names->push_back(p.first);
}
std::sort(p.second.begin(), p.second.end());
}
if (cluster_names != nullptr) {
std::sort(cluster_names->begin(), cluster_names->end());
}
return cluster_sets;
}
TEST(XlaCompilationTest, Chains) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a =
ops::SourceOp("UncompilableNullary", builder.opts().WithName("A"));
Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
Node* d =
ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D"));
Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E"));
ops::UnaryOp("Relu", e, builder.opts().WithName("F"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_EQ(4, clusters.size());
EXPECT_EQ(clusters["B"], clusters["C"]);
EXPECT_EQ(clusters["E"], clusters["F"]);
EXPECT_NE(clusters["B"], clusters["E"]);
EXPECT_TRUE(clusters.find("A") == clusters.cend());
EXPECT_TRUE(clusters.find("D") == clusters.cend());
}
TEST(XlaCompilationTest, UncompilableCycles) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b =
ops::UnaryOp("UncompilableUnary", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_TRUE(clusters.empty());
}
TEST(XlaCompilationTest, CompilableCycles) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_EQ(3, clusters.size());
EXPECT_EQ(clusters["A"], clusters["B"]);
EXPECT_EQ(clusters["A"], clusters["C"]);
}
TEST(XlaCompilationTest, StringUnsupported) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp(
"Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_STRING)
.WithAttr("value", Tensor(DT_STRING, TensorShape())));
Node* b = ops::UnaryOp("EncodeBase64", a, builder.opts().WithName("B"));
ops::BinaryOp("StringSplit", a, b, builder.opts().WithName("C"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_TRUE(clusters.empty());
}
TEST(XlaCompilationTest, WhereUnsupported) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_INT32)
.WithAttr("value", Tensor()));
Node* b = ops::UnaryOp("Where", a, builder.opts().WithName("B"));
ops::BinaryOp("Gather", b, a, builder.opts().WithName("C"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_TRUE(!clusters.empty());
}
TEST(XlaCompilationTest, HalfSupported) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Tensor t(DT_HALF, TensorShape());
t.scalar<Eigen::half>()() = static_cast<Eigen::half>(0.0f);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_HALF)
.WithAttr("value", t));
Node* b = ops::UnaryOp("Neg", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_FALSE(clusters.empty());
}
TEST(XlaCompilationTest, PartitionedCallUnsupported) {
FunctionDef compilable = FunctionDefHelper::Define(
"CompilableFn", {"n_a:float", "n_b:float"}, {"n_c:float"}, {},
{{{"n_c"}, "Add", {"n_a", "n_b"}, {{"T", DT_FLOAT}}}});
FunctionDef uncompilable =
FunctionDefHelper::Define("UncompilableFn", {"n_a:float"}, {"n_c:float"},
{}, {{{"n_c"}, "UncompilableUnary", {"n_a"}}});
FunctionDefLibrary flib;
*flib.add_function() = compilable;
*flib.add_function() = uncompilable;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
std::unique_ptr<Graph> graph(new Graph(&flib_def));
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
NameAttrList b_name_attr;
b_name_attr.set_name("CompilableFn");
ops::PartitionedCall b(root.WithOpName("B"), {a, a}, {DT_FLOAT}, b_name_attr);
NameAttrList c_name_attr;
c_name_attr.set_name("UncompilableFn");
ops::PartitionedCall c(root.WithOpName("C"), {a}, {DT_FLOAT}, c_name_attr);
Output d = ops::Add(root.WithOpName("D"), b.output.front(), c.output.front());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def));
auto clusters = GetClusters(*graph);
EXPECT_EQ(2, clusters.size());
EXPECT_FALSE(clusters["B"].empty());
EXPECT_TRUE(clusters["C"].empty());
EXPECT_EQ(clusters["B"], clusters["D"]);
}
TEST(XlaCompilationTest, FunctionCalls) {
FunctionDef compilable = FunctionDefHelper::Define(
"CompilableFn", {"n_a:float", "n_b:float"}, {"n_c:float"}, {},
{{{"n_c"}, "Add", {"n_a", "n_b"}, {{"T", DT_FLOAT}}}});
FunctionDef uncompilable =
FunctionDefHelper::Define("UncompilableFn", {"n_a:float"}, {"n_c:float"},
{}, {{{"n_c"}, "UncompilableUnary", {"n_a"}}});
FunctionDef noinline = compilable;
noinline.mutable_signature()->set_name("NoInlineFn");
AddAttr("_noinline", static_cast<bool>(true), noinline.mutable_attr());
FunctionDefLibrary flib;
*flib.add_function() = compilable;
*flib.add_function() = uncompilable;
*flib.add_function() = noinline;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
std::unique_ptr<Graph> graph(new Graph(&flib_def));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
Node* a =
ops::SourceOp("UncompilableNullary", builder.opts().WithName("A"));
Node* b = ops::BinaryOp("CompilableFn", a, a, builder.opts().WithName("B"));
Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
ops::UnaryOp("UncompilableFn", c, builder.opts().WithName("D"));
ops::BinaryOp("NoInlineFn", c, c, builder.opts().WithName("E"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def));
auto clusters = GetClusters(*graph);
EXPECT_EQ(2, clusters.size());
EXPECT_FALSE(clusters["C"].empty());
EXPECT_EQ(clusters["C"], clusters["E"]);
EXPECT_TRUE(clusters.find("A") == clusters.cend());
EXPECT_TRUE(clusters.find("B") == clusters.cend());
EXPECT_TRUE(clusters.find("D") == clusters.cend());
}
TEST(XlaCompilationTest, CallXlaDeviceFuncWithResourceOp) {
FunctionDef compilable = FunctionDefHelper::Define(
"FnWithResourceOp", {"var:resource", "val:float"}, {"retval:float"}, {},
{{{"assign_op"},
"AssignVariableOp",
{"var", "val"},
{{"dtype", DT_FLOAT}}},
{{"retval"}, "Identity", {"val"}, {{"T", DT_FLOAT}}, {"assign_op"}}});
FunctionDefLibrary flib;
*flib.add_function() = compilable;
FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib);
std::unique_ptr<Graph> graph(new Graph(&flib_def));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def);
Node* resource =
ops::SourceOp("VarHandleOp", builder.opts()
.WithName("varhandle")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("shape", TensorShape({})));
Tensor const_tensor(DT_FLOAT, TensorShape({}));
const_tensor.scalar<float>()() = 42.0f;
Node* value = ops::SourceOp("Const", builder.opts()
.WithName("const")
.WithAttr("value", const_tensor)
.WithAttr("dtype", DT_FLOAT));
Node* call = ops::BinaryOp("FnWithResourceOp", resource, value,
builder.opts().WithName("A"));
Node* tanh0 = ops::UnaryOp("Tanh", call, builder.opts().WithName("tanh0"));
Node* tanh1 = ops::UnaryOp("Tanh", tanh0, builder.opts().WithName("tanh1"));
ops::UnaryOp("Tanh", tanh1, builder.opts().WithName("tanh2"));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
string xla_cpu_device = "/job:worker/replica:0/task:0/device:XLA_CPU:0";
testing::FindNodeByName(graph.get(), "A")
->set_assigned_device_name(xla_cpu_device);
testing::FindNodeByName(graph.get(), "tanh0")
->set_assigned_device_name(xla_cpu_device);
testing::FindNodeByName(graph.get(), "tanh1")
->set_assigned_device_name(xla_cpu_device);
testing::FindNodeByName(graph.get(), "tanh2")
->set_assigned_device_name(xla_cpu_device);
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def));
auto clusters = GetClusters(*graph);
EXPECT_NE(clusters["A"], "");
}
static Status GradForUnaryCwise(FunctionDef* g,
std::vector<FunctionDefHelper::Node> nodes) {
for (auto& n : nodes) {
if (n.attr.empty()) {
n.attr = {{"T", DT_FLOAT}};
}
}
*g = FunctionDefHelper::Define(
{"x: float", "dy: float"},
{"dx: float"},
{},
nodes);
return absl::OkStatus();
}
Status SupportedGrad(const AttrSlice& attrs, FunctionDef* g) {
return GradForUnaryCwise(g, {
{{"y"}, "Tanh", {"x"}},
{{"y2"}, "Square", {"y"}, {}, {"dy"}},
FunctionDefHelper::Const("one", 1.0f),
{{"a"}, "Sub", {"one", "y2"}},
{{"dx"}, "Mul", {"dy", "a"}},
});
}
REGISTER_OP_GRADIENT("Supported", SupportedGrad);
Status UnsupportedGrad(const AttrSlice& attrs, FunctionDef* g) {
return GradForUnaryCwise(g, {
{{"y"}, "Tanh", {"x"}},
{{"y2"}, "UncompilableUnary", {"y"}, {}, {"dy"}},
FunctionDefHelper::Const("one", 1.0f),
{{"a"}, "Sub", {"one", "y2"}},
{{"dx"}, "Mul", {"dy", "a"}},
});
}
REGISTER_OP_GRADIENT("Unsupported", UnsupportedGrad);
TEST(XlaCompilationTest, SymbolicGradients) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a =
ops::SourceOp("UncompilableNullary", builder.opts().WithName("A"));
NodeBuilder b_builder("B", "SymbolicGradient",
builder.opts().op_registry());
NameAttrList b_name_attr;
b_name_attr.set_name("Supported");
b_builder.Attr("f", b_name_attr);
b_builder.Attr("Tin", {DT_FLOAT, DT_FLOAT});
b_builder.Attr("Tout", {DT_FLOAT});
b_builder.Input({a, a});
Node* b = builder.opts().FinalizeBuilder(&b_builder);
Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
NodeBuilder d_builder("D", "SymbolicGradient",
builder.opts().op_registry());
NameAttrList d_name_attr;
d_name_attr.set_name("Unsupported");
d_builder.Attr("f", d_name_attr);
d_builder.Attr("Tin", {DT_FLOAT, DT_FLOAT});
d_builder.Attr("Tout", {DT_FLOAT});
d_builder.Input({c, c});
builder.opts().FinalizeBuilder(&d_builder);
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_EQ(2, clusters.size());
EXPECT_FALSE(clusters["B"].empty());
EXPECT_EQ(clusters["B"], clusters["C"]);
EXPECT_TRUE(clusters.find("A") == clusters.cend());
EXPECT_TRUE(clusters.find("D") == clusters.cend());
}
TEST(XlaCompilationTest, Loops) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT);
auto c = ops::Add(root.WithOpName("C"), a, b);
auto enter = ops::internal::Enter(root, c, "aframe");
auto next_iter = ops::NextIteration(root, enter);
auto exit = ops::internal::Exit(root, next_iter);
auto d = ops::Add(root.WithOpName("D"), c, exit);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_EQ(0, clusters.size());
}
TEST(XlaCompilationTest, CyclesWithAllDifferentScopesGlobalJitOverridden) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor())
.WithAttr(kXlaScopeAttr, "ScopeA"));
Node* b = ops::UnaryOp(
"Relu", a,
builder.opts().WithName("B").WithAttr(kXlaScopeAttr, "ScopeB"));
ops::BinaryOp(
"MatMul", a, b,
builder.opts().WithName("C").WithAttr(kXlaScopeAttr, "ScopeC"));
TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
FunctionDefLibrary flib;
FunctionLibraryDefinition flib_def(graph->op_registry(), flib);
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def));
auto clusters = GetClusters(*graph);
EXPECT_EQ(3, clusters.size());
EXPECT_EQ(clusters["A"], clusters["B"]);
EXPECT_EQ(clusters["A"], clusters["C"]);
}
TEST(XlaCompilationTest, CyclesWithAllDifferentScopes) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor())
.WithAttr(kXlaScopeAttr, "ScopeA"));
Node* b = ops::UnaryOp(
"Relu", a,
builder.opts().WithName("B").WithAttr(kXlaScopeAttr, "ScopeB"));
ops::BinaryOp(
"MatMul", a, b,
builder.opts().WithName("C").WithAttr(kXlaScopeAttr, "ScopeC"));
TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph, MarkForCompilationPassTestHelper::Options().WithNoGlobalJit()));
auto clusters = GetClusters(*graph);
EXPECT_EQ(0, clusters.size());
}
TEST(XlaCompilationTest, CyclesWithSplittingScopes) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor())
.WithAttr(kXlaCompileAttr, true)
.WithAttr(kXlaScopeAttr, "Scope1"));
Node* b = ops::UnaryOp("Relu", a,
builder.opts()
.WithName("B")
.WithAttr(kXlaCompileAttr, true)
.WithAttr(kXlaScopeAttr, "Scope1"));
Node* c = ops::BinaryOp("MatMul", a, b,
builder.opts()
.WithName("C")
.WithAttr(kXlaCompileAttr, true)
.WithAttr(kXlaScopeAttr, "Scope2"));
ops::BinaryOp("Add", b, c,
builder.opts()
.WithName("D")
.WithAttr(kXlaCompileAttr, true)
.WithAttr(kXlaScopeAttr, "Scope2"));
TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph, MarkForCompilationPassTestHelper::Options().WithNoGlobalJit()));
auto clusters = GetClusters(*graph);
EXPECT_EQ(4, clusters.size());
EXPECT_EQ(clusters["A"], clusters["B"]);
EXPECT_NE(clusters["A"], clusters["C"]);
EXPECT_EQ(clusters["C"], clusters["D"]);
}
TEST(XlaCompilationTest, CyclesWithDifferentScopesAndBridge) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor())
.WithAttr(kXlaCompileAttr, true)
.WithAttr(kXlaScopeAttr, "ScopeA"));
Node* b = ops::UnaryOp("Relu", a,
builder.opts()
.WithName("B")
.WithAttr(kXlaCompileAttr, true)
.WithAttr(kXlaScopeAttr, "ScopeB"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C"));
TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph, MarkForCompilationPassTestHelper::Options().WithNoGlobalJit()));
auto clusters = GetClusters(*graph);
EXPECT_EQ(2, clusters.size());
EXPECT_NE(clusters["A"], clusters["B"]);
EXPECT_NE(clusters["B"], clusters["C"]);
}
TEST(XlaCompilationTest, DontClusterNodesWithMismatchingDeadness) {
Scope root = Scope::NewRootScope().ExitOnError();
Output cond_a = ops::Placeholder(root.WithOpName("cond_a"), DT_BOOL);
Output cond_b = ops::Placeholder(root.WithOpName("cond_b"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch switch_a(root.WithOpName("switch_a"), value, cond_a);
ops::Switch switch_b(root.WithOpName("switch_b"), value, cond_b);
Output tanh_a0 = ops::Tanh(root.WithOpName("tan_a0"), switch_a.output_true);
Output tanh_a1 = ops::Tanh(root.WithOpName("tan_a1"), tanh_a0);
Output tanh_b0 = ops::Tanh(root.WithOpName("tan_b0"), switch_b.output_true);
Output tanh_b1 = ops::Tanh(root.WithOpName("tan_b1"), tanh_b0);
Output add = ops::Add(root.WithOpName("add"), tanh_a1, tanh_b1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph,
MarkForCompilationPassTestHelper::Options().WithDeadnessAnalysis()));
auto clusters = GetClusters(*graph);
EXPECT_NE(clusters["tan_a0"], "");
EXPECT_NE(clusters["tan_a1"], "");
EXPECT_NE(clusters["tan_b0"], "");
EXPECT_NE(clusters["tan_b1"], "");
EXPECT_EQ(clusters["tan_a0"], clusters["tan_a1"]);
EXPECT_EQ(clusters["tan_b0"], clusters["tan_b1"]);
EXPECT_NE(clusters["tan_a0"], clusters["tan_b0"]);
}
TEST(XlaCompilationTest, ClusterNodesWithMismatchingInputDeadness) {
Scope root = Scope::NewRootScope().ExitOnError();
Output cond_a = ops::Placeholder(root.WithOpName("cond_a"), DT_BOOL);
Output cond_b = ops::Placeholder(root.WithOpName("cond_b"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch switch_a(root.WithOpName("switch_a"), value, cond_a);
ops::Switch switch_b(root.WithOpName("switch_b"), value, cond_b);
Output add_a = ops::Add(root.WithOpName("add_a"), switch_a.output_true,
switch_b.output_true);
Output add_b = ops::Add(root.WithOpName("add_b"), switch_a.output_true,
switch_b.output_true);
Output add = ops::Add(root.WithOpName("add_c"), add_a, add_b);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph,
MarkForCompilationPassTestHelper::Options().WithDeadnessAnalysis()));
auto clusters = GetClusters(*graph);
EXPECT_NE(clusters["add_a"], "");
EXPECT_NE(clusters["add_b"], "");
EXPECT_NE(clusters["add_c"], "");
EXPECT_EQ(clusters["add_a"], clusters["add_b"]);
EXPECT_EQ(clusters["add_b"], clusters["add_c"]);
}
namespace {
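// Helpers that build a resource-variable read, a resource-variable write, and
// a resource-neutral Const, used to exercise the resource-ordering constraints
// in the tests below.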
Node* MakeRead(const Scope& scope, const string& id,
Node** var_handle_op = nullptr) {
Output var_handle =
ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({}));
Output read =
ops::ReadVariableOp(scope.WithOpName("Read" + id), var_handle, DT_FLOAT);
if (var_handle_op) {
*var_handle_op = var_handle.node();
}
return read.node();
}
Node* MakeWrite(const Scope& scope, const string& id) {
Output var_handle =
ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({}));
Output value_to_write =
ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f);
ops::AssignVariableOp assign_op(scope.WithOpName("Assignment" + id),
var_handle, value_to_write);
return assign_op.operation.node();
}
Node* MakeNeutral(const Scope& scope, const string& id) {
return ops::Const(scope.WithOpName("Const" + id), 42.0f).node();
}
}
TEST(XlaCompilationTest, ResourcesClusteringAllowed) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(read, write);
FixupSourceAndSinkEdges(root.graph());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
absl::flat_hash_map<string, std::vector<string>> cluster_sets =
GetClusterSets(*graph);
ASSERT_EQ(cluster_sets.size(), 1);
std::vector<string> expected_clustered_nodes = {"AssignmentW", "ReadR",
"ValueToAssignW"};
ASSERT_EQ(cluster_sets.begin()->second, expected_clustered_nodes);
}
TEST(XlaCompilationTest, ResourcesClusteringDisallowed) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* read = MakeRead(root, "R");
Node* write = MakeWrite(root, "W");
root.graph()->AddControlEdge(write, read);
FixupSourceAndSinkEdges(root.graph());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
absl::flat_hash_map<string, std::vector<string>> cluster_sets =
GetClusterSets(*graph);
ASSERT_EQ(cluster_sets.size(), 0);
}
TEST(XlaCompilationTest, ChainOfOps) {
Scope root = Scope::NewRootScope().ExitOnError();
Node* write_0 = MakeWrite(root, "W0");
Node* neutral_0 = MakeNeutral(root, "N0");
Node* read_0 = MakeRead(root, "R0");
Node* write_1 = MakeWrite(root, "W1");
Node* neutral_1 = MakeNeutral(root, "N1");
Node* read_1 = MakeRead(root, "R1");
root.graph()->AddControlEdge(write_0, neutral_0);
root.graph()->AddControlEdge(neutral_0, read_0);
root.graph()->AddControlEdge(read_0, write_1);
root.graph()->AddControlEdge(write_1, neutral_1);
root.graph()->AddControlEdge(neutral_1, read_1);
FixupSourceAndSinkEdges(root.graph());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::vector<string> cluster_names;
absl::flat_hash_map<string, std::vector<string>> cluster_sets =
GetClusterSets(*graph, &cluster_names);
ASSERT_EQ(cluster_sets.size(), 1);
std::vector<string> expected_clustered_nodes_a = {
"AssignmentW1", "ConstN0", "ReadR0", "ValueToAssignW1"};
ASSERT_EQ(cluster_sets[cluster_names[0]], expected_clustered_nodes_a);
}
TEST(XlaCompilationTest, IllegalCycle_UsefulErrorMessage) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
{
auto BuildNoopNode = [](absl::string_view name, Graph* graph) {
NodeDefBuilder builder(name, "NoOp");
NodeDef def;
TF_CHECK_OK(builder.Finalize(&def));
Status status;
Node* node = graph->AddNode(def, &status);
TF_CHECK_OK(status);
return node;
};
Node* a = BuildNoopNode("a", graph.get());
Node* b = BuildNoopNode("b", graph.get());
Node* c = BuildNoopNode("c", graph.get());
graph->AddControlEdge(a, b);
graph->AddControlEdge(b, c);
graph->AddControlEdge(c, a);
}
TF_EXPECT_OK(root.ToGraph(graph.get()));
Status status = MarkForCompilationPassTestHelper::MarkForCompilation(&graph);
EXPECT_FALSE(status.ok());
EXPECT_TRUE(absl::StrContains(status.ToString(),
"Edge from c to a would create a cycle.\n"
"+-> a\n"
"| b\n"
"+-- c\n"));
}
TEST(XlaCompilationTest, Retval) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
ops::UnaryOp("_Retval", b,
builder.opts()
.WithName("R")
.WithAttr("T", DT_FLOAT)
.WithAttr("index", 0));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_TRUE(clusters.empty());
}
TEST(XlaCompilationTest, DontCountIdentityOps) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
{
auto a = ops::_Arg(root.WithOpName("A"), DT_INT32, 0);
auto b = ops::Identity(root.WithOpName("B"), a);
auto c = ops::Identity(root.WithOpName("C"), b);
auto r = ops::_Retval(root.WithOpName("R"), c, 0);
}
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_TRUE(clusters.empty());
}
TEST(XlaCompilationTest, ConstOp) {
{
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto c = ops::Const(root.WithOpName("const"), 0.5f);
c.node()->AddAttr(kXlaCompileAttr, true);
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
EXPECT_EQ(1, GetClusters(*graph).size());
}
{
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope root = Scope::NewRootScope().ExitOnError();
auto c = ops::Const(root.WithOpName("const"), string("string"));
c.node()->AddAttr(kXlaCompileAttr, true);
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
EXPECT_TRUE(GetClusters(*graph).empty());
}
}
TEST(XlaCompilationTest, DontClusterIdentityWithRefInput) {
Scope root = Scope::NewRootScope().ExitOnError();
Output variable = ops::Variable(root.WithOpName("variable"),
PartialTensorShape{}, DT_FLOAT);
Output read = ops::Identity(root.WithOpName("read"), variable);
Output neg = ops::Negate(root.WithOpName("negate"), read);
Output add = ops::Add(root.WithOpName("add"), neg, neg);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
ASSERT_FALSE(clusters.empty());
string cluster_name = clusters.begin()->second;
std::unordered_map<string, string> expected_clusters(
{{"negate", cluster_name}, {"add", cluster_name}});
EXPECT_EQ(clusters, expected_clusters);
}
TEST(XlaCompilationTest, ClusterIdentityWithNonRefInput) {
Scope root = Scope::NewRootScope().ExitOnError();
Output variable = ops::Variable(root.WithOpName("variable"),
PartialTensorShape{}, DT_FLOAT);
Output read = ops::Identity(root.WithOpName("read"), variable);
Output neg = ops::Negate(root.WithOpName("negate"), read);
  Output identity = ops::Identity(root.WithOpName("identity"), neg);
Output add = ops::Add(root.WithOpName("add"), identity, neg);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
ASSERT_FALSE(clusters.empty());
string cluster_name = clusters.begin()->second;
std::unordered_map<string, string> expected_clusters(
{{"negate", cluster_name},
{"identity", cluster_name},
{"add", cluster_name}});
EXPECT_EQ(clusters, expected_clusters);
}
TEST(XlaCompilationTest, ClusterControlTrigger) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv_a = ops::_Recv(root.WithOpName("recv_a"), DT_BOOL, "tensor_a",
"sender", 0, "receiver");
Output recv_b = ops::_Recv(root.WithOpName("recv_b"), DT_BOOL, "tensor_b",
"sender", 0, "receiver");
Output const_a = ops::Const(root.WithOpName("const_a"), 42);
ops::ControlTrigger ctrl_trigger_a(root.WithOpName("ctrl_trigger_a"));
ops::ControlTrigger ctrl_trigger_b(root.WithOpName("ctrl_trigger_b"));
root.graph()->AddControlEdge(recv_a.node(), ctrl_trigger_a.operation.node());
root.graph()->AddControlEdge(recv_b.node(), ctrl_trigger_a.operation.node());
root.graph()->AddControlEdge(ctrl_trigger_b.operation.node(), const_a.node());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_TRUE(clusters.empty());
}
TEST(XlaCompilationTest, RandomShape) {
Scope root = Scope::NewRootScope().ExitOnError();
Output shape_shape = ops::Const(root.WithOpName("shape_shape"), {2}, {1});
Output shape =
ops::RandomUniformInt(root.WithOpName("shape"), shape_shape,
ops::Const(root.WithOpName("minval"), 1),
ops::Const(root.WithOpName("maxval"), 20));
Output reshape_input =
ops::Placeholder(root.WithOpName("reshape_input"), DT_FLOAT,
ops::Placeholder::Shape(TensorShape({500, 500})));
Output reshape =
ops::Reshape(root.WithOpName("reshape"), reshape_input, shape);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["shape"], "");
}
TEST(XlaCompilationTest, RandomShapeWithFunc) {
Scope root = Scope::DisabledShapeInferenceScope().ExitOnError();
FunctionDefLibrary flib_def;
FunctionDef func = FunctionDefHelper::Create(
"Stateful_func", {},
{"out: int32"},
{},
{FunctionDefHelper::Const("shape_shape", 2),
FunctionDefHelper::Const("minval", 1),
FunctionDefHelper::Const("maxval", 20),
{{"shape"},
"RandomUniformInt",
{"shape_shape:output:0", "minval:output:0", "maxval:output:0"},
{{"Tout", DataType::DT_INT32}, {"T", DataType::DT_INT32}}}},
{{"out", "shape:output:0"}});
func.mutable_signature()->set_is_stateful(true);
*flib_def.add_function() = std::move(func);
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def));
NodeDef call_node;
call_node.set_name("fn_call");
call_node.set_op("Stateful_func");
Status status;
Node* call = root.graph()->AddNode(call_node, &status);
TF_ASSERT_OK(status);
Output shape = Output(call, 0);
Output reshape_input =
ops::Placeholder(root.WithOpName("reshape_input"), DT_FLOAT,
ops::Placeholder::Shape(TensorShape({500, 500})));
Output reshape =
ops::Reshape(root.WithOpName("reshape"), reshape_input, shape);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
auto fld = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(),
flib_def);
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph, fld.get()));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["fn_call"], "");
}
TEST(XlaCompilationTest, RandomShapeOnXlaDevice) {
absl::string_view xla_gpu_device =
"/job:worker/replica:0/task:0/device:XLA_GPU:0";
Scope root = Scope::NewRootScope().ExitOnError();
Output shape_shape =
ops::Const(root.WithOpName("test/shape_shape"), {2}, {1});
Output shape =
ops::RandomUniformInt(root.WithOpName("test/shape_rng"), shape_shape,
ops::Const(root.WithOpName("test/minval"), 1),
ops::Const(root.WithOpName("test/maxval"), 20));
Output reshape_input =
ops::Placeholder(root.WithOpName("test/reshape_input"), DT_FLOAT,
ops::Placeholder::Shape(TensorShape({500, 500})));
Output reshape =
ops::Reshape(root.WithOpName("test/reshape"), reshape_input, shape);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::StartsWith(n->name(), "test/")) {
n->set_assigned_device_name(string(xla_gpu_device));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/shape_rng"], "");
EXPECT_EQ(clusters["test/reshape"], "");
}
TEST(XlaCompilationTest, TensorArrayShapeOnXlaDevice) {
absl::string_view xla_gpu_device =
"/job:worker/replica:0/task:0/device:XLA_GPU:0";
Scope root = Scope::NewRootScope().ExitOnError();
ops::TensorArray tensor_array(root.WithOpName("test/tensor_array"), 1,
DT_INT32);
Output zero = ops::Const(root.WithOpName("test/zero"), 0);
ops::TensorArrayWrite tensor_array_write(
root.WithOpName("test/write"), tensor_array.handle, zero,
ops::Const(root.WithOpName("test/forty_two"), 42.0f), tensor_array.flow);
Output tensor_array_read =
ops::TensorArrayRead(root.WithOpName("test/read"), tensor_array.handle,
zero, tensor_array_write.flow_out, DT_INT32);
Output reshape =
ops::Reshape(root.WithOpName("test/reshape"),
ops::Placeholder(root.WithOpName("placeholder"), DT_FLOAT),
tensor_array_read);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::StartsWith(n->name(), "test/")) {
n->set_assigned_device_name(string(xla_gpu_device));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/read"], "");
EXPECT_EQ(clusters["test/read"], clusters["test/reshape"]);
}
TEST(XlaCompilationTest, DontClusterMergingNodes) {
Scope root = Scope::NewRootScope().ExitOnError();
absl::string_view xla_gpu_dev0 =
"/job:worker/replica:0/task:0/device:XLA_GPU:0";
absl::string_view xla_gpu_dev1 =
"/job:worker/replica:0/task:0/device:XLA_GPU:1";
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Output a = ops::Tanh(root.WithOpName("tanh_A_dev0"),
ops::Const(root.WithOpName("A_dev0"), 1.0f, {2, 2}));
Output b = ops::Tanh(root.WithOpName("tanh_B_dev1"),
ops::Const(root.WithOpName("B_dev1"), 1.0f, {2, 2}));
Output matmul0 = ops::MatMul(root.WithOpName("MatMul0_dev0"), a, a);
Output matmul1 = ops::MatMul(root.WithOpName("MatMul1_dev1"), b, b);
Output combined =
ops::MatMul(root.WithOpName("MatMulCombined_dev1"), matmul0, matmul1);
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::EndsWith(n->name(), "dev0")) {
n->set_assigned_device_name(string(xla_gpu_dev0));
} else if (absl::EndsWith(n->name(), "dev1")) {
n->set_assigned_device_name(string(xla_gpu_dev1));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["MatMul0_dev0"], clusters["MatMul1_dev1"]);
EXPECT_NE(clusters["MatMulCombined_dev1"], clusters["MatMul0_dev0"]);
EXPECT_NE(clusters["MatMulCombined_dev1"], clusters["MatMul1_dev1"]);
EXPECT_EQ(clusters["A_dev0"], clusters["MatMul0_dev0"]);
EXPECT_EQ(clusters["B_dev1"], clusters["MatMul1_dev1"]);
}
TEST(XlaCompilationTest, DontClusterMergingNodesOnCPU) {
Scope root = Scope::NewRootScope().ExitOnError();
absl::string_view xla_gpu_dev0 = "/job:worker/replica:0/task:0/device:GPU:0";
absl::string_view xla_gpu_dev1 = "/job:worker/replica:0/task:0/device:GPU:1";
absl::string_view xla_cpu_dev0 = "/job:worker/replica:0/task:0/device:CPU:0";
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Output a = ops::Tanh(root.WithOpName("tanh_A_dev0"),
ops::Const(root.WithOpName("A_dev0"), 1.0f, {2, 2}));
Output b = ops::Tanh(root.WithOpName("tanh_B_dev1"),
ops::Const(root.WithOpName("B_dev1"), 1.0f, {2, 2}));
Output matmul0 = ops::MatMul(root.WithOpName("MatMul0_dev0"), a, a);
Output matmul1 = ops::MatMul(root.WithOpName("MatMul1_dev1"), b, b);
Output combined =
ops::MatMul(root.WithOpName("MatMulCombined_cpu"), matmul0, matmul1);
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::EndsWith(n->name(), "cpu")) {
n->set_assigned_device_name(string(xla_cpu_dev0));
} else if (absl::EndsWith(n->name(), "dev0")) {
n->set_assigned_device_name(string(xla_gpu_dev0));
} else if (absl::EndsWith(n->name(), "dev1")) {
n->set_assigned_device_name(string(xla_gpu_dev1));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["MatMul0_dev0"], clusters["MatMul1_dev1"]);
EXPECT_NE(clusters["MatMulCombined_cpu"], clusters["MatMul0_dev0"]);
EXPECT_NE(clusters["MatMulCombined_cpu"], clusters["MatMul1_dev1"]);
EXPECT_EQ(clusters["A_dev0"], clusters["MatMul0_dev0"]);
EXPECT_EQ(clusters["B_dev1"], clusters["MatMul1_dev1"]);
}
TEST(XlaCompilationTest, NOT_DontClusterSpreadingNodes) {
Scope root = Scope::NewRootScope().ExitOnError();
absl::string_view xla_gpu_dev0 =
"/job:worker/replica:0/task:0/device:XLA_GPU:0";
absl::string_view xla_gpu_dev1 =
"/job:worker/replica:0/task:0/device:XLA_GPU:1";
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Output a = ops::Const(root.WithOpName("A_dev0"), 1.0f, {2, 2});
Output matmul_source =
ops::MatMul(root.WithOpName("MatMulSource_dev0"), a, a);
Output matmul0 = ops::MatMul(root.WithOpName("MatMul0_dev0"), matmul_source,
matmul_source);
Output matmul1 = ops::MatMul(root.WithOpName("MatMul1_dev1"), matmul_source,
matmul_source);
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::EndsWith(n->name(), "dev0")) {
n->set_assigned_device_name(string(xla_gpu_dev0));
} else if (absl::EndsWith(n->name(), "dev1")) {
n->set_assigned_device_name(string(xla_gpu_dev1));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["A_dev0"], clusters["MatMulSource_dev0"]);
EXPECT_NE(clusters["MatMul0_dev0"], clusters["MatMul1_dev1"]);
EXPECT_NE(clusters["MatMulSource_dev0"], clusters["MatMul1_dev1"]);
EXPECT_EQ(clusters["MatMulSource_dev0"], clusters["MatMul0_dev0"]);
}
TEST(XlaCompilationTest, ClusterStatefulRandomOpOnXlaDevice) {
absl::string_view xla_cpu_device =
"/job:worker/replica:0/task:0/device:XLA_CPU:0";
Scope root = Scope::NewRootScope().ExitOnError();
Output shape = ops::Const(root.WithOpName("test/shape_shape"), {200, 200});
Output a = ops::RandomUniform(root.WithOpName("test/a"), shape, DT_FLOAT);
Output b = ops::RandomUniform(root.WithOpName("test/b"), shape, DT_FLOAT);
Output c = ops::Add(root.WithOpName("test/c"), a, b);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::StartsWith(n->name(), "test/")) {
n->set_assigned_device_name(string(xla_cpu_device));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/a"], "");
EXPECT_NE(clusters["test/b"], "");
EXPECT_NE(clusters["test/c"], "");
}
TEST(XlaCompilationTest, DontAutoClusterStatefulRandomOp) {
Scope root = Scope::NewRootScope().ExitOnError();
Output shape = ops::Const(root.WithOpName("test/shape_shape"), {200, 200});
Output a = ops::RandomUniform(root.WithOpName("test/a"), shape, DT_FLOAT);
Output b = ops::RandomUniform(root.WithOpName("test/b"), shape, DT_FLOAT);
Output c = ops::Add(root.WithOpName("test/c"), a, b);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/a"], "");
EXPECT_EQ(clusters["test/b"], "");
}
TEST(XlaCompilationTest, ClusterDummyOpsOnXlaDevice) {
absl::string_view xla_cpu_device =
"/job:worker/replica:0/task:0/device:XLA_CPU:0";
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output check =
ops::CheckNumerics(root.WithOpName("test/check"), a, "test/check");
Output ge = ops::GreaterEqual(root.WithOpName("test/greaterequal"), check, b);
Operation assert = ops::Assert(root.WithOpName("test/assert"), ge, {a, b});
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
for (Node* n : graph->nodes()) {
if (absl::StartsWith(n->name(), "test/")) {
n->set_assigned_device_name(string(xla_cpu_device));
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/check"], "");
EXPECT_NE(clusters["test/greaterequal"], "");
EXPECT_NE(clusters["test/assert"], "");
}
TEST(XlaCompilationTest, DontAutoClusterDummyOps) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output check =
ops::CheckNumerics(root.WithOpName("test/check"), a, "test/check");
Output ge = ops::GreaterEqual(root.WithOpName("test/greaterequal"), check, b);
Operation assert = ops::Assert(root.WithOpName("test/assert"), ge, {a, b});
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/assert"], "");
EXPECT_EQ(clusters["test/check"], "");
}
TEST(XlaCompilationTest, DontAutoClusterOpsProducingVariant) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_INT64);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_INT64);
Output cast_a = ops::Cast(root.WithOpName("test/cast_a"), a, DT_INT32);
Output cast_b = ops::Cast(root.WithOpName("test/cast_b"), b, DT_INT32);
Output tensor_list_reserve = ops::TensorListReserve(
root.WithOpName("test/tensor_list_reserve"), cast_a, cast_b, DT_FLOAT);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/tensor_list_reserve"], "");
}
TEST(XlaCompilationTest, DontAutoClusterOpsConsumingVariant) {
Scope root = Scope::NewRootScope().ExitOnError();
Output dummy_input =
ops::Placeholder(root.WithOpName("test/dummy_input"), DT_INT64);
Output variant_input =
ops::Placeholder(root.WithOpName("test/variant_input"), DT_VARIANT);
Output dummy_cast =
ops::Cast(root.WithOpName("test/dummy_cast"), dummy_input, DT_INT32);
Output tensor_list_element_shape = ops::TensorListElementShape(
root.WithOpName("test/tensor_list_element_shape"), variant_input,
DT_INT32);
root.graph()->AddControlEdge(dummy_cast.node(),
tensor_list_element_shape.node());
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/tensor_list_element_shape"], "");
}
TEST(XlaCompilationTest, ClusterOpsProducingVariantIfOnXlaDevice) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_INT64);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_INT64);
Output cast_a = ops::Cast(root.WithOpName("test/cast_a"), a, DT_INT32);
Output cast_b = ops::Cast(root.WithOpName("test/cast_b"), b, DT_INT32);
Output tensor_list_reserve = ops::TensorListReserve(
root.WithOpName("test/tensor_list_reserve"), cast_a, cast_b, DT_FLOAT);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
string xla_cpu_device = "/job:worker/replica:0/task:0/device:XLA_CPU:0";
for (Node* n : graph->nodes()) {
if (absl::StartsWith(n->name(), "test/")) {
n->set_assigned_device_name(xla_cpu_device);
}
}
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/tensor_list_reserve"], "");
}
const char* kCPU0 = "/job:worker/replica:0/task:0/device:CPU:0";
const char* kGPU0 = "/job:worker/replica:0/task:0/device:GPU:0";
const char* kXLA_GPU0 = "/job:worker/replica:0/task:0/device:XLA_GPU:0";
const char* kGPU1 = "/job:worker/replica:0/task:0/device:GPU:1";
TEST(XlaCompilationTest, CreateCombinedCpuGpuClusters) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::Add(root.WithOpName("test/x"), a, b);
Output y = ops::MatMul(root.WithOpName("test/y"), a, b);
Output z = ops::Add(root.WithOpName("test/z"), x, y);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/x")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/y")->set_assigned_device_name(kCPU0);
FindNodeByName(graph.get(), "test/z")->set_assigned_device_name(kGPU0);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/x"], "");
EXPECT_EQ(clusters["test/x"], clusters["test/y"]);
EXPECT_EQ(clusters["test/y"], clusters["test/z"]);
}
TEST(XlaCompilationTest, DontCreateGpu0AndGpu1Clusters) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::Add(root.WithOpName("test/x"), a, b);
Output y = ops::Add(root.WithOpName("test/y"), x, x);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/x")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/y")->set_assigned_device_name(kGPU1);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/x"], "");
EXPECT_EQ(clusters["test/y"], "");
}
TEST(XlaCompilationTest, DontCreateCombinedCpuUnknownClusters) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::Add(root.WithOpName("test/x"), a, b);
Output y = ops::Add(root.WithOpName("test/y"), x, x);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/x")->set_assigned_device_name(kCPU0);
FindNodeByName(graph.get(), "test/y")->set_assigned_device_name(kXLA_GPU0);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/x"], "");
EXPECT_EQ(clusters["test/y"], "");
}
TEST(XlaCompilationTest, ClusterResourceOpsWhenSafe) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Node* var_handle;
Node* resource_read = MakeRead(root, "read", &var_handle);
Output b = ops::Add(root.WithOpName("test/b"), Output(resource_read, 0), a);
string resource_read_name = resource_read->name();
string var_handle_name = var_handle->name();
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/b")->set_assigned_device_name(kCPU0);
FindNodeByName(graph.get(), resource_read_name)
->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), var_handle_name)->set_assigned_device_name(kGPU0);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/b"], "");
EXPECT_EQ(clusters["test/b"], clusters[resource_read_name]);
}
TEST(XlaCompilationTest, DontClusterResourceOpsWhenUnsafe) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Node* var_handle;
Node* resource_read = MakeRead(root, "read", &var_handle);
Output b = ops::Add(root.WithOpName("test/b"), Output(resource_read, 0), a);
string resource_read_name = resource_read->name();
string var_handle_name = var_handle->name();
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/b")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), resource_read_name)
->set_assigned_device_name(kCPU0);
FindNodeByName(graph.get(), var_handle_name)->set_assigned_device_name(kCPU0);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/b"], "");
EXPECT_EQ(clusters[resource_read_name], "");
}
TEST(XlaCompilationTest, DontClusterNodesWithScopedAllocatorAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::Add(root.WithOpName("test/x"), a, b);
Output y = ops::MatMul(root.WithOpName("test/y"), a, b);
Output z = ops::Add(root.WithOpName("test/z"), x, y);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/x")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/y")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/z")->set_assigned_device_name(kGPU0);
std::vector<int> scoped_allocator_value;
scoped_allocator_value.push_back(0);
scoped_allocator_value.push_back(155);
FindNodeByName(graph.get(), "test/z")
->AddAttr("_scoped_allocator", scoped_allocator_value);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/z"], "");
}
TEST(XlaCompilationTest, DontClusterNodesWithForwardFromAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::Add(root.WithOpName("test/x"), a, b);
Output y = ops::MatMul(root.WithOpName("test/y"), a, b);
Output z = ops::Add(root.WithOpName("test/z"), x, y);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/x")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/y")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/z")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/z")->AddAttr("_forward_from", 0);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["test/z"], "");
}
TEST(XlaCompilationTest, ClusterShapeConsumerWithProducer) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::MatMul(root.WithOpName("test/x"), a, b);
Output y = ops::Size(root.WithOpName("test/y"), x);
Output z = ops::Add(root.WithOpName("test/z"), y, y);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
FindNodeByName(graph.get(), "test/x")->set_assigned_device_name(kGPU0);
FindNodeByName(graph.get(), "test/y")->set_assigned_device_name(kCPU0);
FindNodeByName(graph.get(), "test/z")->set_assigned_device_name(kGPU1);
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/y"], "");
EXPECT_EQ(clusters["test/x"], clusters["test/y"]);
EXPECT_NE(clusters["test/z"], clusters["test/y"]);
}
TEST(XlaCompilationTest, ClusterShapeConsumerWithProducerAndConsumer) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("test/b"), DT_FLOAT);
Output x = ops::MatMul(root.WithOpName("test/x"), a, b);
Output y = ops::Size(root.WithOpName("test/y"), x);
Output z = ops::Add(root.WithOpName("test/z"), y, y);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["test/y"], "");
EXPECT_EQ(clusters["test/y"], clusters["test/x"]);
EXPECT_EQ(clusters["test/y"], clusters["test/z"]);
}
void AddCtrlEdge(const Scope& scope, Operation a, Operation b) {
scope.graph()->AddControlEdge(a.node(), b.node());
}
void AddCtrlEdge(const Scope& scope, Output a, Operation b) {
AddCtrlEdge(scope, a.op(), b);
}
void AddCtrlEdge(const Scope& scope, Operation a, Output b) {
AddCtrlEdge(scope, a, b.op());
}
TEST(XlaCompilationTest, IterationIncrementAndGroupDeps) {
Scope scope = Scope::NewRootScope().ExitOnError();
Output iter =
ops::VarHandleOp(scope.WithOpName("iter"), DT_INT64, TensorShape({}));
Output weights_0 = ops::VarHandleOp(scope.WithOpName("weights_0"), DT_FLOAT,
TensorShape({1000}));
Output weights_1 = ops::VarHandleOp(scope.WithOpName("weights_1"), DT_FLOAT,
TensorShape({1000}));
Output delta = ops::Placeholder(scope.WithOpName("delta"), DT_FLOAT);
ops::AssignAddVariableOp increment_op(
scope.WithOpName("IncrementIteration"), iter,
ops::Const(scope.WithOpName("one"), static_cast<int64_t>(1)));
ops::AssignAddVariableOp weights_0_update_op(
scope.WithOpName("weights_0_update"), weights_0, delta);
ops::AssignAddVariableOp weights_1_update_op(
scope.WithOpName("weights_1_update"), weights_1, delta);
ops::NoOp group_deps(scope.WithOpName("group_deps"));
ops::NoOp some_ctrl_input(scope.WithOpName("some_ctrl_input"));
Output matmul_input =
ops::Placeholder(scope.WithOpName("matmul_input"), DT_FLOAT);
Output matmul_0 =
ops::MatMul(scope.WithOpName("matmul_0"), matmul_input, matmul_input);
Output matmul_1 =
ops::MatMul(scope.WithOpName("matmul_1"), matmul_input, matmul_input);
AddCtrlEdge(scope, increment_op, group_deps);
AddCtrlEdge(scope, weights_0_update_op, increment_op);
AddCtrlEdge(scope, weights_1_update_op, increment_op);
AddCtrlEdge(scope, some_ctrl_input, weights_0_update_op);
AddCtrlEdge(scope, some_ctrl_input, weights_1_update_op);
AddCtrlEdge(scope, matmul_0, group_deps);
AddCtrlEdge(scope, matmul_1, group_deps);
AddCtrlEdge(scope, weights_0_update_op, matmul_0);
AddCtrlEdge(scope, weights_1_update_op, matmul_1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(scope.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["some_ctrl_input"], "");
EXPECT_EQ(clusters["some_ctrl_input"], clusters["weights_0_update"]);
EXPECT_EQ(clusters["some_ctrl_input"], clusters["weights_1_update"]);
EXPECT_EQ(clusters["some_ctrl_input"], clusters["matmul_0"]);
EXPECT_EQ(clusters["some_ctrl_input"], clusters["matmul_0"]);
}
TEST(XlaCompilationTest, DontClusterTheSpecialIdentityDrivingConstsInLoop) {
Scope root = Scope::NewRootScope().ExitOnError();
Output cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
Output loop_cond = ops::LoopCond(root.WithOpName("loop_cond"), cond);
ops::Switch switch_node(root.WithOpName("switch"), value, loop_cond);
Output identity =
ops::Identity(root.WithOpName("identity"), switch_node.output_true);
Output const_node = ops::Const(root.WithOpName("const"), 1.0f);
root.graph()->AddControlEdge(identity.node(), const_node.node());
Output tanh0 = ops::Tanh(root.WithOpName("tanh0"), const_node);
Output tanh1 = ops::Tanh(root.WithOpName("tanh1"), tanh0);
Output add = ops::Add(root.WithOpName("add"), const_node, tanh1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph,
MarkForCompilationPassTestHelper::Options().WithDeadnessAnalysis()));
auto clusters = GetClusters(*graph);
EXPECT_EQ(clusters["identity"], "");
}
TEST(XlaCompilationTest, UnsupportedEnterExitPattern) {
Scope root = Scope::NewRootScope().ExitOnError();
auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
auto enter_0 = ops::internal::Enter(root.WithOpName("enter_a"), a, "frame");
auto exit_0 = ops::internal::Exit(root.WithOpName("exit_a"), enter_0);
auto tanh = ops::Tanh(root.WithOpName("tanh"), exit_0);
auto enter_1 =
ops::internal::Enter(root.WithOpName("enter_1"), tanh, "frame");
auto exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_EXPECT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
auto clusters = GetClusters(*graph);
EXPECT_EQ(0, clusters.size());
}
TEST(XlaCompilationTest, DeterministicClusterNames) {
auto create_graph =
[](absl::string_view output_name) -> std::unique_ptr<Graph> {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Tensor t(DT_FLOAT, TensorShape());
t.scalar<float>()() = 0.0f;
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("A")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", t));
Node* b = ops::UnaryOp("Neg", a, builder.opts().WithName("B"));
ops::BinaryOp("MatMul", a, b, builder.opts().WithName(output_name));
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
return graph;
};
auto cluster_names_match = [](absl::string_view lhs_cluster_name,
absl::string_view rhs_cluster_name) {
std::vector<absl::string_view> lhs_cluster_name_parts =
absl::StrSplit(lhs_cluster_name, '_');
std::vector<absl::string_view> rhs_cluster_name_parts =
absl::StrSplit(rhs_cluster_name, '_');
if (lhs_cluster_name_parts.size() != 3) {
return errors::FailedPrecondition("unexpected lhs cluster name: ",
lhs_cluster_name);
}
if (rhs_cluster_name_parts.size() != 3) {
return errors::FailedPrecondition("unexpected rhs cluster name: ",
rhs_cluster_name);
}
if (lhs_cluster_name_parts[0] != rhs_cluster_name_parts[0] ||
lhs_cluster_name_parts[1] != rhs_cluster_name_parts[1]) {
return errors::FailedPrecondition(
"Cluster names mismatch: lhs: ", lhs_cluster_name,
" rhs: ", rhs_cluster_name);
}
if (lhs_cluster_name_parts[2] == rhs_cluster_name_parts[2]) {
return errors::FailedPrecondition(
"cluster sequence numbers are the same: lhs: ", lhs_cluster_name,
" rhs: ", rhs_cluster_name);
}
return absl::OkStatus();
};
testing::ResetClusterSequenceNumber();
auto options = MarkForCompilationPassTestHelper::Options()
.WithDeterministicClusterNames();
auto graph0 = create_graph("out");
auto graph1 = create_graph("differs");
auto graph2 = create_graph("out");
auto graph3 = create_graph("differs");
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph0, options));
auto clusters0 = GetClusterNames(*graph0);
ASSERT_EQ(clusters0.size(), 1);
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph1, options));
auto clusters1 = GetClusterNames(*graph1);
ASSERT_EQ(clusters1.size(), 1);
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph2, options));
auto clusters2 = GetClusterNames(*graph2);
ASSERT_EQ(clusters2.size(), 1);
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph3, options));
auto clusters3 = GetClusterNames(*graph3);
ASSERT_EQ(clusters3.size(), 1);
TF_EXPECT_OK(cluster_names_match(*clusters0.begin(), *clusters2.begin()));
TF_EXPECT_OK(cluster_names_match(*clusters1.begin(), *clusters3.begin()));
}
TEST(XlaCompilationTest, ClusterSessionName) {
Scope root = Scope::NewRootScope().ExitOnError();
Output variable = ops::Variable(root.WithOpName("variable"),
PartialTensorShape{}, DT_FLOAT);
Output read = ops::Identity(root.WithOpName("read"), variable);
Output neg = ops::Negate(root.WithOpName("negate"), read);
Output add = ops::Add(root.WithOpName("add"), neg, neg);
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(root.ToGraph(graph.get()));
auto options = MarkForCompilationPassTestHelper::Options().WithSessionName(
"test_session_name");
TF_ASSERT_OK(
MarkForCompilationPassTestHelper::MarkForCompilation(&graph, options));
std::unordered_map<string, string> clusters = GetClusters(*graph);
ASSERT_FALSE(clusters.empty());
string cluster_name = clusters.begin()->second;
std::unordered_map<string, string> expected_clusters(
{{"negate", cluster_name}, {"add", cluster_name}});
EXPECT_EQ(clusters, expected_clusters);
EXPECT_THAT(cluster_name, ::testing::StartsWith("test_session_name"));
}
namespace {
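// Builds a "Stage" node with the given dtypes attribute and input values;
// returns nullptr if the surrounding GraphDefBuilder has already recorded an
// error.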
Node* MakeStageNode(GraphDefBuilder& builder, string name,
std::initializer_list<DataType> dtypes,
absl::Span<const ops::NodeOut> values) {
auto opts = builder.opts()
.WithName(std::move(name))
.WithAttr("dtypes", std::move(dtypes));
if (opts.HaveError()) {
return nullptr;
}
NodeBuilder node_builder(name, "Stage", opts.op_registry());
node_builder.Input(values);
return opts.FinalizeBuilder(&node_builder);
}
}
TEST(XlaCompilationTest, StagePipelinePreservedByClusterScopingPass) {
auto build_staged_graph = [](std::unique_ptr<Graph>* graph) -> Status {
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0"));
Node* add1 =
ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
MakeStageNode(builder, "stage", {DT_FLOAT}, {relu0});
return GraphDefBuilderToGraph(builder, graph->get());
};
{
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(build_staged_graph(&graph));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(
&graph,
MarkForCompilationPassTestHelper::Options().WithNoClusterScoping()));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_EQ(clusters["add0"], clusters["add1"]);
EXPECT_EQ(clusters["add0"], clusters["relu1"]);
EXPECT_EQ(clusters["relu0"], clusters["add1"]);
EXPECT_EQ(clusters["relu0"], clusters["relu1"]);
}
{
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
TF_ASSERT_OK(build_staged_graph(&graph));
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph));
std::unordered_map<string, string> clusters = GetClusters(*graph);
EXPECT_NE(clusters["add0"], clusters["add1"]);
EXPECT_NE(clusters["add0"], clusters["relu1"]);
EXPECT_NE(clusters["relu0"], clusters["add1"]);
EXPECT_NE(clusters["relu0"], clusters["relu1"]);
}
}
TEST(XlaCompilationTest, XLALiteAllowlist) {
auto* allowlist_table = tensorflow::GetAllowlistTable();
absl::flat_hash_set<string> hallowlist;
std::vector<string> vall_ops = XlaOpRegistry::GetAllRegisteredOps();
absl::flat_hash_set<string> all_ops(vall_ops.begin(), vall_ops.end());
for (auto pair : *allowlist_table) {
hallowlist.insert(pair.second.begin(), pair.second.end());
for (auto op : pair.second) {
ASSERT_TRUE(all_ops.contains(op));
}
}
absl::flat_hash_set<string> known_not_in_list =
tensorflow::testing::GetKnownXLAAllowlistOp();
  std::vector<string> unknown_ops;
  for (string op : vall_ops) {
    if (!hallowlist.contains(op) && !known_not_in_list.contains(op)) {
      unknown_ops.push_back(op);
    }
  }
  EXPECT_TRUE(unknown_ops.empty())
      << "Someone added support for new TF operations inside XLA. They must "
         "be included in the XLALite allowlist or denylist:\n"
      << absl::StrJoin(unknown_ops, "\n");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/mark_for_compilation_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/mark_for_compilation_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e71a6411-3f88-4818-900f-d9de1dcb9e52 | cpp | tensorflow/tensorflow | xla_activity_listener | tensorflow/compiler/jit/xla_activity_listener.cc | tensorflow/compiler/jit/xla_activity_listener_test.cc | #include "tensorflow/compiler/jit/xla_activity_listener.h"
#include "absl/synchronization/mutex.h"
#include "tensorflow/compiler/jit/xla_activity.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace {
struct XlaActivityListenerList {
absl::Mutex mutex;
std::vector<std::unique_ptr<XlaActivityListener>> listeners
TF_GUARDED_BY(mutex);
};
void FlushAllListeners();
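// Returns the process-wide list of registered listeners. The list is created
// lazily on first use, and an atexit hook is installed so that every listener
// is flushed at process shutdown.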
XlaActivityListenerList* GetXlaActivityListenerList() {
static XlaActivityListenerList* listener_list = new XlaActivityListenerList;
static int unused = std::atexit(FlushAllListeners);
(void)unused;
return listener_list;
}
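// Invokes `fn` on every registered listener while holding a shared lock,
// stopping at the first listener that returns a non-OK status.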
template <typename FnTy>
Status ForEachListener(FnTy fn) {
XlaActivityListenerList* listener_list = GetXlaActivityListenerList();
absl::ReaderMutexLock reader_lock(&listener_list->mutex);
for (const std::unique_ptr<XlaActivityListener>& listener :
listener_list->listeners) {
TF_RETURN_IF_ERROR(fn(listener.get()));
}
return absl::OkStatus();
}
void FlushAllListeners() {
Status s = ForEachListener([](XlaActivityListener* listener) {
listener->Flush();
return absl::OkStatus();
});
CHECK(s.ok());
}
}
Status BroadcastXlaActivity(
XlaAutoClusteringActivity auto_clustering_activity) {
return ForEachListener([&](XlaActivityListener* listener) {
return listener->Listen(auto_clustering_activity);
});
}
Status BroadcastXlaActivity(
XlaJitCompilationActivity jit_compilation_activity) {
return ForEachListener([&](XlaActivityListener* listener) {
return listener->Listen(jit_compilation_activity);
});
}
Status BroadcastOptimizationRemark(XlaOptimizationRemark optimization_remark) {
VLOG(2) << "OptimizationRemark: " << optimization_remark.DebugString();
return ForEachListener([&](XlaActivityListener* listener) {
return listener->Listen(optimization_remark);
});
}
Status BroadcastOptimizationRemark(
XlaOptimizationRemark::Warning optimization_warning,
string debug_information) {
XlaOptimizationRemark remark;
remark.set_warning(optimization_warning);
remark.set_debug_information(std::move(debug_information));
return BroadcastOptimizationRemark(std::move(remark));
}
void RegisterXlaActivityListener(
std::unique_ptr<XlaActivityListener> listener) {
XlaActivityListenerList* listener_list = GetXlaActivityListenerList();
absl::WriterMutexLock writer_lock(&listener_list->mutex);
listener_list->listeners.push_back(std::move(listener));
}
void XlaActivityListener::Flush() {}
XlaActivityListener::~XlaActivityListener() {}
} | #include "tensorflow/compiler/jit/xla_activity_listener.h"
#include <cstdlib>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/list_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/core/common_runtime/direct_session.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class TestListener : public XlaActivityListener {
public:
Status Listen(
const XlaAutoClusteringActivity& auto_clustering_activity) override {
auto_clustering_activity_ = auto_clustering_activity;
return absl::OkStatus();
}
Status Listen(
const XlaJitCompilationActivity& jit_compilation_activity) override {
jit_compilation_activity_ = jit_compilation_activity;
return absl::OkStatus();
}
Status Listen(const XlaOptimizationRemark& optimization_remark) override {
return absl::OkStatus();
}
~TestListener() override {}
const XlaAutoClusteringActivity& auto_clustering_activity() const {
return auto_clustering_activity_;
}
const XlaJitCompilationActivity& jit_compilation_activity() const {
return jit_compilation_activity_;
}
private:
XlaAutoClusteringActivity auto_clustering_activity_;
XlaJitCompilationActivity jit_compilation_activity_;
};
class XlaActivityListenerTest : public ::testing::Test {
protected:
XlaActivityListenerTest() {
auto listener = std::make_unique<TestListener>();
listener_ = listener.get();
RegisterXlaActivityListener(std::move(listener));
}
TestListener* listener() const { return listener_; }
private:
TestListener* listener_;
};
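// Builds a small graph that chains five MatMul+Add pairs from a single
// placeholder, with every node assigned to the local CPU device.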
GraphDef CreateGraphDef() {
Scope root = Scope::NewRootScope().ExitOnError().WithAssignedDevice(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT);
for (int i = 0; i < 5; i++) {
a = ops::MatMul(root.WithOpName(absl::StrCat("matmul_", i)), a, a);
a = ops::Add(root.WithOpName(absl::StrCat("add_", i)), a, a);
}
GraphDef graph_def;
root.graph()->ToGraphDef(&graph_def);
return graph_def;
}
TEST_F(XlaActivityListenerTest, Test) {
GraphDef graph_def = CreateGraphDef();
SessionOptions options;
options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
std::unique_ptr<Session> session(NewSession(options));
TF_ASSERT_OK(session->Create(graph_def));
std::vector<std::string> output_names = {std::string("add_4:0")};
Tensor tensor_2x2(DT_FLOAT, TensorShape({2, 2}));
for (int i = 0; i < 4; i++) {
tensor_2x2.matrix<float>()(i / 2, i % 2) = 5 * i;
}
Tensor tensor_3x3(DT_FLOAT, TensorShape({3, 3}));
for (int i = 0; i < 9; i++) {
tensor_3x3.matrix<float>()(i / 3, i % 3) = 5 * i;
}
std::vector<std::pair<string, Tensor>> inputs_2x2 = {{"A", tensor_2x2}};
std::vector<Tensor> outputs;
TF_ASSERT_OK(session->Run(inputs_2x2, output_names,
{}, &outputs));
XlaAutoClusteringActivity expected_auto_clustering_activity;
protobuf::TextFormat::ParseFromString(
R"(global_jit_level: ON_2
cpu_global_jit_enabled: true
summary {
unclustered_node_count: 4
clustered_node_count: 14
clusters {
name: "cluster_0"
size: 14
op_histogram {
op: "Add"
count: 1
}
op_histogram {
op: "Const"
count: 4
}
op_histogram {
op: "MatMul"
count: 5
}
op_histogram {
op: "Mul"
count: 4
}
}
unclustered_op_histogram {
op: "NoOp"
count: 2
}
unclustered_op_histogram {
op: "_Arg"
count: 1
}
unclustered_op_histogram {
op: "_Retval"
count: 1
}
}
)",
&expected_auto_clustering_activity);
EXPECT_EQ(listener()->auto_clustering_activity().DebugString(),
expected_auto_clustering_activity.DebugString());
EXPECT_EQ(listener()->jit_compilation_activity().cluster_name(), "cluster_0");
EXPECT_EQ(listener()->jit_compilation_activity().compile_count(), 1);
int64_t first_compile_time =
listener()->jit_compilation_activity().compile_time_us();
EXPECT_GT(first_compile_time, 0);
EXPECT_EQ(listener()->jit_compilation_activity().cumulative_compile_time_us(),
first_compile_time);
std::vector<std::pair<string, Tensor>> inputs_3x3 = {{"A", tensor_3x3}};
outputs.clear();
for (int i = 0; i < 3; i++) {
TF_ASSERT_OK(session->Run(inputs_3x3, output_names,
{}, &outputs));
}
EXPECT_EQ(listener()->jit_compilation_activity().cluster_name(), "cluster_0");
EXPECT_EQ(listener()->jit_compilation_activity().compile_count(), 2);
EXPECT_GT(listener()->jit_compilation_activity().compile_time_us(), 0);
EXPECT_EQ(listener()->jit_compilation_activity().cumulative_compile_time_us(),
first_compile_time +
listener()->jit_compilation_activity().compile_time_us());
}
}
}
int main(int argc, char** argv) {
tensorflow::GetMarkForCompilationPassFlags()->tf_xla_cpu_global_jit = true;
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_activity_listener.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_activity_listener_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c34f11c-bc6d-43c9-ba89-f90098acaca7 | cpp | tensorflow/tensorflow | cluster_scoping_pass | tensorflow/compiler/jit/cluster_scoping_pass.cc | tensorflow/compiler/jit/cluster_scoping_pass_test.cc | #include "tensorflow/compiler/jit/cluster_scoping_pass.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
namespace tensorflow {
namespace {
class ClusterScopingPassImpl {
public:
ClusterScopingPassImpl(Graph* graph,
OptimizerOptions::GlobalJitLevel global_jit_level)
: graph_(graph),
global_jit_level_(global_jit_level),
unique_scope_id_(0) {}
Status Run();
private:
Status ScopingForPipelineStages();
size_t GetUniqueScopeId() { return unique_scope_id_++; }
void AddScopeToAllTransitivePredecessors(Node* start);
void AddScopeToAllTransitiveSuccessors(Node* start);
private:
Graph* graph_;
OptimizerOptions::GlobalJitLevel global_jit_level_;
size_t unique_scope_id_;
};
std::optional<string> GetXlaInternalScope(Node* node) {
string scope;
if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok()) {
return scope;
}
return std::nullopt;
}
void SetXlaInternalScope(Node* node, StringPiece scope) {
node->AddAttr(kXlaInternalScopeAttr, scope);
}
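// Sets the node's kXlaInternalScopeAttr scope to `suffix` if the node has no
// scope yet; otherwise appends "&<suffix>" to the existing scope string.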
void AddOrAppendXlaInternalScope(Node* node, absl::string_view suffix) {
string updated_scope;
std::optional<string> cur_scope = GetXlaInternalScope(node);
if (cur_scope == std::nullopt) {
updated_scope = std::string(suffix);
} else {
updated_scope = absl::StrCat(cur_scope.value(), "&", suffix);
}
SetXlaInternalScope(node, updated_scope);
}
void ClusterScopingPassImpl::AddScopeToAllTransitivePredecessors(Node* start) {
const string unique_suffix = absl::StrCat("_", GetUniqueScopeId());
std::vector<Node*> starts;
starts.push_back(start);
auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); };
ReverseDFSFrom(*graph_, starts, enter, nullptr,
NodeComparatorName());
}
void ClusterScopingPassImpl::AddScopeToAllTransitiveSuccessors(Node* start) {
const string unique_suffix = absl::StrCat("_", GetUniqueScopeId());
std::vector<Node*> starts;
starts.push_back(start);
auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); };
DFSFrom(*graph_, starts, enter, nullptr,
NodeComparatorName(),
nullptr);
}
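// Gives a fresh, unique scope suffix to all transitive successors of every
// "Unstage" node and to all transitive predecessors of every "Stage" node, so
// the producer and consumer sides of a staging boundary are kept in separate
// clusters.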
Status ClusterScopingPassImpl::ScopingForPipelineStages() {
for (Node* n : graph_->nodes()) {
DCHECK(n);
if (n->type_string() == "Unstage") {
AddScopeToAllTransitiveSuccessors(n);
}
if (n->type_string() == "Stage") {
AddScopeToAllTransitivePredecessors(n);
}
}
return absl::OkStatus();
}
Status ClusterScopingPassImpl::Run() {
if (global_jit_level_ == OptimizerOptions::OFF) {
return absl::OkStatus();
}
return ScopingForPipelineStages();
}
}
Status ClusterScopingPass::Run(const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
return ClusterScopingPassImpl{graph, GetGlobalJitLevelForGraph(options)}
.Run();
}
} | #include "tensorflow/compiler/jit/cluster_scoping_pass.h"
#include "absl/container/flat_hash_map.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
Status ClusterScoping(std::unique_ptr<Graph>* graph) {
FixupSourceAndSinkEdges(graph->get());
GraphOptimizationPassWrapper wrapper;
wrapper.session_options.config.mutable_graph_options()
->mutable_optimizer_options()
->set_global_jit_level(OptimizerOptions::ON_2);
GraphOptimizationPassOptions opt_options =
wrapper.CreateGraphOptimizationPassOptions(graph);
ClusterScopingPass pass;
return pass.Run(opt_options);
}
absl::flat_hash_map<string, string> GetXlaInternalScopes(const Graph& graph) {
absl::flat_hash_map<string, string> scopes;
for (Node* node : graph.nodes()) {
string scope;
if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok()) {
scopes[node->name()] = scope;
}
}
if (VLOG_IS_ON(2)) {
VLOG(2) << "_XlaInternalScopes:";
for (const auto& p : scopes) {
VLOG(2) << " " << p.first << " -> " << p.second;
}
}
return scopes;
}
Node* BuildStageNode(GraphDefBuilder& builder, string name,
std::initializer_list<DataType> dtypes,
absl::Span<const ops::NodeOut> values) {
auto opts = builder.opts()
.WithName(std::move(name))
.WithAttr("dtypes", std::move(dtypes));
if (opts.HaveError()) {
return nullptr;
}
NodeBuilder node_builder(name, "Stage", opts.op_registry());
node_builder.Input(values);
return opts.FinalizeBuilder(&node_builder);
}
TEST(XlaCompilationTest, StagePipelinePreserved) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0"));
Node* add1 =
ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
auto scopes = GetXlaInternalScopes(*graph);
EXPECT_NE(scopes["add0"], scopes["add1"]);
EXPECT_EQ(scopes["add0"], scopes["relu0"]);
EXPECT_EQ(scopes["add1"], scopes["relu1"]);
}
TEST(XlaCompilationTest, StagePipelinePreservedAndInitialScopesRespected) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* a = ops::SourceOp("Const", builder.opts()
.WithName("a")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* b = ops::SourceOp("Const", builder.opts()
.WithName("b")
.WithAttr("dtype", DT_FLOAT)
.WithAttr("value", Tensor()));
Node* unstage = ops::SourceOp(
"Unstage",
builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT}));
Node* add0 = ops::BinaryOp("Add", a, b,
builder.opts().WithName("add0").WithAttr(
kXlaInternalScopeAttr, "ClusterA"));
Node* add1 = ops::BinaryOp("Add", unstage, b,
builder.opts().WithName("add1").WithAttr(
kXlaInternalScopeAttr, "ClusterA"));
Node* relu0 = ops::UnaryOp("Relu", add0,
builder.opts().WithName("relu0").WithAttr(
kXlaInternalScopeAttr, "ClusterB"));
ops::UnaryOp("Relu", add1,
builder.opts().WithName("relu1").WithAttr(
kXlaInternalScopeAttr, "ClusterD"));
BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0});
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(ClusterScoping(&graph));
auto scopes = GetXlaInternalScopes(*graph);
EXPECT_NE(scopes["add0"], scopes["add1"]);
EXPECT_NE(scopes["add0"], scopes["relu0"]);
EXPECT_NE(scopes["add1"], scopes["relu1"]);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/cluster_scoping_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/cluster_scoping_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e0baf845-6b83-47b0-abf7-41d684998609 | cpp | tensorflow/tensorflow | pjrt_tensor_buffer_util | tensorflow/compiler/jit/pjrt_tensor_buffer_util.cc | tensorflow/compiler/jit/pjrt_tensor_buffer_util_test.cc | #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include <cstddef>
#include <memory>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/compiler/jit/pjrt_tensor_buffer.h"
#include "xla/pjrt/pjrt_client.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
static size_t GetTensorSize(const TensorShape& shape, const DataType dtype) {
return shape.num_elements() * DataTypeSize(dtype);
}
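// Wraps a PjRtBuffer in a tensorflow::Tensor without copying: the returned
// tensor is backed by a PjRtTensorBuffer that points at the buffer's device
// memory and takes ownership of the PjRtBuffer.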
absl::StatusOr<Tensor> MakeTensorFromPjRtBuffer(
const DataType dtype, const TensorShape& shape,
std::unique_ptr<xla::PjRtBuffer> pjrt_buffer) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtBuffer::ExternalReference> ref,
pjrt_buffer->AcquireExternalReference());
auto* tensor_buffer =
new PjRtTensorBuffer(ref->OpaqueDeviceMemoryDataPointer(),
GetTensorSize(shape, dtype), std::move(pjrt_buffer));
Tensor result(dtype, shape, tensor_buffer);
tensor_buffer->Unref();
return result;
}
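// The existing output tensor is reused only when it is not already backed by
// a PjRtTensorBuffer and its data pointer and byte size match the new device
// buffer.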
static bool ShouldReuseTensor(void* opaque_device_memory,
const size_t expected_size,
const Tensor* existing_tensor) {
const PjRtTensorBuffer* input_pjrt_tensor_buffer =
dynamic_cast<const PjRtTensorBuffer*>(DMAHelper::buffer(existing_tensor));
if (input_pjrt_tensor_buffer != nullptr) {
return false;
}
const size_t current_size =
GetTensorSize(existing_tensor->shape(), existing_tensor->dtype());
return existing_tensor->tensor_data().data() == opaque_device_memory &&
current_size == expected_size;
}
absl::Status PjRtTensorBufferUtil::UpdateOrMakeTensorWithPjRtBuffer(
const DataType dtype, const TensorShape& shape,
std::unique_ptr<xla::PjRtBuffer> pjrt_buffer, Tensor* output_tensor) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtBuffer::ExternalReference> ref,
pjrt_buffer->AcquireExternalReference());
const size_t expected_size = GetTensorSize(shape, dtype);
void* opaque_device_memory = ref->OpaqueDeviceMemoryDataPointer();
auto* tensor_buffer = new PjRtTensorBuffer(
opaque_device_memory, expected_size, std::move(pjrt_buffer));
if (ShouldReuseTensor(opaque_device_memory, expected_size, output_tensor)) {
output_tensor->buf_ = tensor_buffer;
return absl::OkStatus();
}
Tensor result(dtype, shape, tensor_buffer);
tensor_buffer->Unref();
*output_tensor = result;
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/test_util.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
TEST(PjRtTensorBufferUtilTest, MakeTensorFromPjRtBuffer) {
DeviceSetup device_setup;
device_setup.AddDevicesAndSetUp({DEVICE_GPU});
Device* device = device_setup.GetDevice(DEVICE_GPU);
std::vector<int64_t> dimensions = {2, 3};
Tensor dest_cpu_tensor(cpu_allocator(), tensorflow::DT_INT32,
tensorflow::TensorShape(dimensions));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetPjRtClient(DEVICE_GPU));
std::vector<int32_t> data{1, 2, 3, 4, 5, 6};
xla::Shape xla_shape = xla::ShapeUtil::MakeShape(xla::S32, dimensions);
TF_ASSERT_OK_AND_ASSIGN(
auto pjrt_buffer,
pjrt_client->BufferFromHostBuffer(
data.data(), xla_shape.element_type(), xla_shape.dimensions(),
std::nullopt,
xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall,
nullptr, pjrt_client->addressable_devices()[0]));
TF_ASSERT_OK_AND_ASSIGN(
Tensor tensor, MakeTensorFromPjRtBuffer(DT_INT32, TensorShape(dimensions),
std::move(pjrt_buffer)));
auto s = device->tensorflow_accelerator_device_info()
->pjrt_context->CopyDeviceTensorToCPUSync(&tensor, "", device,
&dest_cpu_tensor);
for (int i = 0; i < tensor.NumElements(); ++i) {
EXPECT_EQ(dest_cpu_tensor.flat<int32_t>().data()[i], data[i]);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/pjrt_tensor_buffer_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/pjrt_tensor_buffer_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b7d6da6e-3bdf-40bb-8cec-56ffbc861a17 | cpp | tensorflow/tensorflow | partially_decluster_pass | tensorflow/compiler/jit/partially_decluster_pass.cc | tensorflow/compiler/jit/partially_decluster_pass_test.cc | #include "tensorflow/compiler/jit/partially_decluster_pass.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/const_analysis.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
namespace {
bool NotBackedge(const Edge& edge) { return !edge.src()->IsNextIteration(); }
namespace reduce_device_to_host_copies {
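// Collects clustered nodes that produce a host-memory output consumed in host
// memory across the cluster boundary. Duplicating such nodes outside the
// cluster avoids an extra device-to-host copy of the value.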
Status FindNodesToDecluster(const Graph& graph,
absl::flat_hash_set<Node*>* result,
absl::Span<Node* const> post_order) {
MemoryTypeVector input_mtypes, output_mtypes;
for (Node* n : post_order) {
std::optional<absl::string_view> from_cluster = GetXlaClusterForNode(*n);
if (!from_cluster) {
continue;
}
if (IsShapeConsumerOp(*n)) {
continue;
}
if (HasResourceInputOrOutput(*n)) {
continue;
}
DeviceType device_type("");
TF_RETURN_IF_ERROR(
DeviceNameToDeviceType(n->assigned_device_name(), &device_type));
TF_RETURN_IF_ERROR(MemoryTypesForNode(graph.op_registry(), device_type,
n->def(), &input_mtypes,
&output_mtypes));
for (const Edge* e : n->out_edges()) {
Node* dst = e->dst();
if (e->IsControlEdge()) {
continue;
}
bool edge_incurs_extra_device_to_host_copy;
if (output_mtypes[e->src_output()] == DEVICE_MEMORY) {
edge_incurs_extra_device_to_host_copy = false;
} else {
MemoryTypeVector dst_input_mtypes, dst_output_mtypes;
DeviceType dst_device_type("");
TF_RETURN_IF_ERROR(DeviceNameToDeviceType(dst->assigned_device_name(),
&dst_device_type));
TF_RETURN_IF_ERROR(MemoryTypesForNode(graph.op_registry(), device_type,
dst->def(), &dst_input_mtypes,
&dst_output_mtypes));
edge_incurs_extra_device_to_host_copy =
dst_input_mtypes[e->dst_input()] == HOST_MEMORY;
}
if (!edge_incurs_extra_device_to_host_copy) {
continue;
}
std::optional<absl::string_view> dst_cluster =
result->count(dst) ? std::nullopt : GetXlaClusterForNode(*dst);
if (from_cluster != dst_cluster) {
CHECK(result->insert(n).second);
break;
}
}
}
return absl::OkStatus();
}
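// Clones `n` outside its cluster under the name "<n>/declustered", copies all
// of `n`'s input edges to the clone, moves the output edges that leave the
// cluster over to the clone, and removes the original node if it no longer
// has any output edges.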
Status PartiallyDeclusterNode(Graph* graph, Node* n) {
absl::string_view cluster_name = *GetXlaClusterForNode(*n);
absl::InlinedVector<const Edge*, 6> out_edges_to_clone;
for (const Edge* out_edge : n->out_edges()) {
if (out_edge->IsControlEdge()) {
continue;
}
Node* dst = out_edge->dst();
std::optional<absl::string_view> dst_cluster_name =
GetXlaClusterForNode(*dst);
if (dst_cluster_name != cluster_name) {
out_edges_to_clone.push_back(out_edge);
}
}
CHECK(!out_edges_to_clone.empty()) << n->DebugString();
NodeDef ndef = n->def();
ndef.set_name(absl::StrCat(n->name(), "/declustered"));
MergeDebugInfo(NodeDebugInfo(n->def()), &ndef);
RemoveFromXlaCluster(&ndef);
TF_ASSIGN_OR_RETURN(Node * cloned_node, graph->AddNode(ndef));
cloned_node->set_assigned_device_name(n->assigned_device_name());
for (const Edge* in_edge : n->in_edges()) {
graph->AddEdge(in_edge->src(), in_edge->src_output(), cloned_node,
in_edge->dst_input());
}
for (const Edge* out_edge_to_clone : out_edges_to_clone) {
graph->AddEdge(cloned_node, out_edge_to_clone->src_output(),
out_edge_to_clone->dst(), out_edge_to_clone->dst_input());
graph->RemoveEdge(out_edge_to_clone);
}
if (n->out_edges().empty()) {
graph->RemoveNode(n);
}
return absl::OkStatus();
}
Status PartiallyDeclusterGraph(Graph* graph) {
std::vector<Node*> post_order;
GetPostOrder(*graph, &post_order, NodeComparatorName(),
NotBackedge);
absl::flat_hash_set<Node*> nodes_to_partially_decluster;
TF_RETURN_IF_ERROR(
FindNodesToDecluster(*graph, &nodes_to_partially_decluster, post_order));
if (VLOG_IS_ON(3)) {
for (Node* n : post_order) {
if (nodes_to_partially_decluster.count(n)) {
VLOG(3) << n->DebugString();
}
}
}
for (Node* n : post_order) {
if (nodes_to_partially_decluster.count(n)) {
TF_RETURN_IF_ERROR(PartiallyDeclusterNode(graph, n));
}
}
post_order.clear();
GetPostOrder(*graph, &post_order, NodeComparatorName(),
NotBackedge);
nodes_to_partially_decluster.clear();
TF_RETURN_IF_ERROR(
FindNodesToDecluster(*graph, &nodes_to_partially_decluster, post_order));
CHECK(nodes_to_partially_decluster.empty());
return absl::OkStatus();
}
}
namespace reduce_recompilation {
bool IsIntraClusterEdge(const Edge& edge) {
std::optional<absl::string_view> src_cluster_name =
GetXlaClusterForNode(*edge.src());
std::optional<absl::string_view> dst_cluster_name =
GetXlaClusterForNode(*edge.dst());
return src_cluster_name.has_value() && src_cluster_name == dst_cluster_name;
}
bool IsMustCompileDevice(const DeviceType& device_type) {
const XlaOpRegistry::DeviceRegistration* registration;
if (XlaOpRegistry::GetCompilationDevice(device_type.type(), ®istration)) {
return registration->autoclustering_policy ==
XlaOpRegistry::AutoclusteringPolicy::kAlways;
}
return false;
}
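// A node must be compiled with XLA if its device unconditionally requires
// autoclustering, or if TensorFlow has no kernel registered for the node on
// that device type.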
Status MustCompileNode(const Node* n, bool* must_compile) {
DeviceType device_type("");
TF_RETURN_IF_ERROR(
DeviceNameToDeviceType(n->assigned_device_name(), &device_type));
if (IsMustCompileDevice(device_type)) {
*must_compile = true;
return absl::OkStatus();
}
*must_compile = !FindKernelDef(device_type, n->def(), nullptr, nullptr).ok();
return absl::OkStatus();
}
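// Declusters nodes on a cluster's input boundary whose values the cluster
// must treat as compile-time constants, provided they are not must-compile;
// this reduces how often the cluster has to be recompiled for new constant
// values. Const nodes are only declustered when they have a cross-device
// control input.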
Status PartiallyDeclusterGraph(Graph* graph,
const FunctionLibraryDefinition* flib_def,
Env* env) {
std::vector<bool> compile_time_const_nodes(graph->num_node_ids());
OptimizerOptions opts;
auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>(
nullptr, env, nullptr, TF_GRAPH_DEF_VERSION, flib_def, opts);
FunctionLibraryRuntime* lib_runtime =
pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice);
TF_RETURN_IF_ERROR(BackwardsConstAnalysis(*graph, nullptr,
&compile_time_const_nodes,
lib_runtime, IsIntraClusterEdge));
std::vector<Node*> rpo;
GetReversePostOrder(*graph, &rpo, NodeComparatorName(),
NotBackedge);
for (Node* n : rpo) {
if (!compile_time_const_nodes[n->id()]) {
continue;
}
absl::string_view cluster_name = *GetXlaClusterForNode(*n);
bool node_on_cluster_edge =
absl::c_all_of(n->in_edges(), [&](const Edge* e) {
std::optional<absl::string_view> incoming_cluster =
GetXlaClusterForNode(*e->src());
return !incoming_cluster || *incoming_cluster != cluster_name;
});
if (node_on_cluster_edge) {
bool must_compile_node;
TF_RETURN_IF_ERROR(MustCompileNode(n, &must_compile_node));
if (!must_compile_node) {
if (n->IsConstant()) {
for (auto it : n->in_edges()) {
if (!it->src()->assigned_device_name().empty() &&
it->src()->assigned_device_name() !=
n->assigned_device_name()) {
VLOG(3) << "Declustering Const with cross-device control input "
<< n->name();
RemoveFromXlaCluster(n);
break;
}
}
} else {
VLOG(3) << "Declustering must-be-constant node " << n->name();
RemoveFromXlaCluster(n);
}
}
}
}
return absl::OkStatus();
}
}
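// Sub-pass that removes "root" shape-consumer ops (those with no inputs coming
// from the same cluster) from their cluster.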
namespace decluster_root_shape_consumers {
Status PartiallyDeclusterGraph(Graph* graph) {
std::vector<Node*> reverse_post_order;
GetReversePostOrder(*graph, &reverse_post_order,
NodeComparatorName(),
NotBackedge);
for (Node* n : reverse_post_order) {
if (!IsShapeConsumerOp(*n)) {
continue;
}
std::optional<absl::string_view> cluster = GetXlaClusterForNode(*n);
if (!cluster.has_value()) {
continue;
}
auto input_belongs_to_same_cluster = [&](const Edge* e) {
return cluster == GetXlaClusterForNode(*e->src());
};
if (absl::c_any_of(n->in_edges(), input_belongs_to_same_cluster)) {
continue;
}
VLOG(2) << "Declustering " << n->name()
<< " because it is a root shape consumer";
RemoveFromXlaCluster(n);
}
return absl::OkStatus();
}
}
}
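// Entry point: runs the three declustering sub-passes above in sequence.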
Status PartiallyDeclusterPass::Run(
const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
TF_RETURN_IF_ERROR(
reduce_device_to_host_copies::PartiallyDeclusterGraph(graph));
if (options.flib_def == nullptr) {
return errors::InvalidArgument(
"GraphOptimizationPassOptions::flib_def must be set for "
"PartiallyDeclusterPass.");
}
if (options.session_options == nullptr ||
options.session_options->env == nullptr) {
return errors::InvalidArgument(
"GraphOptimizationPassOptions::session_options::env must be set for "
"PartiallyDeclusterPass.");
}
TF_RETURN_IF_ERROR(reduce_recompilation::PartiallyDeclusterGraph(
graph, options.flib_def, options.session_options->env));
TF_RETURN_IF_ERROR(
decluster_root_shape_consumers::PartiallyDeclusterGraph(graph));
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/partially_decluster_pass.h"
#include "absl/memory/memory.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_def_builder_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
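// Test-only ops; the FakeBinary kernel pins host_in/host_out to host memory so
// the pass's memory-type-based decisions can be exercised.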
REGISTER_OP("FakeNullary").Output("out: int32");
REGISTER_OP("FakeBinary")
.Input("host_in: int32")
.Input("device_in: int32")
.Output("host_out: int32")
.Output("device_out: int32");
REGISTER_OP("FakeResourceVar").Output("out: resource");
REGISTER_OP("FakeResourceUpdate")
.Input("in: resource")
.Output("out: resource")
.Output("something_else: int32");
class FakeBinaryOp : public OpKernel {
public:
explicit FakeBinaryOp(OpKernelConstruction* context) : OpKernel(context) {}
void Compute(OpKernelContext* ctx) override { CHECK(false); }
};
class FakeResourceUpdateOp : public OpKernel {
public:
explicit FakeResourceUpdateOp(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override { CHECK(false); }
};
REGISTER_KERNEL_BUILDER(Name("FakeBinary")
.Device(DEVICE_CPU)
.HostMemory("host_in")
.HostMemory("host_out"),
FakeBinaryOp);
REGISTER_KERNEL_BUILDER(
Name("FakeResourceUpdate").Device(DEVICE_CPU).HostMemory("something_else"),
FakeResourceUpdateOp);
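// Assigns a CPU device to any node without one and runs PartiallyDeclusterPass.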
Status PartiallyDecluster(std::unique_ptr<Graph>* graph) {
FixupSourceAndSinkEdges(graph->get());
static const char* kCpuDevice = "/job:localhost/replica:0/task:0/cpu:0";
for (Node* n : (*graph)->nodes()) {
if (n->assigned_device_name().empty()) {
n->set_assigned_device_name(kCpuDevice);
}
}
GraphOptimizationPassWrapper wrapper;
GraphOptimizationPassOptions opt_options =
wrapper.CreateGraphOptimizationPassOptions(graph);
PartiallyDeclusterPass pass;
return pass.Run(opt_options);
}
Node* FindNodeByName(const Graph& graph, const string& name) {
for (Node* node : graph.nodes()) {
if (node->name() == name) {
return node;
}
}
return nullptr;
}
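// Collects the name-sorted source nodes of every in-edge of `node_name`;
// returns false if no node with that name exists.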
bool GetInputsForNode(const Graph& graph, const string& node_name,
std::vector<Node*>* inputs) {
const Node* node = FindNodeByName(graph, node_name);
if (node == nullptr) {
return false;
}
for (const Edge* e : node->in_edges()) {
inputs->push_back(e->src());
}
std::sort(inputs->begin(), inputs->end(), NodeComparatorName());
return true;
}
TEST(PartiallyDeclusterPassTest, ClusteredAndUnclustered) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer"));
ops::BinaryOp("FakeBinary", clustered_producer, input,
builder.opts().WithName("UnclusteredConsumer"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", {clustered_producer, 1}, input,
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> unclustered_consumer_inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "UnclusteredConsumer",
&unclustered_consumer_inputs));
ASSERT_EQ(unclustered_consumer_inputs.size(), 2);
EXPECT_EQ(unclustered_consumer_inputs[0]->name(),
"ClusteredProducer/declustered");
EXPECT_EQ(unclustered_consumer_inputs[1]->name(), "Input");
std::vector<Node*> clustered_consumer_inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ClusteredConsumer",
&clustered_consumer_inputs));
ASSERT_EQ(clustered_consumer_inputs.size(), 2);
EXPECT_EQ(clustered_consumer_inputs[0]->name(), "ClusteredProducer");
EXPECT_EQ(clustered_consumer_inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DifferentClusters) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer"));
Node* consumer_in_different_cluster =
ops::BinaryOp("FakeBinary", clustered_producer, input,
builder.opts().WithName("ConsumerInDifferentCluster"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", input, {clustered_producer, 1},
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs));
ASSERT_EQ(inputs.size(), 2);
EXPECT_EQ(inputs[0]->name(), "ClusteredProducer/declustered");
EXPECT_EQ(inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DontDeclusterIfUserIsDeviceMem) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer"));
Node* consumer_in_different_cluster =
ops::BinaryOp("FakeBinary", input, clustered_producer,
builder.opts().WithName("ConsumerInDifferentCluster"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", input, {clustered_producer, 1},
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs));
ASSERT_EQ(inputs.size(), 2);
EXPECT_EQ(inputs[0]->name(), "ClusteredProducer");
EXPECT_EQ(inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DontDuplicateResourceVarOps) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* resource_var = ops::SourceOp("FakeResourceVar",
builder.opts().WithName("ResourceVar"));
Node* clustered_producer =
ops::UnaryOp("FakeResourceUpdate", resource_var,
builder.opts().WithName("ClusteredProducer"));
Node* consumer_in_different_cluster =
ops::BinaryOp("FakeBinary", {clustered_producer, 1}, input,
builder.opts().WithName("ConsumerInDifferentCluster"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", input, {clustered_producer, 1},
builder.opts().WithName("ClusteredConsumer"));
clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs));
ASSERT_EQ(inputs.size(), 2);
EXPECT_EQ(inputs[0]->name(), "ClusteredProducer");
EXPECT_EQ(inputs[1]->name(), "Input");
}
TEST(PartiallyDeclusterPassTest, DeclusterDependentNodes) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer_0 =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName("ClusteredProducer0"));
Node* clustered_producer_1 =
ops::BinaryOp("FakeBinary", clustered_producer_0, input,
builder.opts().WithName("ClusteredProducer1"));
ops::BinaryOp("FakeBinary", clustered_producer_1, input,
builder.opts().WithName("UnclusteredConsumer"));
Node* clustered_consumer =
ops::BinaryOp("FakeBinary", {clustered_producer_1, 1}, input,
builder.opts().WithName("ClusteredConsumer"));
clustered_producer_0->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_producer_1->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
std::vector<Node*> unclustered_consumer_inputs, declustered_producer_1_inputs;
ASSERT_TRUE(GetInputsForNode(*graph, "UnclusteredConsumer",
&unclustered_consumer_inputs));
ASSERT_EQ(unclustered_consumer_inputs.size(), 2);
EXPECT_EQ(unclustered_consumer_inputs[0]->name(),
"ClusteredProducer1/declustered");
EXPECT_EQ(unclustered_consumer_inputs[1]->name(), "Input");
ASSERT_TRUE(GetInputsForNode(*graph, "ClusteredProducer1/declustered",
&declustered_producer_1_inputs));
ASSERT_EQ(declustered_producer_1_inputs.size(), 2);
EXPECT_EQ(declustered_producer_1_inputs[0]->name(),
"ClusteredProducer0/declustered");
EXPECT_EQ(declustered_producer_1_inputs[1]->name(), "Input");
}
void AddToCluster(absl::Span<Node* const> nodes,
absl::string_view cluster_name) {
for (Node* n : nodes) {
n->AddAttr(kXlaClusterAttr, string(cluster_name));
}
}
TEST(PartiallyDeclusterPassTest, DeclusterMustBeConstantNodes) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({shape.node(), reshape.node()}, "cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
const Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n), std::nullopt);
}
TEST(PartiallyDeclusterPassTest, DeclusteringStopsAtMetadataOps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output input_a = ops::Placeholder(s.WithOpName("input_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output input_b = ops::Placeholder(s.WithOpName("shape_b"), DT_FLOAT,
ops::Placeholder::Attrs{});
Output mul = ops::Mul(s.WithOpName("mul"), input_b, input_b);
Output shape_of_mul = ops::Shape(s.WithOpName("shape_of_mul"), mul);
Output shape = ops::Add(s.WithOpName("shape"), shape_of_mul, input_a);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({mul.node(), shape_of_mul.node(), shape.node(), reshape.node()},
"cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
const Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_0");
}
TEST(PartiallyDeclusterPassTest, EdgeAcrossDifferentClusters) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({reshape.node()}, "cluster_0");
AddToCluster({shape.node()}, "cluster_1");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
const Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_1");
}
TEST(PartiallyDeclusterPassTest, DontDeclusterXlaDeviceOps) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32,
ops::Placeholder::Attrs{});
Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b);
Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"),
DT_FLOAT, ops::Placeholder::Attrs{});
Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape);
AddToCluster({shape.node(), reshape.node()}, "cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(s.ToGraph(graph.get()));
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddDevices(
SessionOptions(), "/job:localhost/replica:0/task:0", &devices));
Node* n = FindNodeByName(*graph, "shape");
ASSERT_NE(n, nullptr);
n->set_assigned_device_name(
"/job:localhost/replica:0/task:0/device:XLA_GPU:0");
TF_ASSERT_OK(PartiallyDecluster(&graph));
EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_0");
}
TEST(PartiallyDeclusterPassTest, EliminatedUnusedNodes) {
const char* const kClusteredProducer0Name = "ClusteredProducer0";
const char* const kClusteredProducer1Name = "ClusteredProducer1";
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
{
GraphDefBuilder builder(GraphDefBuilder::kFailImmediately);
Node* input =
ops::SourceOp("FakeNullary", builder.opts().WithName("Input"));
Node* clustered_producer_0 =
ops::BinaryOp("FakeBinary", input, input,
builder.opts().WithName(kClusteredProducer0Name));
Node* clustered_producer_1 =
ops::BinaryOp("FakeBinary", clustered_producer_0, input,
builder.opts().WithName(kClusteredProducer1Name));
ops::BinaryOp("FakeBinary", clustered_producer_1, input,
builder.opts().WithName("UnclusteredConsumer"));
clustered_producer_0->AddAttr(kXlaClusterAttr, "cluster_0");
clustered_producer_1->AddAttr(kXlaClusterAttr, "cluster_0");
TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get()));
}
TF_ASSERT_OK(PartiallyDecluster(&graph));
EXPECT_EQ(FindNodeByName(*graph, kClusteredProducer0Name), nullptr);
EXPECT_EQ(FindNodeByName(*graph, kClusteredProducer1Name), nullptr);
}
TEST(PartiallyDeclusterPassTest, MetadataOpsDontStartClusters) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output b = ops::Shape(in_cluster_and.WithOpName("b"), a);
Output c = ops::Rank(in_cluster_and.WithOpName("c"), b);
Output d = ops::Size(in_cluster_and.WithOpName("d"), c);
(void)ops::Shape(in_cluster_and.WithOpName("e"), d);
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), std::nullopt);
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), std::nullopt);
Node* n_d = FindNodeByName(*graph, "d");
ASSERT_NE(n_d, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_d), std::nullopt);
Node* n_e = FindNodeByName(*graph, "e");
ASSERT_NE(n_e, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_e), std::nullopt);
}
TEST(PartiallyDeclusterPassTest, MetaConsumersArentDeclustered) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output b = ops::Add(in_cluster_and.WithOpName("b"), a, a);
Output c = ops::Rank(in_cluster_and.WithOpName("c"), b);
Output e;
TF_ASSERT_OK(
CreateOutputWithScope("FakeBinary", {c, c}, root.WithOpName("e"), &e));
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), "cluster_0");
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0");
}
TEST(PartiallyDeclusterPassTest, ConstInputsToSliceArentDeclustered) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Attrs{{4}});
Output b = ops::Const(in_cluster_and.WithOpName("b"), {1});
Output c = ops::Const(in_cluster_and.WithOpName("c"), {2});
Output d = ops::Slice(in_cluster_and.WithOpName("d"), a, b, c);
TF_ASSERT_OK(root.ToGraph(graph.get()));
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), "cluster_0");
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0");
}
TEST(PartiallyDeclusterPassTest,
ConstInLoopWithCrossDeviceControlInputsAreDeclustered) {
tensorflow::Scope root = tensorflow::Scope::NewRootScope();
tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0");
auto graph = std::make_unique<Graph>(OpRegistry::Global());
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Attrs{{4}});
Output b = ops::Const(in_cluster_and.WithOpName("b"), {1});
Output c = ops::Const(in_cluster_and.WithOpName("c"), {2});
Output slice = ops::Slice(in_cluster_and.WithOpName("slice"), a, b, c);
Output cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
Output loop_cond = ops::LoopCond(root.WithOpName("loop_cond"), cond);
ops::Switch switch_node(root.WithOpName("switch"), value, loop_cond);
Output identity =
ops::Identity(root.WithOpName("identity"), switch_node.output_true);
root.graph()->AddControlEdge(identity.node(), b.node());
TF_ASSERT_OK(root.ToGraph(graph.get()));
std::vector<std::unique_ptr<Device>> devices;
TF_ASSERT_OK(DeviceFactory::AddDevices(
SessionOptions(), "/job:localhost/replica:0/task:0", &devices));
Node* identity_node = FindNodeByName(*graph, "identity");
ASSERT_NE(identity_node, nullptr);
identity_node->set_assigned_device_name(
"/job:localhost/replica:0/task:0/device:XLA_GPU:0");
TF_ASSERT_OK(PartiallyDecluster(&graph));
Node* n_b = FindNodeByName(*graph, "b");
ASSERT_NE(n_b, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_b), std::nullopt);
Node* n_c = FindNodeByName(*graph, "c");
ASSERT_NE(n_c, nullptr);
EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/partially_decluster_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/partially_decluster_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1bcee2a2-6634-4751-95af-54e5197e56ef | cpp | tensorflow/tensorflow | device_executable_persistor | tensorflow/compiler/jit/device_executable_persistor.cc | tensorflow/compiler/jit/device_executable_persistor_test.cc | #include "tensorflow/compiler/jit/device_executable_persistor.h"
#include <string>
#include "absl/strings/str_cat.h"
namespace tensorflow {
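// Maps a serialized cache key to its on-disk file name:
// "<prefix>__<signature>__<cluster>__<device>[__pjrt].pb"; the prefix and its
// separator are dropped when the prefix is empty.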
std::string XlaSerializedCacheKeyToFileName(const XlaSerializedCacheKey& key) {
static constexpr char kXlaSerializedCacheKeySeparator[] = "__";
return absl::StrCat(
key.prefix(), key.prefix().empty() ? "" : kXlaSerializedCacheKeySeparator,
key.signature_fingerprint(), kXlaSerializedCacheKeySeparator,
key.cluster_fingerprint(), kXlaSerializedCacheKeySeparator,
key.device_type(),
key.compiled_using_pjrt()
? absl::StrCat(kXlaSerializedCacheKeySeparator, "pjrt")
: "",
".pb");
}
} | #include "tensorflow/compiler/jit/device_executable_persistor.h"
#include <stdlib.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/xla_compilation_cache.pb.h"
#include "tensorflow/compiler/jit/xla_device_compiler_client.h"
#include "xla/client/client_library.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/tfrt_cpu_pjrt_client.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
namespace tensorflow {
namespace {
using ::testing::_;
using ::testing::ByMove;
using ::testing::Return;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
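// Fixture that builds a small add (or mul) XLA computation plus XLA and PjRt
// compiler clients used by the persistence tests below.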
class DeviceExecutionPersistorTest : public ::testing::Test {
protected:
void SetUp() override {
xla_compiler_client_ = std::make_unique<XlaDeviceCompilerClient>(
xla::ClientLibrary::LocalClientOrDie());
TF_ASSERT_OK(CreatePjRtCompilerClient());
XlaOpRegistry::RegisterCompilationKernels();
flib_def_ = std::make_unique<FunctionLibraryDefinition>(
OpRegistry::Global(), FunctionDefLibrary());
cache_dir_ = testing::TmpDir();
TF_ASSERT_OK_AND_ASSIGN(compilation_result_add_,
BuildSampleCompilationResult());
}
absl::StatusOr<std::unique_ptr<xla::LocalExecutable>>
BuildSampleExecutable() {
return xla_compiler_client_->BuildExecutable(DefaultXlaOptions(),
compilation_result_add_);
}
absl::StatusOr<std::unique_ptr<xla::PjRtLoadedExecutable>>
BuildSamplePjRtExecutable() {
return pjrt_compiler_client_->BuildExecutable(DefaultPjRtOptions(),
compilation_result_add_);
}
absl::StatusOr<XlaCompiler::CompilationResult> BuildSampleCompilationResult(
bool mul = false) {
std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global()));
Scope scope = Scope::NewRootScope().ExitOnError();
auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0);
auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1);
if (mul) {
auto c = ops::Mul(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
} else {
auto c = ops::Add(scope.WithOpName("C"), a, b);
auto d = ops::_Retval(scope.WithOpName("D"), c, 0);
TF_RETURN_IF_ERROR(scope.ToGraph(graph.get()));
}
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kParameter;
args[0].type = DT_INT32;
args[0].shape = TensorShape({2});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({2});
XlaCompiler compiler(DefaultXlaOptions());
XlaCompiler::CompilationResult compilation_result;
TF_RETURN_IF_ERROR(compiler.CompileGraph(XlaCompiler::CompileOptions(),
"graph", std::move(graph), args,
&compilation_result));
return compilation_result;
}
XlaCompiler::Options DefaultXlaOptions() {
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
options.client = xla_compiler_client_->client();
options.flib_def = flib_def_.get();
return options;
}
XlaCompiler::Options DefaultPjRtOptions() {
XlaCompiler::Options options;
options.device_type = DeviceType(DEVICE_CPU_XLA_JIT);
options.client = nullptr;
options.flib_def = flib_def_.get();
return options;
}
Status CreatePjRtCompilerClient() {
TF_RETURN_IF_ERROR(SetPjRtClientInTFGlobalResourceManager(
DEVICE_CPU_XLA_JIT,
xla::GetTfrtCpuClient(true, 1)
.value()));
TF_ASSIGN_OR_RETURN(auto pjrt_client,
GetOrCreatePjRtClient(DeviceType(DEVICE_CPU_XLA_JIT)));
pjrt_compiler_client_ =
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client);
return absl::OkStatus();
}
std::unique_ptr<FunctionLibraryDefinition> flib_def_;
std::unique_ptr<XlaDeviceCompilerClient> xla_compiler_client_;
std::unique_ptr<PjRtDeviceCompilerClient> pjrt_compiler_client_;
XlaCompiler::CompilationResult compilation_result_add_;
std::string serialized_xla_executable_ = "serialized_xla_executable";
std::string serialized_pjrt_executable_ = "serialized_pjrt_executable";
std::string cache_dir_;
};
class MockXlaCompilerClient : public XlaDeviceCompilerClient {
public:
MockXlaCompilerClient() : XlaDeviceCompilerClient(nullptr) {}
MOCK_METHOD(absl::StatusOr<std::string>, SerializeExecutable,
(const xla::LocalExecutable& executable), (override));
MOCK_METHOD(absl::StatusOr<std::string>, BuildSerializedExecutable,
(const XlaCompiler::Options& options,
const XlaCompiler::CompilationResult& result),
(override));
MOCK_METHOD(absl::StatusOr<std::unique_ptr<xla::LocalExecutable>>,
LoadExecutable,
(const XlaCompiler::Options& options,
const XlaCompiler::CompilationResult& result,
const std::string& serialized_executable),
(override));
};
class MockPjRtCompilerClient : public PjRtDeviceCompilerClient {
public:
MockPjRtCompilerClient() : PjRtDeviceCompilerClient(nullptr) {}
MOCK_METHOD(absl::StatusOr<std::string>, SerializeExecutable,
(const xla::PjRtLoadedExecutable& executable), (override));
MOCK_METHOD(absl::StatusOr<std::string>, BuildSerializedExecutable,
(const XlaCompiler::Options& options,
const XlaCompiler::CompilationResult& result),
(override));
MOCK_METHOD(absl::StatusOr<std::unique_ptr<xla::PjRtLoadedExecutable>>,
LoadExecutable,
(const XlaCompiler::Options& options,
const XlaCompiler::CompilationResult& result,
const std::string& serialized_executable),
(override));
};
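// Mirrors XlaSerializedCacheKeyToFileName so tests can locate the files the
// persistor writes under the persistent cache directory.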
std::string GetFilePath(XlaSerializedCacheKey key,
const std::string& persistent_cache_dir) {
static constexpr char kXlaSerializedCacheKeySeparator[] = "__";
std::string file_name = absl::StrCat(
key.prefix(), key.prefix().empty() ? "" : kXlaSerializedCacheKeySeparator,
key.signature_fingerprint(), kXlaSerializedCacheKeySeparator,
key.cluster_fingerprint(), kXlaSerializedCacheKeySeparator,
key.device_type(),
key.compiled_using_pjrt()
? absl::StrCat(kXlaSerializedCacheKeySeparator, "pjrt")
: "",
".pb");
return io::JoinPath(persistent_cache_dir, file_name);
}
absl::StatusOr<XlaSerializedCacheEntry> ReadCacheEntryFromFile(
XlaSerializedCacheKey key, const std::string& persistent_cache_dir) {
std::string file_path = GetFilePath(key, persistent_cache_dir);
XlaSerializedCacheEntry entry;
TF_RETURN_IF_ERROR(ReadTextOrBinaryProto(Env::Default(), file_path, &entry));
return entry;
}
XlaSerializedCacheKey CreateCacheKey(
uint64 signature_hash,
const XlaCompiler::CompilationResult& compilation_result,
const DeviceType& device_type, const std::string& persistence_prefix,
bool compiled_using_pjrt = false) {
XlaSerializedCacheKey key;
key.set_signature_fingerprint(signature_hash);
key.set_cluster_fingerprint(
DeterministicProtoHash64(compilation_result.computation->proto()));
key.set_device_type(device_type.type_string());
key.set_prefix(persistence_prefix);
key.set_compiled_using_pjrt(compiled_using_pjrt);
return key;
}
TEST_F(DeviceExecutionPersistorTest, PersistCacheDirNotSet) {
XlaDeviceExecutablePersistor::Config config(
"",
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
TF_EXPECT_OK(persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, *executable, &mock_client));
auto key =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
auto entry = ReadCacheEntryFromFile(key, "");
EXPECT_FALSE(entry.ok());
}
TEST_F(DeviceExecutionPersistorTest, PersistCacheDirReadOnly) {
XlaDeviceExecutablePersistor::Config config(
"cache_dir_",
false,
"xla",
true);
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
TF_EXPECT_OK(persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, *executable, &mock_client));
auto key =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
auto entry = ReadCacheEntryFromFile(key, "");
EXPECT_FALSE(entry.ok());
}
TEST_F(DeviceExecutionPersistorTest, PersistSerializeAlreadyBuiltExecutable) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
EXPECT_CALL(mock_client, SerializeExecutable(_))
.WillOnce(
Return(absl::StatusOr<std::string>(serialized_xla_executable_)));
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
TF_EXPECT_OK(persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, *executable, &mock_client));
auto key =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
TF_ASSERT_OK_AND_ASSIGN(auto entry, ReadCacheEntryFromFile(key, cache_dir_));
EXPECT_EQ(entry.executable(), serialized_xla_executable_);
}
TEST_F(DeviceExecutionPersistorTest, PersistBuildSerializedExecutable) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
EXPECT_CALL(mock_client, SerializeExecutable(_))
.WillOnce(Return(errors::Unimplemented("Unimplemented.")));
EXPECT_CALL(mock_client, BuildSerializedExecutable(_, _))
.WillOnce(Return(serialized_xla_executable_));
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
TF_EXPECT_OK(persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, *executable, &mock_client));
auto key =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
TF_ASSERT_OK_AND_ASSIGN(auto entry, ReadCacheEntryFromFile(key, cache_dir_));
EXPECT_EQ(entry.executable(), serialized_xla_executable_);
}
TEST_F(DeviceExecutionPersistorTest, PersistSerializeExecutableError) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
EXPECT_CALL(mock_client, SerializeExecutable(_))
.WillOnce(Return(errors::InvalidArgument("InvalidArgument.")));
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
EXPECT_THAT(
persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, *executable, &mock_client),
testing::StatusIs(error::INVALID_ARGUMENT));
}
TEST_F(DeviceExecutionPersistorTest, PersistExecutableEmpty) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
xla::LocalExecutable empty_executable(
nullptr, nullptr,
GetExecutableBuildOptions(DefaultXlaOptions(), compilation_result_add_,
0));
EXPECT_CALL(mock_client, SerializeExecutable(_))
.WillOnce(Return(errors::FailedPrecondition("Failed precondition.")));
EXPECT_THAT(
persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, empty_executable, &mock_client),
testing::StatusIs(error::FAILED_PRECONDITION));
}
TEST_F(DeviceExecutionPersistorTest, LoadCacheDirNotSet) {
XlaDeviceExecutablePersistor::Config config(
"",
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
auto executable = persistor.TryToLoadExecutable(
123, "signature_string", DefaultXlaOptions(), compilation_result_add_,
&mock_client);
EXPECT_FALSE(executable.has_value());
}
TEST_F(DeviceExecutionPersistorTest, LoadSuccess) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
EXPECT_CALL(mock_client, LoadExecutable(_, _, serialized_xla_executable_))
.WillOnce(Return(ByMove(std::move(executable))));
auto loaded_executable = persistor.TryToLoadExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, &mock_client);
EXPECT_TRUE(loaded_executable.has_value());
EXPECT_TRUE(loaded_executable.value().ok());
EXPECT_TRUE((*loaded_executable.value())->executable() != nullptr);
}
TEST_F(DeviceExecutionPersistorTest, LoadFileDoesntExist) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_client;
auto loaded_executable = persistor.TryToLoadExecutable(
12345, "different_signature", DefaultXlaOptions(),
compilation_result_add_, &mock_client);
EXPECT_FALSE(loaded_executable.has_value());
}
TEST_F(DeviceExecutionPersistorTest, LoadSerializedKeyMismatch) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
auto key1 =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
auto key2 =
CreateCacheKey(456, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
TF_ASSERT_OK(Env::Default()->CopyFile(
GetFilePath(key1, persistor.persistent_cache_directory()),
GetFilePath(key2, persistor.persistent_cache_directory())));
MockXlaCompilerClient mock_client;
auto loaded_executable = persistor.TryToLoadExecutable(
456, "different_signature", DefaultXlaOptions(),
compilation_result_add_, &mock_client);
EXPECT_TRUE(loaded_executable.has_value());
EXPECT_FALSE(loaded_executable->ok());
EXPECT_THAT(loaded_executable.value(),
testing::StatusIs(error::INVALID_ARGUMENT));
}
TEST_F(DeviceExecutionPersistorTest, LoadSerializedHloMismatch) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
TF_ASSERT_OK_AND_ASSIGN(auto compilation_result_mul,
BuildSampleCompilationResult(true));
auto key1 =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
auto key2 =
CreateCacheKey(123, compilation_result_mul,
persistor.device_type(), persistor.persistence_prefix());
XlaSerializedCacheEntry entry;
TF_ASSERT_OK(ReadTextOrBinaryProto(
Env::Default(), GetFilePath(key1, persistor.persistent_cache_directory()),
&entry));
*entry.mutable_key() = key2;
TF_ASSERT_OK(WriteBinaryProto(
Env::Default(), GetFilePath(key2, persistor.persistent_cache_directory()),
entry));
MockXlaCompilerClient mock_client;
auto loaded_executable = persistor.TryToLoadExecutable(
123, "signature", DefaultXlaOptions(),
compilation_result_mul, &mock_client);
EXPECT_TRUE(loaded_executable.has_value());
EXPECT_FALSE(loaded_executable->ok());
EXPECT_THAT(loaded_executable.value(),
testing::StatusIs(error::INVALID_ARGUMENT));
}
TEST_F(DeviceExecutionPersistorTest, LoadStrictChecksDisabled) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
true,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
TF_ASSERT_OK_AND_ASSIGN(auto compilation_result_mul,
BuildSampleCompilationResult(true));
auto key1 =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
auto key2 =
CreateCacheKey(123, compilation_result_mul,
persistor.device_type(), persistor.persistence_prefix());
XlaSerializedCacheEntry entry;
TF_ASSERT_OK(ReadTextOrBinaryProto(
Env::Default(), GetFilePath(key1, persistor.persistent_cache_directory()),
&entry));
*entry.mutable_key() = key2;
TF_ASSERT_OK(WriteBinaryProto(
Env::Default(), GetFilePath(key2, persistor.persistent_cache_directory()),
entry));
MockXlaCompilerClient mock_client;
TF_ASSERT_OK_AND_ASSIGN(auto executable, BuildSampleExecutable());
EXPECT_CALL(mock_client, LoadExecutable(_, _, serialized_xla_executable_))
.WillOnce(Return(ByMove(std::move(executable))));
auto loaded_executable =
persistor.TryToLoadExecutable(123, "signature", DefaultXlaOptions(),
compilation_result_mul, &mock_client);
EXPECT_TRUE(loaded_executable.has_value());
EXPECT_TRUE(loaded_executable->ok());
}
TEST_F(DeviceExecutionPersistorTest, LoadSerializedExecutableEmpty) {
XlaDeviceExecutablePersistor::Config config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor persistor(config,
DefaultXlaOptions().device_type);
auto key =
CreateCacheKey(123, compilation_result_add_,
persistor.device_type(), persistor.persistence_prefix());
XlaSerializedCacheEntry entry;
TF_ASSERT_OK(ReadTextOrBinaryProto(
Env::Default(), GetFilePath(key, persistor.persistent_cache_directory()),
&entry));
entry.clear_executable();
TF_ASSERT_OK(WriteBinaryProto(
Env::Default(), GetFilePath(key, persistor.persistent_cache_directory()),
entry));
MockXlaCompilerClient mock_client;
auto loaded_executable = persistor.TryToLoadExecutable(
123, "signature", DefaultXlaOptions(),
compilation_result_add_, &mock_client);
EXPECT_TRUE(loaded_executable.has_value());
EXPECT_FALSE(loaded_executable->ok());
EXPECT_THAT(loaded_executable.value(),
testing::StatusIs(error::INVALID_ARGUMENT));
}
TEST_F(DeviceExecutionPersistorTest, PersistPjRtAndXlaExecutables) {
PjRtDeviceExecutablePersistor::Config pjrt_config(
cache_dir_,
false,
"xla");
PjRtDeviceExecutablePersistor pjrt_persistor(
pjrt_config, DefaultPjRtOptions().device_type);
MockPjRtCompilerClient mock_pjrt_client;
EXPECT_CALL(mock_pjrt_client, SerializeExecutable(_))
.WillOnce(Return(serialized_pjrt_executable_));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_executable, BuildSamplePjRtExecutable());
TF_EXPECT_OK(pjrt_persistor.TryToPersistExecutable(
123, "signature_string", DefaultPjRtOptions(),
compilation_result_add_, *pjrt_executable, &mock_pjrt_client));
XlaDeviceExecutablePersistor::Config xla_config(
cache_dir_,
false,
"xla");
XlaDeviceExecutablePersistor xla_persistor(xla_config,
DefaultXlaOptions().device_type);
MockXlaCompilerClient mock_xla_client;
EXPECT_CALL(mock_xla_client, SerializeExecutable(_))
.WillOnce(Return(serialized_xla_executable_));
TF_ASSERT_OK_AND_ASSIGN(auto xla_executable, BuildSampleExecutable());
TF_EXPECT_OK(xla_persistor.TryToPersistExecutable(
123, "signature_string", DefaultXlaOptions(),
compilation_result_add_, *xla_executable, &mock_xla_client));
auto pjrt_key = CreateCacheKey(
123, compilation_result_add_,
pjrt_persistor.device_type(), pjrt_persistor.persistence_prefix(), true);
TF_ASSERT_OK_AND_ASSIGN(auto entry,
ReadCacheEntryFromFile(pjrt_key, cache_dir_));
EXPECT_EQ(entry.executable(), serialized_pjrt_executable_);
auto xla_key = CreateCacheKey(123, compilation_result_add_,
pjrt_persistor.device_type(),
pjrt_persistor.persistence_prefix());
TF_ASSERT_OK_AND_ASSIGN(entry, ReadCacheEntryFromFile(xla_key, cache_dir_));
EXPECT_EQ(entry.executable(), serialized_xla_executable_);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_executable_persistor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_executable_persistor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
75b6f883-e565-4650-9b2a-45a8960c2078 | cpp | tensorflow/tensorflow | node_matchers | tensorflow/compiler/jit/node_matchers.cc | tensorflow/compiler/jit/node_matchers_test.cc | #include "tensorflow/compiler/jit/node_matchers.h"
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/graph/graph_node_util.h"
namespace tensorflow {
namespace testing {
namespace matchers {
namespace {
using impl::NodeMatcherProperties;
using impl::OutEdge;
string IndentAllButFirstLine(absl::string_view text) {
std::vector<std::string> lines = absl::StrSplit(text, '\n');
for (int i = 1; i < lines.size(); i++) {
lines[i].insert(0, " ");
}
return absl::StrJoin(lines, "\n");
}
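// Element-wise comparison of two same-typed tensors; reports the first
// mismatching index to the listener.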
template <typename T>
bool CompareTensor(const Tensor& actual, const Tensor& expected,
::testing::MatchResultListener* listener) {
if (actual.NumElements() != expected.NumElements()) {
if (listener->IsInterested()) {
*listener << "\nwas looking for tensor with " << expected.NumElements()
<< " elements, found tensor with " << actual.NumElements()
<< " elements";
return false;
}
}
for (int64_t i = 0, e = actual.NumElements(); i < e; i++) {
if (actual.flat<T>()(i) != expected.flat<T>()(i)) {
*listener << "\nmismatch in constant tensor at index " << i
<< " expected = " << expected.flat<T>()(i)
<< " actual = " << actual.flat<T>()(i);
return false;
}
}
return true;
}
bool MatchAndExplainTensor(const Tensor& tensor, const Tensor& expected_tensor,
::testing::MatchResultListener* listener) {
if (tensor.dtype() != expected_tensor.dtype()) {
if (listener->IsInterested()) {
*listener << "\nexpected tensor of type "
<< DataType_Name(expected_tensor.dtype())
<< " but found one of type " << DataType_Name(tensor.dtype());
return false;
}
}
switch (tensor.dtype()) {
case DT_HALF:
return CompareTensor<Eigen::half>(tensor, expected_tensor, listener);
case DT_FLOAT:
return CompareTensor<float>(tensor, expected_tensor, listener);
case DT_DOUBLE:
return CompareTensor<double>(tensor, expected_tensor, listener);
case DT_INT8:
return CompareTensor<int8>(tensor, expected_tensor, listener);
case DT_INT16:
return CompareTensor<int16>(tensor, expected_tensor, listener);
case DT_INT32:
return CompareTensor<int32>(tensor, expected_tensor, listener);
case DT_INT64:
return CompareTensor<int64_t>(tensor, expected_tensor, listener);
case DT_UINT8:
return CompareTensor<uint8>(tensor, expected_tensor, listener);
case DT_UINT16:
return CompareTensor<uint16>(tensor, expected_tensor, listener);
case DT_UINT32:
return CompareTensor<uint32>(tensor, expected_tensor, listener);
case DT_UINT64:
return CompareTensor<uint64>(tensor, expected_tensor, listener);
default:
LOG(FATAL) << "Unsupported dtype "
<< DataType_Name(tensor.dtype());
}
}
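// Matcher over Node* that checks whichever of op, name, assigned device,
// constant value, inputs, control dependencies and attributes were supplied.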
struct NodeMatcher : public ::testing::MatcherInterface<const Node*> {
bool MatchAndExplain(
const Node* node,
::testing::MatchResultListener* listener) const override {
if (op && node->type_string() != *op) {
if (listener->IsInterested()) {
*listener << "\nexpected op " << *op << " but found "
<< node->type_string();
}
return false;
}
if (assigned_device && node->assigned_device_name() != *assigned_device) {
if (listener->IsInterested()) {
*listener << "\nexpected assigned_device " << *assigned_device
<< " but found \"" << node->assigned_device_name() << "\"";
}
return false;
}
if (name && node->name() != *name) {
if (listener->IsInterested()) {
*listener << "\nexpected name " << *name << " but found "
<< node->name();
}
return false;
}
if (constant_value) {
const TensorProto* proto = nullptr;
if (!TryGetNodeAttr(node->def(), "value", &proto)) {
if (listener->IsInterested()) {
*listener << "\ncould not find \"value\" attribute in node";
}
return false;
}
Tensor tensor(proto->dtype());
if (!tensor.FromProto(*proto)) {
if (listener->IsInterested()) {
*listener << "\ncould not convert TensorProto in \"value\" attribute "
"to Tensor";
}
return false;
}
if (!MatchAndExplainTensor(tensor,
*constant_value,
listener)) {
return false;
}
}
if (input_matchers) {
if (input_matchers->size() != node->num_inputs()) {
if (listener->IsInterested()) {
*listener << "\nexpected " << input_matchers->size()
<< " inputs but node has " << node->num_inputs();
}
return false;
}
for (int input_idx = 0, e = input_matchers->size(); input_idx < e;
input_idx++) {
if (!MatchAndExplainInput(node, input_idx, listener)) {
return false;
}
}
}
std::vector<const Node*> control_deps;
for (const Edge* e : node->in_edges()) {
if (e->IsControlEdge()) {
control_deps.push_back(e->src());
}
}
::testing::StringMatchResultListener inner_listener;
if (control_dep_set &&
!control_dep_set->MatchAndExplain(control_deps, &inner_listener)) {
if (listener->IsInterested()) {
string explanation = inner_listener.str();
if (!explanation.empty()) {
explanation = absl::StrCat(", ", explanation, ",");
}
*listener << "ctrl_deps" << explanation << " does not match expected: ";
control_dep_set->DescribeTo(listener->stream());
}
return false;
}
const AttrValueMap attr_value_map = node->def().attr();
for (const auto& attr_kv_pair : attrs) {
auto it = attr_value_map.find(attr_kv_pair.first);
if (it == attr_value_map.end()) {
if (listener->IsInterested()) {
*listener << "did not find attribute named \"" << attr_kv_pair.first
<< "\" in node";
}
return false;
}
if (attr_kv_pair.second &&
!AreAttrValuesEqual(it->second, *attr_kv_pair.second)) {
if (listener->IsInterested()) {
*listener << "attribute named " << attr_kv_pair.first
<< " does not match value; expected: \""
<< SummarizeAttrValue(*attr_kv_pair.second)
<< "\", found: \"" << SummarizeAttrValue(it->second)
<< "\"";
}
return false;
}
}
return true;
}
void DescribeTo(::std::ostream* os) const override {
std::vector<string> predicates;
if (name) {
predicates.push_back(absl::StrCat("name: ", *name));
}
if (op) {
predicates.push_back(absl::StrCat("op: ", *op));
}
if (assigned_device) {
predicates.push_back(absl::StrCat("assigned device: ", *assigned_device));
}
bool printed_something = !predicates.empty();
*os << absl::StrJoin(predicates, ", ");
if (constant_value) {
printed_something = true;
*os << "constant value: " << constant_value->DebugString();
}
if (input_matchers) {
if (!input_matchers->empty()) {
printed_something = true;
*os << " with " << (input_matchers->size() == 1 ? "only " : "")
<< "input" << (input_matchers->size() == 1 ? "" : "s") << " ";
}
if (input_matchers->size() == 1) {
::std::stringstream ss;
input_matchers->front().DescribeTo(&ss);
printed_something = true;
*os << "matching " << ss.str();
} else {
int edge_idx = 0;
for (const ::testing::Matcher<OutEdge>& matcher : (*input_matchers)) {
*os << "\n [" << edge_idx << "] matching (";
::std::stringstream ss;
matcher.DescribeTo(&ss);
printed_something = true;
*os << IndentAllButFirstLine(ss.str());
*os << ")";
edge_idx++;
}
}
}
if (control_dep_set) {
printed_something = true;
*os << " and control deps ";
control_dep_set->DescribeTo(os);
}
if (!attrs.empty()) {
printed_something = true;
std::vector<string> attrs_str;
absl::c_transform(
attrs, std::back_inserter(attrs_str),
[](const std::pair<string, std::optional<AttrValue>>& attr_kv_pair) {
return absl::StrCat(attr_kv_pair.first, "->",
attr_kv_pair.second
? SummarizeAttrValue(*attr_kv_pair.second)
: "*");
});
*os << " and attr values matching [" << absl::StrJoin(attrs_str, ", ")
<< "]";
}
if (!printed_something) {
*os << "is any node";
}
}
bool MatchAndExplainInput(const Node* node, int input_idx,
::testing::MatchResultListener* listener) const {
const Edge* edge;
if (!node->input_edge(input_idx, &edge).ok()) {
if (listener->IsInterested()) {
*listener << "\ncould not find incoming edge for input " << input_idx;
}
return false;
}
::testing::StringMatchResultListener inner_listener;
OutEdge input = {edge->src(), edge->src_output()};
if ((*input_matchers)[input_idx].MatchAndExplain(input, &inner_listener)) {
return true;
}
if (listener->IsInterested()) {
*listener << "\ninput " << input_idx << " does not match expected:\n";
(*input_matchers)[input_idx].DescribeTo(listener->stream());
string explanation = inner_listener.str();
if (!explanation.empty()) {
*listener << ", " << explanation;
}
}
return false;
}
std::optional<string> op;
std::optional<string> name;
std::optional<string> assigned_device;
std::optional<Tensor> constant_value;
std::optional<std::vector<::testing::Matcher<OutEdge>>> input_matchers;
std::optional<::testing::Matcher<absl::Span<const Node* const>>>
control_dep_set;
std::map<string, std::optional<AttrValue>> attrs;
};
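// Matches a (source node, output slot) pair against a node matcher and an
// expected output index.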
class OutEdgeMatcher : public ::testing::MatcherInterface<OutEdge> {
public:
OutEdgeMatcher(::testing::Matcher<const Node*> src_matcher, int src_oidx)
: src_matcher_(std::move(src_matcher)), src_oidx_(src_oidx) {}
bool MatchAndExplain(
OutEdge out_edge,
::testing::MatchResultListener* listener) const override {
::testing::StringMatchResultListener inner_listener;
if (!src_matcher_.MatchAndExplain(out_edge.first, &inner_listener)) {
if (listener->IsInterested()) {
*listener << "\nsource does not match expected ";
src_matcher_.DescribeTo(listener->stream());
string explanation = inner_listener.str();
if (!explanation.empty()) {
*listener << "\n\t" << explanation;
}
}
return false;
}
if (out_edge.second != src_oidx_) {
if (listener->IsInterested()) {
*listener << "\nexpected output slot to be " << src_oidx_
<< " but found " << out_edge.second;
}
return false;
}
return true;
}
void DescribeTo(::std::ostream* os) const override {
if (src_oidx_) {
*os << "output slot: " << src_oidx_ << ", source: (";
}
src_matcher_.DescribeTo(os);
if (src_oidx_) {
*os << ")";
}
}
private:
::testing::Matcher<const Node*> src_matcher_;
int src_oidx_;
};
}
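// Assembles a NodeMatcher from the supplied properties; each property may be
// set at most once (enforced by the DCHECKs).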
::testing::Matcher<const Node*> impl::NodeWith(
absl::Span<const NodeMatcherProperties> props) {
NodeMatcher* matcher = new NodeMatcher();
for (const NodeMatcherProperties& prop : props) {
if (prop.name()) {
DCHECK(!matcher->name);
matcher->name = prop.name();
}
if (prop.op()) {
DCHECK(!matcher->op);
matcher->op = prop.op();
}
if (prop.constant_value()) {
DCHECK(!matcher->constant_value);
matcher->constant_value = prop.constant_value();
}
if (prop.assigned_device()) {
DCHECK(!matcher->assigned_device);
matcher->assigned_device = prop.assigned_device();
}
if (prop.inputs()) {
DCHECK(!matcher->input_matchers);
matcher->input_matchers = *prop.inputs();
}
if (prop.control_deps()) {
DCHECK(!matcher->control_dep_set);
matcher->control_dep_set =
::testing::UnorderedElementsAreArray(*prop.control_deps());
}
if (prop.attr()) {
auto insert_result = matcher->attrs.insert(*prop.attr());
DCHECK(insert_result.second);
}
}
return ::testing::MakeMatcher(matcher);
}
impl::NodeMatcherProperties Name(string name) {
impl::NodeMatcherProperties props;
props.set_name(std::move(name));
return props;
}
impl::NodeMatcherProperties Op(string op) {
impl::NodeMatcherProperties props;
props.set_op(std::move(op));
return props;
}
impl::NodeMatcherProperties AssignedDevice(string assigned_device) {
impl::NodeMatcherProperties props;
props.set_assigned_device(std::move(assigned_device));
return props;
}
impl::NodeMatcherProperties impl::Inputs(
absl::Span<const ::testing::Matcher<OutEdge>> inputs) {
std::vector<::testing::Matcher<OutEdge>> inputs_vector;
absl::c_copy(inputs, std::back_inserter(inputs_vector));
impl::NodeMatcherProperties props;
props.set_inputs(std::move(inputs_vector));
return props;
}
impl::NodeMatcherProperties impl::CtrlDeps(
absl::Span<const ::testing::Matcher<const Node*>> control_deps) {
std::vector<::testing::Matcher<const Node*>> control_deps_vector;
absl::c_copy(control_deps, std::back_inserter(control_deps_vector));
impl::NodeMatcherProperties props;
props.set_control_deps(std::move(control_deps_vector));
return props;
}
std::pair<string, AttrValue> impl::AttrLiteralHelper(
const std::pair<string, bool>& bool_attr) {
AttrValue attr_value;
attr_value.set_b(bool_attr.second);
return {bool_attr.first, attr_value};
}
std::pair<string, AttrValue> impl::AttrLiteralHelper(
const std::pair<string, absl::Span<const int>>& int_list_attr) {
AttrValue attr_value;
AttrValue::ListValue* list = attr_value.mutable_list();
for (int i : int_list_attr.second) {
list->add_i(i);
}
return {int_list_attr.first, attr_value};
}
std::pair<string, AttrValue> impl::AttrLiteralHelper(
const std::pair<string, absl::Span<const string>>& string_list_attr) {
AttrValue attr_value;
AttrValue::ListValue* list = attr_value.mutable_list();
for (const string& s : string_list_attr.second) {
list->add_s(s);
}
return {string_list_attr.first, attr_value};
}
impl::NodeMatcherProperties impl::Attr(std::pair<string, AttrValue> attr) {
impl::NodeMatcherProperties props;
props.set_attr(std::move(attr));
return props;
}
impl::NodeMatcherProperties impl::Attr(string name) {
impl::NodeMatcherProperties props;
props.set_attr({std::move(name), std::nullopt});
return props;
}
NodeMatcherProperties ConstantValue(
const ::tensorflow::Input::Initializer& val) {
TF_CHECK_OK(val.status);
NodeMatcherProperties props;
props.set_constant_value(val.tensor);
return props;
}
::testing::Matcher<impl::OutEdge> Const(
const ::tensorflow::Input::Initializer& val) {
return Out(NodeWith(ConstantValue(val)));
}
::testing::Matcher<impl::OutEdge> Out(
int oidx, ::testing::Matcher<const Node*> node_matcher) {
return ::testing::MakeMatcher(new OutEdgeMatcher(node_matcher, oidx));
}
}
Node* FindNodeByName(Graph* g, absl::string_view name) {
for (Node* n : g->nodes()) {
if (n->name() == name) {
return n;
}
}
return nullptr;
}
}
void PrintTo(const Node* n, ::std::ostream* os) { *os << SummarizeNode(*n); }
void PrintTo(Node* n, ::std::ostream* os) { *os << SummarizeNode(*n); }
} | #include "tensorflow/compiler/jit/node_matchers.h"
#include <string>
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/math_ops.h"
namespace tensorflow {
namespace testing {
namespace {
using ::testing::_;
using testing::matchers::AssignedDevice;
using testing::matchers::Attr;
using testing::matchers::ConstantValue;
using testing::matchers::CtrlDeps;
using testing::matchers::Inputs;
using testing::matchers::Name;
using testing::matchers::NodeWith;
using testing::matchers::Op;
using testing::matchers::Out;
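// Test helper: first asserts that `t` does not match `m`, then re-runs the
// matcher with a StringMatchResultListener and returns the captured
// explanation so the tests below can check the mismatch message.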
template <typename M, typename T>
string Explain(const T& t, const M& m) {
::testing::StringMatchResultListener listener;
EXPECT_THAT(t, ::testing::Not(m));
EXPECT_FALSE(m.MatchAndExplain(t, &listener));
return listener.str();
}
TEST(NodeMatchers, CheckAgainstConstant) {
Scope root = Scope::NewRootScope().ExitOnError();
Output placeholder =
ops::Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
EXPECT_THAT(placeholder.node(), NodeWith(Op("Placeholder")));
EXPECT_THAT(placeholder.node(), NodeWith(Name("placeholder")));
EXPECT_THAT(placeholder.node(),
NodeWith(Op("Placeholder"), Name("placeholder")));
EXPECT_THAT(placeholder.node(),
NodeWith(Name("placeholder"), Op("Placeholder")));
EXPECT_THAT(placeholder.node(), NodeWith(Inputs()));
EXPECT_THAT(placeholder.node(),
NodeWith(Op("Placeholder"), Name("placeholder"), Inputs()));
EXPECT_EQ(Explain(placeholder.node(), NodeWith(Op("Add"))),
"\nexpected op Add but found Placeholder");
EXPECT_EQ(Explain(placeholder.node(), NodeWith(Name("add"))),
"\nexpected name add but found placeholder");
EXPECT_EQ(Explain(placeholder.node(), NodeWith(Inputs(Out(NodeWith())))),
"\nexpected 1 inputs but node has 0");
}
TEST(NodeMatchers, CheckAgainstBinary) {
Scope root = Scope::NewRootScope().ExitOnError();
Output placeholder_a =
ops::Placeholder(root.WithOpName("placeholder_a"), DT_FLOAT);
Output placeholder_b =
ops::Placeholder(root.WithOpName("placeholder_b"), DT_FLOAT);
Output add = ops::Add(root.WithOpName("add"), placeholder_a, placeholder_b);
EXPECT_THAT(add.node(),
NodeWith(Op("Add"), Name("add"),
Inputs(Out(NodeWith(Name("placeholder_a"))),
Out(NodeWith(Name("placeholder_b"))))));
EXPECT_EQ(Explain(add.node(), NodeWith(Inputs())),
"\nexpected 0 inputs but node has 2");
EXPECT_EQ(
Explain(add.node(), NodeWith(Inputs(Out(NodeWith(Name("blah"))), _))),
"\ninput 0 does not match expected:\nname: blah, \nsource does not match "
"expected name: blah\n\t\nexpected name blah but found placeholder_a");
EXPECT_EQ(
Explain(add.node(), NodeWith(Inputs(_, Out(NodeWith(Name("blah")))))),
"\ninput 1 does not match expected:\nname: blah, \nsource does not match "
"expected name: blah\n\t\nexpected name blah but found placeholder_b");
}
TEST(NodeMatchers, CheckControlDependence) {
Scope root = Scope::NewRootScope().ExitOnError();
Output placeholder_a =
ops::Placeholder(root.WithOpName("placeholder_a"), DT_FLOAT);
Output placeholder_b =
ops::Placeholder(root.WithOpName("placeholder_b"), DT_FLOAT);
Output placeholder_c =
ops::Placeholder(root.WithOpName("placeholder_c"), DT_FLOAT);
Output placeholder_d =
ops::Placeholder(root.WithOpName("placeholder_d"), DT_FLOAT);
root.graph()->AddControlEdge(placeholder_a.node(), placeholder_c.node());
root.graph()->AddControlEdge(placeholder_b.node(), placeholder_c.node());
EXPECT_THAT(placeholder_c.node(),
NodeWith(Name("placeholder_c"),
CtrlDeps(NodeWith(Name("placeholder_a")),
NodeWith(Name("placeholder_b")))));
EXPECT_THAT(placeholder_d.node(),
NodeWith(Name("placeholder_d"), CtrlDeps()));
{
const std::string explanation =
Explain(placeholder_c.node(), NodeWith(CtrlDeps()));
EXPECT_NE(explanation.find("ctrl_deps, which has 2 elements"),
std::string::npos);
EXPECT_NE(explanation.find("does not match expected: is empty"),
std::string::npos);
}
{
const std::string explanation =
Explain(placeholder_d.node(), NodeWith(CtrlDeps(NodeWith())));
EXPECT_NE(explanation.find("ctrl_deps"), std::string::npos);
EXPECT_NE(explanation.find("does not match expected: has 1 element and "
"that element is any node"),
std::string::npos);
}
}
TEST(NodeMatchers, ConstValue) {
Scope root = Scope::NewRootScope().ExitOnError();
Output placeholder =
ops::Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
Output const_0d = ops::Const(root.WithOpName("const_0d"), 42);
Output const_2d = ops::Const(root.WithOpName("const_2d"), {{1, 2}, {4, 3}});
EXPECT_THAT(const_0d.node(), NodeWith(ConstantValue(42)));
EXPECT_THAT(const_0d.node(), NodeWith(ConstantValue(42), Name("const_0d")));
EXPECT_THAT(const_2d.node(), NodeWith(ConstantValue({{1, 2}, {4, 3}})));
EXPECT_EQ(Explain(placeholder.node(), NodeWith(ConstantValue(42))),
"\nexpected op Const but found Placeholder");
EXPECT_EQ(
Explain(const_0d.node(), NodeWith(ConstantValue(43))),
"\nmismatch in constant tensor at index 0 expected = 43 actual = 42");
EXPECT_EQ(
Explain(const_0d.node(), NodeWith(ConstantValue({{1, 2}, {4, 3}}))),
"\nwas looking for tensor with 4 elements, found tensor with 1 elements");
EXPECT_EQ(
Explain(const_2d.node(), NodeWith(ConstantValue(42))),
"\nwas looking for tensor with 1 elements, found tensor with 4 elements");
}
TEST(NodeMatchers, AssignedDevice) {
Scope root = Scope::NewRootScope().ExitOnError();
Output placeholder_a =
ops::Placeholder(root.WithOpName("placeholder_a"), DT_FLOAT);
Output placeholder_b =
ops::Placeholder(root.WithOpName("placeholder_b"), DT_FLOAT);
Output assigned_add =
ops::Add(root.WithOpName("assigned_add"), placeholder_a, placeholder_b);
assigned_add.node()->set_assigned_device_name(
"/job:localhost/replica:0/task:0/device:CPU:0");
Output unassigned_add =
ops::Add(root.WithOpName("unassigned_add"), placeholder_a, placeholder_b);
EXPECT_THAT(
assigned_add.node(),
NodeWith(AssignedDevice("/job:localhost/replica:0/task:0/device:CPU:0")));
EXPECT_THAT(unassigned_add.node(), NodeWith(AssignedDevice("")));
EXPECT_EQ(Explain(unassigned_add.node(),
NodeWith(AssignedDevice(
"/job:localhost/replica:0/task:0/device:CPU:0"))),
"\nexpected assigned_device "
"/job:localhost/replica:0/task:0/device:CPU:0 but found \"\"");
}
TEST(NodeMatchers, OutputIndices) {
Scope root = Scope::NewRootScope().ExitOnError();
Output pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL);
Output data = ops::Placeholder(root.WithOpName("data"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), data, pred);
Output add = ops::Add(root.WithOpName("add"), sw.output_true,
ops::Placeholder(root.WithOpName("addend"), DT_FLOAT));
EXPECT_THAT(add.node(), NodeWith(Inputs(Out(1, NodeWith(Op("Switch"))), _)));
EXPECT_EQ(
Explain(add.node(), NodeWith(Inputs(Out(0, NodeWith(Op("Switch"))), _))),
"\ninput 0 does not match expected:\nop: Switch, \nexpected output slot "
"to be 0 but found 1");
}
TEST(NodeMatchers, Attrs) {
Scope root = Scope::NewRootScope().ExitOnError();
Output enter = ops::internal::Enter(
root.WithOpName("enter"),
ops::Placeholder(root.WithOpName("data"), DT_FLOAT), "frame_name",
ops::internal::Enter::Attrs{}.IsConstant(true));
EXPECT_THAT(enter.node(), NodeWith(Attr("is_constant", true)));
EXPECT_EQ(Explain(enter.node(), NodeWith(Attr("is_constant", false))),
"attribute named is_constant does not match value; expected: "
"\"false\", found: \"true\"");
EXPECT_EQ(Explain(enter.node(), NodeWith(Attr("missing_attr", false))),
"did not find attribute named \"missing_attr\" in node");
}
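// A minimal illustrative sketch, not part of the original test suite: it
// assumes FindNodeByName (defined in node_matchers.cc above) is exposed via
// node_matchers.h and combines it with the NodeWith matchers; the node names
// are arbitrary choices for the example.
TEST(NodeMatchers, FindNodeByNameSketch) {
  Scope root = Scope::NewRootScope().ExitOnError();
  Output placeholder =
      ops::Placeholder(root.WithOpName("placeholder"), DT_FLOAT);
  Node* found = FindNodeByName(root.graph(), "placeholder");
  ASSERT_NE(found, nullptr);
  EXPECT_EQ(found, placeholder.node());
  EXPECT_THAT(found, NodeWith(Op("Placeholder"), Name("placeholder")));
  EXPECT_EQ(FindNodeByName(root.graph(), "does_not_exist"), nullptr);
}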
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/node_matchers.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/node_matchers_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
50beb168-8ca5-4457-99cd-a70833c5bb18 | cpp | tensorflow/tensorflow | device_compilation_cluster_signature | tensorflow/compiler/jit/device_compilation_cluster_signature.cc | tensorflow/compiler/jit/device_compilation_cluster_signature_test.cc | #include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include <string>
#include <utility>
#include <variant>
namespace tensorflow {
namespace {
using Signature = DeviceCompilationClusterSignature;
using TensorTypeAndShape = Signature::TensorTypeAndShape;
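// The structs below are std::visit visitors applied to a single signature
// argument, which holds either a constant Tensor or a (DataType, shape) pair
// for non-constant arguments.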
struct SignatureHumanStringAppender {
explicit SignatureHumanStringAppender(std::string* dest) : dest(dest) {}
std::string* dest;
void operator()(const Tensor& arg) {
absl::StrAppend(dest, "; ", arg.DebugString());
}
void operator()(const TensorTypeAndShape& arg) {
absl::StrAppend(dest, ",", DataTypeString(arg.first));
absl::StrAppend(dest, " [", absl::StrJoin(arg.second, ","), "]");
}
};
struct SignatureNotEqual {
bool operator()(const Tensor& arg, const Tensor& other) {
return arg.dtype() != other.dtype() || arg.shape() != other.shape() ||
arg.tensor_data() != other.tensor_data();
}
bool operator()(const TensorTypeAndShape& arg,
const TensorTypeAndShape& other) {
return arg.first != other.first || arg.second != other.second;
}
bool operator()(const Tensor& arg, const TensorTypeAndShape& other) {
return true;
}
bool operator()(const TensorTypeAndShape& arg, const Tensor& other) {
return true;
}
};
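// Folds one argument into the running hash `h`: a constant contributes its
// dtype, raw tensor bytes, and dimensions; a non-constant argument contributes
// its dtype and shape.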
struct SignatureHashCombiner {
explicit SignatureHashCombiner(const uint64 h) : h(h) {}
uint64 h;
uint64 operator()(const Tensor& arg) {
h = Hash64Combine(h, std::hash<int>()(static_cast<int>(arg.dtype())));
h = Hash64Combine(
h, Hash64(arg.tensor_data().data(), arg.tensor_data().size()));
for (int dim = 0; dim < arg.dims(); ++dim) {
h = Hash64Combine(h, std::hash<int>()(arg.dim_size(dim)));
}
return h;
}
uint64 operator()(const TensorTypeAndShape& arg) {
h = Hash64Combine(h, std::hash<int>()(static_cast<int>(arg.first)));
h = Hash64Combine(h, std::hash<int>()(arg.second.size()));
for (int dim : arg.second) {
h = Hash64Combine(h, std::hash<int>()(dim));
}
return h;
}
};
}
std::string Signature::HumanString() const {
std::string result = name;
for (const auto& arg : args) {
std::visit(SignatureHumanStringAppender(&result), arg);
}
return result;
}
bool Signature::operator==(const Signature& other) const {
if (name != other.name) return false;
if (args.size() != other.args.size()) return false;
for (int i = 0, end = args.size(); i < end; ++i) {
if (std::visit(SignatureNotEqual(), args[i], other.args[i])) {
return false;
}
}
return true;
}
uint64 Signature::Hash::operator()(const Signature& signature) const {
uint64 h = std::hash<string>()(signature.name);
for (const auto& arg : signature.args) {
h = std::visit(SignatureHashCombiner(h), arg);
}
return h;
}
absl::StatusOr<Signature> Signature::Build(
const NameAttrList& function,
absl::Span<const XlaCompiler::Argument> args) {
Signature signature;
signature.name = Canonicalize(function.name(), AttrSlice(&function.attr()));
for (const XlaCompiler::Argument& arg : args) {
switch (arg.kind) {
case XlaCompiler::Argument::kConstant:
case XlaCompiler::Argument::kConstantResource:
signature.args.push_back(arg.constant_value);
break;
case XlaCompiler::Argument::kParameter:
case XlaCompiler::Argument::kResource:
signature.args.push_back(
TensorTypeAndShape(arg.type, arg.DimensionSizesAsInlinedVector()));
break;
default:
return errors::InvalidArgument(
"Unhandled argument kind in XlaCompilationCache: ",
arg.HumanString());
}
}
return std::move(signature);
}
} | #include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"
#include <utility>
#include <vector>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "xla/client/client_library.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
using SignatureHash = DeviceCompilationClusterSignature::Hash;
TEST(DeviceCompilationClusterSignatureTest, SignatureEquality) {
NameAttrList fn;
fn.set_name("afunction");
std::vector<XlaCompiler::Argument> args(1);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_INT32;
args[0].shape = TensorShape({4, 0});
args[0].constant_value = Tensor(DT_INT32, {4, 0});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s1,
DeviceCompilationClusterSignature::Build(fn, args));
args[0].type = DT_FLOAT;
args[0].constant_value = Tensor(DT_FLOAT, {4, 0});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s2,
DeviceCompilationClusterSignature::Build(fn, args));
args[0].shape = TensorShape({0, 4});
args[0].constant_value = Tensor(DT_FLOAT, {0, 4});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s3,
DeviceCompilationClusterSignature::Build(fn, args));
std::vector<DeviceCompilationClusterSignature> signatures = {s1, s2, s3};
for (int i = 0; i < signatures.size(); ++i) {
for (int j = 0; j < signatures.size(); ++j) {
EXPECT_EQ(i == j, signatures[i] == signatures[j])
<< "s1: " << signatures[i].HumanString() << "\n"
<< "s2: " << signatures[j].HumanString();
EXPECT_EQ(i == j,
signatures[i].HumanString() == signatures[j].HumanString())
<< "s1: " << signatures[i].HumanString() << "\n"
<< "s2: " << signatures[j].HumanString();
EXPECT_EQ(i == j, SignatureHash()(signatures[i]) ==
SignatureHash()(signatures[j]))
<< "s1: " << signatures[i].HumanString() << "\n"
<< "s1_hash: " << SignatureHash()(signatures[i]) << "\n"
<< "s2: " << signatures[j].HumanString() << "\n"
<< "s2_hash: " << SignatureHash()(signatures[j]);
}
}
}
TEST(DeviceCompilationClusterSignatureTest, SignatureUniqueness) {
NameAttrList fn;
fn.set_name("afunction");
std::vector<XlaCompiler::Argument> args(2);
args[0].kind = XlaCompiler::Argument::kConstant;
args[0].type = DT_INT32;
args[0].constant_value = Tensor(DT_INT32, {4, 0});
args[1].kind = XlaCompiler::Argument::kParameter;
args[1].type = DT_INT32;
args[1].shape = TensorShape({4, 0});
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s1,
DeviceCompilationClusterSignature::Build(fn, args));
using std::swap;
swap(args[0], args[1]);
TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s2,
DeviceCompilationClusterSignature::Build(fn, args));
EXPECT_NE(s1.HumanString(), s2.HumanString());
EXPECT_NE(SignatureHash()(s1), SignatureHash()(s2));
EXPECT_FALSE(s1 == s2);
}
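// Benchmarks DeviceCompilationClusterSignature::Build with n_args arguments,
// making every third argument a constant and the rest parameters.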
void BM_BuildSignature(::testing::benchmark::State& state) {
const int n_args = state.range(0);
NameAttrList fn;
fn.set_name("afunction");
for (int i = 0; i < n_args; i++) {
(*fn.mutable_attr())[absl::StrCat("T", i)].set_type(DT_FLOAT);
}
std::vector<XlaCompiler::Argument> args(n_args);
for (int i = 0; i < n_args; i++) {
args[i].kind = (((i % 3) == 0) ? XlaCompiler::Argument::kConstant
: XlaCompiler::Argument::kParameter);
args[i].type = DT_INT32;
args[i].shape = TensorShape({4, 0});
args[i].constant_value = Tensor(DT_INT32, {4, 0});
}
for (auto i : state) {
auto s = DeviceCompilationClusterSignature::Build(fn, args);
CHECK(s.ok());
DeviceCompilationClusterSignature sig = std::move(s.value());
}
}
BENCHMARK(BM_BuildSignature)->Arg(0)->Arg(1)->Arg(2)->Arg(5)->Arg(10);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cluster_signature.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compilation_cluster_signature_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
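A hedged usage sketch, not taken from the TensorFlow sources: the Hash functor
and operator== defined above make the signature usable as the key of a hash
map, for example to cache compiled clusters. `CompiledExecutable` and
`ClusterCache` are hypothetical names introduced here for illustration.

#include <unordered_map>

#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h"

struct CompiledExecutable {};  // Hypothetical cached value type.

using ClusterCache = std::unordered_map<
    tensorflow::DeviceCompilationClusterSignature, CompiledExecutable,
    tensorflow::DeviceCompilationClusterSignature::Hash>;

// Typical use (sketch): build a signature for (function, args) and use it as
// the cache key.
//   auto sig = tensorflow::DeviceCompilationClusterSignature::Build(fn, args);
//   if (sig.ok()) cache.emplace(*sig, CompiledExecutable{});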