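// TFG2P: thin wrapper around a TensorFlow SavedModel for grapheme-to-phoneme (G2P) inference.
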
#include "tfg2p.h"
#include <stdexcept>
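
// Default constructor: no model is loaded until Initialize() is called.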
TFG2P::TFG2P()
{
    G2P = nullptr;
}

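// Convenience constructor: immediately attempts to load the SavedModel from the given folder.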
TFG2P::TFG2P(const std::string &SavedModelFolder)
{
    G2P = nullptr;
    Initialize(SavedModelFolder);
}

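// Loads the TensorFlow SavedModel from disk; returns false if loading fails for any reason.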
bool TFG2P::Initialize(const std::string &SavedModelFolder)
{
    // Release any previously loaded model so that repeated calls don't leak.
    delete G2P;
    G2P = nullptr;

    try {
        G2P = new Model(SavedModelFolder);
    }
    catch (...) {
        G2P = nullptr;
        return false;
    }
    return true;
}

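// Runs inference on a sequence of input token IDs and returns the model's output IDs.
// Temperature is passed straight through to the model's temperature input.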
TFTensor<int32_t> TFG2P::DoInference(const std::vector<int32_t> &InputIDs, float Temperature)
{
    if (!G2P)
        throw std::invalid_argument("Tried to do inference on unloaded or invalid model!");

    // Convenience reference so that we don't have to constantly dereference the pointer.
    Model& Mdl = *G2P;

    // Input tensors named after the SavedModel's serving signature.
    Tensor input_ids{ Mdl, "serving_default_input_ids" };
    Tensor input_len{ Mdl, "serving_default_input_len" };
    Tensor input_temp{ Mdl, "serving_default_input_temperature" };

    // Feed the token IDs, their length, and the sampling temperature.
    input_ids.set_data(InputIDs, std::vector<int64_t>{(int64_t)InputIDs.size()});
    input_len.set_data(std::vector<int32_t>{(int32_t)InputIDs.size()});
    input_temp.set_data(std::vector<float>{Temperature});

    std::vector<Tensor*> Inputs{ &input_ids, &input_len, &input_temp };

    // Output tensor; run the model and copy the result into our own container.
    Tensor out_ids{ Mdl, "StatefulPartitionedCall" };
    Mdl.run(Inputs, out_ids);

    TFTensor<int32_t> RetTensor = VoxUtil::CopyTensor<int32_t>(out_ids);
    return RetTensor;
}

TFG2P::~TFG2P()
{
    if (G2P)
        delete G2P;
}
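
// Minimal usage sketch (hypothetical caller code; the SavedModel path, the way
// graphemes are encoded to IDs, and the temperature value are all assumptions):
//
//     TFG2P G2PModel("path/to/g2p_savedmodel");
//     std::vector<int32_t> CharIDs = /* encode input graphemes to IDs */;
//     TFTensor<int32_t> OutIDs = G2PModel.DoInference(CharIDs, 0.1f);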