
[llvm][NFC] TensorSpec abstraction for ML evaluator

Further abstracting the specification of a tensor, to more easily
support different types and shapes of tensors, and to perform
initialization up-front, at TFModelEvaluator construction time.

Differential Revision: https://reviews.llvm.org/D84685
Mircea Trofin 2020-07-29 16:29:21 -07:00
parent ba8d265716
commit ff4bf8bfb5
5 changed files with 115 additions and 144 deletions
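
A minimal usage sketch of the API after this change, for orientation; it is not part of the commit, and the model path and tensor names are hypothetical. Callers now describe inputs and outputs as TensorSpecs, and the evaluator binds and initializes them at construction time:

#include "llvm/Analysis/Utils/TFUtils.h"
using namespace llvm;

void exampleSetup() {
  // Describe the expected inputs/outputs up front as TensorSpecs.
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int32_t>("some_input", {1, 10})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("some_output", {1})};
  // Construction binds each spec to a graph node and allocates/initializes
  // the input tensors; the separate initInput<T>() step is gone.
  TFModelEvaluator Evaluator("/path/to/saved_model", InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return;
}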

llvm/include/llvm/Analysis/Utils/TFUtils.h

@@ -36,6 +36,43 @@ namespace llvm {
class TFModelEvaluatorImpl;
class EvaluationResultImpl;
/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
/// for supported types), its name and port (see "TensorFlow: Large-Scale
/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
///
/// TensorSpec is used to set up a TFModelEvaluator by describing the expected
/// inputs and outputs.
class TensorSpec final {
public:
template <typename T>
static TensorSpec createSpec(const std::string &Name,
const std::vector<int64_t> &Shape,
int Port = 0) {
return TensorSpec(Name, Port, getDataType<T>(), Shape);
}
const std::string &name() const { return Name; }
int port() const { return Port; }
int typeIndex() const { return TypeIndex; }
const std::vector<int64_t> &shape() const { return Shape; }
private:
TensorSpec(const std::string &Name, int Port, int TypeIndex,
const std::vector<int64_t> &Shape)
: Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape) {}
template <typename T> static int getDataType() {
llvm_unreachable("Undefined tensor type");
}
std::string Name;
int Port = 0;
int TypeIndex = 0;
std::vector<int64_t> Shape;
};
class TFModelEvaluator final {
public:
/// The result of a model evaluation. Handles the lifetime of the output
@@ -60,8 +97,8 @@ public:
};
TFModelEvaluator(StringRef SavedModelPath,
const std::vector<std::string> &InputNames,
const std::vector<std::string> &OutputNames,
const std::vector<TensorSpec> &InputSpecs,
const std::vector<TensorSpec> &OutputSpecs,
const char *Tags = "serve");
~TFModelEvaluator();
TFModelEvaluator(const TFModelEvaluator &) = delete;
@@ -82,32 +119,21 @@ public:
/// otherwise.
bool isValid() const { return !!Impl; }
/// Initialize the input at Index as a tensor of the given type and
/// dimensions.
template <typename T>
void initInput(size_t Index, const std::vector<int64_t> &Dimensions) {
return initInput(Index, getModelTypeIndex<T>(), Dimensions);
}
private:
void *getUntypedInput(size_t Index);
template <typename T> int getModelTypeIndex();
void initInput(size_t Index, int TypeIndex,
const std::vector<int64_t> &Dimensions);
std::unique_ptr<TFModelEvaluatorImpl> Impl;
};
template <> int TFModelEvaluator::getModelTypeIndex<float>();
template <> int TFModelEvaluator::getModelTypeIndex<double>();
template <> int TFModelEvaluator::getModelTypeIndex<int8_t>();
template <> int TFModelEvaluator::getModelTypeIndex<uint8_t>();
template <> int TFModelEvaluator::getModelTypeIndex<int16_t>();
template <> int TFModelEvaluator::getModelTypeIndex<uint16_t>();
template <> int TFModelEvaluator::getModelTypeIndex<int32_t>();
template <> int TFModelEvaluator::getModelTypeIndex<uint32_t>();
template <> int TFModelEvaluator::getModelTypeIndex<int64_t>();
template <> int TFModelEvaluator::getModelTypeIndex<uint64_t>();
template <> int TensorSpec::getDataType<float>();
template <> int TensorSpec::getDataType<double>();
template <> int TensorSpec::getDataType<int8_t>();
template <> int TensorSpec::getDataType<uint8_t>();
template <> int TensorSpec::getDataType<int16_t>();
template <> int TensorSpec::getDataType<uint16_t>();
template <> int TensorSpec::getDataType<int32_t>();
template <> int TensorSpec::getDataType<uint32_t>();
template <> int TensorSpec::getDataType<int64_t>();
template <> int TensorSpec::getDataType<uint64_t>();
} // namespace llvm
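
For context, a rough sketch of a full evaluation round-trip against this header, assuming (consistent with the unit tests at the end of this diff) that evaluate() returns an Optional<EvaluationResult> with a typed getTensorValue accessor; the tensor names and the {1, 214} shape come from the ir2native test model used there:

#include "llvm/Analysis/Utils/TFUtils.h"
#include <algorithm>
using namespace llvm;

float exampleEvaluate(StringRef ModelPath) {
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, 214})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
  TFModelEvaluator Evaluator(ModelPath, InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return 0.0f;
  // The input buffer already exists: it was allocated at construction from
  // the spec's type and shape, so we can fill it directly.
  int32_t *Input = Evaluator.getInput<int32_t>(0);
  std::fill(Input, Input + 214, 1);
  if (auto ER = Evaluator.evaluate())
    return *ER->getTensorValue<float>(0);
  return 0.0f;
}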

llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp

@@ -298,35 +298,12 @@ public:
private:
std::unique_ptr<TFModelEvaluator> Evaluator;
// The training framework needs some additional features, that just need to
// be set to 0.
struct TensorSpec {
std::string Name;
std::function<void(TFModelEvaluator *, size_t Index,
const std::vector<int64_t> &Dim)>
Initializer;
};
// The training framework needs some additional features.
const std::vector<TensorSpec> TrainingOnlyFeatures{
{"inlining_default",
[](TFModelEvaluator *Evaluator, size_t Index,
const std::vector<int64_t> &Dim) {
Evaluator->initInput<int64_t>(Index, Dim);
}},
{"discount",
[](TFModelEvaluator *Evaluator, size_t Index,
const std::vector<int64_t> &Dim) {
Evaluator->initInput<float>(Index, Dim);
}},
{"reward",
[](TFModelEvaluator *Evaluator, size_t Index,
const std::vector<int64_t> &Dim) {
Evaluator->initInput<float>(Index, Dim);
}},
{"step_type", [](TFModelEvaluator *Evaluator, size_t Index,
const std::vector<int64_t> &Dim) {
Evaluator->initInput<int32_t>(Index, Dim);
}}};
TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
};
} // namespace
@@ -409,33 +386,22 @@ size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
const std::string &ModelPath)
: MLModelRunner(Ctx) {
std::vector<std::string> InputNames;
std::vector<std::string> OutputNames;
std::vector<TensorSpec> InputSpecs;
std::vector<TensorSpec> OutputSpecs;
for (size_t I = 0; I < NumberOfFeatures; ++I)
InputNames.push_back(TFFeedPrefix + FeatureNameMap[I]);
for (size_t I = 0; I < TrainingOnlyFeatures.size(); ++I)
InputNames.push_back(TFFeedPrefix + TrainingOnlyFeatures[I].Name);
OutputNames.push_back(TFDecisionName);
InputSpecs.push_back(
TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(),
TrainingOnlyFeatures.end());
OutputSpecs.push_back(TensorSpec::createSpec<int64_t>(TFDecisionName, {1}));
Evaluator =
std::make_unique<TFModelEvaluator>(ModelPath, InputNames, OutputNames);
std::make_unique<TFModelEvaluator>(ModelPath, InputSpecs, OutputSpecs);
if (!Evaluator || !Evaluator->isValid()) {
Ctx.emitError("Failed to create inliner saved model evaluator");
Evaluator.reset();
return;
}
static const std::vector<int64_t> Dim{1};
size_t InputIndex = 0;
for (; InputIndex < NumberOfFeatures; ++InputIndex) {
Evaluator->initInput<int64_t>(InputIndex, Dim);
}
for (; InputIndex < InputNames.size(); ++InputIndex) {
TrainingOnlyFeatures[InputIndex - NumberOfFeatures].Initializer(
Evaluator.get(), InputIndex, Dim);
}
}
bool ModelUnderTrainingRunner::run() {

llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp

@@ -244,19 +244,18 @@ InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {
if (!isEvaluatorRequested()) {
return;
}
std::vector<std::string> InputNames{"serving_default_input_1"};
std::vector<std::string> OutputName{"StatefulPartitionedCall"};
std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
"serving_default_input_1",
{1, static_cast<int64_t>(
IRToNativeSizeLearning::FunctionFeatures::FeatureCount)})};
std::vector<TensorSpec> OutputSpecs{
TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
Evaluator = std::make_unique<TFModelEvaluator>(
TFIR2NativeModelPath.getValue().c_str(), InputNames, OutputName);
TFIR2NativeModelPath.getValue().c_str(), InputSpecs, OutputSpecs);
if (!Evaluator || !Evaluator->isValid()) {
Evaluator.reset();
return;
}
static const std::vector<int64_t> Dim{
1, static_cast<int64_t>(
IRToNativeSizeLearning::FunctionFeatures::FeatureCount)};
Evaluator->initInput<int32_t>(0, Dim);
}
InlineSizeEstimatorAnalysis::Result

llvm/lib/Analysis/TFUtils.cpp

@@ -86,8 +86,8 @@ private:
class TFModelEvaluatorImpl {
public:
TFModelEvaluatorImpl(StringRef SavedModelPath,
const std::vector<std::string> &InputNames,
const std::vector<std::string> &OutputNames,
const std::vector<TensorSpec> &InputSpecs,
const std::vector<TensorSpec> &OutputSpecs,
const char *Tags);
bool isValid() const { return IsValid; }
@@ -132,16 +132,17 @@ private:
/// Reusable utility for ensuring we can bind the requested Name to a node in
/// the SavedModel Graph.
bool checkReportAndInvalidate(const TF_Output &Output, StringRef Name);
bool checkReportAndInvalidate(const TF_Output &Output,
const TensorSpec &OutputSpec);
};
} // namespace llvm
TFModelEvaluatorImpl::TFModelEvaluatorImpl(
StringRef SavedModelPath, const std::vector<std::string> &InputNames,
const std::vector<std::string> &OutputNames, const char *Tags)
StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
const std::vector<TensorSpec> &OutputSpecs, const char *Tags)
: Graph(createTFGraph()), Options(createTFSessionOptions()),
InputFeed(InputNames.size()), Input(InputNames.size()),
OutputFeed(OutputNames.size()) {
InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
OutputFeed(OutputSpecs.size()) {
if (!ensureInitTF()) {
errs() << "Tensorflow should have been initialized";
return;
@@ -155,25 +156,31 @@ TFModelEvaluatorImpl::TFModelEvaluatorImpl(
errs() << TF_Message(Status.get());
invalidate();
}
for (size_t I = 0; I < InputNames.size(); ++I) {
for (size_t I = 0; I < InputSpecs.size(); ++I) {
auto &InputSpec = InputSpecs[I];
InputFeed[I] = {
TF_GraphOperationByName(Graph.get(), (InputNames[I]).c_str()), 0};
if (!checkReportAndInvalidate(InputFeed[I], InputNames[I]))
TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
InputSpec.port()};
if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
return;
initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
InputSpec.shape());
}
for (size_t I = 0; I < OutputNames.size(); ++I) {
for (size_t I = 0; I < OutputSpecs.size(); ++I) {
auto &OutputSpec = OutputSpecs[I];
OutputFeed[I] = {
TF_GraphOperationByName(Graph.get(), (OutputNames[I]).c_str()), 0};
if (!checkReportAndInvalidate(OutputFeed[I], OutputNames[I]))
TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
OutputSpec.port()};
if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
return;
}
}
TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
const std::vector<std::string> &InputNames,
const std::vector<std::string> &OutputNames,
const std::vector<TensorSpec> &InputSpecs,
const std::vector<TensorSpec> &OutputSpecs,
const char *Tags)
: Impl(new TFModelEvaluatorImpl(SavedModelPath, InputNames, OutputNames,
: Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
Tags)) {
if (!Impl->isValid())
Impl.reset();
@@ -192,11 +199,11 @@ TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
errs() << "Could not delete TF session";
}
bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TF_Output &Output,
StringRef Name) {
bool TFModelEvaluatorImpl::checkReportAndInvalidate(
const TF_Output &Output, const TensorSpec &OutputSpec) {
if (Output.oper)
return true;
errs() << "Could not find TF_Output named: " + Name;
errs() << "Could not find TF_Output named: " + OutputSpec.name();
IsValid = false;
return IsValid;
}
@@ -242,50 +249,25 @@ void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
return TF_TensorData(Impl->getOutput()[Index]);
}
void TFModelEvaluator::initInput(size_t Index, int TypeIndex,
const std::vector<int64_t> &Dimensions) {
Impl->initInput(Index, static_cast<TF_DataType>(TypeIndex), Dimensions);
}
template <> int TensorSpec::getDataType<float>() { return TF_FLOAT; }
template <> int TFModelEvaluator::getModelTypeIndex<float>() {
return TF_FLOAT;
}
template <> int TensorSpec::getDataType<double>() { return TF_DOUBLE; }
template <> int TFModelEvaluator::getModelTypeIndex<double>() {
return TF_DOUBLE;
}
template <> int TensorSpec::getDataType<int8_t>() { return TF_INT8; }
template <> int TFModelEvaluator::getModelTypeIndex<int8_t>() {
return TF_INT8;
}
template <> int TensorSpec::getDataType<uint8_t>() { return TF_UINT8; }
template <> int TFModelEvaluator::getModelTypeIndex<uint8_t>() {
return TF_UINT8;
}
template <> int TensorSpec::getDataType<int16_t>() { return TF_INT16; }
template <> int TFModelEvaluator::getModelTypeIndex<int16_t>() {
return TF_INT16;
}
template <> int TensorSpec::getDataType<uint16_t>() { return TF_UINT16; }
template <> int TFModelEvaluator::getModelTypeIndex<uint16_t>() {
return TF_UINT16;
}
template <> int TensorSpec::getDataType<int32_t>() { return TF_INT32; }
template <> int TFModelEvaluator::getModelTypeIndex<int32_t>() {
return TF_INT32;
}
template <> int TensorSpec::getDataType<uint32_t>() { return TF_UINT32; }
template <> int TFModelEvaluator::getModelTypeIndex<uint32_t>() {
return TF_UINT32;
}
template <> int TensorSpec::getDataType<int64_t>() { return TF_INT64; }
template <> int TFModelEvaluator::getModelTypeIndex<int64_t>() {
return TF_INT64;
}
template <> int TFModelEvaluator::getModelTypeIndex<uint64_t>() {
return TF_UINT64;
}
template <> int TensorSpec::getDataType<uint64_t>() { return TF_UINT64; }
TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}
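
The paired specializations above are also the extension point the commit message alludes to: supporting an additional element type now only requires one more getDataType specialization, rather than touching TFModelEvaluator. A hypothetical sketch (bool support is not part of this commit):

// Hypothetical extension: declare
//   template <> int TensorSpec::getDataType<bool>();
// in TFUtils.h, then define it here with the matching TF C API enumerator.
template <> int TensorSpec::getDataType<bool>() { return TF_BOOL; }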

llvm/unittests/Analysis/TFUtilsTest.cpp

@@ -37,15 +37,14 @@ TEST(TFUtilsTest, NoModel) {
TEST(TFUtilsTest, LoadAndExecuteTest) {
// We use the ir2native model for test. We know it has one feature of
// dimension (1, 214)
std::vector<std::string> InputNames{"serving_default_input_1"};
std::vector<std::string> OutputName{"StatefulPartitionedCall"};
const static int64_t KnownSize = 214;
std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
"serving_default_input_1", {1, KnownSize})};
std::vector<TensorSpec> OutputSpecs{
TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
TFModelEvaluator Evaluator(getModelPath(), InputNames, OutputName);
static const std::vector<int64_t> Dim{1, KnownSize};
TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
EXPECT_TRUE(Evaluator.isValid());
Evaluator.initInput<int32_t>(0, Dim);
int32_t *V = Evaluator.getInput<int32_t>(0);
// Fill it up with 1's, we know the output.
@@ -77,15 +76,14 @@ TEST(TFUtilsTest, LoadAndExecuteTest) {
TEST(TFUtilsTest, EvalError) {
// We use the ir2native model for test. We know it has one feature of
// dimension (1, 214)
std::vector<std::string> InputNames{"serving_default_input_1"};
std::vector<std::string> OutputName{"StatefulPartitionedCall"};
const static int64_t KnownSize = 213;
std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
"serving_default_input_1", {1, KnownSize})};
std::vector<TensorSpec> OutputSpecs{
TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
TFModelEvaluator Evaluator(getModelPath(), InputNames, OutputName);
static const std::vector<int64_t> Dim{1, KnownSize};
TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
EXPECT_TRUE(Evaluator.isValid());
Evaluator.initInput<int32_t>(0, Dim);
int32_t *V = Evaluator.getInput<int32_t>(0);
// Fill it up with 1's, we know the output.