2020-07-13 23:12:32 +02:00
|
|
|
//===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file implements utilities for interfacing with tensorflow C APIs.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
2020-07-21 17:44:47 +02:00
|
|
|
#include "llvm/Config/config.h"
|
|
|
|
#if defined(LLVM_HAVE_TF_API)
|
2020-07-13 23:12:32 +02:00
|
|
|
|
|
|
|
#include "llvm/ADT/Twine.h"
|
2020-07-30 21:44:07 +02:00
|
|
|
#include "llvm/Analysis/Utils/TFUtils.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
2020-07-30 21:44:07 +02:00
|
|
|
#include "llvm/Support/JSON.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "llvm/Support/ManagedStatic.h"
|
2020-11-19 01:16:10 +01:00
|
|
|
#include "llvm/Support/MemoryBuffer.h"
|
2020-11-19 05:54:04 +01:00
|
|
|
#include "llvm/Support/Path.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
#include "tensorflow/c/c_api.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "tensorflow/c/c_api_experimental.h"
|
|
|
|
|
|
|
|
#include <cassert>
|
2020-08-05 00:00:35 +02:00
|
|
|
#include <numeric>
|
2020-07-13 23:12:32 +02:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
// Owning smart-pointer aliases pairing each TensorFlow C-API object with its
// matching TF_Delete* function, so lifetimes are managed via RAII.
using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
using TFSessionOptionsPtr =
    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
|
|
|
|
|
2020-07-13 23:12:32 +02:00
|
|
|
/// One-time initializer for the TensorFlow library. TF_InitMain must only be
/// called once per process; construction through the ManagedStatic below
/// guarantees that, and the assert documents the invariant.
struct TFInitializer {
  TFInitializer() {
    assert(!IsInitialized && "TFInitialized should be called only once");
    // TF_InitMain expects a mutable argv-style array; hand it a dummy
    // one-element argv with an empty program name.
    int Argc = 1;
    const char *Name = "";
    const char **NamePtr = &Name;
    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
    IsInitialized = true;
  }
  bool IsInitialized = false;
};
|
|
|
|
|
|
|
|
// Lazily-constructed singleton: first dereference runs TF_InitMain exactly
// once, thread-safely (ManagedStatic semantics).
llvm::ManagedStatic<TFInitializer> TFLibInitializer;

// Force construction of the initializer if needed, and report whether the
// TensorFlow library is ready for use.
bool ensureInitTF() { return TFLibInitializer->IsInitialized; }
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Allocate a fresh TF_Graph wrapped in its owning smart pointer.
TFGraphPtr createTFGraph() {
  TF_Graph *RawGraph = TF_NewGraph();
  return TFGraphPtr(RawGraph, &TF_DeleteGraph);
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Allocate a fresh TF_Status wrapped in its owning smart pointer.
TFStatusPtr createTFStatus() {
  TF_Status *RawStatus = TF_NewStatus();
  return TFStatusPtr(RawStatus, &TF_DeleteStatus);
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Allocate fresh TF_SessionOptions wrapped in their owning smart pointer.
TFSessionOptionsPtr createTFSessionOptions() {
  TF_SessionOptions *RawOptions = TF_NewSessionOptions();
  return TFSessionOptionsPtr(RawOptions, &TF_DeleteSessionOptions);
}
|
2020-10-03 05:28:49 +02:00
|
|
|
|
|
|
|
/// Write the values of one tensor as a list.
|
|
|
|
template <typename T>
|
|
|
|
void writeTensorValues(raw_ostream &OutFile, const char *TensorData,
|
|
|
|
size_t ElemCount) {
|
|
|
|
OutFile << "[";
|
|
|
|
const T *TypedData = reinterpret_cast<const T *>(TensorData);
|
|
|
|
for (size_t I = 0; I < ElemCount; ++I) {
|
|
|
|
if (I > 0)
|
|
|
|
OutFile << ", ";
|
|
|
|
OutFile << TypedData[I];
|
|
|
|
}
|
|
|
|
OutFile << "]";
|
|
|
|
}
|
|
|
|
|
2020-10-17 17:40:44 +02:00
|
|
|
/// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
/// The tensors are assumed to be stored contiguously, in row-major format,
/// in the TensorData buffer. Each tensor has the shape given by Spec. The
/// feature name in the output is either the provided LoggingName, if
/// specified, otherwise it's the name of the tensor (as given by Spec).
void writeRawTensorsAsFeatureLists(raw_ostream &OutFile,
                                   const LoggedFeatureSpec &LoggedSpec,
                                   const char *TensorData, size_t TensorCount,
                                   bool FinalReward = false) {
  const char *FieldName = "<invalid>";
  std::function<void(const char *)> ValueWriter;
  const auto &Spec = LoggedSpec.Spec;
  // The 'Feature' protobuf only has 3 possible fields: float_list,
  // int64_list, or bytes_list, so we capture int32 values as int64. We don't
  // support any other types.
  if (Spec.isElementType<int64_t>()) {
    FieldName = "int64_list";
    ValueWriter = [&](const char *Data) {
      writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
    };
  } else if (Spec.isElementType<int32_t>()) {
    // int32 goes into the int64_list field, but the buffer is still read as
    // int32 values.
    FieldName = "int64_list";
    ValueWriter = [&](const char *Data) {
      writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
    };

  } else if (Spec.isElementType<float>()) {
    FieldName = "float_list";
    ValueWriter = [&](const char *Data) {
      writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
    };

  } else {
    llvm_unreachable("Unsupported tensor type.");
  }

  // Emit the feature_list header; the key is the logging name when one was
  // provided, otherwise the tensor's own name.
  OutFile << " feature_list: {\n";
  OutFile << " key: "
          << "\""
          << (LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())
          << "\" ";
  OutFile << "value: {\n";
  size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();

  // Emit one 'feature' record for the tensor whose raw bytes start at P.
  auto WriteFeatureProto = [&](const char *P) {
    OutFile << " feature: { " << FieldName << ": { value: ";
    ValueWriter(P);
    OutFile << " } }\n";
  };

  const char *CurrentTensor = TensorData;
  static int64_t Zero = 0;
  // Write all but the last value. If this is the final reward, don't increment
  // the CurrentTensor, and just write 0.
  for (size_t I = 0; I < TensorCount - 1; ++I) {
    if (FinalReward)
      WriteFeatureProto(reinterpret_cast<const char *>(&Zero));
    else {
      WriteFeatureProto(CurrentTensor);
      CurrentTensor += TensorByteSize;
    }
  }

  // The last record always comes from the buffer, so in the FinalReward case
  // only the final entry carries the actual reward value.
  WriteFeatureProto(CurrentTensor);

  OutFile << " }\n";
  OutFile << " }\n";
}
|
2020-07-13 23:12:32 +02:00
|
|
|
} // namespace
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
namespace llvm {
|
|
|
|
/// Owns the TF_Tensor pointers produced by a single TF_SessionRun call and
/// releases them on destruction.
class EvaluationResultImpl {
public:
  // Pre-size the output vector; TF_SessionRun fills in the slots.
  EvaluationResultImpl(size_t OutputSize)
      : OutputSize(OutputSize), Output(OutputSize){};

  // Delete whichever output tensors the TF runtime actually allocated.
  ~EvaluationResultImpl() {
    for (auto *P : Output)
      if (P)
        TF_DeleteTensor(P);
  }

  // Non-copyable and non-movable: exactly one instance may own the raw
  // TF_Tensor pointers.
  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;

  // Raw output slots, indexed to match the evaluator's OutputFeed.
  std::vector<TF_Tensor *> &getOutput() { return Output; }

private:
  const size_t OutputSize;
  std::vector<TF_Tensor *> Output;
};
|
|
|
|
|
2020-08-05 00:00:35 +02:00
|
|
|
// Per-element size in bytes, as reported by the TF C API for this spec's
// data type.
size_t TensorSpec::getElementByteSize() const {
  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
}
|
|
|
|
|
|
|
|
/// Construct a TensorSpec; ElementCount caches the product of all shape
/// dimensions, computed in 64 bits.
TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
                       const std::vector<int64_t> &Shape)
    : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
      // The initial value must be an int64_t: std::accumulate deduces its
      // accumulator type from the init argument, so a plain `1` would make
      // the running product an int and silently truncate it on every step,
      // despite the int64_t multiplies functor.
      ElementCount(std::accumulate(Shape.begin(), Shape.end(),
                                   static_cast<int64_t>(1),
                                   std::multiplies<int64_t>())) {}
|
|
|
|
|
2020-07-30 21:44:07 +02:00
|
|
|
/// Parse a TensorSpec from a JSON dict of the shape
///   {"name": <string>, "port": <int>, "type": <string>, "shape": [<int>...]}
/// Emits a diagnostic on Ctx and returns None for malformed input; an
/// unrecognized "type" string also yields None (with no extra diagnostic).
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value) {
  // Report a parse problem, echoing the offending JSON value for context.
  auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
    std::string S;
    llvm::raw_string_ostream OS(S);
    OS << Value;
    Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
    return None;
  };
  // FIXME: accept a Path as a parameter, and use it for error reporting.
  json::Path::Root Root("tensor_spec");
  json::ObjectMapper Mapper(Value, Root);
  if (!Mapper)
    return EmitError("Value is not a dict");

  std::string TensorName;
  int TensorPort = -1;
  std::string TensorType;
  std::vector<int64_t> TensorShape;

  if (!Mapper.map<std::string>("name", TensorName))
    return EmitError("'name' property not present or not a string");
  if (!Mapper.map<std::string>("type", TensorType))
    return EmitError("'type' property not present or not a string");
  if (!Mapper.map<int>("port", TensorPort))
    return EmitError("'port' property not present or not an int");
  if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
    return EmitError("'shape' property not present or not an int array");

  // Dispatch the textual type name (e.g. "int64_t") to the typed factory for
  // each supported type.
#define PARSE_TYPE(T, E)                                                       \
  if (TensorType == #T)                                                        \
    return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
  TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
  return None;
}
|
|
|
|
|
2020-11-19 05:54:04 +01:00
|
|
|
/// Load the output specs — an array of {tensor_spec, logging_name} dicts —
/// from SpecFileOverride, or from <ModelPath>/output_spec.json when no
/// override is given. Returns None (after emitting a diagnostic on Ctx) on
/// any I/O, parse, or validation failure. The first entry must describe the
/// decision tensor and carry ExpectedDecisionName as its logging_name.
Optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride) {
  SmallVector<char, 128> OutputSpecsPath;
  StringRef FileName = SpecFileOverride;
  if (FileName.empty()) {
    // Default location: output_spec.json next to the saved model.
    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
    FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
  }

  auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
  if (!BufferOrError) {
    Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                  BufferOrError.getError().message());
    return None;
  }
  auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
  if (!ParsedJSONValues) {
    Ctx.emitError("Could not parse specs file: " + FileName);
    return None;
  }
  auto ValuesArray = ParsedJSONValues->getAsArray();
  if (!ValuesArray) {
    Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                  "logging_name:<name>} dictionaries");
    return None;
  }
  std::vector<LoggedFeatureSpec> Ret;
  // Collect only fully well-formed entries; malformed ones are detected below
  // by comparing the collected count to the array size.
  for (const auto &Value : *ValuesArray)
    if (const auto *Obj = Value.getAsObject())
      if (const auto *SpecPart = Obj->get("tensor_spec"))
        if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
          if (auto LoggingName = Obj->getString("logging_name")) {
            if (!TensorSpec->isElementType<int64_t>() &&
                !TensorSpec->isElementType<int32_t>() &&
                !TensorSpec->isElementType<float>()) {
              Ctx.emitError(
                  "Only int64, int32, and float tensors are supported. "
                  "Found unsupported type for tensor named " +
                  TensorSpec->name());
              return None;
            }
            Ret.push_back({*TensorSpec, LoggingName->str()});
          }

  if (ValuesArray->size() != Ret.size()) {
    Ctx.emitError(
        "Unable to parse output spec. It should be a json file containing an "
        "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
        "with a json object describing a TensorSpec; and a 'logging_name' key, "
        "which is a string to use as name when logging this tensor in the "
        "training log.");
    return None;
  }
  if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
    Ctx.emitError("The first output spec must describe the decision tensor, "
                  "and must have the logging_name " +
                  StringRef(ExpectedDecisionName));
    return None;
  }
  return Ret;
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Pimpl for TFModelEvaluator: holds the TF session/graph state, set up once
/// per saved model and reused across evaluations.
class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       function_ref<TensorSpec(size_t)> GetOutputSpecs,
                       size_t OutputSpecsSize, const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  // Run the session once. Output must have room for OutputFeed.size()
  // tensor pointers, which the TF runtime populates (caller owns them).
  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  // Allocate (and zero-fill) the input tensor at Index.
  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them accross all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match by index of the corresponding InputFeed
  /// value. We set up the tensors once and just mutate theirs scalars before
  /// each evaluation. The input tensors keep their value after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in the
  /// output tensor vector must match by index the corresponding element in the
  /// OutputFeed.
  std::vector<TF_Output> OutputFeed;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};
|
|
|
|
} // namespace llvm
|
|
|
|
|
|
|
|
/// Load the SavedModel at SavedModelPath and bind every input/output spec to
/// its graph node. Any failure invalidates the evaluator (checked by the
/// caller via isValid()).
TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
    const char *Tags = "serve")
    : Graph(createTFGraph()), Options(createTFSessionOptions()),
      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
      OutputFeed(OutputSpecsSize) {
  if (!ensureInitTF()) {
    errs() << "Tensorflow should have been initialized";
    return;
  }
  auto Status = createTFStatus();

  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
                                         SavedModelPath.str().c_str(), &Tags, 1,
                                         Graph.get(), nullptr, Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    invalidate();
    // Bail out now: resolving feeds against a graph that failed to load
    // would only emit a cascade of secondary "could not find TF_Output"
    // errors that obscure the real failure.
    return;
  }
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    InputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
        InputSpec.port()};
    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
      return;
    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
              InputSpec.shape());
  }
  for (size_t I = 0; I < OutputSpecsSize; ++I) {
    auto OutputSpec = GetOutputSpecs(I);
    OutputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
        OutputSpec.port()};
    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
      return;
  }
}
|
|
|
|
|
2020-11-19 01:16:10 +01:00
|
|
|
/// Construct the evaluator; on any setup failure the impl reports invalid
/// and is dropped, leaving the evaluator in the !isValid() state.
TFModelEvaluator::TFModelEvaluator(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
    const char *Tags)
    // make_unique instead of raw new: exception-safe and consistent with the
    // rest of this file (see evaluate()).
    : Impl(std::make_unique<TFModelEvaluatorImpl>(
          SavedModelPath, InputSpecs, GetOutputSpecs, OutputSpecsSize, Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
// Convenience overload taking the output specs as a vector; delegates to the
// function_ref constructor. The by-reference lambda capture is safe because
// the lambda is only invoked during delegated construction.
TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : TFModelEvaluator(
          SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
          OutputSpecs.size(), Tags) {}
|
2020-07-15 04:32:37 +02:00
|
|
|
|
|
|
|
// Release the input tensors we allocated in initInput, then close the TF
// session (if one was successfully opened).
TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
  for (TF_Tensor *Tensor : Input)
    TF_DeleteTensor(Tensor);
  if (!Session)
    return;
  auto Status = createTFStatus();
  TF_DeleteSession(Session, Status.get());
  Session = nullptr;
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
    errs() << "Could not delete TF session";
}
|
|
|
|
|
2020-07-30 01:29:21 +02:00
|
|
|
bool TFModelEvaluatorImpl::checkReportAndInvalidate(
|
|
|
|
const TF_Output &Output, const TensorSpec &OutputSpec) {
|
2020-07-15 04:32:37 +02:00
|
|
|
if (Output.oper)
|
|
|
|
return true;
|
2020-07-30 01:29:21 +02:00
|
|
|
errs() << "Could not find TF_Output named: " + OutputSpec.name();
|
2020-07-15 04:32:37 +02:00
|
|
|
IsValid = false;
|
|
|
|
return IsValid;
|
|
|
|
}
|
|
|
|
|
2020-07-13 23:12:32 +02:00
|
|
|
// Run one evaluation. Returns None if the evaluator is invalid or the run
// fails; a failed run also resets the impl, invalidating the evaluator.
Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return None;
  auto Results = std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
  auto Status = createTFStatus();
  Impl->evaluate(Results->getOutput().data(), Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    Impl.reset();
    return None;
  }
  return EvaluationResult(std::move(Results));
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
|
|
|
|
const std::vector<int64_t> &Dimensions) {
|
2020-07-13 23:12:32 +02:00
|
|
|
int64_t TotalSize = TF_DataTypeSize(Type);
|
|
|
|
for (auto &D : Dimensions)
|
|
|
|
TotalSize *= D;
|
|
|
|
|
|
|
|
Input[Index] =
|
|
|
|
TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
|
|
|
|
std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
|
2020-07-15 04:32:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void *TFModelEvaluator::getUntypedInput(size_t Index) {
|
|
|
|
return TF_TensorData(Impl->getInput()[Index]);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Take ownership of the result pimpl produced by TFModelEvaluator::evaluate.
TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}
|
|
|
|
|
|
|
|
// Move construction: the source relinquishes its impl (and its tensors).
TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}
|
|
|
|
|
2020-08-05 19:22:45 +02:00
|
|
|
// Move assignment: steal the impl from Other; our previous impl (and the
// tensors it owned) is released.
TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
|
|
|
|
return TF_TensorData(Impl->getOutput()[Index]);
|
|
|
|
}
|
|
|
|
|
2020-08-05 19:22:45 +02:00
|
|
|
const void *
|
|
|
|
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
|
|
|
|
return TF_TensorData(Impl->getOutput()[Index]);
|
|
|
|
}
|
|
|
|
|
2020-08-25 18:58:49 +02:00
|
|
|
// Instantiate TensorSpec::getDataType<T>() for each supported pair of
// (C++ type, TF_DataType enum value) provided by TFUTILS_SUPPORTED_TYPES.
#define TFUTILS_GETDATATYPE_IMPL(T, E)                                         \
  template <> int TensorSpec::getDataType<T>() { return E; }

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL
|
2020-07-15 04:32:37 +02:00
|
|
|
|
|
|
|
// Out-of-line (empty) destructors: defined here, where the pimpl types are
// complete, so unique_ptr can destroy them.
TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}
|
2020-10-03 05:28:49 +02:00
|
|
|
|
|
|
|
// Dump the accumulated raw log as a textual SequenceExample-style protobuf:
// one feature_list per logged feature, plus (optionally) the reward column.
void Logger::print(raw_ostream &OS) {
  // Nothing was logged.
  if (RawLogData.empty())
    return;
  if (RawLogData[0].empty())
    return;
  // Derive the record count from the first feature column's byte size; all
  // columns are expected to have the same number of records.
  size_t Tensor0Size = FeatureSpecs[0].Spec.getElementCount() *
                       FeatureSpecs[0].Spec.getElementByteSize();
  size_t NumberOfRecords = RawLogData[0].size() / Tensor0Size;
  if (NumberOfRecords == 0)
    return;
  size_t RewardSize =
      RewardSpec.getElementCount() * RewardSpec.getElementByteSize();
  size_t NumberOfRewards = RawLogData.back().size() / RewardSize;

  OS << "feature_lists: {\n";
  for (size_t I = 0; I < FeatureSpecs.size(); ++I)
    writeRawTensorsAsFeatureLists(OS, FeatureSpecs[I], RawLogData[I].data(),
                                  NumberOfRecords);

  // A single logged reward value means it's a final reward: the writer pads
  // all but the last record with zeros (FinalReward == true).
  if (IncludeReward)
    writeRawTensorsAsFeatureLists(OS, {RewardSpec, None},
                                  RawLogData.back().data(), NumberOfRecords,
                                  NumberOfRewards == 1);

  OS << "}\n";
}
|
2020-07-21 17:44:47 +02:00
|
|
|
#endif // defined(LLVM_HAVE_TF_API)
|