//===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with tensorflow C APIs.
//
//===----------------------------------------------------------------------===//
|
2020-07-21 17:44:47 +02:00
|
|
|
#include "llvm/Config/config.h"
|
|
|
|
#if defined(LLVM_HAVE_TF_API)
|
2020-07-13 23:12:32 +02:00
|
|
|
|
|
|
|
#include "llvm/ADT/Twine.h"
|
2020-07-30 21:44:07 +02:00
|
|
|
#include "llvm/Analysis/Utils/TFUtils.h"
|
2021-07-15 00:03:14 +02:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "llvm/Support/Debug.h"
|
2020-07-30 21:44:07 +02:00
|
|
|
#include "llvm/Support/JSON.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "llvm/Support/ManagedStatic.h"
|
2020-11-19 01:16:10 +01:00
|
|
|
#include "llvm/Support/MemoryBuffer.h"
|
2020-11-19 05:54:04 +01:00
|
|
|
#include "llvm/Support/Path.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
2021-07-15 00:03:14 +02:00
|
|
|
#include "google/protobuf/text_format.h"
|
2020-07-15 04:32:37 +02:00
|
|
|
#include "tensorflow/c/c_api.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include "tensorflow/c/c_api_experimental.h"
|
2021-07-15 00:03:14 +02:00
|
|
|
#include "tensorflow/core/example/example.pb.h"
|
2020-07-13 23:12:32 +02:00
|
|
|
#include <cassert>
|
2020-08-05 00:00:35 +02:00
|
|
|
#include <numeric>
|
2020-07-13 23:12:32 +02:00
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2021-07-22 21:45:32 +02:00
|
|
|
using google::protobuf::Message;
|
|
|
|
using google::protobuf::TextFormat;
|
|
|
|
|
2021-07-15 00:03:14 +02:00
|
|
|
// When set, Logger::print emits the protobuf in its human-readable text
// representation instead of the (default) binary wire format.
static cl::opt<bool>
    ProtobufTextMode("tfutils-text-log", cl::init(false), cl::Hidden,
                     cl::desc("Output textual (human-readable) protobuf."));
|
|
|
|
|
2020-07-13 23:12:32 +02:00
|
|
|
namespace {

// Owning wrappers over the TF C API handle types: std::unique_ptr with the
// matching TF_Delete* C function as a custom deleter.
using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
using TFSessionOptionsPtr =
    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
|
|
|
|
|
2020-07-13 23:12:32 +02:00
|
|
|
// One-time, process-wide initialization of the tensorflow library; intended
// to be constructed exactly once, behind the ManagedStatic below.
struct TFInitializer {
  TFInitializer() {
    assert(!IsInitialized && "TFInitialized should be called only once");
    // TF_InitMain expects argc/argv-style arguments; pass an empty program
    // name, since we have no flags to forward.
    int Argc = 1;
    const char *Name = "";
    const char **NamePtr = &Name;
    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
    IsInitialized = true;
  }
  bool IsInitialized = false;
};

// Lazily constructed on first use, so TF_InitMain only runs if/when needed.
llvm::ManagedStatic<TFInitializer> TFLibInitializer;

// Triggers (on first call) the one-time TF library initialization and reports
// whether it completed.
bool ensureInitTF() { return TFLibInitializer->IsInitialized; }
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
// Factory helpers pairing each TF C API constructor with its deleter in an
// owning pointer.
TFGraphPtr createTFGraph() {
  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
}

TFStatusPtr createTFStatus() {
  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
}

TFSessionOptionsPtr createTFSessionOptions() {
  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}
} // namespace
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
namespace llvm {
|
|
|
|
class EvaluationResultImpl {
|
|
|
|
public:
|
|
|
|
EvaluationResultImpl(size_t OutputSize)
|
|
|
|
: OutputSize(OutputSize), Output(OutputSize){};
|
|
|
|
|
|
|
|
~EvaluationResultImpl() {
|
|
|
|
for (auto *P : Output)
|
|
|
|
if (P)
|
|
|
|
TF_DeleteTensor(P);
|
|
|
|
}
|
|
|
|
|
|
|
|
EvaluationResultImpl(const EvaluationResultImpl &) = delete;
|
|
|
|
EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
|
|
|
|
std::vector<TF_Tensor *> &getOutput() { return Output; }
|
|
|
|
|
|
|
|
private:
|
|
|
|
const size_t OutputSize;
|
|
|
|
std::vector<TF_Tensor *> Output;
|
|
|
|
};
|
|
|
|
|
2020-08-05 00:00:35 +02:00
|
|
|
/// Returns the size, in bytes, of one element of this tensor, as reported by
/// the TF runtime for the wrapped TF_DataType.
size_t TensorSpec::getElementByteSize() const {
  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
}
|
|
|
|
|
|
|
|
TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
|
|
|
|
const std::vector<int64_t> &Shape)
|
|
|
|
: Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
|
|
|
|
ElementCount(std::accumulate(Shape.begin(), Shape.end(), 1,
|
|
|
|
std::multiplies<int64_t>())) {}
|
|
|
|
|
2020-07-30 21:44:07 +02:00
|
|
|
/// Deserializes a TensorSpec from a JSON dictionary of the form
/// {"name": <string>, "port": <int>, "type": <string>, "shape": <int array>}.
/// Emits a diagnostic on Ctx and returns None for malformed input.
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value) {
  // Shared error path: report the message together with the offending JSON.
  auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
    std::string S;
    llvm::raw_string_ostream OS(S);
    OS << Value;
    Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
    return None;
  };
  // FIXME: accept a Path as a parameter, and use it for error reporting.
  json::Path::Root Root("tensor_spec");
  json::ObjectMapper Mapper(Value, Root);
  if (!Mapper)
    return EmitError("Value is not a dict");

  std::string TensorName;
  int TensorPort = -1;
  std::string TensorType;
  std::vector<int64_t> TensorShape;

  if (!Mapper.map<std::string>("name", TensorName))
    return EmitError("'name' property not present or not a string");
  if (!Mapper.map<std::string>("type", TensorType))
    return EmitError("'type' property not present or not a string");
  if (!Mapper.map<int>("port", TensorPort))
    return EmitError("'port' property not present or not an int");
  if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
    return EmitError("'shape' property not present or not an int array");

  // Expands to one `if` per supported element type; the first matching type
  // name wins and produces the spec.
#define PARSE_TYPE(T, E)                                                       \
  if (TensorType == #T)                                                        \
    return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
  TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
  // Unrecognized "type" string: no diagnostic is emitted here, the caller
  // only sees None.
  return None;
}
|
|
|
|
|
2020-11-19 05:54:04 +01:00
|
|
|
/// Loads the output specs (the tensors the model produces) from a JSON file:
/// either SpecFileOverride when non-empty, or `output_spec.json` under
/// ModelPath. The expected format is an array of
/// {"tensor_spec": <TensorSpec>, "logging_name": <string>} dictionaries; the
/// first entry must describe the decision tensor and carry
/// ExpectedDecisionName as its logging_name.
/// Emits a diagnostic on Ctx and returns None on any error.
Optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride) {
  SmallVector<char, 128> OutputSpecsPath;
  StringRef FileName = SpecFileOverride;
  if (FileName.empty()) {
    // Default location: <ModelPath>/output_spec.json.
    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
    FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
  }

  auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
  if (!BufferOrError) {
    Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                  BufferOrError.getError().message());
    return None;
  }
  auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
  if (!ParsedJSONValues) {
    Ctx.emitError("Could not parse specs file: " + FileName);
    return None;
  }
  auto ValuesArray = ParsedJSONValues->getAsArray();
  if (!ValuesArray) {
    Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                  "logging_name:<name>} dictionaries");
    return None;
  }
  std::vector<LoggedFeatureSpec> Ret;
  // Collect only the well-formed entries. A size mismatch against the input
  // array (checked after the loop) means at least one entry was malformed.
  for (const auto &Value : *ValuesArray)
    if (const auto *Obj = Value.getAsObject())
      if (const auto *SpecPart = Obj->get("tensor_spec"))
        if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
          if (auto LoggingName = Obj->getString("logging_name")) {
            if (!TensorSpec->isElementType<int64_t>() &&
                !TensorSpec->isElementType<int32_t>() &&
                !TensorSpec->isElementType<float>()) {
              Ctx.emitError(
                  "Only int64, int32, and float tensors are supported. "
                  "Found unsupported type for tensor named " +
                  TensorSpec->name());
              return None;
            }
            Ret.push_back({*TensorSpec, LoggingName->str()});
          }

  if (ValuesArray->size() != Ret.size()) {
    Ctx.emitError(
        "Unable to parse output spec. It should be a json file containing an "
        "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
        "with a json object describing a TensorSpec; and a 'logging_name' key, "
        "which is a string to use as name when logging this tensor in the "
        "training log.");
    return None;
  }
  if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
    Ctx.emitError("The first output spec must describe the decision tensor, "
                  "and must have the logging_name " +
                  StringRef(ExpectedDecisionName));
    return None;
  }
  return Ret;
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Pimpl implementation for TFModelEvaluator: owns the TF session, the graph
/// loaded from the SavedModel, and the pre-allocated input tensors.
class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       function_ref<TensorSpec(size_t)> GetOutputSpecs,
                       size_t OutputSpecsSize, const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  /// Runs the session: reads the pre-bound input tensors, writes the produced
  /// tensors into Output (which must have OutputSize() slots).
  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  /// Allocates and zero-initializes the input tensor at Index, with the given
  /// element type and shape.
  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match by index of the corresponding InputFeed
  /// value. We set up the tensors once and just mutate their scalars before
  /// each evaluation. The input tensors keep their value after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in the
  /// output tensor vector must match by index the corresponding element in the
  /// OutputFeed.
  std::vector<TF_Output> OutputFeed;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};
|
2021-07-22 21:45:32 +02:00
|
|
|
|
|
|
|
/// Accumulates the logged feature tensors and rewards into a
/// tensorflow::SequenceExample protobuf, and serializes it (binary wire
/// format, or textual under -tfutils-text-log).
class LoggerDataImpl {
  const std::vector<LoggedFeatureSpec> LoggedFeatureSpecs;
  const TensorSpec RewardSpec;

  tensorflow::SequenceExample SE;
  // Cached pointers into SE's feature_list map, parallel (by index) to
  // LoggedFeatureSpecs.
  std::vector<tensorflow::FeatureList *> FeatureLists;
  tensorflow::FeatureList *Reward = nullptr; // Null when reward is excluded.

public:
  LoggerDataImpl(const std::vector<LoggedFeatureSpec> &LoggedSpecs,
                 const TensorSpec &RewardSpec, bool IncludeReward)
      : LoggedFeatureSpecs(LoggedSpecs), RewardSpec(RewardSpec) {
    auto *FL = SE.mutable_feature_lists()->mutable_feature_list();
    if (IncludeReward)
      Reward = &(*FL)[RewardSpec.name()];
    // Allocate first the map entries, then capture their address. We will not
    // mutate the set of features after this (i.e. the pointers won't dangle).
    for (const auto &LFS : LoggedSpecs) {
      (*FL)[LFS.LoggingName ? *LFS.LoggingName : LFS.Spec.name()] = {};
    }
    for (const auto &LFS : LoggedSpecs)
      FeatureLists.push_back(
          &(*FL)[LFS.LoggingName ? *LFS.LoggingName : LFS.Spec.name()]);
  }

  /// Serializes everything logged so far to OS.
  void print(raw_ostream &OS) {
    std::string OutStr;
    if (ProtobufTextMode)
      google::protobuf::TextFormat::PrintToString(SE, &OutStr);
    else
      OutStr = SE.SerializeAsString();

    OS << OutStr;
  }

  /// Appends a zero-initialized record for FeatureID and returns the start of
  /// its raw storage. Note: int32 features are stored widened, in the int64
  /// list representation.
  char *addNewTensor(size_t FeatureID) {
    const auto &Spec = LoggedFeatureSpecs[FeatureID].Spec;
    if (Spec.isElementType<float>()) {
      auto *RF = FeatureLists[FeatureID]
                     ->add_feature()
                     ->mutable_float_list()
                     ->mutable_value();
      RF->Resize(Spec.getElementCount(), 0.0);
      return reinterpret_cast<char *>(RF->mutable_data());
    } else if (Spec.isElementType<int32_t>() || Spec.isElementType<int64_t>()) {
      auto *RF = FeatureLists[FeatureID]
                     ->add_feature()
                     ->mutable_int64_list()
                     ->mutable_value();
      RF->Resize(Spec.getElementCount(), 0);
      return reinterpret_cast<char *>(RF->mutable_data());
    }
    llvm_unreachable("Unsupported tensor type.");
  }

  /// Appends one reward value, dispatching on RewardSpec's element type.
  /// Only valid when the logger was constructed with IncludeReward.
  template <typename T> void logReward(T Value) {
    if (RewardSpec.isElementType<float>())
      Reward->add_feature()->mutable_float_list()->add_value(Value);
    else if (RewardSpec.isElementType<int32_t>() ||
             RewardSpec.isElementType<int64_t>())
      Reward->add_feature()->mutable_int64_list()->add_value(Value);
    else
      llvm_unreachable("Unsupported tensor type.");
  }

  /// Number of records logged so far (the feature lists advance in lockstep,
  /// so the first one is representative).
  size_t getNrRecords() const {
    return FeatureLists.empty() ? 0 : FeatureLists[0]->feature().size();
  }
};
|
2020-07-15 04:32:37 +02:00
|
|
|
} // namespace llvm
|
|
|
|
|
|
|
|
TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
    const char *Tags = "serve")
    : Graph(createTFGraph()), Options(createTFSessionOptions()),
      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
      OutputFeed(OutputSpecsSize) {
  if (!ensureInitTF()) {
    errs() << "Tensorflow should have been initialized";
    return;
  }
  auto Status = createTFStatus();

  // Load the SavedModel; on failure, report and mark the evaluator invalid.
  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
                                         SavedModelPath.str().c_str(), &Tags, 1,
                                         Graph.get(), nullptr, Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    invalidate();
  }
  // Bind each input spec to its graph node and pre-allocate the corresponding
  // input tensor; binding failure invalidates the evaluator.
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    InputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
        InputSpec.port()};
    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
      return;
    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
              InputSpec.shape());
  }
  // Bind each output spec to its graph node. Output tensors themselves are
  // produced per-evaluation, so none are allocated here.
  for (size_t I = 0; I < OutputSpecsSize; ++I) {
    auto OutputSpec = GetOutputSpecs(I);
    OutputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
        OutputSpec.port()};
    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
      return;
  }
}
|
|
|
|
|
2020-11-19 01:16:10 +01:00
|
|
|
TFModelEvaluator::TFModelEvaluator(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
    const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
                                    OutputSpecsSize, Tags)) {
  // A null Impl is how isValid() reports construction failure to callers.
  if (!Impl->isValid())
    Impl.reset();
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Convenience overload: takes the output specs as a materialized vector and
/// delegates to the function_ref-based constructor.
TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : TFModelEvaluator(
          SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
          OutputSpecs.size(), Tags) {}
|
2020-07-15 04:32:37 +02:00
|
|
|
|
|
|
|
TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
  // Release the pre-allocated input tensors.
  for (auto *InputTensor : Input)
    TF_DeleteTensor(InputTensor);
  // The session may never have been created, if loading the model failed.
  if (!Session)
    return;
  auto Status = createTFStatus();
  TF_DeleteSession(Session, Status.get());
  Session = nullptr;
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
    errs() << "Could not delete TF session";
}
|
|
|
|
|
2020-07-30 01:29:21 +02:00
|
|
|
bool TFModelEvaluatorImpl::checkReportAndInvalidate(
|
|
|
|
const TF_Output &Output, const TensorSpec &OutputSpec) {
|
2020-07-15 04:32:37 +02:00
|
|
|
if (Output.oper)
|
|
|
|
return true;
|
2020-07-30 01:29:21 +02:00
|
|
|
errs() << "Could not find TF_Output named: " + OutputSpec.name();
|
2020-07-15 04:32:37 +02:00
|
|
|
IsValid = false;
|
|
|
|
return IsValid;
|
|
|
|
}
|
|
|
|
|
2020-07-13 23:12:32 +02:00
|
|
|
/// Runs the session over the current input tensors. Returns None — and resets
/// Impl, permanently invalidating the evaluator — if the TF run fails.
Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return None;
  std::unique_ptr<EvaluationResultImpl> Ret =
      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
  auto Status = createTFStatus();
  Impl->evaluate(Ret->getOutput().data(), Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    // A failed run invalidates the evaluator for all subsequent calls.
    Impl.reset();
    return None;
  }
  return EvaluationResult(std::move(Ret));
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
|
|
|
|
const std::vector<int64_t> &Dimensions) {
|
2020-07-13 23:12:32 +02:00
|
|
|
int64_t TotalSize = TF_DataTypeSize(Type);
|
|
|
|
for (auto &D : Dimensions)
|
|
|
|
TotalSize *= D;
|
|
|
|
|
|
|
|
Input[Index] =
|
|
|
|
TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
|
|
|
|
std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
|
2020-07-15 04:32:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Exposes the raw buffer of the Index-th input tensor, for callers to fill
// in before evaluate().
void *TFModelEvaluator::getUntypedInput(size_t Index) {
  TF_Tensor *InputTensor = Impl->getInput()[Index];
  return TF_TensorData(InputTensor);
}
|
|
|
|
|
|
|
|
// Takes ownership of the impl (and hence of the output tensors).
TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}
|
|
|
|
|
2020-08-05 19:22:45 +02:00
|
|
|
TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  // NOTE(review): no self-move guard; Other.Impl is left null after the move.
  Impl = std::move(Other.Impl);
  return *this;
}
|
|
|
|
|
2020-07-15 04:32:37 +02:00
|
|
|
/// Returns a raw pointer into the Index-th output tensor's data buffer.
void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return TF_TensorData(Impl->getOutput()[Index]);
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return TF_TensorData(Impl->getOutput()[Index]);
}
|
|
|
|
|
2020-08-25 18:58:49 +02:00
|
|
|
// Instantiates TensorSpec::getDataType<T>() for every supported element type,
// mapping each C++ type T to its TF_DataType enum value E.
#define TFUTILS_GETDATATYPE_IMPL(T, E)                                         \
  template <> int TensorSpec::getDataType<T>() { return E; }

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL
|
2020-07-15 04:32:37 +02:00
|
|
|
|
|
|
|
// Out-of-line and defaulted (rather than `{}`): these must be defined here,
// where EvaluationResultImpl / TFModelEvaluatorImpl are complete types, so
// the unique_ptr members can be destroyed.
TFModelEvaluator::EvaluationResult::~EvaluationResult() = default;
TFModelEvaluator::~TFModelEvaluator() = default;
|
2020-10-03 05:28:49 +02:00
|
|
|
|
2021-07-22 21:45:32 +02:00
|
|
|
Logger::Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
               const TensorSpec &RewardSpec, bool IncludeReward)
    : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
      IncludeReward(IncludeReward),
      LoggerData(std::make_unique<LoggerDataImpl>(FeatureSpecs, RewardSpec,
                                                  IncludeReward)) {}
|
2021-07-15 00:03:14 +02:00
|
|
|
|
2021-07-22 21:45:32 +02:00
|
|
|
// Out-of-line and defaulted: LoggerDataImpl is complete here, so the
// unique_ptr<LoggerDataImpl> member can be destroyed.
Logger::~Logger() = default;
|
|
|
|
|
|
|
|
// Generates Logger::log{Float,Int32,Int64}Reward, each forwarding to the
// type-dispatching LoggerDataImpl::logReward. Only legal when the logger was
// constructed with IncludeReward.
#define LOG_REWARD(NAME, TYPE)                                                 \
  void Logger::log##NAME##Reward(TYPE Value) {                                 \
    assert(IncludeReward);                                                     \
    LoggerData->logReward(Value);                                              \
  }

LOG_REWARD(Float, float)
LOG_REWARD(Int32, int32_t)
LOG_REWARD(Int64, int64_t)
#undef LOG_REWARD
|
|
|
|
|
|
|
|
// Generates Logger::log{Float,Int32,Int64}FinalReward: pads with zero rewards
// so each already-logged record has one reward entry, then logs Value as the
// last (final) reward.
#define LOG_FINAL_REWARD(NAME, TYPE)                                           \
  void Logger::log##NAME##FinalReward(TYPE Value) {                            \
    assert(RewardSpec.isElementType<TYPE>());                                  \
    for (size_t I = 1; I < LoggerData->getNrRecords(); ++I)                    \
      log##NAME##Reward(0);                                                    \
    log##NAME##Reward(Value);                                                  \
  }

LOG_FINAL_REWARD(Float, float)
LOG_FINAL_REWARD(Int32, int32_t)
LOG_FINAL_REWARD(Int64, int64_t)
#undef LOG_FINAL_REWARD
|
|
|
|
|
|
|
|
// Typed logging entry points: each asserts that the feature's declared
// element type matches, then forwards the raw bytes to
// logSpecifiedTensorValue.
void Logger::logFloatValue(size_t FeatureID, const float *Value) {
  assert(FeatureSpecs[FeatureID].Spec.isElementType<float>());
  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
}

void Logger::logInt64Value(size_t FeatureID, const int64_t *Value) {
  assert(FeatureSpecs[FeatureID].Spec.isElementType<int64_t>());
  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
}

void Logger::logInt32Value(size_t FeatureID, const int32_t *Value) {
  assert(FeatureSpecs[FeatureID].Spec.isElementType<int32_t>());
  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
}
|
2021-07-22 21:45:32 +02:00
|
|
|
|
|
|
|
/// Copies one tensor's worth of raw data into a freshly-added log record.
/// int32 elements are widened element-by-element into the record's int64
/// storage; int64 and float are copied verbatim.
void Logger::logSpecifiedTensorValue(size_t FeatureID, const char *RawData) {
  const auto &Spec = FeatureSpecs[FeatureID].Spec;
  char *Buff = addEntryAndGetFloatOrInt64Buffer(FeatureID);
  if (Spec.isElementType<int32_t>())
    for (size_t I = 0; I < Spec.getElementCount(); ++I)
      (reinterpret_cast<int64_t *>(Buff))[I] =
          static_cast<int64_t>((reinterpret_cast<const int32_t *>(RawData))[I]);
  else if (Spec.isElementType<int64_t>() || Spec.isElementType<float>())
    std::memcpy(Buff, RawData,
                Spec.getElementCount() * Spec.getElementByteSize());
  else
    llvm_unreachable("Unsupported tensor type");
}
|
|
|
|
|
|
|
|
char *Logger::addEntryAndGetFloatOrInt64Buffer(size_t FeatureID) {
|
|
|
|
return reinterpret_cast<char *>(LoggerData->addNewTensor(FeatureID));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Serializes all logged data to OS; see LoggerDataImpl::print for the format.
void Logger::print(raw_ostream &OS) { LoggerData->print(OS); }
|
2020-07-21 17:44:47 +02:00
|
|
|
#endif // defined(LLVM_HAVE_TF_API)
|