//===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM
// Exceptions. See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with tensorflow C APIs.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"

#include <cassert>
#include <cstring>
#include <functional>
#include <memory>
#include <numeric>

using namespace llvm;

namespace {

using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
using TFSessionOptionsPtr =
    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
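// RAII aliases binding each TF C API object to its matching TF_Delete*
// function. For example (illustrative), a TF_Status created via the
// createTFStatus() helper below is released automatically at end of scope:
//   auto Status = createTFStatus();
//   if (TF_GetCode(Status.get()) != TF_OK)
//     errs() << TF_Message(Status.get());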

struct TFInitializer {
  TFInitializer() {
    assert(!IsInitialized && "TFInitializer should be constructed only once");
    int Argc = 1;
    const char *Name = "";
    const char **NamePtr = &Name;
    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
    IsInitialized = true;
  }
  bool IsInitialized = false;
};

llvm::ManagedStatic<TFInitializer> TFLibInitializer;

bool ensureInitTF() { return TFLibInitializer->IsInitialized; }

TFGraphPtr createTFGraph() {
  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
}

TFStatusPtr createTFStatus() {
  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
}

TFSessionOptionsPtr createTFSessionOptions() {
  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}

/// Write the values of one tensor as a list.
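/// For example, an int64 tensor holding the values {10, 20, 30} is written as
/// "[10, 20, 30]".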
template <typename T>
void writeTensorValues(raw_ostream &OutFile, const char *TensorData,
                       size_t ElemCount) {
  OutFile << "[";
  const T *TypedData = reinterpret_cast<const T *>(TensorData);
  for (size_t I = 0; I < ElemCount; ++I) {
    if (I > 0)
      OutFile << ", ";
    OutFile << TypedData[I];
  }
  OutFile << "]";
}

/// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
/// The tensors are assumed to be stored contiguously, in row-major format,
/// in the TensorData buffer. Each tensor has the shape given by Spec. The
/// feature name in the output is the provided LoggingName, if specified, or
/// otherwise the name of the tensor (as given by Spec).
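/// For illustration (values made up), a 2-element int64 feature logged for
/// two records is rendered roughly as:
///   feature_list: {
///     key: "feature_name" value: {
///       feature: { int64_list: { value: [1, 2] } }
///       feature: { int64_list: { value: [3, 4] } }
///     }
///   }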
void writeRawTensorsAsFeatureLists(raw_ostream &OutFile,
                                   const Logger::LoggedFeatureSpec &LoggedSpec,
                                   const char *TensorData, size_t TensorCount,
                                   bool FinalReward = false) {
  const char *FieldName = "<invalid>";
  std::function<void(const char *)> ValueWriter;
  const auto &Spec = LoggedSpec.Spec;
  // The 'Feature' protobuf only has 3 possible fields: float_list,
  // int64_list, or bytes_list, so we capture int32 values as int64. We don't
  // support any other types.
  if (Spec.isElementType<int64_t>()) {
    FieldName = "int64_list";
    ValueWriter = [&](const char *Data) {
      writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
    };
  } else if (Spec.isElementType<int32_t>()) {
    FieldName = "int64_list";
    ValueWriter = [&](const char *Data) {
      writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
    };
  } else if (Spec.isElementType<float>()) {
    FieldName = "float_list";
    ValueWriter = [&](const char *Data) {
      writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
    };
  } else {
    llvm_unreachable("Unsupported tensor type.");
  }

  OutFile << "  feature_list: {\n";
  OutFile << "    key: "
          << "\""
          << (LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())
          << "\" ";
  OutFile << "value: {\n";
  size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();

  auto WriteFeatureProto = [&](const char *P) {
    OutFile << "      feature: { " << FieldName << ": { value: ";
    ValueWriter(P);
    OutFile << " } }\n";
  };

  const char *CurrentTensor = TensorData;
  static int64_t Zero = 0;
  // Write all but the last record. If this is the final reward, only the
  // last record carries the actual value, so write 0 for the earlier ones
  // and don't advance CurrentTensor.
  for (size_t I = 0; I < TensorCount - 1; ++I) {
    if (FinalReward)
      WriteFeatureProto(reinterpret_cast<const char *>(&Zero));
    else {
      WriteFeatureProto(CurrentTensor);
      CurrentTensor += TensorByteSize;
    }
  }

  WriteFeatureProto(CurrentTensor);

  OutFile << "    }\n";
  OutFile << "  }\n";
}
} // namespace

namespace llvm {
class EvaluationResultImpl {
public:
  EvaluationResultImpl(size_t OutputSize)
      : OutputSize(OutputSize), Output(OutputSize) {}

  ~EvaluationResultImpl() {
    for (auto *P : Output)
      if (P)
        TF_DeleteTensor(P);
  }

  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
  std::vector<TF_Tensor *> &getOutput() { return Output; }

private:
  const size_t OutputSize;
  std::vector<TF_Tensor *> Output;
};

size_t TensorSpec::getElementByteSize() const {
  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
}

TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
                       const std::vector<int64_t> &Shape)
    : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
      ElementCount(std::accumulate(Shape.begin(), Shape.end(), int64_t{1},
                                   std::multiplies<int64_t>())) {}

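/// Parse a TensorSpec from a JSON dictionary of the form
///   {"name": <string>, "port": <int>, "type": <string>, "shape": <int list>}
/// where "type" is the textual name of one of the TFUTILS_SUPPORTED_TYPES.
/// An illustrative (made-up) example:
///   {"name": "feature_0", "port": 0, "type": "int64_t", "shape": [2]}
/// Returns None on failure.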
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value) {
  auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
    std::string S;
    llvm::raw_string_ostream OS(S);
    OS << Value;
    Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
    return None;
  };
  // FIXME: accept a Path as a parameter, and use it for error reporting.
  json::Path::Root Root("tensor_spec");
  json::ObjectMapper Mapper(Value, Root);
  if (!Mapper)
    return EmitError("Value is not a dict");

  std::string TensorName;
  int TensorPort = -1;
  std::string TensorType;
  std::vector<int64_t> TensorShape;

  if (!Mapper.map<std::string>("name", TensorName))
    return EmitError("'name' property not present or not a string");
  if (!Mapper.map<std::string>("type", TensorType))
    return EmitError("'type' property not present or not a string");
  if (!Mapper.map<int>("port", TensorPort))
    return EmitError("'port' property not present or not an int");
  if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
    return EmitError("'shape' property not present or not an int array");

#define PARSE_TYPE(T, E)                                                       \
  if (TensorType == #T)                                                        \
    return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
  TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
  return None;
}

class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match the corresponding InputFeed value by
  /// index. We set up the tensors once and just mutate their scalars before
  /// each evaluation. The input tensors keep their value after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in
  /// the output tensor vector must match by index the corresponding element in
  /// the OutputFeed.
  std::vector<TF_Output> OutputFeed;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};
} // namespace llvm

TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs, const char *Tags)
    : Graph(createTFGraph()), Options(createTFSessionOptions()),
      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
      OutputFeed(OutputSpecs.size()) {
  if (!ensureInitTF()) {
    errs() << "TensorFlow should have been initialized\n";
    return;
  }
  auto Status = createTFStatus();

  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
                                         SavedModelPath.str().c_str(), &Tags, 1,
                                         Graph.get(), nullptr, Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    invalidate();
    return;
  }
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    InputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
        InputSpec.port()};
    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
      return;
    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
              InputSpec.shape());
  }
  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
    auto &OutputSpec = OutputSpecs[I];
    OutputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
        OutputSpec.port()};
    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
      return;
  }
}

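// Example usage (an illustrative sketch only; the model path, node names,
// shapes, and the "serve" tag below are assumptions about a particular
// SavedModel, not requirements of this API):
//   std::vector<TensorSpec> InputSpecs{
//       TensorSpec::createSpec<int64_t>("input", {1}, /*Port=*/0)};
//   std::vector<TensorSpec> OutputSpecs{
//       TensorSpec::createSpec<float>("output", {1}, /*Port=*/0)};
//   TFModelEvaluator Evaluator("path/to/saved_model", InputSpecs, OutputSpecs,
//                              "serve");
//   if (Evaluator.isValid()) {
//     *reinterpret_cast<int64_t *>(Evaluator.getUntypedInput(0)) = 42;
//     if (auto Result = Evaluator.evaluate())
//       errs() << *reinterpret_cast<float *>(
//           Result->getUntypedTensorValue(0));
//   }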
TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
                                    Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}

TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
  for (auto *T : Input) {
    TF_DeleteTensor(T);
  }
  if (Session == nullptr)
    return;
  auto Status = createTFStatus();
  TF_DeleteSession(Session, Status.get());
  Session = nullptr;
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
    errs() << "Could not delete TF session\n";
}

bool TFModelEvaluatorImpl::checkReportAndInvalidate(
    const TF_Output &Output, const TensorSpec &OutputSpec) {
  if (Output.oper)
    return true;
  errs() << "Could not find TF_Output named: " + OutputSpec.name() + "\n";
  IsValid = false;
  return IsValid;
}

Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return None;
  std::unique_ptr<EvaluationResultImpl> Ret =
      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
  auto Status = createTFStatus();
  Impl->evaluate(Ret->getOutput().data(), Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    Impl.reset();
    return None;
  }
  return EvaluationResult(std::move(Ret));
}

void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
                                     const std::vector<int64_t> &Dimensions) {
  int64_t TotalSize = TF_DataTypeSize(Type);
  for (auto &D : Dimensions)
    TotalSize *= D;
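  // E.g. a TF_INT64 tensor with Dimensions {2, 3} needs 8 * 2 * 3 = 48 bytes.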

  Input[Index] =
      TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
  std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
}

void *TFModelEvaluator::getUntypedInput(size_t Index) {
  return TF_TensorData(Impl->getInput()[Index]);
}

TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}

TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}

void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return TF_TensorData(Impl->getOutput()[Index]);
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return TF_TensorData(Impl->getOutput()[Index]);
}

#define TFUTILS_GETDATATYPE_IMPL(T, E)                                         \
  template <> int TensorSpec::getDataType<T>() { return E; }

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL

TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}

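// Print the log as a text-form SequenceExample-style protobuf: one
// feature_list entry per logged feature (plus one for the reward, when
// IncludeReward is set), all wrapped in a single "feature_lists" block. See
// the illustration above writeRawTensorsAsFeatureLists for the shape of each
// entry.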
void Logger::print(raw_ostream &OS) {
  if (RawLogData.empty())
    return;
  if (RawLogData[0].empty())
    return;
  size_t Tensor0Size = FeatureSpecs[0].Spec.getElementCount() *
                       FeatureSpecs[0].Spec.getElementByteSize();
  size_t NumberOfRecords = RawLogData[0].size() / Tensor0Size;
  if (NumberOfRecords == 0)
    return;
  size_t RewardSize =
      RewardSpec.getElementCount() * RewardSpec.getElementByteSize();
  size_t NumberOfRewards = RawLogData.back().size() / RewardSize;

  OS << "feature_lists: {\n";
  for (size_t I = 0; I < FeatureSpecs.size(); ++I)
    writeRawTensorsAsFeatureLists(OS, FeatureSpecs[I], RawLogData[I].data(),
                                  NumberOfRecords);

  if (IncludeReward)
    writeRawTensorsAsFeatureLists(OS, {RewardSpec, None},
                                  RawLogData.back().data(), NumberOfRecords,
                                  NumberOfRewards == 1);

  OS << "}\n";
}
#endif // defined(LLVM_HAVE_TF_API)