//===- TFUtils.cpp - tensorflow evaluation utilities ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with tensorflow C APIs.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"

#include <cassert>
#include <cstring>
#include <memory>
#include <numeric>

using namespace llvm;

namespace {

using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
using TFSessionOptionsPtr =
    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
struct TFInitializer {
  TFInitializer() {
    assert(!IsInitialized && "TFInitializer should be constructed only once");
    int Argc = 1;
    const char *Name = "";
    const char **NamePtr = &Name;
    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
    IsInitialized = true;
  }
  bool IsInitialized = false;
};

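// Dereferencing the ManagedStatic lazily constructs the TFInitializer (and
// thus runs TF_InitMain) exactly once, in a thread-safe manner.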
llvm::ManagedStatic<TFInitializer> TFLibInitializer;

bool ensureInitTF() { return TFLibInitializer->IsInitialized; }

TFGraphPtr createTFGraph() {
  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
}

TFStatusPtr createTFStatus() {
  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
}

TFSessionOptionsPtr createTFSessionOptions() {
  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}
} // namespace

namespace llvm {
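/// Holds the output tensors of one evaluation. This object owns the
/// TF_Tensor* buffers that TF_SessionRun populates and deletes them when it
/// goes out of scope, so callers must copy out any values they want to keep.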
class EvaluationResultImpl {
public:
  EvaluationResultImpl(size_t OutputSize)
      : OutputSize(OutputSize), Output(OutputSize) {}

  ~EvaluationResultImpl() {
    for (auto *P : Output)
      if (P)
        TF_DeleteTensor(P);
  }

  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
  std::vector<TF_Tensor *> &getOutput() { return Output; }

private:
  const size_t OutputSize;
  std::vector<TF_Tensor *> Output;
};

size_t TensorSpec::getElementByteSize() const {
  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
}

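// ElementCount is the product of the shape's dimensions; for example, a spec
// with Shape {2, 3} describes 6 elements and hence, via getElementByteSize(),
// 6 * TF_DataTypeSize(<type>) bytes of tensor data.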
TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
                       const std::vector<int64_t> &Shape)
    : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
      ElementCount(std::accumulate(Shape.begin(), Shape.end(), int64_t{1},
                                   std::multiplies<int64_t>())) {}

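/// Construct a TensorSpec from a JSON dictionary. A sketch of the expected
/// shape, assuming an int64 scalar feature (the key names are the ones mapped
/// below; the "type" string must name one of the TFUTILS_SUPPORTED_TYPES):
/// \code
///   {"name": "feature_name", "port": 0, "type": "int64_t", "shape": [1]}
/// \endcode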
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value) {
  auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
    std::string S;
    llvm::raw_string_ostream OS(S);
    OS << Value;
    Ctx.emitError("Unable to parse JSON Value as spec (" + Message +
                  "): " + OS.str());
    return None;
  };
  json::ObjectMapper Mapper(Value);
  if (!Mapper)
    return EmitError("Value is not a dict");

  std::string TensorName;
  int TensorPort = -1;
  std::string TensorType;
  std::vector<int64_t> TensorShape;

  if (!Mapper.map<std::string>("name", TensorName))
    return EmitError("'name' property not present or not a string");
  if (!Mapper.map<std::string>("type", TensorType))
    return EmitError("'type' property not present or not a string");
  if (!Mapper.map<int>("port", TensorPort))
    return EmitError("'port' property not present or not an int");
  if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
    return EmitError("'shape' property not present or not an int array");

#define PARSE_TYPE(T, E)                                                       \
  if (TensorType == #T)                                                        \
    return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
  TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
  return None;
}
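// Example use (a sketch; assumes a JSON string Buffer, an LLVMContext Ctx,
// and the TensorSpec accessors declared in TFUtils.h):
//
//   Expected<json::Value> Parsed = json::parse(Buffer);
//   if (Parsed)
//     if (auto Spec = getTensorSpecFromJSON(Ctx, *Parsed))
//       size_t Bytes = Spec->getElementByteSize() * Spec->getElementCount();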

class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match the corresponding InputFeed values by
  /// index. We set up the tensors once and just mutate their scalars before
  /// each evaluation. The input tensors keep their values after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in
  /// the output tensor vector must match by index the corresponding element
  /// in the OutputFeed.
  std::vector<TF_Output> OutputFeed;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};
} // namespace llvm

TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs, const char *Tags)
    : Graph(createTFGraph()), Options(createTFSessionOptions()),
      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
      OutputFeed(OutputSpecs.size()) {
  if (!ensureInitTF()) {
    errs() << "Tensorflow should have been initialized\n";
    return;
  }
  auto Status = createTFStatus();

  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
                                         SavedModelPath.str().c_str(), &Tags, 1,
                                         Graph.get(), nullptr, Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    invalidate();
    return;
  }
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    InputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
        InputSpec.port()};
    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
      return;
    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
              InputSpec.shape());
  }
  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
    auto &OutputSpec = OutputSpecs[I];
    OutputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
        OutputSpec.port()};
    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
      return;
  }
}

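// Example use of the public TFModelEvaluator API (a sketch, not code from this
// file; it assumes a SavedModel at ModelDir with one int64 input named
// "input_feature" and one float output named "output", both at port 0, and
// uses the typed accessors declared in TFUtils.h):
//
//   std::vector<TensorSpec> InputSpecs{
//       TensorSpec::createSpec<int64_t>("input_feature", {1})};
//   std::vector<TensorSpec> OutputSpecs{
//       TensorSpec::createSpec<float>("output", {1})};
//   TFModelEvaluator Evaluator(ModelDir, InputSpecs, OutputSpecs);
//   if (Evaluator.isValid()) {
//     *Evaluator.getInput<int64_t>(0) = 42;
//     if (auto Result = Evaluator.evaluate())
//       float Score = *Result->getTensorValue<float>(0);
//   }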
TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
                                    Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}

TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
  for (auto *T : Input) {
    TF_DeleteTensor(T);
  }
  if (Session == nullptr)
    return;
  auto Status = createTFStatus();
  TF_DeleteSession(Session, Status.get());
  Session = nullptr;
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
    errs() << "Could not delete TF session\n";
}

bool TFModelEvaluatorImpl::checkReportAndInvalidate(
    const TF_Output &Output, const TensorSpec &OutputSpec) {
  if (Output.oper)
    return true;
  errs() << "Could not find TF_Output named: " + OutputSpec.name() + "\n";
  IsValid = false;
  return IsValid;
}

Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return None;
  std::unique_ptr<EvaluationResultImpl> Ret =
      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
  auto Status = createTFStatus();
  Impl->evaluate(Ret->getOutput().data(), Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    Impl.reset();
    return None;
  }
  return EvaluationResult(std::move(Ret));
}

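// Allocate and zero-initialize a fixed-shape input tensor. As a sketch of the
// size computation: a TF_FLOAT tensor with Dimensions {2, 3} takes
// TF_DataTypeSize(TF_FLOAT) * 2 * 3 = 4 * 6 = 24 bytes.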
void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
                                     const std::vector<int64_t> &Dimensions) {
  int64_t TotalSize = TF_DataTypeSize(Type);
  for (auto &D : Dimensions)
    TotalSize *= D;

  Input[Index] =
      TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
  std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
}

void *TFModelEvaluator::getUntypedInput(size_t Index) {
  return TF_TensorData(Impl->getInput()[Index]);
}

TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}

TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}

void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return TF_TensorData(Impl->getOutput()[Index]);
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return TF_TensorData(Impl->getOutput()[Index]);
}

#define TFUTILS_GETDATATYPE_IMPL(T, E)                                         \
  template <> int TensorSpec::getDataType<T>() { return E; }

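// The macro above expands once per (type, enum) pair in
// TFUTILS_SUPPORTED_TYPES; for example, assuming the list contains
// (float, TF_FLOAT), one instantiation is:
//   template <> int TensorSpec::getDataType<float>() { return TF_FLOAT; }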
TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL

TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}
#endif // defined(LLVM_HAVE_TF_API)