//===- TFUtils.cpp - tensorflow evaluation utilities ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with the TensorFlow C API.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"

#include <cassert>
#include <cstring>
#include <numeric>

using namespace llvm;

namespace {

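// Smart-pointer aliases that pair each TF_* object with its matching
// TF_Delete* function via unique_ptr's custom deleter, so the TensorFlow C
// API objects are released automatically on scope exit.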
using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
using TFSessionOptionsPtr =
    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;

struct TFInitializer {
  TFInitializer() {
    assert(!IsInitialized && "TFInitializer should be constructed only once");
    int Argc = 1;
    const char *Name = "";
    const char **NamePtr = &Name;
    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
    IsInitialized = true;
  }
  bool IsInitialized = false;
};

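// TFLibInitializer is lazily constructed by ManagedStatic on first
// dereference, so TF_InitMain runs at most once, the first time ensureInitTF
// is called.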
llvm::ManagedStatic<TFInitializer> TFLibInitializer;

bool ensureInitTF() { return TFLibInitializer->IsInitialized; }

TFGraphPtr createTFGraph() {
  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
}

TFStatusPtr createTFStatus() {
  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
}

TFSessionOptionsPtr createTFSessionOptions() {
  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}
} // namespace

namespace llvm {
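/// Wraps the raw output tensors produced by one evaluation; owns them and
/// deletes them when the result object is destroyed.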
class EvaluationResultImpl {
public:
  EvaluationResultImpl(size_t OutputSize)
      : OutputSize(OutputSize), Output(OutputSize) {}

  ~EvaluationResultImpl() {
    for (auto *P : Output)
      if (P)
        TF_DeleteTensor(P);
  }

  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
  std::vector<TF_Tensor *> &getOutput() { return Output; }

private:
  const size_t OutputSize;
  std::vector<TF_Tensor *> Output;
};

size_t TensorSpec::getElementByteSize() const {
  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
}

TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
                       const std::vector<int64_t> &Shape)
    : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
      ElementCount(std::accumulate(Shape.begin(), Shape.end(), int64_t{1},
                                   std::multiplies<int64_t>())) {}

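// Expects JSON of the following shape (an illustrative example; "type" must
// spell one of the TFUTILS_SUPPORTED_TYPES as a C++ type name):
//   {"name": "feature_name", "port": 0, "type": "int64_t", "shape": [2, 3]}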
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value) {
  auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
    std::string S;
    llvm::raw_string_ostream OS(S);
    OS << Value;
    Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
    return None;
  };
  // FIXME: accept a Path as a parameter, and use it for error reporting.
  json::Path::Root Root("tensor_spec");
  json::ObjectMapper Mapper(Value, Root);
  if (!Mapper)
    return EmitError("Value is not a dict");

  std::string TensorName;
  int TensorPort = -1;
  std::string TensorType;
  std::vector<int64_t> TensorShape;

  if (!Mapper.map<std::string>("name", TensorName))
    return EmitError("'name' property not present or not a string");
  if (!Mapper.map<std::string>("type", TensorType))
    return EmitError("'type' property not present or not a string");
  if (!Mapper.map<int>("port", TensorPort))
    return EmitError("'port' property not present or not an int");
  if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
    return EmitError("'shape' property not present or not an int array");

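// For each (T, E) pair in TFUTILS_SUPPORTED_TYPES, PARSE_TYPE expands to a
// comparison against the stringified C++ type name. For a hypothetical
// (float, TF_FLOAT) entry this becomes:
//   if (TensorType == "float")
//     return TensorSpec::createSpec<float>(TensorName, TensorShape, TensorPort);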
#define PARSE_TYPE(T, E)                                                       \
  if (TensorType == #T)                                                        \
    return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
  TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
  return None;
}

class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match, by index, the corresponding InputFeed
  /// value. We set up the tensors once and just mutate their scalars before
  /// each evaluation. The input tensors keep their value after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in
  /// the output tensor vector must match, by index, the corresponding element
  /// in OutputFeed.
  std::vector<TF_Output> OutputFeed;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};
} // namespace llvm

TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs, const char *Tags)
    : Graph(createTFGraph()), Options(createTFSessionOptions()),
      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
      OutputFeed(OutputSpecs.size()) {
  if (!ensureInitTF()) {
    errs() << "TensorFlow should have been initialized\n";
    return;
  }
  auto Status = createTFStatus();

  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
                                         SavedModelPath.str().c_str(), &Tags, 1,
                                         Graph.get(), nullptr, Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    invalidate();
    return;
  }
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    InputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), InputSpec.name().c_str()),
        InputSpec.port()};
    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
      return;
    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
              InputSpec.shape());
  }
  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
    auto &OutputSpec = OutputSpecs[I];
    OutputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), OutputSpec.name().c_str()),
        OutputSpec.port()};
    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
      return;
  }
}

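// Illustrative usage sketch; the model path, tensor names, and shapes below
// are hypothetical placeholders ("serve" is the conventional SavedModel tag):
//   std::vector<TensorSpec> Inputs{
//       TensorSpec::createSpec<int64_t>("input_feature", {1}, 0)};
//   std::vector<TensorSpec> Outputs{
//       TensorSpec::createSpec<float>("output_value", {1}, 0)};
//   TFModelEvaluator Evaluator("path/to/saved_model", Inputs, Outputs, "serve");
//   if (Evaluator.isValid()) {
//     *static_cast<int64_t *>(Evaluator.getUntypedInput(0)) = 42;
//     if (auto Result = Evaluator.evaluate())
//       float V = *static_cast<float *>(Result->getUntypedTensorValue(0));
//   }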
TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
                                    Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}

TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
  for (auto *T : Input)
    TF_DeleteTensor(T);
  if (Session == nullptr)
    return;
  auto Status = createTFStatus();
  TF_DeleteSession(Session, Status.get());
  Session = nullptr;
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
    errs() << "Could not delete TF session\n";
}

bool TFModelEvaluatorImpl::checkReportAndInvalidate(
    const TF_Output &Output, const TensorSpec &OutputSpec) {
  if (Output.oper)
    return true;
  errs() << "Could not find TF_Output named: " << OutputSpec.name() << "\n";
  IsValid = false;
  return false;
}

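// On a failed TF_SessionRun, evaluate() below discards the implementation
// object, so isValid() becomes false and subsequent calls return None.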
Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return None;
  std::unique_ptr<EvaluationResultImpl> Ret =
      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
  auto Status = createTFStatus();
  Impl->evaluate(Ret->getOutput().data(), Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    Impl.reset();
    return None;
  }
  return EvaluationResult(std::move(Ret));
}

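// Allocates and zero-fills one input tensor. The buffer size is the element
// byte size of Type times the product of Dimensions; e.g. a {2, 3} tensor of
// a 4-byte element type needs 2 * 3 * 4 = 24 bytes.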
void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
                                     const std::vector<int64_t> &Dimensions) {
  int64_t TotalSize = TF_DataTypeSize(Type);
  for (auto &D : Dimensions)
    TotalSize *= D;

  Input[Index] =
      TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
  std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
}

void *TFModelEvaluator::getUntypedInput(size_t Index) {
  return TF_TensorData(Impl->getInput()[Index]);
}

TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}

TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}

void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return TF_TensorData(Impl->getOutput()[Index]);
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return TF_TensorData(Impl->getOutput()[Index]);
}

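// TFUTILS_GETDATATYPE_IMPL stamps out one getDataType specialization per
// supported type; for a hypothetical (float, TF_FLOAT) entry it expands to:
//   template <> int TensorSpec::getDataType<float>() { return TF_FLOAT; }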
#define TFUTILS_GETDATATYPE_IMPL(T, E)                                         \
  template <> int TensorSpec::getDataType<T>() { return E; }

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL

TFModelEvaluator::EvaluationResult::~EvaluationResult() = default;
TFModelEvaluator::~TFModelEvaluator() = default;
#endif // defined(LLVM_HAVE_TF_API)