1 //===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements utilities for interfacing with tensorflow C APIs.
11 //
12 //===----------------------------------------------------------------------===//
13 #include "llvm/Config/config.h"
14 #if defined(LLVM_HAVE_TF_API)
15 
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"

#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"

#include <cassert>
#include <cstring>
27 
28 using namespace llvm;
29 
30 namespace {
31 
32 using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
33 using TFSessionOptionsPtr =
34     std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
35 using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
36 
37 struct TFInitializer {
38   TFInitializer() {
39     assert(!IsInitialized && "TFInitialized should be called only once");
40     int Argc = 1;
41     const char *Name = "";
42     const char **NamePtr = &Name;
43     TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
44     IsInitialized = true;
45   }
46   bool IsInitialized = false;
47 };
48 
49 llvm::ManagedStatic<TFInitializer> TFLibInitializer;
50 
51 bool ensureInitTF() { return TFLibInitializer->IsInitialized; }
52 
53 TFGraphPtr createTFGraph() {
54   return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
55 }
56 
57 TFStatusPtr createTFStatus() {
58   return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
59 }
60 
61 TFSessionOptionsPtr createTFSessionOptions() {
62   return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
63 }
64 } // namespace
65 
66 namespace llvm {
67 class EvaluationResultImpl {
68 public:
69   EvaluationResultImpl(size_t OutputSize)
70       : OutputSize(OutputSize), Output(OutputSize){};
71 
72   ~EvaluationResultImpl() {
73     for (auto *P : Output)
74       if (P)
75         TF_DeleteTensor(P);
76   }
77 
78   EvaluationResultImpl(const EvaluationResultImpl &) = delete;
79   EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
80   std::vector<TF_Tensor *> &getOutput() { return Output; }
81 
82 private:
83   const size_t OutputSize;
84   std::vector<TF_Tensor *> Output;
85 };
86 
87 Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
88                                            const json::Value &Value) {
89   auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
90     std::string S;
91     llvm::raw_string_ostream OS(S);
92     OS << Value;
93     Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
94     return None;
95   };
96   json::ObjectMapper Mapper(Value);
97   if (!Mapper)
98     return EmitError("Value is not a dict");
99 
100   std::string TensorName;
101   int TensorPort = -1;
102   std::string TensorType;
103   std::vector<int64_t> TensorShape;
104 
105   if (!Mapper.map<std::string>("name", TensorName))
106     return EmitError("'name' property not present or not a string");
107   if (!Mapper.map<std::string>("type", TensorType))
108     return EmitError("'type' property not present or not a string");
109   if (!Mapper.map<int>("port", TensorPort))
110     return EmitError("'port' property not present or not an int");
111   if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
112     return EmitError("'shape' property not present or not an int array");
113 
114 #define PARSE_TYPE(T, S, E)                                                    \
115   if (TensorType == #S)                                                        \
116     return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
117   TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
118 #undef PARSE_TYPE
119   return None;
120 }
121 
class TFModelEvaluatorImpl {
public:
  /// Load the SavedModel at \p SavedModelPath and bind each input/output spec
  /// to a node in its graph. Any failure marks the evaluator invalid (see
  /// isValid()).
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  /// Run the session over the current Input tensors. \p Output must have room
  /// for OutputSize() tensor pointers; failures are reported via \p Status.
  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  /// Allocate (and zero-fill) the Index-th input tensor with the given
  /// element type and shape.
  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match by index of the corresponding InputFeed
  /// value. We set up the tensors once and just mutate their scalars before
  /// each evaluation. The input tensors keep their value after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in the
  /// output tensor vector must match by index the corresponding element in the
  /// OutputFeed.
  std::vector<TF_Output> OutputFeed;

  /// Mark the evaluator unusable; observed by callers through isValid().
  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};
174 } // namespace llvm
175 
176 TFModelEvaluatorImpl::TFModelEvaluatorImpl(
177     StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
178     const std::vector<TensorSpec> &OutputSpecs, const char *Tags)
179     : Graph(createTFGraph()), Options(createTFSessionOptions()),
180       InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
181       OutputFeed(OutputSpecs.size()) {
182   if (!ensureInitTF()) {
183     errs() << "Tensorflow should have been initialized";
184     return;
185   }
186   auto Status = createTFStatus();
187 
188   Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
189                                          SavedModelPath.str().c_str(), &Tags, 1,
190                                          Graph.get(), nullptr, Status.get());
191   if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
192     errs() << TF_Message(Status.get());
193     invalidate();
194   }
195   for (size_t I = 0; I < InputSpecs.size(); ++I) {
196     auto &InputSpec = InputSpecs[I];
197     InputFeed[I] = {
198         TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
199         InputSpec.port()};
200     if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
201       return;
202     initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
203               InputSpec.shape());
204   }
205   for (size_t I = 0; I < OutputSpecs.size(); ++I) {
206     auto &OutputSpec = OutputSpecs[I];
207     OutputFeed[I] = {
208         TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
209         OutputSpec.port()};
210     if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
211       return;
212   }
213 }
214 
215 TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
216                                    const std::vector<TensorSpec> &InputSpecs,
217                                    const std::vector<TensorSpec> &OutputSpecs,
218                                    const char *Tags)
219     : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
220                                     Tags)) {
221   if (!Impl->isValid())
222     Impl.reset();
223 }
224 
225 TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
226   for (auto *T : Input) {
227     TF_DeleteTensor(T);
228   }
229   if (Session == nullptr)
230     return;
231   auto Status = createTFStatus();
232   TF_DeleteSession(Session, Status.get());
233   Session = nullptr;
234   if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
235     errs() << "Could not delete TF session";
236 }
237 
238 bool TFModelEvaluatorImpl::checkReportAndInvalidate(
239     const TF_Output &Output, const TensorSpec &OutputSpec) {
240   if (Output.oper)
241     return true;
242   errs() << "Could not find TF_Output named: " + OutputSpec.name();
243   IsValid = false;
244   return IsValid;
245 }
246 
247 Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
248   if (!isValid())
249     return None;
250   std::unique_ptr<EvaluationResultImpl> Ret =
251       std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
252   auto Status = createTFStatus();
253   Impl->evaluate(Ret->getOutput().data(), Status.get());
254   if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
255     errs() << TF_Message(Status.get());
256     Impl.reset();
257     return None;
258   }
259   return EvaluationResult(std::move(Ret));
260 }
261 
262 void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
263                                      const std::vector<int64_t> &Dimensions) {
264   int64_t TotalSize = TF_DataTypeSize(Type);
265   for (auto &D : Dimensions)
266     TotalSize *= D;
267 
268   Input[Index] =
269       TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
270   std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
271 }
272 
273 void *TFModelEvaluator::getUntypedInput(size_t Index) {
274   return TF_TensorData(Impl->getInput()[Index]);
275 }
276 
/// Wrap an EvaluationResultImpl, taking ownership of its output tensors.
TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

/// Move construction transfers ownership of the impl; Other is left empty.
TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}
283 
284 void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
285   return TF_TensorData(Impl->getOutput()[Index]);
286 }
287 
// For each supported C++ type T (with spec name S and TF_DataType suffix E),
// instantiate TensorSpec::getDataType<T>() to return the matching TF enum
// value (TF_##E).
#define TFUTILS_GETDATATYPE_IMPL(T, S, E)                                      \
  template <> int TensorSpec::getDataType<T>() { return TF_##E; }

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL
294 
295 TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
296 TFModelEvaluator::~TFModelEvaluator() {}
297 #endif // defined(LLVM_HAVE_TF_API)
298