1 //===- DevelopmentModeInlineAdvisor.cpp - runtime-loadable model runner  --===//
2 //
// Part of the LLVM Project, under the Apache License v2.0 with LLVM
// Exceptions. See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This file implements a model runner using TensorFlow C APIs, allowing the
11 // loading of a model from a command line option.
12 //
13 //===----------------------------------------------------------------------===//
14 #include "llvm/Config/config.h"
15 #if defined(LLVM_HAVE_TF_API)
16 
17 #include "llvm/Analysis/CallGraph.h"
18 #include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
19 #include "llvm/Analysis/MLInlineAdvisor.h"
20 #include "llvm/Analysis/Utils/TFUtils.h"
21 #include "llvm/IR/LLVMContext.h"
22 #include "llvm/Support/CommandLine.h"
23 #include "llvm/Support/ManagedStatic.h"
24 #include "llvm/Support/Path.h"
25 
26 #include <vector>
27 
28 using namespace llvm;
29 
30 static cl::opt<std::string> TrainingLog(
31     "training-log", cl::Hidden,
32     cl::desc("Path where the development - mode inlining log is saved."));
33 
34 static cl::opt<std::string> TFModelUnderTrainingPath(
35     "ml-inliner-model-under-training", cl::Hidden,
36     cl::desc(R"(Path to SavedModel from the previous training iteration.
37 The directory is also expected to contain a JSON specification of the
38 outputs expected to be logged, where the first entry must be the
39 inlining decision. The file containing the specification should be
40 called output_spec.json. The expected JSON value is an array of
41 dictionaries. Each dictionary should have 2 keys:
42 
43 - "tensor_spec, followed by the TensorSpec description of the
44 output; and
45 - "logging_name", a string indicating the name to use when
46 logging the output values.
47 
48 Example:
49 [
50   {
51     "logging_name" : "some_name",
52     "tensor_spec" : {
53       "name" : "model_name",
54       "port" : 0,
55       "shape" : [2, 3],
56       "type" : "float"
57       }
58   }
59 ]
60 
61 The first value must always correspond to the decision.)"));
62 
63 static cl::opt<std::string> TFOutputSpecOverride(
64     "ml-inliner-output-spec-override", cl::Hidden,
65     cl::desc("Override the path to the output spec json file. See "
66              "-ml-inliner-model-under-training documentation for the "
67              "specification of that file."));
68 
69 static cl::opt<std::string> TFFeedPrefix("ml-inliner-trained-model-feed-prefix",
70                                          cl::Hidden, cl::init("action_"),
71                                          cl::desc("Prefix for feature names."));
72 
73 namespace {
74 /// An InlineEvent, used by TrainingLogger.
75 struct InlineEvent {
76   /// What the default policy's decision would have been.
77   bool DefaultDecision = false;
78 
79   /// What we advised. When training off the default policy, this is the same as
80   /// DefaultDecision.
81   bool AdvisedDecision = false;
82 
  /// What actually happened. This would be 'false' in the case of an inlining
  /// error, even if AdvisedDecision were true; otherwise it agrees with
  /// AdvisedDecision.
86   bool Effect = false;
87 
88   /// What the change in size was: size_after - size_before
89   int64_t Reward = 0;
90 };
91 
class ModelUnderTrainingRunner;

/// Collect data we may use for training a model, and write it as a textual
/// TensorFlow SequenceExample
/// (https://www.tensorflow.org/api_docs/python/tf/train/SequenceExample)
/// protobuf (https://developers.google.com/protocol-buffers).
/// Because this is a protobuf, we cannot just stream the events as they come.
/// Internally, TrainingLogger stores data in column-major format, because that
/// lines up with how TF SequenceExample represents it.
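///
/// As a rough illustration (feature name and values are made up), a log with
/// two events and a single scalar int64 feature prints as:
///   feature_lists: {
///     feature_list: {
///       key: "some_feature" value: {
///         feature: { int64_list: { value: [3] } }
///         feature: { int64_list: { value: [5] } }
///       }
///     }
///     ...
///   }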
100 class TrainingLogger final {
101 public:
102   TrainingLogger(StringRef LogFileName, const ModelUnderTrainingRunner *MUTR);
103 
104   /// Log one inlining event.
105   void logInlineEvent(const InlineEvent &Event,
106                       const MLModelRunner &ModelRunner);
107 
108   /// Print the stored tensors.
109   void print();
110 
111 private:
112   /// Write the values of one tensor as a list.
113   template <typename T>
114   void writeTensorValues(raw_fd_ostream &OutFile, const char *TensorData,
115                          size_t ElemCount) const {
116     OutFile << "[";
117     const T *TypedData = reinterpret_cast<const T *>(TensorData);
118     for (size_t I = 0; I < ElemCount; ++I) {
119       if (I > 0)
120         OutFile << ", ";
121       OutFile << TypedData[I];
122     }
123     OutFile << "]";
124   }
125 
126   /// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
127   /// The tensors are assumed to be stored contiguously, in row-major format,
128   /// in the TensorData buffer. Each tensor has the shape given by Spec. The
  /// feature name in the output is the provided LoggingName, if specified;
  /// otherwise it is the name of the tensor (as given by Spec).
131   template <typename T>
132   void
133   writeTensorsAsFeatureLists(raw_fd_ostream &OutFile, const TensorSpec &Spec,
134                              const T *TensorData, size_t TensorCount,
135                              Optional<StringRef> LoggingName = None) const {
136     writeRawTensorsAsFeatureLists(OutFile, Spec,
137                                   reinterpret_cast<const char *>(TensorData),
138                                   TensorCount, LoggingName);
139   }
140 
141   /// Untyped implementation of the API above.
142   void
143   writeRawTensorsAsFeatureLists(raw_fd_ostream &OutFile, const TensorSpec &Spec,
144                                 const char *TensorData, size_t TensorCount,
145                                 Optional<StringRef> LoggingName = None) const {
146     const char *FieldName = "<invalid>";
147     std::function<void(const char *)> ValueWriter;
148     // The 'Feature' protobuf only has 3 possible fields: float_list,
149     // int64_list, or bytes_list, so we capture int32 values as int64. We don't
150     // support any other types.
151     if (Spec.isElementType<int64_t>()) {
152       FieldName = "int64_list";
153       ValueWriter = [&](const char *Data) {
154         writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
155       };
156     } else if (Spec.isElementType<int32_t>()) {
157       FieldName = "int64_list";
158       ValueWriter = [&](const char *Data) {
159         writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
160       };
161 
162     } else if (Spec.isElementType<float>()) {
163       FieldName = "float_list";
164       ValueWriter = [&](const char *Data) {
165         writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
166       };
167 
168     } else
169       llvm_unreachable("Unsupported tensor type.");
170 
171     OutFile << "  feature_list: {\n";
172     OutFile << "    key: "
173             << "\"" << (LoggingName ? *LoggingName : Spec.name()) << "\" ";
174     OutFile << "value: {\n";
175     size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();
176     for (const char *P = TensorData,
177                     *E = TensorData + TensorByteSize * TensorCount;
178          P < E; P += TensorByteSize) {
179       OutFile << "      feature: { " << FieldName << ": { value: ";
180       ValueWriter(P);
181       OutFile << " } }\n";
182     }
183     OutFile << "    }\n";
184     OutFile << "  }\n";
185   }
186 
187   StringRef LogFileName;
188   const ModelUnderTrainingRunner *const MUTR;
189   std::vector<InlineFeatures> Features;
190   std::vector<int64_t> DefaultDecisions;
  // We store all outputs as data blobs. The first output always represents the
  // inlining decision; while we could track it separately, for uniformity we
  // store it generically here.
194   std::vector<std::vector<char>> Outputs;
195   std::vector<bool> Effects;
196   std::vector<int64_t> Rewards;
197 };
198 
199 /// An extension of the MLInlineAdvisor for the 'development' mode, targeting
/// the offline training scenario. Note that training happens outside of the
/// compiler; this facility is concerned with producing training data ("logs").
202 /// This InlineAdvisor can operate in the following modes:
203 ///
204 /// 1) collect logs for the default policy. This is useful for bootstrapping
205 /// training, which will be considerably faster by starting from a reasonable
206 /// policy.
207 ///
208 /// 2) collect logs for the ML policy, using a model from a previous
209 /// training. Potentially, that model uses internally some small random
210 /// perturbation of its weights, to induce exploration (setting this up is the
211 /// responsibility of the training algorithm). The logs would then be used to
212 /// retrain and improve on this model.
213 ///
/// 3) use the provided model, with no logging. This is useful for end-to-end
/// validation - the model, in this case, is a release candidate and shouldn't
/// have random perturbations. It is a convenience feature: rather than needing
/// to take the release candidate model, compile it in 'release' mode,
/// validate it, and then potentially discard it, it's easier to just pass it
/// to the compiler as a one-off, even though compilation is slower. Once the
220 /// model behaves satisfactorily, it can be compiled AOT, for efficiency, in
221 /// release mode. The expectation is that a well-trained model provides a good
222 /// policy over a sufficiently diverse codebase, over many changes (i.e.
223 /// training happens seldom).
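///
/// In terms of the command line flags above, as a sketch (the development
/// mode advisor itself is selected elsewhere, e.g. via -enable-ml-inliner):
/// mode 1) passes -training-log, but no -ml-inliner-model-under-training;
/// mode 2) passes both -training-log and -ml-inliner-model-under-training;
/// mode 3) passes only -ml-inliner-model-under-training.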
224 class DevelopmentModeMLInlineAdvisor : public MLInlineAdvisor {
225 public:
226   DevelopmentModeMLInlineAdvisor(
227       Module &M, ModuleAnalysisManager &MAM,
228       std::unique_ptr<MLModelRunner> ModelRunner,
229       std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
230       std::unique_ptr<TrainingLogger> Logger);
231 
232   size_t getTotalSizeEstimate();
233 
234   virtual ~DevelopmentModeMLInlineAdvisor();
235   void updateNativeSizeEstimate(int64_t Change) { CurrentNativeSize += Change; }
236   void resetNativeSize(Function *F) {
237     FAM.invalidate<InlineSizeEstimatorAnalysis>(*F);
238   }
239 
240   std::unique_ptr<MLInlineAdvice>
241   getMandatoryAdvice(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
242   std::unique_ptr<MLInlineAdvice>
243   getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
244 
245   size_t getNativeSizeEstimate(const Function &F) const;
246 
247 private:
248   bool isLogging() const { return !!Logger; }
249 
250   std::function<bool(CallBase &)> GetDefaultAdvice;
251   const bool IsDoingInference;
252   std::unique_ptr<TrainingLogger> Logger;
253 
254   const int32_t InitialNativeSize;
255   int32_t CurrentNativeSize = 0;
256 };
257 
258 /// A variant of MLInlineAdvice that tracks all non-trivial inlining
259 /// decisions, for training/logging.
260 class LoggingMLInlineAdvice : public MLInlineAdvice {
261 public:
262   LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
263                         OptimizationRemarkEmitter &ORE, bool Recommendation,
264                         TrainingLogger &Logger, size_t CallerSizeEstimateBefore,
265                         size_t CalleeSizeEstimateBefore, bool DefaultDecision,
266                         bool Mandatory = false)
267       : MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
268         CallerSizeEstimateBefore(CallerSizeEstimateBefore),
269         CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
270         DefaultDecision(DefaultDecision), Mandatory(Mandatory) {}
271 
272   virtual ~LoggingMLInlineAdvice() = default;
273 
274 private:
275   DevelopmentModeMLInlineAdvisor *getAdvisor() const {
276     return static_cast<DevelopmentModeMLInlineAdvisor *>(Advisor);
277   }
278   void recordInliningImpl() override {
279     MLInlineAdvice::recordInliningImpl();
280     getAdvisor()->resetNativeSize(Caller);
281     int Reward = std::numeric_limits<int>::max();
282     if (!getAdvisor()->isForcedToStop()) {
283       int NativeSizeAfter = getAdvisor()->getNativeSizeEstimate(*Caller) +
284                             CalleeSizeEstimateBefore;
285       Reward = NativeSizeAfter -
286                (CallerSizeEstimateBefore + CalleeSizeEstimateBefore);
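      // Illustrative arithmetic: with caller/callee size estimates of 100/20
      // before, and a caller estimate of 110 after inlining (the callee is
      // still live), Reward = (110 + 20) - (100 + 20) = 10; positive values
      // record native size growth.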
287       getAdvisor()->updateNativeSizeEstimate(Reward);
288     }
289     log(Reward, /*Success=*/true);
290   }
291 
292   void recordInliningWithCalleeDeletedImpl() override {
293     MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
294     getAdvisor()->resetNativeSize(Caller);
295     if (!getAdvisor()->isForcedToStop()) {
296       int NativeSizeAfter = getAdvisor()->getNativeSizeEstimate(*Caller);
297       int Reward = NativeSizeAfter -
298                    (CallerSizeEstimateBefore + CalleeSizeEstimateBefore);
299       getAdvisor()->updateNativeSizeEstimate(Reward);
300       log(Reward, /*Success=*/true);
301     }
302   }
303 
304   void recordUnsuccessfulInliningImpl(const InlineResult &Result) override {
305     MLInlineAdvice::recordUnsuccessfulInliningImpl(Result);
306     log(NoReward, /*Success=*/false);
307   }
308 
309   void recordUnattemptedInliningImpl() override {
310     MLInlineAdvice::recordUnattemptedInliningImpl();
311     log(NoReward, /*Success=*/false);
312   }
313 
314   void log(int64_t Reward, bool Success) {
315     if (Mandatory)
316       return;
317     InlineEvent Event;
318     Event.AdvisedDecision = isInliningRecommended();
319     Event.DefaultDecision = DefaultDecision;
320     Event.Effect = Success;
321     Event.Reward = Reward;
322     Logger.logInlineEvent(Event, getAdvisor()->getModelRunner());
323   }
324 
325   static const int64_t NoReward = 0;
326   TrainingLogger &Logger;
327   const size_t CallerSizeEstimateBefore;
328   const size_t CalleeSizeEstimateBefore;
329   const bool DefaultDecision;
330   const bool Mandatory;
331 };
332 
333 /// A pseudo model runner. We use it to store feature values when collecting
334 /// logs for the default policy, but never ask it to 'run'.
335 class NoInferenceModelRunner : public MLModelRunner {
336 public:
337   NoInferenceModelRunner(LLVMContext &Ctx)
338       : MLModelRunner(Ctx), Features(NumberOfFeatures) {}
339   void setFeature(FeatureIndex Index, int64_t Value) override {
340     Features[static_cast<int>(Index)] = Value;
341   }
342 
343   int64_t getFeature(int Index) const override { return Features[Index]; }
344   bool run() override {
345     llvm_unreachable("We shouldn't call run on this model runner.");
346   }
347 
348 private:
349   InlineFeatures Features;
350 };
351 
352 /// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
353 /// to dynamically load and evaluate a TF SavedModel
354 /// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
355 /// sacrificed for ease of use while training.
356 class ModelUnderTrainingRunner final : public MLModelRunner {
357 public:
358   ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath);
359 
360   bool run() override;
361 
  // Disallow copy and assign.
363   ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
364   ModelUnderTrainingRunner &
365   operator=(const ModelUnderTrainingRunner &) = delete;
366 
367   void setFeature(FeatureIndex Index, int64_t Value) override;
368   int64_t getFeature(int Index) const override;
369   bool isValid() const { return !!Evaluator; }
370 
  const std::vector<std::string> &outputNames() const { return OutputNames; }

  const std::vector<TensorSpec> &outputSpecs() const { return OutputSpecs; }
374 
375   const Optional<TFModelEvaluator::EvaluationResult> &
376   lastEvaluationResult() const {
377     return LastEvaluationResult;
378   }
379 
380 private:
381   std::unique_ptr<TFModelEvaluator> Evaluator;
382   std::vector<std::string> OutputNames;
383   std::vector<TensorSpec> OutputSpecs;
384   Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
385 
386   bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName);
387 
388   // The training framework needs some additional features.
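  // A sketch of their intent: discount, reward, and step_type mirror the
  // time-step fields a reinforcement learning framework (e.g. TF-Agents)
  // expects, and inlining_default carries the default policy's decision; the
  // exact semantics are owned by the training side.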
389   const std::vector<TensorSpec> TrainingOnlyFeatures{
390       TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
391       TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
392       TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
393       TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
394 };
395 } // namespace
396 
397 TrainingLogger::TrainingLogger(StringRef LogFileName,
398                                const ModelUnderTrainingRunner *MUTR)
399     : LogFileName(LogFileName), MUTR(MUTR) {
400   for (size_t I = 0; I < NumberOfFeatures; ++I)
401     Features.push_back(InlineFeatures());
402 
403   // The first output is the inlining decision.
404   auto OutputCount = MUTR ? MUTR->outputSpecs().size() : 1;
405   Outputs.assign(OutputCount, std::vector<char>());
406 }
407 
408 /// Log one inlining event.
409 void TrainingLogger::logInlineEvent(const InlineEvent &Event,
410                                     const MLModelRunner &ModelRunner) {
411   for (size_t I = 0; I < NumberOfFeatures; ++I)
412     Features[I].push_back(ModelRunner.getFeature(I));
413 
414   Effects.push_back(Event.Effect);
415   Rewards.push_back(Event.Reward);
416   DefaultDecisions.push_back(Event.DefaultDecision);
417   int64_t Advice = static_cast<int64_t>(Event.AdvisedDecision);
418   const char *AdviceData = reinterpret_cast<const char *>(&Advice);
419   Outputs[0].insert(Outputs[0].end(), AdviceData, AdviceData + sizeof(int64_t));
420   for (size_t I = 1; I < Outputs.size(); ++I) {
421     const auto &Result = *MUTR->lastEvaluationResult();
422     auto &Spec = MUTR->outputSpecs()[I];
423     const char *RawData =
424         reinterpret_cast<const char *>(Result.getUntypedTensorValue(I));
425     Outputs[I].insert(Outputs[I].end(), RawData,
426                       RawData +
427                           Spec.getElementCount() * Spec.getElementByteSize());
428   }
429 }
430 
431 void TrainingLogger::print() {
  std::error_code EC;
  raw_fd_ostream OutFile(LogFileName, EC);
  // If the log file failed to open, bail out rather than write to a broken
  // stream.
  if (EC)
    return;
434   size_t NumberOfRecords = Rewards.size();
435   if (NumberOfRecords == 0)
436     return;
437 
438   OutFile << "feature_lists: {\n";
439   for (size_t I = 0; I < Features.size(); ++I)
440     writeTensorsAsFeatureLists(
441         OutFile, TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}),
442         Features[I].data(), NumberOfRecords);
443 
444   writeTensorsAsFeatureLists(
445       OutFile, TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1}),
446       DefaultDecisions.data(), NumberOfRecords);
447 
448   writeRawTensorsAsFeatureLists(
449       OutFile, TensorSpec::createSpec<int64_t>(DecisionName, {1}),
450       Outputs[0].data(), NumberOfRecords);
451   writeTensorsAsFeatureLists(OutFile,
452                              TensorSpec::createSpec<int64_t>(RewardName, {1}),
453                              Rewards.data(), NumberOfRecords);
454 
455   for (size_t I = 1; I < Outputs.size(); ++I)
456     writeRawTensorsAsFeatureLists(OutFile, MUTR->outputSpecs()[I],
457                                   Outputs[I].data(), NumberOfRecords,
458                                   StringRef(MUTR->outputNames()[I]));
459 
460   OutFile << "}\n";
461 }
462 
463 DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
464     Module &M, ModuleAnalysisManager &MAM,
465     std::unique_ptr<MLModelRunner> ModelRunner,
466     std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
467     std::unique_ptr<TrainingLogger> Logger)
468     : MLInlineAdvisor(M, MAM, std::move(ModelRunner)),
469       GetDefaultAdvice(GetDefaultAdvice), IsDoingInference(IsDoingInference),
470       Logger(std::move(Logger)),
471       InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
472       CurrentNativeSize(InitialNativeSize) {
473   // We cannot have the case of neither inference nor logging.
474   assert(IsDoingInference || isLogging());
475 }
476 
477 DevelopmentModeMLInlineAdvisor::~DevelopmentModeMLInlineAdvisor() {
478   if (isLogging())
479     Logger->print();
480 }
481 
482 size_t
483 DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
484   auto &R =
485       FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
486   if (!R) {
487     F.getParent()->getContext().emitError(
488         "Native size estimator is not present.");
489     return 0;
490   }
491   return *R;
492 }
493 
494 std::unique_ptr<MLInlineAdvice>
495 DevelopmentModeMLInlineAdvisor::getMandatoryAdvice(
496     CallBase &CB, OptimizationRemarkEmitter &ORE) {
497   if (!isLogging())
498     return MLInlineAdvisor::getMandatoryAdvice(CB, ORE);
499   return std::make_unique<LoggingMLInlineAdvice>(
500       /*Advisor=*/this,
501       /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/true, /*Logger=*/*Logger,
502       /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
503       /*CalleeSizeEstimateBefore=*/
504       getNativeSizeEstimate(*CB.getCalledFunction()),
      /*DefaultDecision=*/true, /*Mandatory=*/true);
506 }
507 
508 std::unique_ptr<MLInlineAdvice>
509 DevelopmentModeMLInlineAdvisor::getAdviceFromModel(
510     CallBase &CB, OptimizationRemarkEmitter &ORE) {
511   if (IsDoingInference && !isLogging())
512     return MLInlineAdvisor::getAdviceFromModel(CB, ORE);
513 
514   bool DefaultAdvice = GetDefaultAdvice(CB);
515   auto Recommendation = IsDoingInference ? ModelRunner->run() : DefaultAdvice;
516   return std::make_unique<LoggingMLInlineAdvice>(
517       /*Advisor=*/this,
518       /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/Recommendation,
519       /*Logger=*/*Logger,
520       /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
521       /*CalleeSizeEstimateBefore=*/
522       getNativeSizeEstimate(*CB.getCalledFunction()),
523       /*DefaultDecision=*/DefaultAdvice);
524 }
525 
526 size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
527   size_t Ret = 0;
528   for (auto &F : M) {
529     if (F.isDeclaration())
530       continue;
531     if (isFunctionDeleted(&F))
532       continue;
533     Ret += getNativeSizeEstimate(F);
534   }
535   return Ret;
536 }
537 
538 ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
539                                                    const std::string &ModelPath)
540     : MLModelRunner(Ctx) {
541   std::vector<TensorSpec> InputSpecs;
542   for (size_t I = 0; I < NumberOfFeatures; ++I)
543     InputSpecs.push_back(
544         TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
545   InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(),
546                     TrainingOnlyFeatures.end());
547   SmallVector<char, 128> OutputSpecsPath;
548   StringRef OutputSpecPath = TFOutputSpecOverride;
549   if (OutputSpecPath.empty()) {
550     llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
551     OutputSpecPath = {OutputSpecsPath.data(), OutputSpecsPath.size()};
552   }
553   if (!loadOutputSpecs(Ctx, OutputSpecPath))
554     return;
555 
556   Evaluator =
557       std::make_unique<TFModelEvaluator>(ModelPath, InputSpecs, OutputSpecs);
558   if (!Evaluator || !Evaluator->isValid()) {
559     Ctx.emitError("Failed to create inliner saved model evaluator");
560     Evaluator.reset();
561     return;
562   }
563 }
564 
565 bool ModelUnderTrainingRunner::loadOutputSpecs(LLVMContext &Ctx,
566                                                StringRef FileName) {
567   auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
568   if (!BufferOrError) {
569     Ctx.emitError("Error opening output specs file: " + FileName + " : " +
570                   BufferOrError.getError().message());
571     return false;
572   }
573   auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
574   if (!ParsedJSONValues) {
575     Ctx.emitError("Could not parse specs file: " + FileName);
576     return false;
577   }
578   auto ValuesArray = ParsedJSONValues->getAsArray();
579   if (!ValuesArray) {
580     Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
581                   "logging_name:<name>} dictionaries");
582     return false;
583   }
584 
585   for (const auto &Value : *ValuesArray)
586     if (const auto *Obj = Value.getAsObject())
587       if (const auto *SpecPart = Obj->get("tensor_spec"))
588         if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
589           if (auto LoggingName = Obj->getString("logging_name")) {
590             if (!TensorSpec->isElementType<int64_t>() &&
591                 !TensorSpec->isElementType<int32_t>() &&
592                 !TensorSpec->isElementType<float>()) {
593               Ctx.emitError(
594                   "Only int64, int32, and float tensors are supported. "
595                   "Found unsupported type for tensor named " +
596                   TensorSpec->name());
597               return false;
598             }
599             OutputNames.push_back(LoggingName->str());
600             OutputSpecs.push_back(*TensorSpec);
601           }
602 
603   if (ValuesArray->size() != OutputNames.size()) {
604     Ctx.emitError(
605         "Unable to parse output spec. It should be a json file containing an "
606         "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
607         "with a json object describing a TensorSpec; and a 'logging_name' key, "
608         "which is a string to use as name when logging this tensor in the "
609         "training log.");
610     return false;
611   }
612   assert(OutputNames.size() == OutputSpecs.size());
613   if (OutputNames.empty() || OutputNames[0] != DecisionName) {
614     Ctx.emitError("The first output spec must describe the decision tensor, "
615                   "and must have the logging_name " +
616                   StringRef(DecisionName));
617     return false;
618   }
619   return true;
620 }
621 
622 bool ModelUnderTrainingRunner::run() {
623   LastEvaluationResult = Evaluator->evaluate();
624   if (!LastEvaluationResult.hasValue()) {
625     Ctx.emitError("Error evaluating model.");
626     return false;
627   }
628   int64_t Decision = *LastEvaluationResult->getTensorValue<int64_t>(0);
629   return static_cast<bool>(Decision);
630 }
631 
632 int64_t ModelUnderTrainingRunner::getFeature(int Index) const {
633   return *Evaluator->getInput<int64_t>(Index);
634 }
635 
636 void ModelUnderTrainingRunner::setFeature(FeatureIndex Index, int64_t Value) {
637   size_t NumericIndex = static_cast<size_t>(Index);
638   *(Evaluator->getInput<int64_t>(NumericIndex)) = Value;
639 }
640 
641 std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
642     Module &M, ModuleAnalysisManager &MAM,
643     std::function<bool(CallBase &)> GetDefaultAdvice) {
644   auto &Ctx = M.getContext();
645   if (TrainingLog.empty() !=
646       !InlineSizeEstimatorAnalysis::isEvaluatorRequested()) {
647     Ctx.emitError("For development mode, if training logs are requested, then "
648                   "a size estimator must be available; either that, or neither "
649                   "are specified.");
650     return nullptr;
651   }
652 
653   std::unique_ptr<MLModelRunner> Runner;
654   ModelUnderTrainingRunner *MUTRPtr = nullptr;
655   bool IsDoingInference = false;
  if (TFModelUnderTrainingPath.empty())
    Runner = std::make_unique<NoInferenceModelRunner>(Ctx);
658   else {
659     auto MUTR = std::make_unique<ModelUnderTrainingRunner>(
660         Ctx, TFModelUnderTrainingPath);
661     if (!MUTR || !MUTR->isValid()) {
662       Ctx.emitError("Could not load the policy model from the provided path");
663       return nullptr;
664     }
665     IsDoingInference = true;
666     MUTRPtr = MUTR.get();
667     Runner = std::move(MUTR);
668   }
669   std::unique_ptr<TrainingLogger> Logger;
670   if (!TrainingLog.empty())
671     Logger = std::make_unique<TrainingLogger>(TrainingLog, MUTRPtr);
672 
673   return std::make_unique<DevelopmentModeMLInlineAdvisor>(
674       M, MAM, std::move(Runner), GetDefaultAdvice, IsDoingInference,
675       std::move(Logger));
676 }
677 #endif // defined(LLVM_HAVE_TF_API)
678