//===- DevelopmentModeInlineAdvisor.cpp - runtime-loadable model runner  --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a model runner using Tensorflow C APIs, allowing the
// loading of a model from a command line option.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)

#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
#include "llvm/Analysis/MLInlineAdvisor.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Path.h"

#include <vector>

using namespace llvm;

static cl::opt<std::string> TrainingLog(
    "training-log", cl::Hidden,
    cl::desc("Path where the development-mode inlining log is saved."));

static cl::opt<std::string> TFModelUnderTrainingPath(
    "ml-inliner-model-under-training", cl::Hidden,
    cl::desc(R"(Path to SavedModel from the previous training iteration.
The directory is also expected to contain a JSON specification of the
outputs expected to be logged, where the first entry must be the
inlining decision. The file containing the specification should be
called output_spec.json. The expected JSON value is an array of
dictionaries. Each dictionary should have 2 keys:

- "tensor_spec", followed by the TensorSpec description of the
output; and
- "logging_name", a string indicating the name to use when
logging the output values.

Example:
[
  {
    "logging_name" : "some_name",
    "tensor_spec" : {
      "name" : "model_name",
      "port" : 0,
      "shape" : [2, 3],
      "type" : "float"
    }
  }
]

The first value must always correspond to the decision.)"));

static cl::opt<std::string> TFOutputSpecOverride(
    "ml-inliner-output-spec-override", cl::Hidden,
    cl::desc("Override the path to the output spec json file. See "
             "-ml-inliner-model-under-training documentation for the "
             "specification of that file."));

static cl::opt<std::string> TFFeedPrefix("ml-inliner-trained-model-feed-prefix",
                                         cl::Hidden, cl::init("action_"),
                                         cl::desc("Prefix for feature names."));
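// Illustration (a sketch, not part of the option's semantics): with the
// default prefix, a feature such as "callee_basic_block_count" (a
// representative FeatureNameMap entry) would be fed to the model under
// training as "action_callee_basic_block_count"; see how InputSpecs is
// populated in the ModelUnderTrainingRunner constructor below.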

namespace {
/// An InlineEvent, used by TrainingLogger.
struct InlineEvent {
  /// What the default policy's decision would have been.
  int64_t DefaultDecision = 0;

  /// What we advised. When training off the default policy, this is the same
  /// as DefaultDecision.
  int64_t AdvisedDecision = 0;

  /// What actually happened. This would be 'false' in the case of an inline
  /// error, even if AdvisedDecision were true; otherwise it agrees with
  /// AdvisedDecision.
  bool Effect = false;

  /// What the change in size was: size_after - size_before
  int64_t Reward = 0;
};

/// Collect data we may use for training a model, and write it as a textual
/// Tensorflow SequenceExample
/// (https://www.tensorflow.org/api_docs/python/tf/train/SequenceExample)
/// protobuf (https://developers.google.com/protocol-buffers).
/// Because this is a protobuf, we cannot just stream the events as they come.
/// Internally, TrainingLogger stores data in column-major format, because that
/// lines up with how TF SequenceExample represents it.
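///
/// A rough sketch of the result (illustrative; the exact rendering is done by
/// the Logger utility in TFUtils): one feature_list per logged tensor - the
/// features, any extra model outputs, the default decision, and the advised
/// decision - each holding one entry per inlining event, e.g.
///   feature_lists {
///     feature_list { key: "<feature or output name>"
///                    value { feature { int64_list { value: [...] } } ... } }
///     ...
///   }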
class ModelUnderTrainingRunner;
class TrainingLogger final {
public:
  TrainingLogger(StringRef LogFileName, const ModelUnderTrainingRunner *MUTR);

  /// Log one inlining event.
  void logInlineEvent(const InlineEvent &Event,
                      const MLModelRunner &ModelRunner);

  /// Print the stored tensors.
  void print();

private:
  StringRef LogFileName;
  const ModelUnderTrainingRunner *const MUTR;
  std::unique_ptr<Logger> L;
  std::vector<bool> Effects;
  /// There's at least one output. We'll set this to a different value if MUTR
  /// is available.
  size_t OutputCount = 1;
  /// Set these two clearly out of bounds, to make sure we set them later.
  size_t DefaultDecisionPos = std::numeric_limits<size_t>::max();
  size_t DecisionPos = std::numeric_limits<size_t>::max();
};

/// An extension of the MLInlineAdvisor for the 'development' mode, targeting
/// the offline training scenario. Note that training happens outside of the
/// compiler; this facility is concerned with producing training data ("logs").
/// This InlineAdvisor can operate in the following modes:
///
/// 1) collect logs for the default policy. This is useful for bootstrapping
/// training, which will be considerably faster by starting from a reasonable
/// policy.
///
/// 2) collect logs for the ML policy, using a model from a previous
/// training. Potentially, that model uses internally some small random
/// perturbation of its weights, to induce exploration (setting this up is the
/// responsibility of the training algorithm). The logs would then be used to
/// retrain and improve on this model.
///
/// 3) use the provided model, with no logging. This is useful for end-to-end
/// validation - the model, in this case, is a release candidate and shouldn't
/// have random perturbations. It is a convenience feature: rather than needing
/// to take the release candidate model and compile it in 'release' mode,
/// validate it, then potentially discard it, it's easier to just pass the model
/// to the compiler, even if compilation is slower, as a one-off. Once the
/// model behaves satisfactorily, it can be compiled AOT, for efficiency, in
/// release mode. The expectation is that a well-trained model provides a good
/// policy over a sufficiently diverse codebase, over many changes (i.e.
/// training happens seldom).
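///
/// Illustrative invocations (a sketch; assumes the development-mode advisor is
/// selected via the inliner's advisor-mode option, e.g.
/// -enable-ml-inliner=development, which is defined elsewhere):
///   1) ... -training-log=<path>
///   2) ... -training-log=<path> -ml-inliner-model-under-training=<saved_model_dir>
///   3) ... -ml-inliner-model-under-training=<saved_model_dir>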
class DevelopmentModeMLInlineAdvisor : public MLInlineAdvisor {
public:
  DevelopmentModeMLInlineAdvisor(
      Module &M, ModuleAnalysisManager &MAM,
      std::unique_ptr<MLModelRunner> ModelRunner,
      std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
      std::unique_ptr<TrainingLogger> Logger);

  size_t getTotalSizeEstimate();

  virtual ~DevelopmentModeMLInlineAdvisor();
  void updateNativeSizeEstimate(int64_t Change) {
    *CurrentNativeSize += Change;
  }
  void resetNativeSize(Function *F) {
    FAM.invalidate<InlineSizeEstimatorAnalysis>(*F);
  }

  std::unique_ptr<MLInlineAdvice>
  getMandatoryAdvice(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
  std::unique_ptr<MLInlineAdvice>
  getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;

  Optional<size_t> getNativeSizeEstimate(const Function &F) const;

private:
  bool isLogging() const { return !!Logger; }

  std::function<bool(CallBase &)> GetDefaultAdvice;
  const bool IsDoingInference;
  std::unique_ptr<TrainingLogger> Logger;

  const Optional<int32_t> InitialNativeSize;
  Optional<int32_t> CurrentNativeSize;
};

/// A variant of MLInlineAdvice that tracks all non-trivial inlining
/// decisions, for training/logging.
class LoggingMLInlineAdvice : public MLInlineAdvice {
public:
  LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
                        OptimizationRemarkEmitter &ORE, bool Recommendation,
                        TrainingLogger &Logger,
                        Optional<size_t> CallerSizeEstimateBefore,
                        Optional<size_t> CalleeSizeEstimateBefore,
                        bool DefaultDecision, bool Mandatory = false)
      : MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
        CallerSizeEstimateBefore(CallerSizeEstimateBefore),
        CalleeSizeEstimateBefore(CalleeSizeEstimateBefore),
        DefaultDecision(DefaultDecision), Mandatory(Mandatory) {}

  virtual ~LoggingMLInlineAdvice() = default;

private:
  DevelopmentModeMLInlineAdvisor *getAdvisor() const {
    return static_cast<DevelopmentModeMLInlineAdvisor *>(Advisor);
  }
  void recordInliningImpl() override {
    MLInlineAdvice::recordInliningImpl();
    getAdvisor()->resetNativeSize(Caller);
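    // Note: the reward below is only written to the log when the size
    // estimator is requested (see TrainingLogger::logInlineEvent); otherwise
    // this sentinel stays unused.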
    int Reward = std::numeric_limits<int>::max();
    if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
        !getAdvisor()->isForcedToStop()) {
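      // Reward is the estimated native size growth attributable to this
      // inlining: (caller-after + callee-before) - (caller-before +
      // callee-before). Illustrative numbers: caller 100 -> 140, callee 50,
      // giving a reward of (140 + 50) - (100 + 50) = 40.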
      int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller) +
                            *CalleeSizeEstimateBefore;
      Reward = NativeSizeAfter -
               (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
      getAdvisor()->updateNativeSizeEstimate(Reward);
    }
    log(Reward, /*Success=*/true);
  }

  void recordInliningWithCalleeDeletedImpl() override {
    MLInlineAdvice::recordInliningWithCalleeDeletedImpl();
    getAdvisor()->resetNativeSize(Caller);
    if (InlineSizeEstimatorAnalysis::isEvaluatorRequested() &&
        !getAdvisor()->isForcedToStop()) {
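      // The callee has been deleted, so the post-inlining estimate is just the
      // caller's new size; otherwise the reward mirrors recordInliningImpl.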
      int NativeSizeAfter = *getAdvisor()->getNativeSizeEstimate(*Caller);
      int Reward = NativeSizeAfter -
                   (*CallerSizeEstimateBefore + *CalleeSizeEstimateBefore);
      getAdvisor()->updateNativeSizeEstimate(Reward);
      log(Reward, /*Success=*/true);
    }
  }

  void recordUnsuccessfulInliningImpl(const InlineResult &Result) override {
    MLInlineAdvice::recordUnsuccessfulInliningImpl(Result);
    log(NoReward, /*Success=*/false);
  }

  void recordUnattemptedInliningImpl() override {
    MLInlineAdvice::recordUnattemptedInliningImpl();
    log(NoReward, /*Success=*/false);
  }

  void log(int64_t Reward, bool Success) {
    if (Mandatory)
      return;
    InlineEvent Event;
    Event.AdvisedDecision = isInliningRecommended();
    Event.DefaultDecision = DefaultDecision;
    Event.Effect = Success;
    Event.Reward = Reward;
    Logger.logInlineEvent(Event, getAdvisor()->getModelRunner());
  }

  static const int64_t NoReward = 0;
  TrainingLogger &Logger;
  const Optional<size_t> CallerSizeEstimateBefore;
  const Optional<size_t> CalleeSizeEstimateBefore;
  const int64_t DefaultDecision;
  const int64_t Mandatory;
};

/// A pseudo model runner. We use it to store feature values when collecting
/// logs for the default policy, but never ask it to 'run'.
class NoInferenceModelRunner : public MLModelRunner {
public:
  NoInferenceModelRunner(LLVMContext &Ctx)
      : MLModelRunner(Ctx), Features(NumberOfFeatures) {}
  void setFeature(FeatureIndex Index, int64_t Value) override {
    Features[static_cast<int>(Index)] = Value;
  }

  int64_t getFeature(int Index) const override { return Features[Index]; }
  bool run() override {
    llvm_unreachable("We shouldn't call run on this model runner.");
  }

private:
  InlineFeatures Features;
};

/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
/// to dynamically load and evaluate a TF SavedModel
/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
/// sacrificed for ease of use while training.
class ModelUnderTrainingRunner final : public MLModelRunner {
public:
  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath);

  bool run() override;

  // Disallows copy and assign.
  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
  ModelUnderTrainingRunner &
  operator=(const ModelUnderTrainingRunner &) = delete;

  void setFeature(FeatureIndex Index, int64_t Value) override;
  int64_t getFeature(int Index) const override;
  bool isValid() const { return !!Evaluator; }

  const std::vector<std::string> &outputNames() const { return OutputNames; }

  const std::vector<TensorSpec> &outputSpecs() const { return OutputSpecs; }

  const Optional<TFModelEvaluator::EvaluationResult> &
  lastEvaluationResult() const {
    return LastEvaluationResult;
  }

private:
  std::unique_ptr<TFModelEvaluator> Evaluator;
  std::vector<std::string> OutputNames;
  std::vector<TensorSpec> OutputSpecs;
  Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;

  bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName);

  // The training framework needs some additional features.
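  // (These are presumably consumed by a TF-Agents-style training setup -
  // discount, reward and step_type feeds alongside the observation; the
  // compiler declares them as model inputs but does not otherwise use them.)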
  const std::vector<TensorSpec> TrainingOnlyFeatures{
      TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
      TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
      TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
      TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
};
} // namespace

TrainingLogger::TrainingLogger(StringRef LogFileName,
                               const ModelUnderTrainingRunner *MUTR)
    : LogFileName(LogFileName), MUTR(MUTR) {
  // The first output is the inlining decision.
  if (MUTR)
    OutputCount = MUTR->outputSpecs().size();
  std::vector<Logger::LoggedFeatureSpec> FT;

  for (size_t I = 0; I < NumberOfFeatures; ++I)
    FT.push_back(
        {TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}), None});
  for (size_t I = 1; I < OutputCount; ++I)
    FT.push_back({MUTR->outputSpecs()[I], MUTR->outputNames()[I]});

  DefaultDecisionPos = FT.size();
  FT.push_back(
      {TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1}), None});

  DecisionPos = FT.size();
  FT.push_back({TensorSpec::createSpec<int64_t>(DecisionName, {1}), None});

  L = std::make_unique<Logger>(
      FT, TensorSpec::createSpec<int64_t>(RewardName, {1}),
      InlineSizeEstimatorAnalysis::isEvaluatorRequested());
}

/// Log one inlining event.
void TrainingLogger::logInlineEvent(const InlineEvent &Event,
                                    const MLModelRunner &ModelRunner) {
  size_t CurrentFeature = 0;
  for (; CurrentFeature < NumberOfFeatures; ++CurrentFeature) {
    int64_t F = ModelRunner.getFeature(CurrentFeature);
    L->logTensorValue(CurrentFeature, &F);
  }

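  // Positions 1..OutputCount-1 are extra model outputs requested via
  // output_spec.json; position 0, the decision, is logged further down from
  // the event itself.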
  for (size_t I = 1; I < OutputCount; ++I) {
    const auto &Result = *MUTR->lastEvaluationResult();
    auto &Spec = MUTR->outputSpecs()[I];
    const char *RawData =
        reinterpret_cast<const char *>(Result.getUntypedTensorValue(I));
    L->logTensorValue(CurrentFeature, RawData,
                      Spec.getElementCount() * Spec.getElementByteSize());
    ++CurrentFeature;
  }

  assert(CurrentFeature == DefaultDecisionPos);
  L->logTensorValue(DefaultDecisionPos, &Event.DefaultDecision);
  L->logTensorValue(DecisionPos, &Event.AdvisedDecision);
  if (InlineSizeEstimatorAnalysis::isEvaluatorRequested())
    L->logReward(Event.Reward);

  // For debugging / later use
  Effects.push_back(Event.Effect);
}

void TrainingLogger::print() {
  std::error_code EC;
  raw_fd_ostream OutFile(LogFileName, EC);
  L->print(OutFile);
}

DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
    Module &M, ModuleAnalysisManager &MAM,
    std::unique_ptr<MLModelRunner> ModelRunner,
    std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
    std::unique_ptr<TrainingLogger> Logger)
    : MLInlineAdvisor(M, MAM, std::move(ModelRunner)),
      GetDefaultAdvice(GetDefaultAdvice), IsDoingInference(IsDoingInference),
      Logger(std::move(Logger)),
      InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
      CurrentNativeSize(InitialNativeSize) {
  // We cannot have the case of neither inference nor logging.
  assert(IsDoingInference || isLogging());
}

DevelopmentModeMLInlineAdvisor::~DevelopmentModeMLInlineAdvisor() {
  if (isLogging())
    Logger->print();
}

Optional<size_t>
DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
  if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
    return None;
  auto &R =
      FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
  if (!R) {
    F.getParent()->getContext().emitError(
        "Native size estimator is not present.");
    return 0;
  }
  return *R;
}

std::unique_ptr<MLInlineAdvice>
DevelopmentModeMLInlineAdvisor::getMandatoryAdvice(
    CallBase &CB, OptimizationRemarkEmitter &ORE) {
  if (!isLogging())
    return MLInlineAdvisor::getMandatoryAdvice(CB, ORE);

  return std::make_unique<LoggingMLInlineAdvice>(
      /*Advisor=*/this,
      /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/true, /*Logger=*/*Logger,
      /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
      /*CalleeSizeEstimateBefore=*/
      getNativeSizeEstimate(*CB.getCalledFunction()),
      /*DefaultDecision=*/true, /*Mandatory=*/true);
}

std::unique_ptr<MLInlineAdvice>
DevelopmentModeMLInlineAdvisor::getAdviceFromModel(
    CallBase &CB, OptimizationRemarkEmitter &ORE) {
  if (IsDoingInference && !isLogging())
    return MLInlineAdvisor::getAdviceFromModel(CB, ORE);

  bool DefaultAdvice = GetDefaultAdvice(CB);
  auto Recommendation = IsDoingInference ? ModelRunner->run() : DefaultAdvice;
  return std::make_unique<LoggingMLInlineAdvice>(
      /*Advisor=*/this,
      /*CB=*/CB, /*ORE=*/ORE, /*Recommendation=*/Recommendation,
      /*Logger=*/*Logger,
      /*CallerSizeEstimateBefore=*/getNativeSizeEstimate(*CB.getCaller()),
      /*CalleeSizeEstimateBefore=*/
      getNativeSizeEstimate(*CB.getCalledFunction()),
      /*DefaultDecision=*/DefaultAdvice);
}

size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
  if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
    return 0;
  size_t Ret = 0;
  for (auto &F : M) {
    if (F.isDeclaration())
      continue;
    if (isFunctionDeleted(&F))
      continue;
    Ret += *getNativeSizeEstimate(F);
  }
  return Ret;
}

ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
                                                   const std::string &ModelPath)
    : MLModelRunner(Ctx) {
  std::vector<TensorSpec> InputSpecs;
  for (size_t I = 0; I < NumberOfFeatures; ++I)
    InputSpecs.push_back(
        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
  InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(),
                    TrainingOnlyFeatures.end());
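  // The output spec is read from <ModelPath>/output_spec.json unless an
  // explicit path was given via -ml-inliner-output-spec-override.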
  SmallVector<char, 128> OutputSpecsPath;
  StringRef OutputSpecPath = TFOutputSpecOverride;
  if (OutputSpecPath.empty()) {
    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
    OutputSpecPath = {OutputSpecsPath.data(), OutputSpecsPath.size()};
  }
  if (!loadOutputSpecs(Ctx, OutputSpecPath))
    return;

  Evaluator =
      std::make_unique<TFModelEvaluator>(ModelPath, InputSpecs, OutputSpecs);
  if (!Evaluator || !Evaluator->isValid()) {
    Ctx.emitError("Failed to create inliner saved model evaluator");
    Evaluator.reset();
    return;
  }
}

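// A minimal sketch of an accepted output_spec.json (illustrative; the first
// entry's logging_name must equal DecisionName, and the "name"/"port"/"shape"/
// "type" fields must match the model's decision output):
// [
//   { "logging_name" : "<DecisionName>",
//     "tensor_spec" : { "name" : "output_0", "port" : 0,
//                       "shape" : [1], "type" : "int64_t" } }
// ]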
bool ModelUnderTrainingRunner::loadOutputSpecs(LLVMContext &Ctx,
                                               StringRef FileName) {
  auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
  if (!BufferOrError) {
    Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                  BufferOrError.getError().message());
    return false;
  }
  auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
  if (!ParsedJSONValues) {
    Ctx.emitError("Could not parse specs file: " + FileName);
    return false;
  }
  auto ValuesArray = ParsedJSONValues->getAsArray();
  if (!ValuesArray) {
    Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                  "logging_name:<name>} dictionaries");
    return false;
  }

  for (const auto &Value : *ValuesArray)
    if (const auto *Obj = Value.getAsObject())
      if (const auto *SpecPart = Obj->get("tensor_spec"))
        if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
          if (auto LoggingName = Obj->getString("logging_name")) {
            if (!TensorSpec->isElementType<int64_t>() &&
                !TensorSpec->isElementType<int32_t>() &&
                !TensorSpec->isElementType<float>()) {
              Ctx.emitError(
                  "Only int64, int32, and float tensors are supported. "
                  "Found unsupported type for tensor named " +
                  TensorSpec->name());
              return false;
            }
            OutputNames.push_back(LoggingName->str());
            OutputSpecs.push_back(*TensorSpec);
          }

  if (ValuesArray->size() != OutputNames.size()) {
    Ctx.emitError(
        "Unable to parse output spec. It should be a json file containing an "
        "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
        "with a json object describing a TensorSpec; and a 'logging_name' key, "
        "which is a string to use as name when logging this tensor in the "
        "training log.");
    return false;
  }
  assert(OutputNames.size() == OutputSpecs.size());
  if (OutputNames.empty() || OutputNames[0] != DecisionName) {
    Ctx.emitError("The first output spec must describe the decision tensor, "
                  "and must have the logging_name " +
                  StringRef(DecisionName));
    return false;
  }
  return true;
}

bool ModelUnderTrainingRunner::run() {
  LastEvaluationResult = Evaluator->evaluate();
  if (!LastEvaluationResult.hasValue()) {
    Ctx.emitError("Error evaluating model.");
    return false;
  }
  int64_t Decision = *LastEvaluationResult->getTensorValue<int64_t>(0);
  return static_cast<bool>(Decision);
}

int64_t ModelUnderTrainingRunner::getFeature(int Index) const {
  return *Evaluator->getInput<int64_t>(Index);
}

void ModelUnderTrainingRunner::setFeature(FeatureIndex Index, int64_t Value) {
  size_t NumericIndex = static_cast<size_t>(Index);
  *(Evaluator->getInput<int64_t>(NumericIndex)) = Value;
}

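// Summary of the configurations this factory can produce, driven by the flags
// defined at the top of this file:
//  - no model under training, -training-log set: log the default policy;
//  - model under training and -training-log set: run that model and log it;
//  - model under training, no -training-log: inference only, no logging.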
std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
    Module &M, ModuleAnalysisManager &MAM,
    std::function<bool(CallBase &)> GetDefaultAdvice) {
  auto &Ctx = M.getContext();
  std::unique_ptr<MLModelRunner> Runner;
  ModelUnderTrainingRunner *MUTRPtr = nullptr;
  bool IsDoingInference = false;
  if (TFModelUnderTrainingPath.empty())
    Runner.reset(new NoInferenceModelRunner(Ctx));
  else {
    auto MUTR = std::make_unique<ModelUnderTrainingRunner>(
        Ctx, TFModelUnderTrainingPath);
    if (!MUTR || !MUTR->isValid()) {
      Ctx.emitError("Could not load the policy model from the provided path");
      return nullptr;
    }
    IsDoingInference = true;
    MUTRPtr = MUTR.get();
    Runner = std::move(MUTR);
  }
  std::unique_ptr<TrainingLogger> Logger;
  if (!TrainingLog.empty())
    Logger = std::make_unique<TrainingLogger>(TrainingLog, MUTRPtr);

  return std::make_unique<DevelopmentModeMLInlineAdvisor>(
      M, MAM, std::move(Runner), GetDefaultAdvice, IsDoingInference,
      std::move(Logger));
}
#endif // defined(LLVM_HAVE_TF_API)