1 //===- MLRegAllocEvictAdvisor.cpp - ML eviction advisor -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Implementation of the ML eviction advisor and reward injection pass
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "RegAllocEvictionAdvisor.h"
14 #include "RegAllocGreedy.h"
15 #include "RegAllocScore.h"
16 #include "llvm/Analysis/AliasAnalysis.h"
17 #include "llvm/Analysis/MLModelRunner.h"
18 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
19 #include "llvm/Analysis/NoInferenceModelRunner.h"
20 #include "llvm/Analysis/ReleaseModeModelRunner.h"
21 #include "llvm/Analysis/Utils/TFUtils.h"
22 #include "llvm/CodeGen/CalcSpillWeights.h"
23 #include "llvm/CodeGen/MachineBasicBlock.h"
24 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineLoopInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/Passes.h"
29 #include "llvm/CodeGen/RegisterClassInfo.h"
30 #include "llvm/CodeGen/VirtRegMap.h"
31 #include "llvm/Config/config.h"
32 #include "llvm/InitializePasses.h"
33 #include "llvm/Pass.h"
34 #include "llvm/PassRegistry.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Target/TargetMachine.h"
38 
39 #include <array>
40 #include <memory>
41 
42 using namespace llvm;
43 
44 #define DEBUG_TYPE "ml-regalloc"
45 
46 // Generated header in release (AOT) mode
47 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
48 #include "RegallocEvictModel.h"
49 #endif
50 
51 // Options that only make sense in development mode
52 #ifdef LLVM_HAVE_TF_API
53 static cl::opt<std::string> TrainingLog(
54     "regalloc-training-log", cl::Hidden,
55     cl::desc("Training log for the register allocator eviction model"));
56 
57 static cl::opt<std::string> ModelUnderTraining(
58     "regalloc-model", cl::Hidden,
59     cl::desc("The model being trained for register allocation eviction"));
60 
61 #endif // #ifdef LLVM_HAVE_TF_API
62 
63 /// The score injection pass.
64 /// This pass calculates the score for a function and inserts it in the log, but
65 /// this happens only in development mode. It's a no-op otherwise.
66 namespace llvm {
67 class RegAllocScoring : public MachineFunctionPass {
68 public:
69   static char ID;
70 
71   RegAllocScoring() : MachineFunctionPass(ID) {
72     initializeRegAllocScoringPass(*PassRegistry::getPassRegistry());
73   }
74 
75   ~RegAllocScoring() override = default;
76 
77   StringRef getPassName() const override {
78     return "Register Allocation Pass Scoring";
79   }
80 
81   /// RegAllocReward analysis usage.
82   void getAnalysisUsage(AnalysisUsage &AU) const override {
83     AU.setPreservesAll();
84     AU.addRequired<RegAllocEvictionAdvisorAnalysis>();
85     AU.addRequired<MachineBlockFrequencyInfo>();
86     AU.addRequired<AAResultsWrapperPass>();
87     MachineFunctionPass::getAnalysisUsage(AU);
88   }
89 
90   /// Performs this pass
91   bool runOnMachineFunction(MachineFunction &) override;
92 };
93 
94 char RegAllocScoring::ID = 0;
95 FunctionPass *createRegAllocScoringPass() { return new RegAllocScoring(); }
96 
97 } // namespace llvm
98 
99 INITIALIZE_PASS(RegAllocScoring, "regallocscoringpass",
100                 "Register Allocation Scoring Pass", false, false)
101 
102 // ===================================
103 // Common ML Advisor declarations
104 // ===================================
105 namespace {
// This is the maximum number of interfering ranges. That's the number of
// distinct AllocationOrder values, which comes from MCRegisterClass::RegsSize.
// For X86, that's 32.
// TODO: find a way to get this statically, in a programmatic way.
110 static const int64_t MaxInterferences = 32;
111 
112 // Logically, we can think of the feature set given to the evaluator as a 2D
113 // matrix. The rows are the features (see next). The columns correspond to the
114 // interferences. We treat the candidate virt reg as an 'interference', too, as
// its feature set is the same as that of the interfering ranges. So we'll have
116 // MaxInterferences + 1 columns and by convention, we will use the last column
117 // for the virt reg seeking allocation.
118 static const int64_t CandidateVirtRegPos = MaxInterferences;
119 static const int64_t NumberOfInterferences = CandidateVirtRegPos + 1;
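// For example, with MaxInterferences == 32, the feature matrix has 33 columns:
// columns 0..31 for the interfering ranges (in AllocationOrder order), and
// column 32 (CandidateVirtRegPos) for the candidate virt reg itself.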
120 
121 // Most features are as described above, so we'll reuse this vector in defining
122 // them.
123 static const std::vector<int64_t> PerLiveRangeShape{1, NumberOfInterferences};
124 
125 // --------------
126 // Features table
127 // --------------
128 // For each interfering live range (incl. the candidate) we collect a number of
129 // features. However, because the features are of different types (and because
130 // of ML best practices), we organize the tensors per feature, not per
131 // candidate. Each such tensor has a scalar value corresponding to the
// interfering live range at that position, in the order given by
// AllocationOrder. The last position corresponds to the virt reg seeking
// allocation. The exception to all this is the progression feature, which is
// just a scalar (see its documentation for details).
// Note on naming: the "_by_max" features are normalized using the largest
// value of that tensor, as observed in the current decision making stage (i.e.
// for the current call to the advisor's tryFindEvictionCandidate).
139 //
140 // The feature list format: type, name, shape, documentation.
141 // Note: we can really just use int64 and float, hence the modeling of some
142 // bools as int64 values.
143 #define RA_EVICT_FEATURES_LIST(M)                                              \
144   M(int64_t, mask, PerLiveRangeShape,                                          \
145     "boolean values, 0 for unavailable candidates (i.e. if a position is 0, "  \
146     "it "                                                                      \
147     "can't be evicted)")                                                       \
148   M(int64_t, is_free, PerLiveRangeShape,                                       \
149     "boolean values, 1 if this phys reg is actually free (no interferences)")  \
150   M(float, nr_urgent, PerLiveRangeShape,                                       \
151     "number of 'urgent' intervals, normalized. Urgent are those that are OK "  \
152     "to break cascades")                                                       \
153   M(float, nr_broken_hints, PerLiveRangeShape,                                 \
154     "if this position were evicted, how many broken hints would there be")     \
155   M(int64_t, is_hint, PerLiveRangeShape,                                       \
156     "is this a preferred phys reg for the candidate")                          \
157   M(int64_t, is_local, PerLiveRangeShape,                                      \
158     "is this live range local to a basic block")                               \
159   M(float, nr_rematerializable, PerLiveRangeShape,                             \
160     "nr rematerializable ranges")                                              \
161   M(float, nr_defs_and_uses, PerLiveRangeShape,                                \
162     "bb freq - weighed nr defs and uses")                                      \
163   M(float, weighed_reads_by_max, PerLiveRangeShape,                            \
164     "bb freq - weighed nr of reads, normalized")                               \
165   M(float, weighed_writes_by_max, PerLiveRangeShape,                           \
166     "bb feq - weighed nr of writes, normalized")                               \
167   M(float, weighed_read_writes_by_max, PerLiveRangeShape,                      \
168     "bb freq - weighed nr of uses that are both read and writes, normalized")  \
169   M(float, weighed_indvars_by_max, PerLiveRangeShape,                          \
170     "bb freq - weighed nr of uses that are indvars, normalized")               \
171   M(float, hint_weights_by_max, PerLiveRangeShape,                             \
172     "bb freq - weighed nr of uses that are hints, normalized")                 \
173   M(float, start_bb_freq_by_max, PerLiveRangeShape,                            \
174     "the freq in the start block, normalized")                                 \
175   M(float, end_bb_freq_by_max, PerLiveRangeShape,                              \
176     "freq of end block, normalized")                                           \
177   M(float, hottest_bb_freq_by_max, PerLiveRangeShape,                          \
178     "hottest BB freq, normalized")                                             \
179   M(float, liverange_size, PerLiveRangeShape,                                  \
180     "size (instr index diff) of the LR")                                       \
181   M(float, use_def_density, PerLiveRangeShape,                                 \
182     "the max weight, as computed by the manual heuristic")                     \
183   M(int64_t, max_stage, PerLiveRangeShape,                                     \
184     "largest stage of an interval in this LR")                                 \
185   M(int64_t, min_stage, PerLiveRangeShape,                                     \
186     "lowest stage of an interval in this LR")                                  \
187   M(float, progress, {1}, "ratio of current queue size to initial size")
188 
189 // The model learns to pick one of the mask == 1 interferences. This is the name
190 // of the output tensor.
// The contract with the model is that the output will be guaranteed to be a
// mask == 1 position.
// Using a macro here to avoid 'not used' warnings (and keep conditional
// compilation to a minimum).
195 #define DecisionName "index_to_evict"
196 
197 // Named features index.
198 enum FeatureIDs {
199 #define _FEATURE_IDX(_, name, __, ___) name,
200   RA_EVICT_FEATURES_LIST(_FEATURE_IDX)
201 #undef _FEATURE_IDX
202       FeatureCount
203 };
204 
205 // The ML advisor will typically have a sparse input to the evaluator, because
206 // various phys regs won't be available. It's easier (maintenance-wise) to
207 // bulk-reset the state of the evaluator each time we are about to use it again.
208 template <typename T> size_t getTotalSize(const std::vector<int64_t> &Shape) {
209   size_t Ret = sizeof(T);
210   for (const auto V : Shape)
211     Ret *= V;
212   return Ret;
213 }
214 
215 void resetInputs(MLModelRunner &Runner) {
216 #define _RESET(TYPE, NAME, SHAPE, __)                                          \
217   std::memset(Runner.getTensorUntyped(FeatureIDs::NAME), 0,                    \
218               getTotalSize<TYPE>(SHAPE));
219   RA_EVICT_FEATURES_LIST(_RESET)
220 #undef _RESET
221 }
222 
223 using CandidateRegList =
224     std::array<std::pair<MCRegister, bool>, NumberOfInterferences>;
225 using FeaturesListNormalizer = std::array<float, FeatureIDs::FeatureCount>;
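// A FeaturesListNormalizer holds, per feature, the largest value observed
// during a single eviction query; it is used for normalization in
// tryFindEvictionCandidate.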
226 
227 /// The ML evictor (commonalities between release and development mode)
228 class MLEvictAdvisor : public RegAllocEvictionAdvisor {
229 public:
230   MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
231                  MLModelRunner *Runner, const MachineBlockFrequencyInfo &MBFI,
232                  const MachineLoopInfo &Loops);
233 
234 protected:
235   const RegAllocEvictionAdvisor &getDefaultAdvisor() const {
236     return static_cast<const RegAllocEvictionAdvisor &>(DefaultAdvisor);
237   }
238 
  // The assumption is that if the Runner could not be constructed, we emitted
  // an error, and we shouldn't be asking for it here.
241   const MLModelRunner &getRunner() const { return *Runner; }
242 
243   /// This just calls Evaluate on the Runner, but in the development mode case,
244   /// if we're just capturing the log of the default advisor, it needs to call
245   /// the latter instead, so we need to pass all the necessary parameters for
246   /// it. In the development case, it will also log.
247   virtual int64_t tryFindEvictionCandidatePosition(
248       LiveInterval &VirtReg, const AllocationOrder &Order, unsigned OrderLimit,
249       uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const;
250 
251   /// Load the features of the given VirtReg (allocated or not) at column Pos,
  /// but if it can't be evicted, return false instead.
253   bool
254   loadInterferenceFeatures(LiveInterval &VirtReg, MCRegister PhysReg,
255                            bool IsHint, const SmallVirtRegSet &FixedRegisters,
256                            std::array<float, FeatureIDs::FeatureCount> &Largest,
257                            size_t Pos) const;
258 
259 private:
260   static float getInitialQueueSize(const MachineFunction &MF);
261 
262   MCRegister tryFindEvictionCandidate(
263       LiveInterval &VirtReg, const AllocationOrder &Order,
264       uint8_t CostPerUseLimit,
265       const SmallVirtRegSet &FixedRegisters) const override;
266 
267   void extractFeatures(const SmallVectorImpl<LiveInterval *> &Intervals,
268                        std::array<float, FeatureIDs::FeatureCount> &Largest,
269                        size_t Pos, int64_t IsHint, int64_t LocalIntfsCount,
270                        float NrUrgent) const;
271 
272   // Point-in-time: we didn't learn this, so we always delegate to the default.
273   bool canEvictHintInterference(
274       LiveInterval &VirtReg, MCRegister PhysReg,
275       const SmallVirtRegSet &FixedRegisters) const override {
276     return getDefaultAdvisor().canEvictHintInterference(VirtReg, PhysReg,
277                                                         FixedRegisters);
278   }
279 
280   // Hold on to a default advisor for:
281   // 1) the implementation of canEvictHintInterference, because we didn't learn
282   // that nuance yet;
  // 2) bootstrapping (logging) in the development mode case.
284   const DefaultEvictionAdvisor DefaultAdvisor;
285   MLModelRunner *const Runner;
286   const MachineBlockFrequencyInfo &MBFI;
287   const MachineLoopInfo &Loops;
288 
289   // Indices of those features we don't want to normalize.
290   // This could be static and shared, but its initialization is non-trivial.
291   std::bitset<FeatureIDs::FeatureCount> DoNotNormalize;
292   const float InitialQSize;
293 };
294 
295 // ===================================
296 // Release (AOT) - specifics
297 // ===================================
298 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
299 const std::array<std::string, FeatureIDs::FeatureCount> FeatureNames{
300 #define _GETNAME(_, NAME, __, ___) #NAME,
301     RA_EVICT_FEATURES_LIST(_GETNAME)
302 #undef _GETNAME
303 };
304 class ReleaseModeEvictionAdvisorAnalysis final
305     : public RegAllocEvictionAdvisorAnalysis {
306 public:
307   ReleaseModeEvictionAdvisorAnalysis()
308       : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Release) {}
309   // support for isa<> and dyn_cast.
310   static bool classof(const RegAllocEvictionAdvisorAnalysis *R) {
311     return R->getAdvisorMode() == AdvisorMode::Release;
312   }
313 
314 private:
315   void getAnalysisUsage(AnalysisUsage &AU) const override {
316     AU.addRequired<MachineBlockFrequencyInfo>();
317     AU.addRequired<MachineLoopInfo>();
318     RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU);
319   }
320 
321   std::unique_ptr<RegAllocEvictionAdvisor>
322   getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
323     if (!Runner)
324       Runner = std::make_unique<ReleaseModeModelRunner<RegallocEvictModel>>(
325           MF.getFunction().getContext(), FeatureNames, DecisionName);
326     return std::make_unique<MLEvictAdvisor>(
327         MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
328         getAnalysis<MachineLoopInfo>());
329   }
330   std::unique_ptr<ReleaseModeModelRunner<RegallocEvictModel>> Runner;
331 };
332 #endif
333 
334 // ===================================
335 // Development mode-specifics
336 // ===================================
337 //
338 // Features we log
339 #ifdef LLVM_HAVE_TF_API
340 #define _DECL_FEATURES(type, name, shape, _)                                   \
341   TensorSpec::createSpec<type>(#name, shape),
342 
343 static const std::vector<TensorSpec> InputFeatures{
344     {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
345 };
346 #undef _DECL_FEATURES
347 static const TensorSpec Output =
348     TensorSpec::createSpec<int64_t>(DecisionName, {1});
349 static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
350 
// Features we bind to the model. The tensor names have a prefix, and we also
// need to include some tensors that the training algorithm expects to be
// present.
354 // TODO: can we just get rid of these?
355 #define _DECL_TRAIN_FEATURES(type, name, shape, _)                             \
356   TensorSpec::createSpec<type>(std::string("action_") + #name, shape),
357 
358 static const std::vector<TensorSpec> TrainingInputFeatures{
359     {RA_EVICT_FEATURES_LIST(_DECL_TRAIN_FEATURES)
360          TensorSpec::createSpec<float>("action_discount", {1}),
361      TensorSpec::createSpec<int32_t>("action_step_type", {1}),
362      TensorSpec::createSpec<float>("action_reward", {1})}};
363 #undef _DECL_TRAIN_FEATURES
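// For example, the 'mask' feature above is bound to the training model as a
// tensor named "action_mask".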
364 
365 class DevelopmentModeEvictAdvisor : public MLEvictAdvisor {
366 public:
367   DevelopmentModeEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
368                               MLModelRunner *Runner,
369                               const MachineBlockFrequencyInfo &MBFI,
370                               const MachineLoopInfo &Loops, Logger *Log)
371       : MLEvictAdvisor(MF, RA, Runner, MBFI, Loops), Log(Log) {}
372 
373 private:
374   int64_t tryFindEvictionCandidatePosition(
375       LiveInterval &VirtReg, const AllocationOrder &Order, unsigned OrderLimit,
376       uint8_t CostPerUseLimit,
377       const SmallVirtRegSet &FixedRegisters) const override;
378 
379   Logger *const Log;
380 };
381 
382 class DevelopmentModeEvictionAdvisorAnalysis final
383     : public RegAllocEvictionAdvisorAnalysis {
384 public:
385   DevelopmentModeEvictionAdvisorAnalysis()
386       : RegAllocEvictionAdvisorAnalysis(AdvisorMode::Development) {}
387   // support for isa<> and dyn_cast.
388   static bool classof(const RegAllocEvictionAdvisorAnalysis *R) {
389     return R->getAdvisorMode() == AdvisorMode::Development;
390   }
391 
  /// Get the logger for the given function, or nullptr if we didn't collect
  /// one. This is used by the RegAllocScoring pass to inject the score.
394   Logger *getLogger(const MachineFunction &MF) const {
395     auto I = LogMap.find(MF.getName());
396     if (I == LogMap.end())
397       return nullptr;
398     return I->second.get();
399   }
400 
401 private:
402   void getAnalysisUsage(AnalysisUsage &AU) const override {
403     AU.addRequired<MachineBlockFrequencyInfo>();
404     AU.addRequired<MachineLoopInfo>();
405     RegAllocEvictionAdvisorAnalysis::getAnalysisUsage(AU);
406   }
407 
408   // Save all the logs (when requested).
409   bool doFinalization(Module &M) override {
410     if (TrainingLog.empty())
411       return false;
412     std::error_code EC;
413     auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
414     if (EC) {
415       M.getContext().emitError(EC.message() + ":" + TrainingLog);
416       return false;
417     }
418     Logger::flushLogs(*OS, LogMap);
419     return false;
420   }
421 
422   std::unique_ptr<RegAllocEvictionAdvisor>
423   getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
424     LLVMContext &Ctx = MF.getFunction().getContext();
425     if (ModelUnderTraining.empty() && TrainingLog.empty()) {
426       Ctx.emitError("Regalloc development mode should be requested with at "
427                     "least logging enabled and/or a training model");
428       return nullptr;
429     }
430     if (!Runner) {
431       if (ModelUnderTraining.empty())
432         Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
433       else
434         Runner = ModelUnderTrainingRunner::createAndEnsureValid(
435             Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
436       if (!Runner) {
437         Ctx.emitError("Regalloc: could not set up the model runner");
438         return nullptr;
439       }
440     }
441 
442     Logger *Log = nullptr;
443     if (!TrainingLog.empty()) {
444       std::vector<LoggedFeatureSpec> LFS;
445       for (const auto &FS : InputFeatures)
446         LFS.push_back({FS, None});
447       if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
448         if (MUTR->outputLoggedFeatureSpecs().size() > 1)
449           append_range(LFS, drop_begin(MUTR->outputLoggedFeatureSpecs()));
450       // We always log the output; in particular, if we're not evaluating, we
451       // don't have an output spec json file. That's why we handle the
452       // 'normal' output separately.
453       LFS.push_back({Output, None});
454       auto I = LogMap.insert(std::make_pair(
455           MF.getFunction().getName(),
456           std::make_unique<Logger>(LFS, Reward, /*IncludeReward*/ true)));
457       assert(I.second);
458       Log = I.first->second.get();
459     }
460     return std::make_unique<DevelopmentModeEvictAdvisor>(
461         MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
462         getAnalysis<MachineLoopInfo>(), Log);
463   }
464 
465   std::unique_ptr<MLModelRunner> Runner;
466   StringMap<std::unique_ptr<Logger>> LogMap;
467 };
468 #endif //#ifdef LLVM_HAVE_TF_API
469 } // namespace
470 
471 float MLEvictAdvisor::getInitialQueueSize(const MachineFunction &MF) {
472   auto &MRI = MF.getRegInfo();
473   float Ret = 0.0;
474   for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
475     Register Reg = Register::index2VirtReg(I);
476     if (MRI.reg_nodbg_empty(Reg))
477       continue;
478     ++Ret;
479   }
480   return Ret;
481 }
482 
483 MLEvictAdvisor::MLEvictAdvisor(const MachineFunction &MF, const RAGreedy &RA,
484                                MLModelRunner *Runner,
485                                const MachineBlockFrequencyInfo &MBFI,
486                                const MachineLoopInfo &Loops)
487     : RegAllocEvictionAdvisor(MF, RA), DefaultAdvisor(MF, RA),
488       Runner(std::move(Runner)), MBFI(MBFI), Loops(Loops),
489       InitialQSize(MLEvictAdvisor::getInitialQueueSize(MF)) {
490   assert(this->Runner);
491   DoNotNormalize.set(FeatureIDs::mask);
492   DoNotNormalize.set(FeatureIDs::is_free);
493   DoNotNormalize.set(FeatureIDs::is_hint);
494   DoNotNormalize.set(FeatureIDs::is_local);
495   DoNotNormalize.set(FeatureIDs::min_stage);
496   DoNotNormalize.set(FeatureIDs::max_stage);
497   DoNotNormalize.set(FeatureIDs::progress);
498 }
499 
500 int64_t MLEvictAdvisor::tryFindEvictionCandidatePosition(
501     LiveInterval &, const AllocationOrder &, unsigned, uint8_t,
502     const SmallVirtRegSet &) const {
503   int64_t Ret = Runner->evaluate<int64_t>();
504   assert(Ret >= 0);
505   assert(Ret <= CandidateVirtRegPos);
506   return Ret;
507 }
508 
509 bool MLEvictAdvisor::loadInterferenceFeatures(
510     LiveInterval &VirtReg, MCRegister PhysReg, bool IsHint,
511     const SmallVirtRegSet &FixedRegisters, FeaturesListNormalizer &Largest,
512     size_t Pos) const {
513   // It is only possible to evict virtual register interference.
514   if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) {
515     // leave unavailable
516     return false;
517   }
518 
519   const bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
520   int64_t LocalIntfs = 0;
521   float NrUrgent = 0.0f;
522 
523   // The cascade tracking is the same as in the default advisor
524   unsigned Cascade = RA.getExtraInfo().getCascadeOrCurrentNext(VirtReg.reg());
525 
526   SmallVector<LiveInterval *, MaxInterferences> InterferingIntervals;
527   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
528     LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // Unlike the default heuristic, we don't make any assumptions about what
    // having more than 10 results in the query may mean.
531     const auto &IFIntervals = Q.interferingVRegs();
532     if (IFIntervals.empty() && InterferingIntervals.empty())
533       continue;
534     InterferingIntervals.append(IFIntervals.begin(), IFIntervals.end());
535     for (LiveInterval *Intf : reverse(IFIntervals)) {
536       assert(Register::isVirtualRegister(Intf->reg()) &&
537              "Only expecting virtual register interference from query");
538       // This is the same set of legality checks as in the default case: don't
539       // try to evict fixed regs or 'done' ones. Also don't break cascades,
540       // except in the urgent case, with the same nuances used in the default
541       // heuristic.
542       // We could try sharing this between the advisors, but it may end up
543       // more complex than it is right now.
544       if (FixedRegisters.count(Intf->reg()))
545         return false;
546       if (RA.getExtraInfo().getStage(*Intf) == RS_Done)
547         return false;
548       bool Urgent =
549           !VirtReg.isSpillable() &&
550           (Intf->isSpillable() ||
551            RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg())) <
552                RegClassInfo.getNumAllocatableRegs(
553                    MRI->getRegClass(Intf->reg())));
554       // Only evict older cascades or live ranges without a cascade.
555       unsigned IntfCascade = RA.getExtraInfo().getCascade(Intf->reg());
556       if (Cascade <= IntfCascade) {
557         if (!Urgent)
558           return false;
559         ++NrUrgent;
560       }
561 
562       LocalIntfs += (IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
563                      (!EnableLocalReassign || !canReassign(*Intf, PhysReg)));
564     }
565   }
  // If we made it this far, this LR is an eviction candidate; load its
  // features.
568   extractFeatures(InterferingIntervals, Largest, Pos, IsHint, LocalIntfs,
569                   NrUrgent);
570   return true;
571 }
572 
573 MCRegister MLEvictAdvisor::tryFindEvictionCandidate(
574     LiveInterval &VirtReg, const AllocationOrder &Order,
575     uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
576   auto MaybeOrderLimit = getOrderLimit(VirtReg, Order, CostPerUseLimit);
577   if (!MaybeOrderLimit)
578     return MCRegister::NoRegister;
579   unsigned OrderLimit = *MaybeOrderLimit;
580 
  // The heuristic sets initial costs such that, if CostPerUseLimit is
  // max<uint8_t>, any of the costs of the legally-evictable intervals would
  // be lower. When that happens, one of those will be selected. Therefore, we
  // allow the candidate to be selected, unless it is unspillable, in which
  // case it would be incorrect not to find a register for it.
587   const bool MustFindEviction =
588       (!VirtReg.isSpillable() && CostPerUseLimit == static_cast<uint8_t>(~0u));
589   // Number of available candidates - if 0, no need to continue.
590   size_t Available = 0;
591   // Make sure we don't have leftover partial state from an attempt where we had
592   // no available candidates and bailed out early.
593   resetInputs(*Runner);
594 
595   // Track the index->register mapping because AllocationOrder doesn't do that
596   // and we'd have to scan it.
  // Also track their mask, for asserts/debugging.
598   CandidateRegList Regs;
599   Regs.fill({0, false});
600 
601   // Track the largest value of features seen during this eviction session. We
602   // only normalize (some of) the float features, but it's just simpler to
603   // dimension 'Largest' to all the features, especially since we have the
604   // 'DoNotNormalize' list.
605   FeaturesListNormalizer Largest;
606   Largest.fill(0.0);
607 
  // Same overall idea as in the default eviction policy - we visit the values
  // of AllocationOrder one at a time. If a register is not legally available,
  // we mask off the corresponding feature column (== do nothing, because we
  // already reset all the features to 0).
  // Use Pos to capture the column we load features at, in AllocationOrder
  // order.
614   size_t Pos = 0;
615   for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit); I != E;
616        ++I, ++Pos) {
    MCRegister PhysReg = *I;
    assert(PhysReg);
    // Start masked off; only mark the position evictable once its features
    // have actually been loaded.
    Regs[Pos] = std::make_pair(PhysReg, false);
    if (!canAllocatePhysReg(CostPerUseLimit, PhysReg))
      continue;
624     if (loadInterferenceFeatures(VirtReg, PhysReg, I.isHint(), FixedRegisters,
625                                  Largest, Pos)) {
626       ++Available;
627       Regs[Pos].second = true;
628     }
629   }
630   if (Available == 0) {
631     // Nothing to decide, nothing to learn.
632     assert(!MustFindEviction);
633     return MCRegister::NoRegister;
634   }
  // If we must find an eviction, the candidate should be masked out of the
  // decision-making process.
637   Regs[CandidateVirtRegPos].second = !MustFindEviction;
638   if (!MustFindEviction)
639     extractFeatures(SmallVector<LiveInterval *, 1>(1, &VirtReg), Largest,
640                     CandidateVirtRegPos, /*IsHint*/ 0, /*LocalIntfsCount*/ 0,
641                     /*NrUrgent*/ 0.0);
642   assert(InitialQSize > 0.0 && "We couldn't have gotten here if we had "
643                                "nothing to allocate initially.");
644   // Normalize the features.
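  // Replace zero maxima with 1 so the division below is a no-op for all-zero
  // features and we avoid dividing by zero.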
645   for (auto &V : Largest)
646     V = V ? V : 1.0;
647   for (size_t FeatureIndex = 0; FeatureIndex < FeatureIDs::FeatureCount;
648        ++FeatureIndex) {
649     if (DoNotNormalize.test(FeatureIndex))
650       continue;
651     for (size_t Pos = 0; Pos < NumberOfInterferences; ++Pos) {
652       Runner->getTensor<float>(FeatureIndex)[Pos] /= Largest[FeatureIndex];
653     }
654   }
655   *Runner->getTensor<float>(FeatureIDs::progress) =
656       static_cast<float>(RA.getQueueSize()) / InitialQSize;
657 
658   // Get a decision.
659   size_t CandidatePos = tryFindEvictionCandidatePosition(
660       VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
661   // The contract with the ML side is that CandidatePos is mask == 1 (i.e.
662   // Regs[CandidatePos].second)
663   assert(Regs[CandidatePos].second);
664   if (CandidatePos == CandidateVirtRegPos) {
665     assert(!MustFindEviction);
666     return MCRegister::NoRegister;
667   }
668   return Regs[CandidatePos].first;
669 }
670 
671 // Overall, this currently mimics what we do for weight calculation, but instead
// of accumulating the various features, we keep them separate.
673 void MLEvictAdvisor::extractFeatures(
674     const SmallVectorImpl<LiveInterval *> &Intervals,
675     std::array<float, FeatureIDs::FeatureCount> &Largest, size_t Pos,
676     int64_t IsHint, int64_t LocalIntfsCount, float NrUrgent) const {
677   int64_t NrDefsAndUses = 0;
678   int64_t NrBrokenHints = 0;
679   float R = 0;
680   float W = 0;
681   float RW = 0;
682   float IndVarUpdates = 0;
683   float HintWeights = 0.0;
684   float StartBBFreq = 0.0;
685   float EndBBFreq = 0.0;
686   float HottestBlockFreq = 0.0;
687   int32_t NrRematerializable = 0;
688   float TotalWeight = 0.0;
689 
690   SlotIndex EndSI = LIS->getSlotIndexes()->getZeroIndex();
691   SlotIndex StartSI = LIS->getSlotIndexes()->getLastIndex();
692   int64_t MaxStage = 0;
693   int64_t MinStage =
694       Intervals.empty() ? 0 : std::numeric_limits<int64_t>::max();
695 
696   for (const auto *L : Intervals) {
697     const LiveInterval &LI = *L;
698     MaxStage = std::max<int64_t>(
699         MaxStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
700     MinStage = std::min<int64_t>(
701         MinStage, static_cast<int64_t>(RA.getExtraInfo().getStage(LI)));
702 
703     TotalWeight = std::max(TotalWeight, LI.weight());
704 
705     if (LI.beginIndex() < StartSI)
706       StartSI = LI.beginIndex();
707 
708     if (LI.endIndex() > EndSI)
709       EndSI = LI.endIndex();
710 
711     SmallPtrSet<MachineInstr *, 8> Visited;
712     const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
713     NrBrokenHints += VRM->hasPreferredPhys(LI.reg());
714 
715     for (MachineRegisterInfo::reg_instr_nodbg_iterator
716              I = MRI->reg_instr_nodbg_begin(LI.reg()),
717              E = MRI->reg_instr_nodbg_end();
718          I != E;) {
719       MachineInstr *MI = &*(I++);
720 
721       ++NrDefsAndUses;
722       if (!Visited.insert(MI).second)
723         continue;
724 
725       if (MI->isIdentityCopy() || MI->isImplicitDef())
726         continue;
727 
728       bool Reads, Writes;
729       std::tie(Reads, Writes) = MI->readsWritesVirtualRegister(LI.reg());
730 
731       float Freq = MBFI.getBlockFreqRelativeToEntryBlock(MI->getParent());
732       if (Freq > HottestBlockFreq)
733         HottestBlockFreq = Freq;
734       R += (Reads && !Writes) * Freq;
735       W += (!Reads && Writes) * Freq;
736       RW += (Reads && Writes) * Freq;
737 
738       auto *MBB = MI->getParent();
739       auto *Loop = Loops.getLoopFor(MBB);
740       bool IsExiting = Loop ? Loop->isLoopExiting(MBB) : false;
741 
742       if (Writes && IsExiting && LIS->isLiveOutOfMBB(LI, MBB))
743         IndVarUpdates += Freq;
744 
745       if (MI->isCopy() && VirtRegAuxInfo::copyHint(MI, LI.reg(), TRI, *MRI))
746         HintWeights += Freq;
747     }
748     NrRematerializable += VirtRegAuxInfo::isRematerializable(
749         LI, *LIS, *VRM, *MF.getSubtarget().getInstrInfo());
750   }
751   size_t Size = 0;
752   if (!Intervals.empty()) {
753     StartBBFreq =
754         MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(StartSI));
755     if (EndSI >= LIS->getSlotIndexes()->getLastIndex())
756       EndSI = LIS->getSlotIndexes()->getLastIndex().getPrevIndex();
757     EndBBFreq =
758         MBFI.getBlockFreqRelativeToEntryBlock(LIS->getMBBFromIndex(EndSI));
759     Size = StartSI.distance(EndSI);
760   }
761   // Set the features at the column 'Pos'.
762 #define SET(ID, TYPE, VAL)                                                     \
763   do {                                                                         \
764     Runner->getTensor<TYPE>(FeatureIDs::ID)[Pos] = static_cast<TYPE>(VAL);     \
765     if (!DoNotNormalize.test(FeatureIDs::ID))                                  \
766       Largest[FeatureIDs::ID] =                                                \
767           std::max(Largest[FeatureIDs::ID], static_cast<float>(VAL));          \
768   } while (false)
769   SET(mask, int64_t, 1);
770   SET(is_free, int64_t, Intervals.empty());
771   SET(nr_urgent, float, NrUrgent);
772   SET(nr_broken_hints, float, NrBrokenHints);
773   SET(is_hint, int64_t, IsHint);
774   SET(is_local, int64_t, LocalIntfsCount);
775   SET(nr_rematerializable, float, NrRematerializable);
776   SET(nr_defs_and_uses, float, NrDefsAndUses);
777   SET(weighed_reads_by_max, float, R);
778   SET(weighed_writes_by_max, float, W);
779   SET(weighed_read_writes_by_max, float, RW);
780   SET(weighed_indvars_by_max, float, IndVarUpdates);
781   SET(hint_weights_by_max, float, HintWeights);
782   SET(start_bb_freq_by_max, float, StartBBFreq);
783   SET(end_bb_freq_by_max, float, EndBBFreq);
784   SET(hottest_bb_freq_by_max, float, HottestBlockFreq);
785   SET(liverange_size, float, Size);
786   SET(use_def_density, float, TotalWeight);
787   SET(max_stage, int64_t, MaxStage);
788   SET(min_stage, int64_t, MinStage);
789 #undef SET
790 }
791 
792 // Development mode-specific implementations
793 #ifdef LLVM_HAVE_TF_API
794 RegAllocEvictionAdvisorAnalysis *llvm::createDevelopmentModeAdvisor() {
795   return new DevelopmentModeEvictionAdvisorAnalysis();
796 }
797 
798 int64_t DevelopmentModeEvictAdvisor::tryFindEvictionCandidatePosition(
799     LiveInterval &VirtReg, const AllocationOrder &Order, unsigned OrderLimit,
800     uint8_t CostPerUseLimit, const SmallVirtRegSet &FixedRegisters) const {
801   int64_t Ret = 0;
802   if (isa<ModelUnderTrainingRunner>(getRunner())) {
803     Ret = MLEvictAdvisor::tryFindEvictionCandidatePosition(
804         VirtReg, Order, OrderLimit, CostPerUseLimit, FixedRegisters);
805   } else {
806     MCRegister PhysReg = getDefaultAdvisor().tryFindEvictionCandidate(
807         VirtReg, Order, CostPerUseLimit, FixedRegisters);
    // Find the index of the selected PhysReg. We need it for logging;
    // otherwise this is wasted cycles (but so would be starting development
    // mode without a model or logging).
811     if (!PhysReg)
812       Ret = CandidateVirtRegPos;
813     else
814       for (auto I = Order.begin(), E = Order.getOrderLimitEnd(OrderLimit);
815            I != E; ++I, ++Ret)
816         if (*I == PhysReg)
817           break;
818   }
819   if (TrainingLog.empty())
820     return Ret;
821   size_t CurrentFeature = 0;
822   for (; CurrentFeature < FeatureIDs::FeatureCount; ++CurrentFeature) {
823     Log->logSpecifiedTensorValue(
824         CurrentFeature, reinterpret_cast<const char *>(
825                             getRunner().getTensorUntyped(CurrentFeature)));
826   }
827   if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner()))
828     for (size_t I = 1; I < MUTR->outputLoggedFeatureSpecs().size();
829          ++I, ++CurrentFeature)
830       Log->logSpecifiedTensorValue(
831           CurrentFeature,
832           reinterpret_cast<const char *>(
833               MUTR->lastEvaluationResult()->getUntypedTensorValue(I)));
  // The output is right after the features and the extra outputs.
835   Log->logInt64Value(CurrentFeature, &Ret);
836   return Ret;
837 }
838 
839 bool RegAllocScoring::runOnMachineFunction(MachineFunction &MF) {
840   if (auto *DevModeAnalysis = dyn_cast<DevelopmentModeEvictionAdvisorAnalysis>(
841           &getAnalysis<RegAllocEvictionAdvisorAnalysis>()))
842     if (auto *Log = DevModeAnalysis->getLogger(MF))
843       Log->logFloatFinalReward(static_cast<float>(
844           calculateRegAllocScore(
845               MF, getAnalysis<MachineBlockFrequencyInfo>(),
846               getAnalysis<AAResultsWrapperPass>().getAAResults())
847               .getScore()));
848 
849   return false;
850 }
851 #endif // #ifdef LLVM_HAVE_TF_API
852 
853 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL)
854 RegAllocEvictionAdvisorAnalysis *llvm::createReleaseModeAdvisor() {
855   return new ReleaseModeEvictionAdvisorAnalysis();
856 }
857 #endif
858 
859 // In all cases except development mode, we don't need scoring.
860 #if !defined(LLVM_HAVE_TF_API)
861 bool RegAllocScoring::runOnMachineFunction(MachineFunction &) { return false; }
862 #endif
863