//===- RegAllocGreedy.cpp - greedy register allocator ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
    "split-spill-mode", cl::Hidden,
    cl::desc("Spill mode for splitting live ranges"),
    cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
               clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
               clEnumValN(SplitEditor::SM_Speed, "speed",
                          "Optimize for speed")),
    cl::init(SplitEditor::SM_Speed));

static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));

static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of considered"
             " interference at a time"),
    cl::init(8));

static cl::opt<bool> ExhaustiveSearch(
    "exhaustive-register-search", cl::NotHidden,
    cl::desc("Exhaustive Search for registers bypassing the depth "
             "and interference cutoffs of last chance recoloring"));

static cl::opt<bool> EnableLocalReassignment(
    "enable-local-reassign", cl::Hidden,
    cl::desc("Local reassignment can yield better allocation decisions, but "
             "may be compile time intensive"),
    cl::init(false));

static cl::opt<bool> EnableDeferredSpilling(
    "enable-deferred-spilling", cl::Hidden,
    cl::desc("Instead of spilling a variable right away, defer the actual "
             "code insertion to the end of the allocation. That way the "
             "allocator might still find a suitable coloring for this "
             "variable because of other evicted variables."),
    cl::init(false));

static cl::opt<unsigned>
    HugeSizeForSplit("huge-size-for-split", cl::Hidden,
                     cl::desc("A threshold of live range size which may cause "
                              "high compile time cost in global splitting."),
                     cl::init(5000));

// FIXME: Find a good default for this flag and remove the flag.
static cl::opt<unsigned>
CSRFirstTimeCost("regalloc-csr-first-time-cost",
                 cl::desc("Cost for first time use of callee-saved register."),
                 cl::init(0), cl::Hidden);

static cl::opt<bool> ConsiderLocalIntervalCost(
    "consider-local-interval-cost", cl::Hidden,
    cl::desc("Consider the cost of local intervals created by a split "
             "candidate when choosing the best split candidate."),
    cl::init(false));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {

class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
  using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
  using SmallVirtRegSet = SmallSet<unsigned, 16>;

  // context
  MachineFunction *MF;

  // Shortcuts to some useful interface.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineOptimizationRemarkEmitter *ORE;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;
  AliasAnalysis *AA;

  // state
  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they
  // are dequeued.
  // This improves performance because we can skip interference checks that
  // are unlikely to give any results. It also guarantees that the live range
  // splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to
    /// make progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// Live range is in memory. Because of other evictions, it might get
    /// moved into a register in the end.
    RS_Memory,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  // Enum CutOffStage to keep track of whether the register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as a bitmask. New values should be the next power of 2.
  enum CutOffStage {
    // No cutoffs encountered
    CO_None = 0,

    // lcr-max-depth cutoff encountered
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered
    CO_Interf = 2
  };

  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage = RS_New;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade = 0;

    RegInfo() = default;
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints = 0; ///< Total number of broken hints.
    float MaxWeight = 0;      ///< Maximum spill weight evicted.

    EvictionCost() = default;

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };
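
  // Costs compare lexicographically via std::tie, so broken hints dominate
  // spill weight: for example, {BrokenHints = 0, MaxWeight = 5.0f} is
  // considered cheaper than {BrokenHints = 1, MaxWeight = 0.1f}.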

  /// EvictionTrack - Keeps track of past evictions in order to optimize
  /// region split decisions.
  class EvictionTrack {

  public:
    using EvictorInfo =
        std::pair<unsigned /* evictor */, unsigned /* physreg */>;
    using EvicteeInfo = llvm::DenseMap<unsigned /* evictee */, EvictorInfo>;

  private:
    /// Each Vreg that has been evicted in the last stage of selectOrSplit
    /// will be mapped to the evictor Vreg and the PhysReg it was evicted
    /// from.
    EvicteeInfo Evictees;

  public:
    /// Clear all eviction information.
    void clear() { Evictees.clear(); }

    /// Clear eviction information for the given evictee Vreg.
    /// E.g. when Vreg gets a new allocation, the old eviction info is no
    /// longer relevant.
    /// \param Evictee The evictee Vreg for whom we want to clear collected
    ///                eviction info.
    void clearEvicteeInfo(unsigned Evictee) { Evictees.erase(Evictee); }

    /// Track new eviction.
    /// The Evictor vreg has evicted the Evictee vreg from Physreg.
    /// \param PhysReg The physical register Evictee was evicted from.
    /// \param Evictor The evictor Vreg that evicted Evictee.
    /// \param Evictee The evictee Vreg.
    void addEviction(unsigned PhysReg, unsigned Evictor, unsigned Evictee) {
      Evictees[Evictee].first = Evictor;
      Evictees[Evictee].second = PhysReg;
    }

    /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
    /// \param Evictee The evictee vreg.
    /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
    ///         nobody has evicted Evictee from PhysReg.
    EvictorInfo getEvictor(unsigned Evictee) {
      if (Evictees.count(Evictee)) {
        return Evictees[Evictee];
      }

      return EvictorInfo(0, 0);
    }
  };

  // Keeps track of past evictions in order to optimize region split decision.
  EvictionTrack LastEvicted;
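  // For example, if %1 evicts %0 from $eax during selectOrSplit, the map
  // records %0 -> (%1, $eax); a later split of %0 can call getEvictor(%0)
  // to recognize a potential eviction chain through $eax.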

  // splitting state.
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (unsigned i : LiveBundles.set_bits())
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

  /// Whether to run the local reassignment heuristic. This information is
  /// obtained from the TargetSubtargetInfo.
  bool EnableLocalReassign;

  /// Whether to consider the cost of local intervals created by a split
  /// candidate when choosing the best split candidate.
  bool EnableAdvancedRASplitCost;

  /// Set of broken hints that may be reconciled later because of eviction.
  SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;

public:
  RAGreedy();

  /// Return the pass name.
  StringRef getPassName() const override { return "Greedy Register Allocator"; }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueue(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
  void aboutToRemoveInterval(LiveInterval &) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  static char ID;

private:
  unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
                             SmallVirtRegSet &, unsigned = 0);

  bool LRE_CanEraseVirtReg(unsigned) override;
  void LRE_WillShrinkVirtReg(unsigned) override;
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  bool splitCanCauseEvictionChain(unsigned Evictee, GlobalSplitCandidate &Cand,
                                  unsigned BBNumber,
                                  const AllocationOrder &Order);
  bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                               GlobalSplitCandidate &Cand, unsigned BBNumber,
                               const AllocationOrder &Order);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
                                     const AllocationOrder &Order,
                                     bool *CanCauseEvictionChain);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PrevReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  bool canEvictInterferenceInRange(LiveInterval &VirtReg, unsigned PhysReg,
                                   SlotIndex Start, SlotIndex End,
                                   EvictionCost &MaxCost);
  unsigned getCheapestEvicteeWeight(const AllocationOrder &Order,
                                    LiveInterval &VirtReg, SlotIndex Start,
                                    SlotIndex End, float *BestEvictWeight);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);
  bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
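  /// Try to split VirtReg into regions guided by the interference pattern and
  /// assign the products, appending any new live ranges to NewVRegs.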
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  unsigned isSplitBenefitWorthCost(LiveInterval &VirtReg);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR,
                                    bool *CanCauseEvictionChain = nullptr);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<unsigned> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
                                 unsigned PhysReg, unsigned &CostPerUseLimit,
                                 SmallVectorImpl<unsigned> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<unsigned> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
                               SmallVirtRegSet &, unsigned);
  void tryHintRecoloring(LiveInterval &);
  void tryHintsRecoloring();

  /// Model the information carried by one end of a copy.
  struct HintInfo {
    /// The frequency of the copy.
    BlockFrequency Freq;
    /// The virtual register or physical register.
    unsigned Reg;
    /// Its currently assigned register.
    /// In case of a physical register Reg == PhysReg.
    unsigned PhysReg;

    HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
        : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
  };
  using HintsInfo = SmallVector<HintInfo, 4>;

  BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
  void collectHintInfo(unsigned, HintsInfo &);
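  // Illustrative flow: collectHintInfo(%0, Info) gathers the copies tied to
  // %0, and getBrokenHintFreq(Info, $rbx) then sums the frequency of copies
  // that would not be satisfied if %0 were assigned $rbx.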

  bool isUnusedCalleeSavedReg(unsigned PhysReg) const;

  /// Compute and report the number of spills and reloads for a loop.
  void reportNumberOfSpillsReloads(MachineLoop *L, unsigned &Reloads,
                                   unsigned &FoldedReloads, unsigned &Spills,
                                   unsigned &FoldedSpills);

  /// Report the number of spills and reloads for each loop.
  void reportNumberOfSpillsReloads() {
    for (MachineLoop *L : *Loops) {
      unsigned Reloads, FoldedReloads, Spills, FoldedSpills;
      reportNumberOfSpillsReloads(L, Reloads, FoldedReloads, Spills,
                                  FoldedSpills);
    }
  }
};

} // end anonymous namespace

char RAGreedy::ID = 0;
char &llvm::RAGreedyID = RAGreedy::ID;

INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
                      "Greedy Register Allocator", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(RAGreedy, "greedy",
                    "Greedy Register Allocator", false, false)

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Memory",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
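// For example, with a hysteresis of 2007/2048 a challenger cost must be
// roughly 2% better than the incumbent before a comparison scaled by
// Hysteresis switches to it, so near-equal float costs do not flip-flop.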

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  LiveInterval &LI = LIS->getInterval(VirtReg);
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  // Nonetheless, clear the live-range so that the debug
  // dump will show the right state for that VirtReg.
  LI.clear();
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much
  // smaller than the original, so they should get a new chance at being
  // assigned. The clone inherits the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
    // Memory operands should be considered last.
    // Change the priority such that memory operands are assigned in the
    // reverse order that they came in.
    // TODO: Make this a member variable and probably do something about hints.
    static unsigned MemOp = 0;
    Prio = MemOp++;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since
      // they are singly defined, this produces optimal coloring in the
      // absence of global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges
      // that don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
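      // An informal sketch of the priority word built for global and split
      // ranges below (the local case above packs RC.AllocationPriority into
      // bits 24+ instead):
      //   bit 31     global/local ranges sort above deferred RS_Split ranges
      //   bit 30     set when the range has a known register preference
      //   bit 29     global ranges sort above local ranges
      //   low bits   live range size, as a tie-breaking magnitude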
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      LLVM_DEBUG(dbgs() << "missed hint " << printReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
      // Record the missed hint; we may be able to recover
      // at the end if the surrounding allocation changed.
      SetOfBrokenHints.insert(&VirtReg);
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
                    << Cost << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                      << printReg(PrevReg, TRI) << " to "
                      << printReg(PhysReg, TRI) << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation
/// order defined by enqueue() decides which registers ultimately end up being
/// split and spilled.
///
/// Cascade numbers are used to prevent infinite loops in case the eviction
/// relation defined by this function is cyclic.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred
///                   register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg
/// and PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
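  // For example, a range carrying cascade number 3 may evict interference
  // with cascade 0 (never involved in an eviction), 1, or 2, but not 3 or
  // higher; evictInterference() then stamps the evictees with cascade 3.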
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to
      // suboptimal coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// Return true if all interferences between VirtReg and PhysReg between
/// Start and End can be evicted.
///
/// \param VirtReg Live range that is about to be assigned.
/// \param PhysReg Desired register for assignment.
/// \param Start   Start of range to look for interferences.
/// \param End     End of range to look for interferences.
/// \param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// \return True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg,
                                           unsigned PhysReg, SlotIndex Start,
                                           SlotIndex End,
                                           EvictionCost &MaxCost) {
  EvictionCost Cost;

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];

      // Check if the interference overlaps the segment of interest.
      if (!Intf->overlaps(Start, End))
        continue;

      // Cannot evict non-virtual register interference.
      if (!TargetRegisterInfo::isVirtualRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;

      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
    }
  }

  if (Cost.MaxWeight == 0)
    return false;

  MaxCost = Cost;
  return true;
}

/// Return the physical register that is the best candidate for eviction by a
/// local split interval that will be created between Start and End.
///
/// \param Order           The allocation order.
/// \param VirtReg         Live range that is about to be assigned.
/// \param Start           Start of range to look for interferences.
/// \param End             End of range to look for interferences.
/// \param BestEvictWeight The eviction cost of that eviction.
/// \return The PhysReg which is the best candidate for eviction and the
///         eviction cost in BestEvictWeight.
unsigned RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
                                            LiveInterval &VirtReg,
                                            SlotIndex Start, SlotIndex End,
                                            float *BestEvictWeight) {
  EvictionCost BestEvictCost;
  BestEvictCost.setMax();
  BestEvictCost.MaxWeight = VirtReg.weight;
  unsigned BestEvicteePhys = 0;

  // Go over all physical registers and find the best candidate for eviction.
  for (auto PhysReg : Order.getOrder()) {
    if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
                                     BestEvictCost))
      continue;

    // Best so far.
    BestEvicteePhys = PhysReg;
  }
  *BestEvictWeight = BestEvictCost.MaxWeight;
  return BestEvicteePhys;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
                    << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // We usually have the interfering VRegs cached, so
    // collectInterferingVRegs() should be fast. We may need to recalculate
    // when different physregs overlap the same register unit, because
    // different SubRanges were queried against it.
    Q.collectInterferingVRegs();
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
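    // (An interval already unassigned on an earlier iteration of this loop
    // no longer has a physical register, so the hasPhys() check filters the
    // duplicate out.)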
    if (!VRM->hasPhys(Intf->reg))
      continue;

    LastEvicted.addEviction(PhysReg, VirtReg.reg, Intf->reg);

    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// Returns true if the given \p PhysReg is a callee-saved register and has
/// not been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
  unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (CSR == 0)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
                     TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
                        << MinCost << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
                        << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
      LLVM_DEBUG(
          dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
                 << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
                 << '\n');
      continue;
    }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
        BC.Entry = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.first() < BI.FirstInstr) {
        BC.Entry = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.first() < BI.LastInstr) {
        ++Ins;
      }
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
        BC.Exit = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.last() > BI.LastInstr) {
        BC.Exit = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.last() > BI.FirstInstr) {
        ++Ins;
      }
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
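/// Constraints and links are buffered in fixed-size groups (GroupSize below)
/// and flushed to the SpillPlacer in batches, with any partial group flushed
/// after the loop.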
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  while (true) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  LLVM_DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live
/// range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  LLVM_DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no
  // interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    LLVM_DEBUG(dbgs() << ", none.\n");
    return false;
  }

  LLVM_DEBUG({
    for (int i : Cand.LiveBundles.set_bits())
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range
/// in SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// Check if splitting Evictee will create a local split interval in
/// basic block number BBNumber that may cause a bad eviction chain. This is
/// intended to prevent bad eviction sequences like:
/// movl %ebp, 8(%esp)   # 4-byte Spill
/// movl %ecx, %ebp
/// movl %ebx, %ecx
/// movl %edi, %ebx
/// movl %edx, %edi
/// cltd
/// idivl %esi
/// movl %edi, %edx
/// movl %ebx, %edi
/// movl %ecx, %ebx
/// movl %ebp, %ecx
/// movl 16(%esp), %ebp  # 4-byte Reload
///
/// Such sequences are created in 2 scenarios:
///
/// Scenario #1:
/// %0 is evicted from physreg0 by %1.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg0 (the reg %0 was evicted from).
/// Region splitting creates a local interval because of interference with
/// the evictor %1 (normally region splitting creates 2 intervals, the "by
/// reg" and "by stack" intervals, plus a local interval created when
/// interference occurs).
/// One of the split intervals ends up evicting %2 from physreg1.
/// Evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// Scenario #2
/// %0 is evicted from physreg0 by %1.
/// %2 is evicted from physreg2 by %3 etc.
/// Evictee %0 is intended for region splitting with split candidate
/// physreg1.
/// Region splitting creates a local interval because of interference with
/// the evictor %1.
/// One of the split intervals ends up evicting back the original evictor %1
/// from physreg0 (the reg %0 was evicted from).
/// Another evictee %2 is intended for region splitting with split candidate
/// physreg1.
/// One of the split intervals ends up evicting %3 from physreg2, etc.
///
/// \param Evictee  The register considered to be split.
/// \param Cand     The split candidate that determines the physical register
///                 we are splitting for and the interferences.
/// \param BBNumber The number of a BB for which the region split process
///                 will create a local split interval.
/// \param Order    The physical registers that may get evicted by a split
///                 artifact of Evictee.
/// \return True if splitting Evictee may cause a bad eviction chain, false
///         otherwise.
bool RAGreedy::splitCanCauseEvictionChain(unsigned Evictee,
                                          GlobalSplitCandidate &Cand,
                                          unsigned BBNumber,
                                          const AllocationOrder &Order) {
  EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
  unsigned Evictor = VregEvictorInfo.first;
  unsigned PhysReg = VregEvictorInfo.second;

  // No actual evictor.
  if (!Evictor || !PhysReg)
    return false;

  float MaxWeight = 0;
  unsigned FutureEvictedPhysReg =
      getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
                               Cand.Intf.first(), Cand.Intf.last(),
                               &MaxWeight);

  // The bad eviction chain occurs when either the split candidate is the
  // evicting reg or one of the split artifacts will evict the evicting reg.
  if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
    return false;

  Cand.Intf.moveToBlock(BBNumber);

  // Check to see if the Evictor contains interference (with Evictee) in the
  // given BB. If so, this interference caused the eviction of Evictee from
  // PhysReg. This suggests that we will create a local interval during the
  // region split to avoid this interference. This local interval may cause a
  // bad eviction chain.
  if (!LIS->hasInterval(Evictor))
    return false;
  LiveInterval &EvictorLI = LIS->getInterval(Evictor);
  if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
    return false;

  // Now, check to see if the local interval we will create is going to be
  // expensive enough to evict somebody. If so, this may cause a bad eviction
  // chain.
  VirtRegAuxInfo VRAI(*MF, *LIS, VRM, getAnalysis<MachineLoopInfo>(), *MBFI);
  float splitArtifactWeight =
      VRAI.futureWeight(LIS->getInterval(Evictee),
                        Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
  if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
    return false;

  return true;
}

/// Check if splitting VirtRegToSplit will create a local split interval
/// in basic block number BBNumber that may cause a spill.
///
/// \param VirtRegToSplit The register considered to be split.
/// \param Cand           The split candidate that determines the physical
///                       register we are splitting for and the interferences.
/// \param BBNumber       The number of a BB for which the region split
///                       process will create a local split interval.
/// \param Order          The physical registers that may get evicted by a
///                       split artifact of VirtRegToSplit.
/// Check if splitting VirtRegToSplit will create a local split interval
/// in basic block number BBNumber that may cause a spill.
///
/// \param VirtRegToSplit The register considered to be split.
/// \param Cand           The split candidate that determines the physical
///                       register we are splitting for and the interferences.
/// \param BBNumber       The number of a BB for which the region split process
///                       will create a local split interval.
/// \param Order          The physical registers that may get evicted by a
///                       split artifact of VirtRegToSplit.
/// \return True if splitting VirtRegToSplit may cause a spill, false
/// otherwise.
bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
                                       GlobalSplitCandidate &Cand,
                                       unsigned BBNumber,
                                       const AllocationOrder &Order) {
  Cand.Intf.moveToBlock(BBNumber);

  // Check if the local interval will find a non-interfering assignment.
  for (auto PhysReg : Order.getOrder()) {
    if (!Matrix->checkInterference(Cand.Intf.first().getPrevIndex(),
                                   Cand.Intf.last(), PhysReg))
      return false;
  }

  // Check if the local interval will evict a cheaper interval.
  float CheapestEvictWeight = 0;
  unsigned FutureEvictedPhysReg = getCheapestEvicteeWeight(
      Order, LIS->getInterval(VirtRegToSplit), Cand.Intf.first(),
      Cand.Intf.last(), &CheapestEvictWeight);

  // Have we found an interval that can be evicted?
  if (FutureEvictedPhysReg) {
    VirtRegAuxInfo VRAI(*MF, *LIS, VRM, getAnalysis<MachineLoopInfo>(), *MBFI);
    float splitArtifactWeight =
        VRAI.futureWeight(LIS->getInterval(VirtRegToSplit),
                          Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
    // Will the weight of the local interval be higher than the cheapest
    // evictee weight? If so it will evict it and will not cause a spill.
    if (splitArtifactWeight >= 0 && splitArtifactWeight > CheapestEvictWeight)
      return false;
  }

  // The local interval is not able to find a non-interfering assignment and
  // not able to evict a less worthy interval, therefore, it can cause a
  // spill.
  return true;
}
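
// In other words, the prospective local interval is judged in three steps
// (weights are hypothetical): if some register in Order is free over its
// span, it costs nothing; if its weight comes out at 5.0 against a cheapest
// evictee of weight 2.0, it evicts rather than spills; if it only reaches
// 1.0 against that same evictee, it can do neither and is assumed to spill.
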
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
                                             const AllocationOrder &Order,
                                             bool *CanCauseEvictionChain) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  unsigned VirtRegToSplit = SA->getParent().reg;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
    unsigned Ins = 0;

    Cand.Intf.moveToBlock(BC.Number);
    // Check whether a local interval is going to be created during the region
    // split. Calculate the advanced split cost (the cost of local intervals)
    // if the option is enabled.
    if (EnableAdvancedRASplitCost && Cand.Intf.hasInterference() && BI.LiveIn &&
        BI.LiveOut && RegIn && RegOut) {

      if (CanCauseEvictionChain &&
          splitCanCauseEvictionChain(VirtRegToSplit, Cand, BC.Number, Order)) {
        // This interference causes our eviction from this assignment, we might
        // evict somebody else and eventually someone will spill, add that
        // cost.
        // See splitCanCauseEvictionChain for detailed description of
        // scenarios.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);

        *CanCauseEvictionChain = true;

      } else if (splitCanCauseLocalSpill(VirtRegToSplit, Cand, BC.Number,
                                         Order)) {
        // This interference causes the local interval to spill, add that cost.
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
        GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
      }
    }

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);

        // Check whether a local interval is going to be created during the
        // region split.
        if (EnableAdvancedRASplitCost && CanCauseEvictionChain &&
            splitCanCauseEvictionChain(VirtRegToSplit, Cand, Number, Order)) {
          // This interference causes our eviction from this assignment, we
          // might evict somebody else, add that cost.
          // See splitCanCauseEvictionChain for detailed description of
          // scenarios.
          GlobalCost += SpillPlacer->getBlockFrequency(Number);
          GlobalCost += SpillPlacer->getBlockFrequency(Number);

          *CanCauseEvictionChain = true;
        }
      }
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}
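
// For example (bundle assignments and frequencies are hypothetical, and the
// advanced local-interval costs above are ignored): a use block with
// frequency 8 that is live-in and live-out, whose entry bundle disagrees
// with a PrefReg entry constraint while its exit agrees, gets Ins == 1 and
// adds 8 to GlobalCost; a live-through block assigned the register on both
// sides with interference inside adds 16, paying for both a spill and a
// reload around the interference.
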
/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
                    << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                          << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals
    // created for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

// Global splitting has a high compile time cost, especially for a large live
// range. Return false when the potential benefit will never be worth that
// cost.
unsigned RAGreedy::isSplitBenefitWorthCost(LiveInterval &VirtReg) {
  MachineInstr *MI = MRI->getUniqueVRegDef(VirtReg.reg);
  if (MI && TII->isTriviallyReMaterializable(*MI, AA) &&
      VirtReg.size() > HugeSizeForSplit)
    return false;
  return true;
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  if (!isSplitBenefitWorthCost(VirtReg))
    return 0;
  unsigned NumCands = 0;
  BlockFrequency SpillCost = calcSpillCost();
  BlockFrequency BestCost;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = SpillCost;
    LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
               MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
  }

  bool CanCauseEvictionChain = false;
  unsigned BestCand =
      calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
                               false /*IgnoreCSR*/, &CanCauseEvictionChain);

  // Split candidates with compact regions can cause a bad eviction sequence.
  // See splitCanCauseEvictionChain for detailed description of scenarios.
  // To avoid it, we need to compare the cost with the spill cost and not the
  // current max frequency.
  if (HasCompact && (BestCost > SpillCost) && (BestCand != NoCand) &&
      CanCauseEvictionChain) {
    return 0;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
}

unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
                                            AllocationOrder &Order,
                                            BlockFrequency &BestCost,
                                            unsigned &NumCands, bool IgnoreCSR,
                                            bool *CanCauseEvictionChain) {
  unsigned BestCand = NoCand;
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
      continue;

    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount) {
          Worst = i;
          WorstCount = Count;
        }
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
               MBFI->printBlockFreq(dbgs(), Cost));
    if (Cost >= BestCost) {
      LLVM_DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      LLVM_DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    bool HasEvictionChain = false;
    Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain);
    LLVM_DEBUG({
      dbgs() << ", total = ";
      MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
      for (int i : Cand.LiveBundles.set_bits())
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
      // See splitCanCauseEvictionChain for detailed description of bad
      // eviction chain scenarios.
      if (CanCauseEvictionChain)
        *CanCauseEvictionChain = HasEvictionChain;
    }
    ++NumCands;
  }

  if (CanCauseEvictionChain && BestCand != NoCand) {
    // See splitCanCauseEvictionChain for detailed description of bad
    // eviction chain scenarios.
    LLVM_DEBUG(dbgs() << "Best split candidate of vreg "
                      << printReg(VirtReg.reg, TRI) << " may ");
    if (!(*CanCauseEvictionChain))
      LLVM_DEBUG(dbgs() << "not ");
    LLVM_DEBUG(dbgs() << "cause bad eviction chain\n");
  }

  return BestCand;
}
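
// Note how BestCost is threaded through as an in/out argument: callers seed
// it with an upper bound (the spill cost in tryRegionSplit, CSRCost in
// tryAssignCSRFirstTime), each accepted candidate lowers it, and later
// candidates are pruned by the Cost >= BestCost check before the expensive
// growRegion call. As a hypothetical run: with a seed of 40, a candidate
// costing 25 becomes the new bound, and a later candidate whose static cost
// alone is 30 is rejected without ever being grown.
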
unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                                 bool HasCompact,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  SmallVector<unsigned, 8> UsedCands;
  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
                        << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      LLVM_DEBUG(dbgs() << "Split for compact region in " << B
                        << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses.
/// This creates a lot of local live ranges that will be split by tryLocalSplit
/// if they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}
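
// As a hypothetical illustration: a range used in blocks B1, B3, and B7 is
// replaced by three block-local ranges connected through the stack. Each
// local range can then be allocated (or split further) on its own, while
// the remainder interval (IntvMap[i] == 0 above) goes straight to spilling.
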
//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p
/// Reg on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
                    << " individual instrs.\n");

  const TargetRegisterClass *SuperRC =
      TRI->getLargestLegalSuperClass(CurRC, *MF);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
              getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
                                                  TRI, RCI)) {
        LLVM_DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    LLVM_DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}
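
// The transform is easiest to see on a sketch (GR32/GR32_ABCD serve only as
// a familiar x86 example): given a GR32_ABCD-constrained range
//   %x = ...
//   use1 %x      ; constrained to GR32_ABCD
//   use2 %x      ; any GR32 will do
// splitting around use1 yields
//   %x = ...
//   %y = COPY %x
//   use1 %y
//   use2 %x
// so only the tiny %y interval keeps the tight constraint, and %x can be
// inflated to the larger register class.
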
//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
      BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
      BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
             .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
        Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // TODO: the function currently only handles a single UseBlock; it should be
  // possible to generalize.
  if (SA->getUseBlocks().size() != 1)
    return 0;

  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  LLVM_DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      LLVM_DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-'
                        << Uses[i + 1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    LLVM_DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
      SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
      (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
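
  // The search below slides a window [SplitBefore, SplitAfter] over the use
  // slots. Roughly: if the estimated weight of the would-be interval beats
  // the worst interference inside the window (MaxGap), the window is
  // recorded as a candidate and then extended to the right; otherwise it
  // shrinks from the left. With 4 uses (3 gaps), a hypothetical run visits
  // the windows 0-1, 0-2, 1-2, 1-3, ... until every legal placement has
  // been scored for this PhysReg.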
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    while (true) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
                        << '-' << Uses[SplitAfter] << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        LLVM_DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(
            blockFreq * (NewGaps + 1),
            Uses[SplitBefore].distance(Uses[SplitAfter]) +
                (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
            1);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        LLVM_DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            LLVM_DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          LLVM_DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        LLVM_DEBUG(dbgs() << " end\n");
        break;
      }

      LLVM_DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }
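
  // To see what the Hysteresis factor buys, assume it is a constant slightly
  // below 1, say 0.98 (its actual value is defined earlier in this file), and
  // take hypothetical weights: an estimated weight of 10.0 against a MaxGap
  // of 9.9 is rejected (10.0 * 0.98 = 9.8 < 9.9), while 10.2 is accepted
  // (10.2 * 0.98 is about 10.0, which is >= 9.9). Requiring a clear margin
  // keeps near-ties from flip-flopping between split decisions on repeated
  // attempts.
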
  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
                    << Uses[BestAfter] << ", " << BestDiff << ", "
                    << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    LLVM_DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        LLVM_DEBUG(dbgs() << printReg(LREdit.get(i)));
      }
    LLVM_DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
                     TimerGroupDescription, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}

//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//

/// Return true if \p reg has any tied def operand.
static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
  for (const MachineOperand &MO : MRI->def_operands(reg))
    if (MO.isTied())
      return true;

  return false;
}

/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool
RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                     SmallLISet &RecoloringCandidates,
                                     const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are LastChanceRecoloringMaxInterference or more interferences,
    // chances are one would not be recolorable.
    if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
        LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
      LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
      CutOffInfo |= CO_Interf;
      return false;
    }
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      // If Intf is done and sits on the same register class as VirtReg, it
      // would not be recolorable as it is in the same state as VirtReg.
      // However, if VirtReg has tied defs and Intf doesn't, then
      // there is still a point in examining if it can be recolorable.
      if (((getStage(*Intf) == RS_Done &&
            MRI->getRegClass(Intf->reg) == CurRC) &&
           !(hasTiedDef(MRI, VirtReg.reg) && !hasTiedDef(MRI, Intf->reg))) ||
          FixedRegisters.count(Intf->reg)) {
        LLVM_DEBUG(
            dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}
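
// A typical call sequence (a condensed sketch of what
// tryLastChanceRecoloring below actually does; Candidates and Fixed are
// placeholder names):
//   SmallLISet Candidates;
//   if (mayRecolorAllInterferences(PhysReg, VirtReg, Candidates, Fixed)) {
//     // Candidates now holds every vreg interfering on PhysReg; unassign
//     // them, assign PhysReg to VirtReg, and recursively recolor them.
//   }
// The early aborts above are what keep this recursion affordable on
// registers with large interference neighborhoods.
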
/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual registers that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<unsigned> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
  assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with a too large search space
  // for targets with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
    LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    CutOffInfo |= CO_Depth;
    return ~0u;
  }

  // Set of live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping from virtual register to physical register in
  // case the recoloring fails.
  DenseMap<unsigned, unsigned> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
  // this recoloring "session".
  FixedRegisters.insert(VirtReg.reg);
  SmallVector<unsigned, 4> CurrentNewVRegs;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();
    CurrentNewVRegs.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      LLVM_DEBUG(
          dbgs() << "Some interferences are not with virtual registers.\n");

      continue;
    }

    // Early give up on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }

    // RecoloringCandidates contains all the virtual registers that interfere
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
    PQueue RecoloringQueue;
    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      enqueue(RecoloringQueue, *It);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // Unset the related struct.
      Matrix->unassign(**It);
    }

    // Do as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferences and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
                                FixedRegisters, Depth)) {
      // Push the queued vregs into the main queue.
      for (unsigned NewVReg : CurrentNewVRegs)
        NewVRegs.push_back(NewVReg);
      // Do not mess up with the global assignment process.
      // I.e., VirtReg must be unassigned.
      Matrix->unassign(VirtReg);
      return PhysReg;
    }

    LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                      << printReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed, undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    // For a newly created vreg which is also in RecoloringCandidates,
    // don't add it to NewVRegs because its physical register will be restored
    // below. Other vregs in CurrentNewVRegs are created by calling
    // selectOrSplit and should be added into NewVRegs.
    for (SmallVectorImpl<unsigned>::iterator Next = CurrentNewVRegs.begin(),
                                             End = CurrentNewVRegs.end();
         Next != End; ++Next) {
      if (RecoloringCandidates.count(&LIS->getInterval(*Next)))
        continue;
      NewVRegs.push_back(*Next);
    }

    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(**It);
      unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
      Matrix->assign(**It, ItPhysReg);
    }
  }

  // Last chance recoloring did not work either, give up.
  return ~0u;
}
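
// The save/restore discipline above is essentially transactional: original
// colors are snapshotted in VirtRegToPhysReg, the speculative assignment is
// attempted, and on failure every candidate gets its snapshot back before
// the next PhysReg is tried:
//   VirtRegToPhysReg[Reg] = VRM->getPhys(Reg);      // snapshot
//   Matrix->unassign(LI);                           // clear the old color
//   ...
//   Matrix->assign(LI, VirtRegToPhysReg[Reg]);      // roll back on failure
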
/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<unsigned> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    unsigned PhysReg;
    PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
    // When splitting happens, the live-range may actually be empty.
    // In that case, it is okay to continue the recoloring even
    // if we did not find an alternative color for it. Indeed,
    // there will not be anything to color for LI in the end.
    if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
      return false;

    if (!PhysReg) {
      assert(LI->empty() && "Only empty live-ranges do not require a register");
      LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                        << " succeeded. Empty LI.\n");
      continue;
    }
    LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
                      << " succeeded with: " << printReg(PhysReg, TRI) << '\n');

    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg);
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  CutOffInfo = CO_None;
  LLVMContext &Ctx = MF->getFunction().getContext();
  SmallVirtRegSet FixedRegisters;
  unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
  if (Reg == ~0U && (CutOffInfo != CO_None)) {
    uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
    if (CutOffEncountered == CO_Depth)
      Ctx.emitError("register allocation failed: maximum depth for recoloring "
                    "reached. Use -fexhaustive-register-search to skip "
                    "cutoffs");
    else if (CutOffEncountered == CO_Interf)
      Ctx.emitError("register allocation failed: maximum interference for "
                    "recoloring reached. Use -fexhaustive-register-search "
                    "to skip cutoffs");
    else if (CutOffEncountered == (CO_Depth | CO_Interf))
      Ctx.emitError("register allocation failed: maximum interference and "
                    "depth for recoloring reached. Use "
                    "-fexhaustive-register-search to skip cutoffs");
  }
  return Reg;
}

/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to prologue|epilogue. Splitting a cold section of the live
/// range can have lower cost than using the CSR for the first time;
/// Spilling a live range in the cold path can have lower cost than using
/// the CSR for the first time. Returns the physical register if we decide
/// to use the CSR; otherwise return 0.
unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
                                         AllocationOrder &Order,
                                         unsigned PhysReg,
                                         unsigned &CostPerUseLimit,
                                         SmallVectorImpl<unsigned> &NewVRegs) {
  if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
    // We choose spill over using the CSR for the first time if the spill cost
    // is lower than CSRCost.
    SA->analyze(&VirtReg);
    if (calcSpillCost() >= CSRCost)
      return PhysReg;
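
    // (With hypothetical numbers: if CSRCost is 20 and the computed spill
    // cost is 25, we keep the CSR; at 12, spilling wins.)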

    // We are going to spill, set CostPerUseLimit to 1 to make sure that
    // we will not use a callee-saved register in tryEvict.
    CostPerUseLimit = 1;
    return 0;
  }
  if (getStage(VirtReg) < RS_Split) {
    // We choose pre-splitting over using the CSR for the first time if
    // the cost of splitting is lower than CSRCost.
    SA->analyze(&VirtReg);
    unsigned NumCands = 0;
    BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
    unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
                                                 NumCands, true /*IgnoreCSR*/);
    if (BestCand == NoCand)
      // Use the CSR if we can't find a region split below CSRCost.
      return PhysReg;

    // Perform the actual pre-splitting.
    doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
    return 0;
  }
  return PhysReg;
}

void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}

void RAGreedy::initializeCSRCost() {
  // We use the larger one out of the command-line option and the value
  // reported by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  if (!CSRCost.getFrequency())
    return;

  // Raw cost is relative to Entry == 2^14; scale it appropriately.
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}

/// Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    unsigned OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
                                ? OtherReg
                                : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}

/// Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           unsigned PhysReg) {
  BlockFrequency Cost = 0;
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}
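
// For example (registers and frequencies are hypothetical): if the list for
// %v holds a copy tied to $rbx with frequency 10 and a copy tied to $rcx
// with frequency 2, then getBrokenHintFreq(List, $rbx) == 2 and
// getBrokenHintFreq(List, $rcx) == 10, so keeping %v in $rbx breaks the
// cheaper set of copies.
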
/// Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
  // We have a broken hint, check if it is possible to fix it by
  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
  // some register and PhysReg may be available for the other live-ranges.
  SmallSet<unsigned, 4> Visited;
  SmallVector<unsigned, 2> RecoloringCandidates;
  HintsInfo Info;
  unsigned Reg = VirtReg.reg;
  unsigned PhysReg = VRM->getPhys(Reg);
  // Start the recoloring algorithm from the input live-interval, then
  // it will propagate to the ones that are copy-related with it.
  Visited.insert(Reg);
  RecoloringCandidates.push_back(Reg);

  LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
                    << '(' << printReg(PhysReg, TRI) << ")\n");

  do {
    Reg = RecoloringCandidates.pop_back_val();

    // We cannot recolor physical registers.
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    assert(VRM->hasPhys(Reg) && "We have an unallocated variable!!");

    // Get the live interval mapped with this virtual register to be able
    // to check for the interference with the new color.
    LiveInterval &LI = LIS->getInterval(Reg);
    unsigned CurrPhys = VRM->getPhys(Reg);
    // Check that the new color matches the register class constraints and
    // that it is free for this live range.
    if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
                                Matrix->checkInterference(LI, PhysReg)))
      continue;

    LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
                      << ") is recolorable.\n");

    // Gather the hint info.
    Info.clear();
    collectHintInfo(Reg, Info);
    // Check if recoloring the live-range will increase the cost of the
    // non-identity copies.
    if (CurrPhys != PhysReg) {
      LLVM_DEBUG(dbgs() << "Checking profitability:\n");
      BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
      BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
      LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
                        << "\nNew Cost: " << NewCopiesCost.getFrequency()
                        << '\n');
      if (OldCopiesCost < NewCopiesCost) {
        LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
        continue;
      }
      // At this point, the cost is either cheaper or equal. If it is
      // equal, we consider this is profitable because it may expose
      // more recoloring opportunities.
      LLVM_DEBUG(dbgs() << "=> Profitable.\n");
      // Recolor the live-range.
      Matrix->unassign(LI);
      Matrix->assign(LI, PhysReg);
    }
    // Push all copy-related live-ranges to keep reconciling the broken
    // hints.
    for (const HintInfo &HI : Info) {
      if (Visited.insert(HI.Reg).second)
        RecoloringCandidates.push_back(HI.Reg);
    }
  } while (!RecoloringCandidates.empty());
}

/// Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
///   a =
///   b =
/// BB2:
///   ...
///   = b
///   = a
/// Let us assume b gets split:
/// BB1:
///   a =
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   = a
/// Because of how the allocation works, b, c, and d may be assigned different
/// colors. Now, if a gets evicted later:
/// BB1:
///   a =
///   st a, SpillSlot
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   e = ld SpillSlot
///   = e
/// It is likely that we can assign the same register to b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
  for (LiveInterval *LI : SetOfBrokenHints) {
    assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
           "Recoloring is possible only for virtual registers");
    // Some dead defs may be around (e.g., because of debug uses).
    // Ignore those.
    if (!VRM->hasPhys(LI->reg))
      continue;
    tryHintRecoloring(*LI);
  }
}

unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                     SmallVectorImpl<unsigned> &NewVRegs,
                                     SmallVirtRegSet &FixedRegisters,
                                     unsigned Depth) {
  unsigned CostPerUseLimit = ~0u;
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
    // If VirtReg got an assignment, the eviction info is no longer relevant.
    LastEvicted.clearEvicteeInfo(VirtReg.reg);
    // When NewVRegs is not empty, we may have made decisions such as evicting
    // a virtual register. Go with the earlier decisions and use the physical
    // register.
    if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
        NewVRegs.empty()) {
      unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
                                              CostPerUseLimit, NewVRegs);
      if (CSRReg || !NewVRegs.empty())
        // Return now if we decide to use a CSR or create new vregs due to
        // pre-splitting.
        return CSRReg;
    } else
      return PhysReg;
  }

  LiveRangeStage Stage = getStage(VirtReg);
  LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
                    << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg =
            tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit)) {
      unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
      // If VirtReg has a hint and that hint is broken, record this
      // virtual register as a recoloring candidate for broken hints.
      // Indeed, since we evicted a variable in its neighborhood, it is
      // likely that we can at least partially recolor some of the
      // copy-related live-ranges.
      if (Hint && Hint != PhysReg)
        SetOfBrokenHints.insert(&VirtReg);
      // If VirtReg evicted something, its own eviction info as an evictee
      // is no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg);
      return PhysReg;
    }

  assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
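  // For illustration: a fresh range that fails both tryAssign and tryEvict
  // above is simply re-queued here on its first visit (via NewVRegs); when it
  // is dequeued again, now at RS_Split, eviction is skipped and the split and
  // spill logic below sees the complete interference picture left by the
  // smaller ranges allocated in between.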
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    LLVM_DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  if (Stage < RS_Spill) {
    // Try splitting VirtReg or interferences.
    unsigned NewVRegSizeBefore = NewVRegs.size();
    unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
    if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
      // If VirtReg got split, the eviction info is no longer relevant.
      LastEvicted.clearEvicteeInfo(VirtReg.reg);
      return PhysReg;
    }
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);

  // Finally spill VirtReg itself.
  if (EnableDeferredSpilling && getStage(VirtReg) < RS_Memory) {
    // TODO: This is experimental and, in particular, we do not model
    // the live range splitting done by spilling correctly.
    // We would need a deep integration with the spiller to do the
    // right thing here. Anyway, that is still good for early testing.
    setStage(VirtReg, RS_Memory);
    LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
    NewVRegs.push_back(VirtReg.reg);
  } else {
    NamedRegionTimer T("spill", "Spiller", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
    spiller().spill(LRE);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

    if (VerifyEnabled)
      MF->verify(this, "After spilling");
  }

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

void RAGreedy::reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
                                            unsigned &FoldedReloads,
                                            unsigned &Spills,
                                            unsigned &FoldedSpills) {
  Reloads = 0;
  FoldedReloads = 0;
  Spills = 0;
  FoldedSpills = 0;

  // Sum up the spills and reloads in subloops.
  for (MachineLoop *SubLoop : *L) {
    unsigned SubReloads;
    unsigned SubFoldedReloads;
    unsigned SubSpills;
    unsigned SubFoldedSpills;

    reportNumberOfSplillsReloads(SubLoop, SubReloads, SubFoldedReloads,
                                 SubSpills, SubFoldedSpills);
    Reloads += SubReloads;
    FoldedReloads += SubFoldedReloads;
    Spills += SubSpills;
    FoldedSpills += SubFoldedSpills;
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  int FI;

  for (MachineBasicBlock *MBB : L->getBlocks())
    // Handle blocks that were not included in subloops.
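    // Note: getLoopFor returns the innermost loop containing MBB, so this
    // check skips blocks that were already counted by the subloop recursion
    // above.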
    if (Loops->getLoopFor(MBB) == L)
      for (MachineInstr &MI : *MBB) {
        SmallVector<const MachineMemOperand *, 2> Accesses;
        auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
          return MFI.isSpillSlotObjectIndex(
              cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
                  ->getFrameIndex());
        };

        if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI))
          ++Reloads;
        else if (TII->hasLoadFromStackSlot(MI, Accesses) &&
                 llvm::any_of(Accesses, isSpillSlotAccess))
          ++FoldedReloads;
        else if (TII->isStoreToStackSlot(MI, FI) &&
                 MFI.isSpillSlotObjectIndex(FI))
          ++Spills;
        else if (TII->hasStoreToStackSlot(MI, Accesses) &&
                 llvm::any_of(Accesses, isSpillSlotAccess))
          ++FoldedSpills;
      }

  if (Reloads || FoldedReloads || Spills || FoldedSpills) {
    using namespace ore;

    ORE->emit([&]() {
      MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReload",
                                        L->getStartLoc(), L->getHeader());
      if (Spills)
        R << NV("NumSpills", Spills) << " spills ";
      if (FoldedSpills)
        R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
      if (Reloads)
        R << NV("NumReloads", Reloads) << " reloads ";
      if (FoldedReloads)
        R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
      R << "generated in loop";
      return R;
    });
  }
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
                    << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  RCI.runOnMachineFunction(mf);

  EnableLocalReassign = EnableLocalReassignment ||
                        MF->getSubtarget().enableRALocalReassignment(
                            MF->getTarget().getOptLevel());

  EnableAdvancedRASplitCost = ConsiderLocalIntervalCost ||
                              MF->getSubtarget().enableAdvancedRASplitCost();

  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  initializeCSRCost();

  calculateSpillWeightsAndHints(*LIS, mf, VRM, *Loops, *MBFI);

  LLVM_DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32); // This will grow as needed.
  SetOfBrokenHints.clear();
  LastEvicted.clear();

  allocatePhysRegs();
  tryHintsRecoloring();
  postOptimization();
  reportNumberOfSplillsReloads();

  releaseMemory();
  return true;
}
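
// Note: this allocator is registered under the name "greedy" and is the
// default register allocator for optimized builds; it can also be requested
// explicitly, e.g. with "llc -regalloc=greedy".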