//===- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveIntervalUnion.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <memory>
#include <queue>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
    "split-spill-mode", cl::Hidden,
    cl::desc("Spill mode for splitting live ranges"),
    cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
               clEnumValN(SplitEditor::SM_Size, "size",
"Optimize for size"), 93 clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")), 94 cl::init(SplitEditor::SM_Speed)); 95 96 static cl::opt<unsigned> 97 LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden, 98 cl::desc("Last chance recoloring max depth"), 99 cl::init(5)); 100 101 static cl::opt<unsigned> LastChanceRecoloringMaxInterference( 102 "lcr-max-interf", cl::Hidden, 103 cl::desc("Last chance recoloring maximum number of considered" 104 " interference at a time"), 105 cl::init(8)); 106 107 static cl::opt<bool> 108 ExhaustiveSearch("exhaustive-register-search", cl::NotHidden, 109 cl::desc("Exhaustive Search for registers bypassing the depth " 110 "and interference cutoffs of last chance recoloring")); 111 112 static cl::opt<bool> EnableLocalReassignment( 113 "enable-local-reassign", cl::Hidden, 114 cl::desc("Local reassignment can yield better allocation decisions, but " 115 "may be compile time intensive"), 116 cl::init(false)); 117 118 static cl::opt<bool> EnableDeferredSpilling( 119 "enable-deferred-spilling", cl::Hidden, 120 cl::desc("Instead of spilling a variable right away, defer the actual " 121 "code insertion to the end of the allocation. That way the " 122 "allocator might still find a suitable coloring for this " 123 "variable because of other evicted variables."), 124 cl::init(false)); 125 126 // FIXME: Find a good default for this flag and remove the flag. 127 static cl::opt<unsigned> 128 CSRFirstTimeCost("regalloc-csr-first-time-cost", 129 cl::desc("Cost for first time use of callee-saved register."), 130 cl::init(0), cl::Hidden); 131 132 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", 133 createGreedyRegisterAllocator); 134 135 namespace { 136 137 class RAGreedy : public MachineFunctionPass, 138 public RegAllocBase, 139 private LiveRangeEdit::Delegate { 140 // Convenient shortcuts. 141 using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>; 142 using SmallLISet = SmallPtrSet<LiveInterval *, 4>; 143 using SmallVirtRegSet = SmallSet<unsigned, 16>; 144 145 // context 146 MachineFunction *MF; 147 148 // Shortcuts to some useful interface. 149 const TargetInstrInfo *TII; 150 const TargetRegisterInfo *TRI; 151 RegisterClassInfo RCI; 152 153 // analyses 154 SlotIndexes *Indexes; 155 MachineBlockFrequencyInfo *MBFI; 156 MachineDominatorTree *DomTree; 157 MachineLoopInfo *Loops; 158 MachineOptimizationRemarkEmitter *ORE; 159 EdgeBundles *Bundles; 160 SpillPlacement *SpillPlacer; 161 LiveDebugVariables *DebugVars; 162 AliasAnalysis *AA; 163 164 // state 165 std::unique_ptr<Spiller> SpillerInstance; 166 PQueue Queue; 167 unsigned NextCascade; 168 169 // Live ranges pass through a number of stages as we try to allocate them. 170 // Some of the stages may also create new live ranges: 171 // 172 // - Region splitting. 173 // - Per-block splitting. 174 // - Local splitting. 175 // - Spilling. 176 // 177 // Ranges produced by one of the stages skip the previous stages when they are 178 // dequeued. This improves performance because we can skip interference checks 179 // that are unlikely to give any results. It also guarantees that the live 180 // range splitting algorithm terminates, something that is otherwise hard to 181 // ensure. 182 enum LiveRangeStage { 183 /// Newly created live range that has never been queued. 184 RS_New, 185 186 /// Only attempt assignment and eviction. Then requeue as RS_Split. 187 RS_Assign, 188 189 /// Attempt live range splitting if assignment is impossible. 
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// Live range is in memory. Because of other evictions, it might get
    /// moved into a register in the end.
    RS_Memory,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  // Enum CutOffStage to keep track of whether register allocation failed
  // because of the cutoffs encountered in last chance recoloring.
  // Note: This is used as a bitmask. New values should be powers of 2.
  enum CutOffStage {
    // No cutoffs encountered.
    CO_None = 0,

    // lcr-max-depth cutoff encountered.
    CO_Depth = 1,

    // lcr-max-interf cutoff encountered.
    CO_Interf = 2
  };

  uint8_t CutOffInfo;

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage = RS_New;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade = 0;

    RegInfo() = default;
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (; Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints = 0; ///< Total number of broken hints.
    float MaxWeight = 0;      ///< Maximum spill weight evicted.

    EvictionCost() = default;

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };

  // splitting state.
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps.
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (unsigned i : LiveBundles.set_bits())
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

  /// Callee-save register cost, calculated once per machine function.
  BlockFrequency CSRCost;

  /// Whether to run the local reassignment heuristic. This information is
  /// obtained from the TargetSubtargetInfo.
  bool EnableLocalReassign;

  /// Set of broken hints that may be reconciled later because of eviction.
  SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;

public:
  RAGreedy();

  /// Return the pass name.
  StringRef getPassName() const override { return "Greedy Register Allocator"; }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueue(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
  void aboutToRemoveInterval(LiveInterval &) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  static char ID;

private:
  unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
                             SmallVirtRegSet &, unsigned = 0);

  bool LRE_CanEraseVirtReg(unsigned) override;
  void LRE_WillShrinkVirtReg(unsigned) override;
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);
  bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  /// Calculate cost of region splitting.
  unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
                                    AllocationOrder &Order,
                                    BlockFrequency &BestCost,
                                    unsigned &NumCands, bool IgnoreCSR);
  /// Perform region splitting.
  unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                         bool HasCompact,
                         SmallVectorImpl<unsigned> &NewVRegs);
  /// Check other options before using a callee-saved register for the first
  /// time.
  unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
                                 unsigned PhysReg, unsigned &CostPerUseLimit,
                                 SmallVectorImpl<unsigned> &NewVRegs);
  void initializeCSRCost();
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<unsigned> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
                               SmallVirtRegSet &, unsigned);
  void tryHintRecoloring(LiveInterval &);
  void tryHintsRecoloring();

  /// Model the information carried by one end of a copy.
  struct HintInfo {
    /// The frequency of the copy.
    BlockFrequency Freq;
    /// The virtual register or physical register.
    unsigned Reg;
    /// Its currently assigned register.
    /// In case of a physical register Reg == PhysReg.
    unsigned PhysReg;

    HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
        : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
  };
  using HintsInfo = SmallVector<HintInfo, 4>;

  BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
  void collectHintInfo(unsigned, HintsInfo &);

  bool isUnusedCalleeSavedReg(unsigned PhysReg) const;

  /// Compute and report the number of spills and reloads for a loop.
  void reportNumberOfSpillsReloads(MachineLoop *L, unsigned &Reloads,
                                   unsigned &FoldedReloads, unsigned &Spills,
                                   unsigned &FoldedSpills);

  /// Report the number of spills and reloads for each loop.
  void reportNumberOfSpillsReloads() {
    for (MachineLoop *L : *Loops) {
      unsigned Reloads, FoldedReloads, Spills, FoldedSpills;
      reportNumberOfSpillsReloads(L, Reloads, FoldedReloads, Spills,
                                  FoldedSpills);
    }
  }
};

} // end anonymous namespace

char RAGreedy::ID = 0;
char &llvm::RAGreedyID = RAGreedy::ID;

INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
                      "Greedy Register Allocator", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
INITIALIZE_PASS_DEPENDENCY(LiveStacks)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
INITIALIZE_PASS_END(RAGreedy, "greedy",
                    "Greedy Register Allocator", false, false)

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Memory",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
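// A candidate typically only displaces the incumbent when it is better by more
// than this factor, e.g. (a hypothetical sketch of the idiom, not a quote of
// any one call site):
//   if (NewWeight > BestWeight * Hysteresis) { /* switch to the new choice */ }
// so small floating-point jitter cannot flip an earlier decision.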
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875

FunctionPass *llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy() : MachineFunctionPass(ID) {
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  AU.addRequired<MachineOptimizationRemarkEmitterPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  LiveInterval &LI = LIS->getInterval(VirtReg);
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LI);
    aboutToRemoveInterval(LI);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  // Nonetheless, clear the live-range so that the debug
  // dump will show the right state for that VirtReg.
  LI.clear();
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned. The
  // clone starts in the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset();
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
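  //
  // The priority word computed below packs several fields (see the code that
  // follows): bit 31 marks ranges that are still allocatable, bit 30 boosts
  // ranges with a known physreg preference, bit 29 prioritizes global ranges
  // above local ones, bits 24 and up carry the register class's
  // AllocationPriority, and the low bits hold the size or instruction
  // distance used for ordering.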
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
    // Memory operands should be considered last.
    // Change the priority such that memory operands are assigned in
    // the reverse order that they came in.
    // TODO: Make this a member variable and probably do something about hints.
    static unsigned MemOp = 0;
    Prio = MemOp++;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
    bool ForceGlobal = !ReverseLocal &&
      (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
      }
      Prio |= RC.AllocationPriority << 24;
    } else {
      // Allocate global and split ranges in long->short order. Long ranges
      // that don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return nullptr;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
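/// Returns the first register in the allocation order with no interference.
/// If a simple hint was missed, a cheap eviction from the preferred register
/// is attempted first, and a register with a nonzero cost-per-use may still be
/// traded for a cheaper one via tryEvict().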
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
      // Record the missed hint, we may be able to recover
      // at the end if the surrounding allocation changed.
      SetOfBrokenHints.insert(&VirtReg);
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}

//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                 << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
                 << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops in case the relation
/// defined by this function is cyclic.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred
///                   register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
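  // For example, when A is heading for its preferred register and evicting B
  // would not break B's own hint, the eviction is allowed even if B currently
  // has the larger spill weight; B can still be split or spilled later.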
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
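      // EvictionCost::operator< compares BrokenHints before MaxWeight (via
      // std::tie), so one extra broken hint outweighs any spill weight delta.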
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to
      // suboptimal coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval *, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // We usually have the interfering VRegs cached so collectInterferingVRegs()
    // should be fast. We may need to recalculate when different physregs
    // overlap the same register unit, since different SubRanges may have been
    // queried against it.
    Q.collectInterferingVRegs();
    ArrayRef<LiveInterval *> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// Returns true if the given \p PhysReg is a callee saved register and has not
/// been used for allocation yet.
bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
  unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
  if (CSR == 0)
    return false;

  return !Matrix->isPhysRegUsed(PhysReg);
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
                     TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
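  // BestCost starts at the maximum, so the first evictable candidate always
  // becomes the incumbent; later candidates must be strictly cheaper since
  // canEvictInterference() only succeeds for costs below the current bound.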
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                   << PrintReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg),
                               TRI)
                   << '\n');
      continue;
    }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}

//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
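    // Three cases, mirrored for the live-out value below: interference before
    // the block start forces a spill (MustSpill), interference before the
    // first use makes spilling preferable (PrefSpill), and interference
    // between the uses only costs an extra spill instruction.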
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
        BC.Entry = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.first() < BI.FirstInstr) {
        BC.Entry = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.first() < BI.LastInstr) {
        ++Ins;
      }
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
        BC.Exit = SpillPlacement::MustSpill;
        ++Ins;
      } else if (Intf.last() > BI.LastInstr) {
        BC.Exit = SpillPlacement::PrefSpill;
        ++Ins;
      } else if (Intf.last() > BI.FirstInstr) {
        ++Ins;
      }
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      SpillPlacer->addConstraints(makeArrayRef(BCS, B));
      B = 0;
    }
  }

  SpillPlacer->addConstraints(makeArrayRef(BCS, B));
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  while (true) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no
  // interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i : Cand.LiveBundles.set_bits())
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range
/// in SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
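///
/// A spill instruction is counted for every use block whose bundle solution
/// disagrees with the block's own preference, and live-through blocks in
/// ActiveBlocks pay once for a reg/stack transition, or twice when the value
/// stays in a register across interference.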
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, false)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
      }
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
               << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
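  // For each such block, look up the candidate intervals entering and leaving
  // it through BundleCand, then dispatch to splitLiveThroughBlock,
  // splitRegInBlock, or splitRegOutBlock accordingly; blocks assigned to no
  // candidate on either side are treated as isolated.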
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals
    // created for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  unsigned NumCands = 0;
  BlockFrequency BestCost;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = ";
          MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
  }

  unsigned BestCand =
      calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
                               false /*IgnoreCSR*/);

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
}

unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
                                            AllocationOrder &Order,
                                            BlockFrequency &BestCost,
                                            unsigned &NumCands,
                                            bool IgnoreCSR) {
  unsigned BestCand = NoCand;
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
      continue;

    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
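    // The victim is the candidate with the fewest live bundles (the least
    // promising region), and its slot is recycled for the next PhysReg.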
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount) {
          Worst = i;
          WorstCount = Count;
        }
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands + 1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
          MBFI->printBlockFreq(dbgs(), Cost));
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = ";
      MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
      for (int i : Cand.LiveBundles.set_bits())
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
    }
    ++NumCands;
  }
  return BestCand;
}

unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
                                 bool HasCompact,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  SmallVector<unsigned, 8> UsedCands;
  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
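  // The compact region always lives in GlobalCand.front() with PhysReg == 0;
  // any bundle left as NoCand after this step ends up in the stack interval.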
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}

//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling; the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  const TargetRegisterClass *SuperRC =
      TRI->getLargestLegalSuperClass(CurRC, *MF);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
              getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
                                                  TRI, RCI)) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//

/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
      BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
      BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
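  // For illustration (hypothetical numbers): with uses at slots 16B, 32B, and
  // 48B there are two gaps; an interfering segment of weight 2.5 covering
  // [20r;40r) overlaps both gaps, so the loop below raises GapWeight[0] and
  // GapWeight[1] to at least 2.5.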
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
             .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
        Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - a phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case; we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
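    // For illustration (hypothetical numbers): with uses at 16B, 32B, 48B and
    // a call carrying a regmask at 24r, the loop below records gap 0 (16B <=
    // 24r <= 32B) in RegMaskGaps; that gap is later forced to huge_valf for
    // every PhysReg the mask clobbers.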
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
      SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
      (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    while (true) {
      // Live before/after split?
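      // (For illustration, hypothetical numbers: with NumGaps == 3 and
      // !BI.LiveIn, LiveBefore stays false only while SplitBefore == 0, i.e.
      // while the window's left edge still sits at the first use; LiveAfter
      // behaves symmetrically on the right.)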
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(
            blockFreq * (NewGaps + 1),
            Uses[SplitBefore].distance(Uses[SplitAfter]) +
                (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
            1);
        // Would this split be possible to allocate?
        // Never allocate all gaps; we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
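  // For illustration (hypothetical numbers): a live-through range with uses at
  // 16B, 32B, 48B (NumGaps == 2) split with BestBefore == 0 and BestAfter == 1
  // gives NewGaps == 1 + (1 - 0) + 1 == 3 >= NumGaps, so the new main interval
  // (IntvMap[i] == 1) is tagged RS_Split2 below.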
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
                     TimerGroupDescription, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}

//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//

/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
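/// For illustration (hypothetical numbers): with a cutoff of, say, 8
/// interferences per register unit, a \p PhysReg whose units see 8 or more
/// interfering virtual registers is rejected outright unless ExhaustiveSearch
/// is set.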
bool
RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                     SmallLISet &RecoloringCandidates,
                                     const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there is LastChanceRecoloringMaxInterference or more interferences,
    // chances are one would not be recolorable.
    if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
        LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
      DEBUG(dbgs() << "Early abort: too many interferences.\n");
      CutOffInfo |= CO_Interf;
      return false;
    }
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      // If Intf is done and sits on the same register class as VirtReg,
      // it would not be recolorable as it is in the same state as VirtReg.
      if ((getStage(*Intf) == RS_Done &&
           MRI->getRegClass(Intf->reg) == CurRC) ||
          FixedRegisters.count(Intf->reg)) {
        DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}

/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual registers that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<unsigned> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
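  // (RS_Done ranges can no longer be split or spilled, so recoloring their
  // interferences is the only remaining way to give them a register.)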
  assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with a too large search space
  // for targets with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
    DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    CutOffInfo |= CO_Depth;
    return ~0u;
  }

  // Set of live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping from virtual register to physical register in
  // case the recoloring fails.
  DenseMap<unsigned, unsigned> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
  // this recoloring "session".
  FixedRegisters.insert(VirtReg.reg);
  SmallVector<unsigned, 4> CurrentNewVRegs;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                 << PrintReg(PhysReg, TRI) << '\n');
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();
    CurrentNewVRegs.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");

      continue;
    }

    // Give up early on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }

    // RecoloringCandidates contains all the virtual registers that interfere
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
    PQueue RecoloringQueue;
    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      enqueue(RecoloringQueue, *It);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // Unset the related struct.
      Matrix->unassign(**It);
    }

    // Do as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferences and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
                                FixedRegisters, Depth)) {
      // Push the queued vregs into the main queue.
      for (unsigned NewVReg : CurrentNewVRegs)
        NewVRegs.push_back(NewVReg);
      // Do not mess up the global assignment process;
      // i.e., VirtReg must be unassigned.
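      // (The caller in RegAllocBase will perform the final assignment of the
      // returned PhysReg itself.)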
      Matrix->unassign(VirtReg);
      return PhysReg;
    }

    DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                 << PrintReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed, undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    // For a newly created vreg which is also in RecoloringCandidates,
    // don't add it to NewVRegs because its physical register will be restored
    // below. Other vregs in CurrentNewVRegs are created by calling
    // selectOrSplit and should be added into NewVRegs.
    for (SmallVectorImpl<unsigned>::iterator Next = CurrentNewVRegs.begin(),
                                             End = CurrentNewVRegs.end();
         Next != End; ++Next) {
      if (RecoloringCandidates.count(&LIS->getInterval(*Next)))
        continue;
      NewVRegs.push_back(*Next);
    }

    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(**It);
      unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
      Matrix->assign(**It, ItPhysReg);
    }
  }

  // Last chance recoloring did not work either, give up.
  return ~0u;
}

/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual register created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<unsigned> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    unsigned PhysReg;
    PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
    // When splitting happens, the live-range may actually be empty.
    // In that case, it is okay to continue the recoloring even
    // if we did not find an alternative color for it. Indeed,
    // there will not be anything to color for LI in the end.
    if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
      return false;

    if (!PhysReg) {
      assert(LI->empty() && "Only empty live-ranges do not require a register");
      DEBUG(dbgs() << "Recoloring of " << *LI << " succeeded. Empty LI.\n");
      continue;
    }
    DEBUG(dbgs() << "Recoloring of " << *LI
                 << " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');

    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg);
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  CutOffInfo = CO_None;
  LLVMContext &Ctx = MF->getFunction()->getContext();
  SmallVirtRegSet FixedRegisters;
  unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
  if (Reg == ~0U && (CutOffInfo != CO_None)) {
    uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
    if (CutOffEncountered == CO_Depth)
      Ctx.emitError("register allocation failed: maximum depth for recoloring "
                    "reached. Use -fexhaustive-register-search to skip "
                    "cutoffs");
    else if (CutOffEncountered == CO_Interf)
      Ctx.emitError("register allocation failed: maximum interference for "
                    "recoloring reached. Use -fexhaustive-register-search "
                    "to skip cutoffs");
    else if (CutOffEncountered == (CO_Depth | CO_Interf))
      Ctx.emitError("register allocation failed: maximum interference and "
                    "depth for recoloring reached. Use "
                    "-fexhaustive-register-search to skip cutoffs");
  }
  return Reg;
}

/// Using a CSR for the first time has a cost because it causes push|pop
/// to be added to prologue|epilogue. Splitting a cold section of the live
/// range can have lower cost than using the CSR for the first time;
/// spilling a live range in the cold path can have lower cost than using
/// the CSR for the first time. Returns the physical register if we decide
/// to use the CSR; otherwise returns 0.
unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
                                         AllocationOrder &Order,
                                         unsigned PhysReg,
                                         unsigned &CostPerUseLimit,
                                         SmallVectorImpl<unsigned> &NewVRegs) {
  if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
    // We choose spill over using the CSR for the first time if the spill cost
    // is lower than CSRCost.
    SA->analyze(&VirtReg);
    if (calcSpillCost() >= CSRCost)
      return PhysReg;

    // We are going to spill, set CostPerUseLimit to 1 to make sure that
    // we will not use a callee-saved register in tryEvict.
    CostPerUseLimit = 1;
    return 0;
  }
  if (getStage(VirtReg) < RS_Split) {
    // We choose pre-splitting over using the CSR for the first time if
    // the cost of splitting is lower than CSRCost.
    SA->analyze(&VirtReg);
    unsigned NumCands = 0;
    BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
    unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
                                                 NumCands, true /*IgnoreCSR*/);
    if (BestCand == NoCand)
      // Use the CSR if we can't find a region split below CSRCost.
      return PhysReg;

    // Perform the actual pre-splitting.
    doRegionSplit(VirtReg, BestCand, false /*HasCompact*/, NewVRegs);
    return 0;
  }
  return PhysReg;
}

void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
  // Do not keep invalid information around.
  SetOfBrokenHints.remove(&LI);
}

void RAGreedy::initializeCSRCost() {
  // We use the larger one out of the command-line option and the value
  // reported by TRI.
  CSRCost = BlockFrequency(
      std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
  if (!CSRCost.getFrequency())
    return;

  // Raw cost is relative to Entry == 2^14; scale it appropriately.
  uint64_t ActualEntry = MBFI->getEntryFreq();
  if (!ActualEntry) {
    CSRCost = 0;
    return;
  }
  uint64_t FixedEntry = 1 << 14;
  if (ActualEntry < FixedEntry)
    CSRCost *= BranchProbability(ActualEntry, FixedEntry);
  else if (ActualEntry <= UINT32_MAX)
    // Invert the fraction and divide.
    CSRCost /= BranchProbability(FixedEntry, ActualEntry);
  else
    // Can't use BranchProbability in general, since it takes 32-bit numbers.
    CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
}

/// \brief Collect the hint info for \p Reg.
/// The results are stored into \p Out.
/// \p Out is not cleared before being populated.
void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
  for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
    if (!Instr.isFullCopy())
      continue;
    // Look for the other end of the copy.
    unsigned OtherReg = Instr.getOperand(0).getReg();
    if (OtherReg == Reg) {
      OtherReg = Instr.getOperand(1).getReg();
      if (OtherReg == Reg)
        continue;
    }
    // Get the current assignment.
    unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
                                ? OtherReg
                                : VRM->getPhys(OtherReg);
    // Push the collected information.
    Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
                           OtherPhysReg));
  }
}

/// \brief Using the given \p List, compute the cost of the broken hints if
/// \p PhysReg was used.
/// \return The cost of \p List for \p PhysReg.
BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
                                           unsigned PhysReg) {
  BlockFrequency Cost = 0;
  for (const HintInfo &Info : List) {
    if (Info.PhysReg != PhysReg)
      Cost += Info.Freq;
  }
  return Cost;
}

/// \brief Using the register assigned to \p VirtReg, try to recolor
/// all the live ranges that are copy-related with \p VirtReg.
/// The recoloring is then propagated to all the live-ranges that have
/// been recolored and so on, until no more copies can be coalesced or
/// it is not profitable.
/// For a given live range, profitability is determined by the sum of the
/// frequencies of the non-identity copies it would introduce with the old
/// and new register.
void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
  // We have a broken hint, check if it is possible to fix it by
  // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
  // some register and PhysReg may be available for the other live-ranges.
  SmallSet<unsigned, 4> Visited;
  SmallVector<unsigned, 2> RecoloringCandidates;
  HintsInfo Info;
  unsigned Reg = VirtReg.reg;
  unsigned PhysReg = VRM->getPhys(Reg);
  // Start the recoloring algorithm from the input live-interval, then
  // it will propagate to the ones that are copy-related with it.
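  // For illustration (hypothetical trace): suppose VirtReg holds R1 while its
  // copy-related siblings vB and vC hold R2 and R3. The worklist below visits
  // vB and vC and moves each of them to R1 whenever R1 is free for it and the
  // move does not increase the frequency-weighted cost of broken copies.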
  Visited.insert(Reg);
  RecoloringCandidates.push_back(Reg);

  DEBUG(dbgs() << "Trying to reconcile hints for: " << PrintReg(Reg, TRI) << '('
               << PrintReg(PhysReg, TRI) << ")\n");

  do {
    Reg = RecoloringCandidates.pop_back_val();

    // We cannot recolor physical registers.
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;

    assert(VRM->hasPhys(Reg) && "We have an unallocated variable!!");

    // Get the live interval mapped with this virtual register to be able
    // to check for the interference with the new color.
    LiveInterval &LI = LIS->getInterval(Reg);
    unsigned CurrPhys = VRM->getPhys(Reg);
    // Check that the new color matches the register class constraints and
    // that it is free for this live range.
    if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
                                Matrix->checkInterference(LI, PhysReg)))
      continue;

    DEBUG(dbgs() << PrintReg(Reg, TRI) << '(' << PrintReg(CurrPhys, TRI)
                 << ") is recolorable.\n");

    // Gather the hint info.
    Info.clear();
    collectHintInfo(Reg, Info);
    // Check if recoloring the live-range will increase the cost of the
    // non-identity copies.
    if (CurrPhys != PhysReg) {
      DEBUG(dbgs() << "Checking profitability:\n");
      BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
      BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
      DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
                   << "\nNew Cost: " << NewCopiesCost.getFrequency() << '\n');
      if (OldCopiesCost < NewCopiesCost) {
        DEBUG(dbgs() << "=> Not profitable.\n");
        continue;
      }
      // At this point, the cost is either cheaper or equal. If it is
      // equal, we consider it profitable because it may expose
      // more recoloring opportunities.
      DEBUG(dbgs() << "=> Profitable.\n");
      // Recolor the live-range.
      Matrix->unassign(LI);
      Matrix->assign(LI, PhysReg);
    }
    // Push all copy-related live-ranges to keep reconciling the broken
    // hints.
    for (const HintInfo &HI : Info) {
      if (Visited.insert(HI.Reg).second)
        RecoloringCandidates.push_back(HI.Reg);
    }
  } while (!RecoloringCandidates.empty());
}

/// \brief Try to recolor broken hints.
/// Broken hints may be repaired by recoloring when an evicted variable
/// freed up a register for a larger live-range.
/// Consider the following example:
/// BB1:
///   a =
///   b =
/// BB2:
///   ...
///   = b
///   = a
/// Let us assume b gets split:
/// BB1:
///   a =
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   = a
/// Because of how the allocation works, b, c, and d may be assigned different
/// colors. Now, if a gets evicted later:
/// BB1:
///   a =
///   st a, SpillSlot
///   b =
/// BB2:
///   c = b
///   ...
///   d = c
///   = d
///   e = ld SpillSlot
///   = e
/// It is likely that we can assign the same register for b, c, and d,
/// getting rid of 2 copies.
void RAGreedy::tryHintsRecoloring() {
  for (LiveInterval *LI : SetOfBrokenHints) {
    assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
           "Recoloring is possible only for virtual registers");
    // Some dead defs may be around (e.g., because of debug uses).
    // Ignore those.
    if (!VRM->hasPhys(LI->reg))
      continue;
    tryHintRecoloring(*LI);
  }
}

unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                     SmallVectorImpl<unsigned> &NewVRegs,
                                     SmallVirtRegSet &FixedRegisters,
                                     unsigned Depth) {
  unsigned CostPerUseLimit = ~0u;
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
    // When NewVRegs is not empty, we may have made decisions such as evicting
    // a virtual register; go with the earlier decisions and use the physical
    // register.
    if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
        NewVRegs.empty()) {
      unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
                                              CostPerUseLimit, NewVRegs);
      if (CSRReg || !NewVRegs.empty())
        // Return now if we decide to use a CSR or create new vregs due to
        // pre-splitting.
        return CSRReg;
    } else
      return PhysReg;
  }

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg =
            tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit)) {
      unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
      // If VirtReg has a hint and that hint is broken, record this
      // virtual register as a recoloring candidate for broken hint.
      // Indeed, since we evicted a variable in its neighborhood, it is
      // likely we can at least partially recolor some of the
      // copy-related live-ranges.
      if (Hint && Hint != PhysReg)
        SetOfBrokenHints.insert(&VirtReg);
      return PhysReg;
    }

  assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  if (Stage < RS_Spill) {
    // Try splitting VirtReg or interferences.
    unsigned NewVRegSizeBefore = NewVRegs.size();
    unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
    if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore))
      return PhysReg;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);

  // Finally spill VirtReg itself.
  if (EnableDeferredSpilling && getStage(VirtReg) < RS_Memory) {
    // TODO: This is experimental and, in particular, we do not model
    // the live range splitting done by spilling correctly.
    // We would need a deep integration with the spiller to do the
    // right thing here. Anyway, that is still good for early testing.
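    // (With deferred spilling enabled, the range is only marked RS_Memory and
    // re-queued here; when it is dequeued again at stage RS_Memory, the else
    // branch below performs the actual spill.)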
    setStage(VirtReg, RS_Memory);
    DEBUG(dbgs() << "Do as if this register is in memory\n");
    NewVRegs.push_back(VirtReg.reg);
  } else {
    NamedRegionTimer T("spill", "Spiller", TimerGroupName,
                       TimerGroupDescription, TimePassesIsEnabled);
    LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
    spiller().spill(LRE);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

    if (VerifyEnabled)
      MF->verify(this, "After spilling");
  }

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

void RAGreedy::reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
                                            unsigned &FoldedReloads,
                                            unsigned &Spills,
                                            unsigned &FoldedSpills) {
  Reloads = 0;
  FoldedReloads = 0;
  Spills = 0;
  FoldedSpills = 0;

  // Sum up the spills and reloads in subloops.
  for (MachineLoop *SubLoop : *L) {
    unsigned SubReloads;
    unsigned SubFoldedReloads;
    unsigned SubSpills;
    unsigned SubFoldedSpills;

    reportNumberOfSplillsReloads(SubLoop, SubReloads, SubFoldedReloads,
                                 SubSpills, SubFoldedSpills);
    Reloads += SubReloads;
    FoldedReloads += SubFoldedReloads;
    Spills += SubSpills;
    FoldedSpills += SubFoldedSpills;
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  int FI;

  for (MachineBasicBlock *MBB : L->getBlocks())
    // Handle blocks that were not included in subloops.
    if (Loops->getLoopFor(MBB) == L)
      for (MachineInstr &MI : *MBB) {
        const MachineMemOperand *MMO;

        if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI))
          ++Reloads;
        else if (TII->hasLoadFromStackSlot(MI, MMO, FI) &&
                 MFI.isSpillSlotObjectIndex(FI))
          ++FoldedReloads;
        else if (TII->isStoreToStackSlot(MI, FI) &&
                 MFI.isSpillSlotObjectIndex(FI))
          ++Spills;
        else if (TII->hasStoreToStackSlot(MI, MMO, FI) &&
                 MFI.isSpillSlotObjectIndex(FI))
          ++FoldedSpills;
      }

  if (Reloads || FoldedReloads || Spills || FoldedSpills) {
    using namespace ore;

    MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReload",
                                      L->getStartLoc(), L->getHeader());
    if (Spills)
      R << NV("NumSpills", Spills) << " spills ";
    if (FoldedSpills)
      R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
    if (Reloads)
      R << NV("NumReloads", Reloads) << " reloads ";
    if (FoldedReloads)
      R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
    ORE->emit(R << "generated in loop");
  }
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  RCI.runOnMachineFunction(mf);

  EnableLocalReassign = EnableLocalReassignment ||
                        MF->getSubtarget().enableRALocalReassignment(
                            MF->getTarget().getOptLevel());

  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  initializeCSRCost();

  calculateSpillWeightsAndHints(*LIS, mf, VRM, *Loops, *MBFI);

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32); // This will grow as needed.
  SetOfBrokenHints.clear();

  allocatePhysRegs();
  tryHintsRecoloring();
  postOptimization();
  reportNumberOfSplillsReloads();

  releaseMemory();
  return true;
}