//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static cl::opt<unsigned>
LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
                             cl::desc("Last chance recoloring max depth"),
                             cl::init(5));

static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
    "lcr-max-interf", cl::Hidden,
    cl::desc("Last chance recoloring maximum number of interferences"
             " considered at a time"),
    cl::init(8));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {
  // Convenient shortcuts.
  typedef std::priority_queue<std::pair<unsigned, unsigned> > PQueue;
  typedef SmallPtrSet<LiveInterval *, 4> SmallLISet;
  typedef SmallSet<unsigned, 16> SmallVirtRegSet;

  // context
  MachineFunction *MF;

  // Shortcuts to some useful interfaces.
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  RegisterClassInfo RCI;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::unique_ptr<Spiller> SpillerInstance;
  PQueue Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(): BrokenHints(0), MaxWeight(0) {}

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    bool operator<(const EvictionCost &O) const {
      return std::tie(BrokenHints, MaxWeight) <
             std::tie(O.BrokenHints, O.MaxWeight);
    }
  };

  // splitting state.
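  // Note: SA analyzes the block liveness and use slots of the live range
  // currently being allocated, and SE rewrites it according to the chosen
  // split. Both objects are shared by all the try*Split methods below and are
  // reset for each new split attempt.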
  std::unique_ptr<SplitAnalysis> SA;
  std::unique_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum : unsigned { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  const char* getPassName() const override {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  void releaseMemory() override;
  Spiller &spiller() override { return *SpillerInstance; }
  void enqueue(LiveInterval *LI) override;
  LiveInterval *dequeue() override;
  unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;

  /// Perform register allocation.
  bool runOnMachineFunction(MachineFunction &mf) override;

  static char ID;

private:
  unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
                             SmallVirtRegSet &, unsigned = 0);

  bool LRE_CanEraseVirtReg(unsigned) override;
  void LRE_WillShrinkVirtReg(unsigned) override;
  void LRE_DidCloneVirtReg(unsigned, unsigned) override;
  void enqueue(PQueue &CurQueue, LiveInterval *LI);
  LiveInterval *dequeue(PQueue &CurQueue);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);
  bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                  SmallLISet &RecoloringCandidates,
                                  const SmallVirtRegSet &FixedRegisters);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
  unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
                                   SmallVectorImpl<unsigned> &,
                                   SmallVirtRegSet &, unsigned);
  bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
                               SmallVirtRegSet &, unsigned);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
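// Comparisons elsewhere in this file typically scale the incumbent value by
// Hysteresis before comparing, so a new candidate must be roughly 2% better
// than the current choice before it displaces it.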
const float Hysteresis = (2007 / 2048.0f); // 0.97998046875


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned. The
  // clone gets the same stage as the parent, which is reset to RS_Assign below.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }

void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Giant live ranges fall back to the global assignment heuristic, which
    // prevents excessive spilling in pathological cases.
    bool ReverseLocal = TRI->reverseLocalAssignment();
    bool ForceGlobal = !ReverseLocal && TRI->mayOverrideLocalAssignment() &&
      (Size / SlotIndex::InstrDist) > (2 * MRI->getRegClass(Reg)->getNumRegs());

    if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      if (!ReverseLocal)
        Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
      else {
        // Allocating bottom up may allow many short LRGs to be assigned first
        // to one of the cheap registers. This could be much faster for very
        // large blocks on targets with many physical registers.
        Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex());
      }
    }
    else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  CurQueue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }

LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
  if (CurQueue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
  CurQueue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
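/// Walk the allocation order and take the first PhysReg with no interference.
/// Even when one is found, it may still be preferable to evict interference
/// from a missed hint or to pick a register with a lower cost per use.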
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                 << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
                 << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // Apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to suboptimal
      // coloring.
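      // Only evict a local interfering range here if it could be reassigned to
      // some other physical register; otherwise keep looking for a cheaper
      // register instead of displacing it.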
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          !canReassign(*Intf, PhysReg)) {
        return false;
      }
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
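    // getLastCostChange(RC) gives the position in the allocation order after
    // which all remaining registers share the same cost, so the scan below can
    // stop before that tail.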
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
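  // scanActiveBundles() returns false when no bundle ends up with a positive
  // bias, in which case the callers discard this candidate since it cannot
  // beat spilling.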
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions.
/// Compact regions can be computed without looking at interference. They are
/// the regions formed by removing all the live-through blocks from the live
/// range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
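/// The cost charges the block frequency once for each block boundary where the
/// bundle solution disagrees with the block's preferred side, and again for
/// live-through blocks that keep the value in a register across interference.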
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
      }
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
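  // For each use block, look up the incoming and outgoing interval from the
  // bundle assignment, then let SplitKit insert the copies; live-through,
  // live-in only, and live-out only blocks each use a dedicated helper.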
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  BlockFrequency BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = ";
          MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = ";
          MBFI->printBlockFreq(dbgs(), Cost));
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost)
             << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
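  // IntvMap[i] == 0 identifies the remainder (complement) interval produced by
  // SplitEditor; everything else is a per-block interval that is queued again
  // as RS_New.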
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// Get the number of allocatable registers that match the constraints of \p Reg
/// on \p MI and that are also in \p SuperRC.
static unsigned getNumAllocatableRegsForConstraints(
    const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
    const RegisterClassInfo &RCI) {
  assert(SuperRC && "Invalid register class");

  const TargetRegisterClass *ConstrainedRC =
      MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
                                             /* ExploreBundle */ true);
  if (!ConstrainedRC)
    return 0;
  return RCI.getNumAllocatableRegs(ConstrainedRC);
}

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(CurRC))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  const TargetRegisterClass *SuperRC = TRI->getLargestLegalSuperClass(CurRC);
  unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
  // Split around every non-copy instruction if this split will relax
  // the constraints on the virtual register.
  // Otherwise, splitting just inserts uncoalescable copies that do not help
  // the allocation.
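  // An instruction is skipped when it is a full copy, or when the register
  // class it would constrain the new value to is as large as SuperRC anyway
  // (compared by the number of allocatable registers).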
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy() ||
          SuperRCNumAllocatableRegs ==
          getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
                                              TRI, RCI)) {
        DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
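  // Fixed (physical register) interference can never be evicted, so any gap it
  // overlaps gets an infinite weight below and is effectively excluded from
  // the local split candidates.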
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as huge_valf.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = llvm::huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case; we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = llvm::huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < llvm::huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
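        //
        // Sketch of the decision below: the candidate is only recorded as a
        // best split when its estimated weight clearly beats the heaviest
        // interference in the covered gaps (EstWeight * Hysteresis >= MaxGap),
        // so the evictions it would force are expected to pay for themselves.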
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
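  // Roadmap: single-block intervals are handled by tryLocalSplit and, failing
  // that, tryInstructionSplit; everything else falls through to region
  // splitting and then per-block splitting below.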
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}

//===----------------------------------------------------------------------===//
//                          Last Chance Recoloring
//===----------------------------------------------------------------------===//

/// mayRecolorAllInterferences - Check if the virtual registers that
/// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
/// recolored to free \p PhysReg.
/// When true is returned, \p RecoloringCandidates has been augmented with all
/// the live intervals that need to be recolored in order to free \p PhysReg
/// for \p VirtReg.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
bool
RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
                                     SmallLISet &RecoloringCandidates,
                                     const SmallVirtRegSet &FixedRegisters) {
  const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);

  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are LastChanceRecoloringMaxInterference or more interferences,
    // chances are one of them will not be recolorable.
    if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
        LastChanceRecoloringMaxInterference) {
      DEBUG(dbgs() << "Early abort: too many interferences.\n");
      return false;
    }
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      // If Intf is done and sits in the same register class as VirtReg, it
      // would not be recolorable, as it is in the same state as VirtReg.
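      // Note: an RS_Done interference in a different (typically larger)
      // register class may still have other options, which is why the abort
      // below also requires the register classes to match.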
      if ((getStage(*Intf) == RS_Done &&
           MRI->getRegClass(Intf->reg) == CurRC) ||
          FixedRegisters.count(Intf->reg)) {
        DEBUG(dbgs() << "Early abort: the interference is not recolorable.\n");
        return false;
      }
      RecoloringCandidates.insert(Intf);
    }
  }
  return true;
}

/// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
/// its interferences.
/// Last chance recoloring chooses a color for \p VirtReg and recolors every
/// virtual register that was using it. The recoloring process may recursively
/// use the last chance recoloring. Therefore, when a virtual register has been
/// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
/// be last-chance-recolored again during this recoloring "session".
/// E.g.,
/// Let
/// vA can use {R1, R2    }
/// vB can use {    R2, R3}
/// vC can use {R1        }
/// Where vA, vB, and vC cannot be split anymore (they are reloads for
/// instance) and they all interfere.
///
/// vA is assigned R1
/// vB is assigned R2
/// vC tries to evict vA but vA is already done.
/// Regular register allocation fails.
///
/// Last chance recoloring kicks in:
/// vC does as if vA was evicted => vC uses R1.
/// vC is marked as fixed.
/// vA needs to find a color.
/// None are available.
/// vA cannot evict vC: vC is a fixed virtual register now.
/// vA does as if vB was evicted => vA uses R2.
/// vB needs to find a color.
/// R3 is available.
/// Recoloring => vC = R1, vA = R2, vB = R3
///
/// \p Order defines the preferred allocation order for \p VirtReg.
/// \p NewRegs will contain any new virtual registers that have been created
/// (split, spill) during the process and that must be assigned.
/// \p FixedRegisters contains all the virtual registers that cannot be
/// recolored.
/// \p Depth gives the current depth of the last chance recoloring.
/// \return a physical register that can be used for VirtReg or ~0u if none
/// exists.
unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
                                           AllocationOrder &Order,
                                           SmallVectorImpl<unsigned> &NewVRegs,
                                           SmallVirtRegSet &FixedRegisters,
                                           unsigned Depth) {
  DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
  // Ranges must be Done.
  assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
         "Last chance recoloring should really be last chance");
  // Set the max depth to LastChanceRecoloringMaxDepth.
  // We may want to reconsider that if we end up with too large a search space
  // for targets with hundreds of registers.
  // Indeed, in that case we may want to cut the search space earlier.
  if (Depth >= LastChanceRecoloringMaxDepth) {
    DEBUG(dbgs() << "Abort because max depth has been reached.\n");
    return ~0u;
  }

  // Set of live intervals that will need to be recolored.
  SmallLISet RecoloringCandidates;
  // Record the original mapping from virtual register to physical register in
  // case the recoloring fails.
  DenseMap<unsigned, unsigned> VirtRegToPhysReg;
  // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
  // this recoloring "session".
  FixedRegisters.insert(VirtReg.reg);

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
                 << PrintReg(PhysReg, TRI) << '\n');
    RecoloringCandidates.clear();
    VirtRegToPhysReg.clear();

    // It is only possible to recolor virtual register interference.
    if (Matrix->checkInterference(VirtReg, PhysReg) >
        LiveRegMatrix::IK_VirtReg) {
      DEBUG(dbgs() << "Some interferences are not with virtual registers.\n");

      continue;
    }

    // Give up early on this PhysReg if it is obvious we cannot recolor all
    // the interferences.
    if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
                                    FixedRegisters)) {
      DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
      continue;
    }

    // RecoloringCandidates contains all the virtual registers that interfere
    // with VirtReg on PhysReg (or one of its aliases).
    // Enqueue them for recoloring and perform the actual recoloring.
    PQueue RecoloringQueue;
    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      enqueue(RecoloringQueue, *It);
      assert(VRM->hasPhys(ItVirtReg) &&
             "Interferences are supposed to be with allocated variables");

      // Record the current allocation.
      VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
      // Remove the interval from the live register matrix.
      Matrix->unassign(**It);
    }

    // Act as if VirtReg was assigned to PhysReg so that the underlying
    // recoloring has the right information about the interferences and
    // available colors.
    Matrix->assign(VirtReg, PhysReg);

    // Save the current recoloring state.
    // If we cannot recolor all the interferences, we will have to start again
    // at this point for the next physical register.
    SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
    if (tryRecoloringCandidates(RecoloringQueue, NewVRegs, FixedRegisters,
                                Depth)) {
      // Do not mess up the global assignment process.
      // I.e., VirtReg must be unassigned.
      Matrix->unassign(VirtReg);
      return PhysReg;
    }

    DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
                 << PrintReg(PhysReg, TRI) << '\n');

    // The recoloring attempt failed; undo the changes.
    FixedRegisters = SaveFixedRegisters;
    Matrix->unassign(VirtReg);

    for (SmallLISet::iterator It = RecoloringCandidates.begin(),
                              EndIt = RecoloringCandidates.end();
         It != EndIt; ++It) {
      unsigned ItVirtReg = (*It)->reg;
      if (VRM->hasPhys(ItVirtReg))
        Matrix->unassign(**It);
      Matrix->assign(**It, VirtRegToPhysReg[ItVirtReg]);
    }
  }

  // Last chance recoloring did not work either; give up.
  return ~0u;
}

/// tryRecoloringCandidates - Try to assign a new color to every register
/// in \p RecoloringQueue.
/// \p NewRegs will contain any new virtual registers created during the
/// recoloring process.
/// \p FixedRegisters[in/out] contains all the registers that have been
/// recolored.
/// \return true if all virtual registers in RecoloringQueue were successfully
/// recolored, false otherwise.
bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
                                       SmallVectorImpl<unsigned> &NewVRegs,
                                       SmallVirtRegSet &FixedRegisters,
                                       unsigned Depth) {
  while (!RecoloringQueue.empty()) {
    LiveInterval *LI = dequeue(RecoloringQueue);
    DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
    unsigned PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters,
                                         Depth + 1);
    if (PhysReg == ~0u || !PhysReg)
      return false;
    DEBUG(dbgs() << "Recoloring of " << *LI
                 << " succeeded with: " << PrintReg(PhysReg, TRI) << '\n');
    Matrix->assign(*LI, PhysReg);
    FixedRegisters.insert(LI->reg);
  }
  return true;
}

//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  SmallVirtRegSet FixedRegisters;
  return selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
}

unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
                                     SmallVectorImpl<unsigned> &NewVRegs,
                                     SmallVirtRegSet &FixedRegisters,
                                     unsigned Depth) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
                                   Depth);

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  TRI = MF->getTarget().getRegisterInfo();
  TII = MF->getTarget().getInstrInfo();
  RCI.runOnMachineFunction(mf);
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}