//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

/// EnableMachineSched - temporary flag to enable the machine scheduling pass
/// until we complete the register allocation pass configuration cleanup.
static cl::opt<bool>
EnableMachineSched("enable-misched",
                   cl::desc("Enable the machine instruction scheduling pass."),
                   cl::init(false), cl::Hidden);

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
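// In practice a new candidate must be noticeably (about 2%) better than the
// current best before it displaces it; see the uses in tryRegionSplit() and
// tryLocalSplit().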
const float Hysteresis = 0.98f;


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitiveID(RegisterCoalescerPassID);
  if (EnableMachineSched)
    AU.addRequiredID(MachineSchedulerID);
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // Reset the parent to RS_Assign and give the clone the same stage as the
  // parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &getLiveUnion(0), Indexes, TRI);
  GlobalCand.resize(32);  // This will grow as needed.
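
  // The main allocation loop in RegAllocBase repeatedly dequeues a virtual
  // register and calls selectOrSplit() until the priority queue is empty.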
  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  {
    NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
    DebugVars->emitDebugValues(VRM);
  }

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}