//===-- RegAllocGreedy.cpp - greedy register allocator -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

cl::opt<bool> CompactRegions("compact-regions", cl::init(true));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
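  /// BundleCand is indexed by edge bundle number; tryRegionSplit() reassigns
  /// every entry for each region split attempt.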
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
  "RS_New",
  "RS_Assign",
  "RS_Split",
  "RS_Split2",
  "RS_Spill",
  "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
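// For example, tryRegionSplit() only replaces its current best candidate when
// the new cost is below Hysteresis * BestCost, i.e. roughly 2% cheaper, so
// near-ties do not flip the decision back and forth.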
const float Hysteresis = 0.98f;


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitiveID(RegisterCoalescerPassID);
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // The clone is given the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    if (CompactRegions)
      Prio = Size;
    else
      Prio = (1u << 31) - Size;
  } else {
    // Everything else is allocated in long->short order. Long ranges that don't
    // fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
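  // addSplitConstraints() is still called so the use-block preferences seed the
  // spill placer before growRegion() runs.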
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
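  // Todo starts as the full set of through blocks; clearing each visited block
  // ensures a block is split at most once even if several candidates list it.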
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = CompactRegions && calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
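    // Scaling by Hysteresis means a region split must beat plain per-block
    // splitting/spilling by a clear margin before it is preferred.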
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}

//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
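  // BestBefore == NumGaps is the sentinel for 'no candidate found yet'; see the
  // check after the search loop below.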
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  {
    NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
    DebugVars->emitDebugValues(VRM);
  }

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}