//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  //   - Region splitting.
  //   - Per-block splitting.
  //   - Local splitting.
  //   - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // Register mask interference. The current VirtReg is checked for register
  // mask interference on entry to selectOrSplit(). If there is no
  // interference, UsableRegs is left empty. If there is interference,
  // UsableRegs has a bit mask of registers that can be used without register
  // mask interference.
  BitVector UsableRegs;

  /// clobberedByRegMask - Returns true if PhysReg is not directly usable
  /// because of register mask clobbers.
  bool clobberedByRegMask(unsigned PhysReg) const {
    return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
  }

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
// LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned
  // instead of staying in the same stage as the parent.
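  // Restart both the original register and its clone at the assignment stage.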
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
// Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  }
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to Physreg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const uint16_t *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
// Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
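    // Interference reaching the block entry forces the value to enter on the
    // stack; otherwise entering on the stack is only preferred.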
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
/// must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
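    // The Hysteresis factor means a split candidate must beat spilling by a
    // clear margin before it is accepted, avoiding flip-flops between nearly
    // equal costs.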
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
// Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
// Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}


//===----------------------------------------------------------------------===//
// Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const uint16_t *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (!UsableRegs.empty()) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (clobberedByRegMask(PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
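          // Only rescan the remaining gaps when the gap that was just dropped
          // could have been the current maximum.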
      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

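  // Note: per the SplitEditor::finish() interface in SplitKit.h, LREdit now
  // holds the virtual registers produced by the split, and IntvMap[i] records
  // which split interval LREdit.get(i) came from - 0 is the complement
  // covering everything outside [SegStart;SegStop), and 1 is the interval
  // opened by openIntv() above. That is why the loop below checks for
  // IntvMap[i] == 1.
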
  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable, which means
  // that tryRegionSplit won't be making progress. This check should be
  // replaced with an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Check if VirtReg is live across any calls.
  UsableRegs.clear();
  if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
    DEBUG(dbgs() << "Live across regmasks.\n");

  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}
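
// In summary, selectOrSplit tries its strategies in a fixed order: assign a
// free register, evict a cheaper live range (unless this range is RS_Split),
// defer a freshly seen range to a second round, split, and finally spill.
// Returning 0 while pushing registers onto NewVRegs tells the caller
// (RegAllocBase::allocatePhysRegs) to queue those registers instead, and
// returning ~0u reports that the range cannot be allocated at all.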

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter.
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  {
    NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
    DebugVars->emitDebugValues(VRM);
  }

  // All machine operands and other references to virtual registers have been
  // replaced. Remove the virtual registers and release all the transient data.
  VRM->clearAllVirt();
  MRI->clearVirtRegs();
  releaseMemory();

  return true;
}