//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they
  // are dequeued. This improves performance because we can skip interference
  // checks that are unlikely to give any results. It also guarantees that the
  // live range splitting algorithm terminates, something that is otherwise
  // hard to ensure.
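  //
  // For example (an illustrative walk through the stages): a fresh range
  // enters the queue as RS_Assign; if it can be neither assigned directly nor
  // helped by eviction, it is requeued as RS_Split. Its split products start
  // over as RS_New, and an unsplittable remainder is staged RS_Spill until it
  // is finally spilled and marked RS_Done.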
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
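  // Illustrative ordering: costs compare lexicographically, so an eviction
  // that breaks a single hint (BrokenHints = 1, MaxWeight = 0.5) always ranks
  // as more expensive than one that breaks none (BrokenHints = 0,
  // MaxWeight = 10.0), no matter how the spill weights compare.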
  // Register mask interference. The current VirtReg is checked for register
  // mask interference on entry to selectOrSplit(). If there is no
  // interference, UsableRegs is left empty. If there is interference,
  // UsableRegs has a bit mask of registers that can be used without register
  // mask interference.
  BitVector UsableRegs;

  /// clobberedByRegMask - Returns true if PhysReg is not directly usable
  /// because of register mask clobbers.
  bool clobberedByRegMask(unsigned PhysReg) const {
    return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
  }

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
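// Example: tryRegionSplit() records BestCost = Hysteresis * Cost when it
// accepts a split candidate, so a later candidate must be roughly 2% cheaper
// than the incumbent before it can take over. Small floating point differences
// therefore cannot flip-flop the decision between nearly identical candidates.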

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned:
  // reset the parent to RS_Assign and give the clone the same stage as the
  // parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}

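// Priority layout used by enqueue() above (illustrative):
//
//   bit 31     set for everything except deferred RS_Split ranges, so the
//              primary queue drains before the deferred ranges are retried
//   bit 30     set when the range has a physical register hint
//   low bits   the size of the live range, so longer ranges allocate first
//
// The pair's second member is ~Reg, so among equal priorities the max-heap
// pops lower-numbered virtual registers first.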

//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  }
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

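// Example of the policy above: when A is heading for its hint register and B
// can still be split (stage < RS_Spill) without being kicked out of its own
// preferred register, A may evict B even if A.weight <= B.weight. In every
// other case the heavier live range wins.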

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

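// Cascades in practice (illustrative): suppose %A has no cascade number yet
// and evicts %B. evictInterference() hands %A the next cascade number, say 5,
// and stamps 5 on %B as well. When %B is retried, canEvictInterference()
// refuses to let it evict %A or anything else with cascade >= 5, so the two
// ranges cannot take turns evicting each other. Urgent evictions may still
// break a cascade, but only at a steep BrokenHints penalty of 10.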

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

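// Reading the entry constraints above as a worked example: interference that
// reaches the block entry forces the live-in value onto the stack (MustSpill);
// interference before the first use makes the block merely prefer a spill
// (PrefSpill); interference strictly between the first and last use costs a
// spill instruction but leaves the border preference alone. The exit
// constraints mirror this for the live-out value.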

/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. growRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no
  // interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range
/// in SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

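// Example: each use block normally contributes its frequency once, covering a
// single reload or spill. A block that is live-in, live-out, and redefines the
// value needs both a reload of the incoming value and a spill of the redefined
// one, so it is counted twice.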

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

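// Example costs from the loops above: a use block whose placement disagrees
// with its preferred border constraint pays one copy per disagreeing border.
// An active live-through block that stays in a register across both borders
// but has interference inside pays twice its frequency (spill plus reload),
// while a block entering in a register and leaving on the stack (or vice
// versa) pays its frequency once.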

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals
    // created for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

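// Summarizing the IntvMap bookkeeping above: IntvMap[i] == 0 identifies the
// remainder interval (sent straight to RS_Spill), IntvMap[i] < NumGlobalIntvs
// identifies a global interval (demoted to RS_Split2 only when it still covers
// as many blocks as the original), and anything else is a block-local range
// that stays RS_New so it can compete for a register or be split locally.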

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses.
/// This creates a lot of local live ranges that will be split by tryLocalSplit
/// if they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop  = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}

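// Illustrative effect (e.g. an x86 GR32_ABCD range with a few non-copy uses):
// each use ends up in its own tiny interval connected to the remainder by
// copies. Since the remainder then consists of nothing but copies, its
// register class can later be inflated to a larger class such as GR32.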


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

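// Example: with uses at slot indexes 4, 8, and 12 there are two gaps, 4-8 and
// 8-12. An interfering segment that overlaps the use at index 8 is counted in
// both GapWeight[0] and GapWeight[1], while interference before a live-in's
// first instruction or after a live-out's last instruction is excluded via
// StartIdx and StopIdx.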

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (!UsableRegs.empty()) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (clobberedByRegMask(PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary: the gap just dropped may have
          // been the one defining MaxGap, in which case the remaining gaps
          // must be rescanned.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}
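// Illustration (a sketch with hypothetical uses, not taken from the code
// above): with uses u0..u4 and the best gap sequence running from
// Uses[BestBefore] = u2 to Uses[BestAfter] = u3, the SplitEditor calls in
// tryLocalSplit produce two registers:
//
//   before:   |---- u0   u1   u2   u3   u4 ----|
//   after:    |---- u0   u1 >copy        copy< u4 ----|   (complement, 0)
//                            [ u2    u3 ]                 (new range,  1)
//
// IntvMap maps each register created by the edit to its SplitEditor
// interval index; the IntvMap[i] == 1 check above picks out products of the
// interval opened with openIntv(), i.e. the new local range.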
//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//
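// Summary of the decision cascade below (descriptive comment, not normative):
//
//   1. tryAssign  - take a free register if one is available.
//   2. tryEvict   - evict a less worthy live range (skipped for RS_Split
//                   ranges, which already failed eviction).
//   3. requeue    - a range seen for the first time is requeued as RS_Split
//                   so the smaller ranges get allocated first.
//   4. trySplit   - region, block, local, or instruction splitting.
//   5. spill      - give up and spill VirtReg itself.
//
// Each step either returns a physical register, pushes new virtual registers
// onto NewVRegs for later rounds, or falls through to the next step; RS_Done
// or unspillable ranges abort with ~0u before step 4.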
unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Check if VirtReg is live across any calls.
  UsableRegs.clear();
  if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
    DEBUG(dbgs() << "Live across regmasks.\n");

  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}