1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the RAGreedy function pass for register allocation in 11 // optimized builds. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "regalloc" 16 #include "llvm/CodeGen/Passes.h" 17 #include "AllocationOrder.h" 18 #include "InterferenceCache.h" 19 #include "LiveDebugVariables.h" 20 #include "RegAllocBase.h" 21 #include "SpillPlacement.h" 22 #include "Spiller.h" 23 #include "SplitKit.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/Analysis/AliasAnalysis.h" 26 #include "llvm/CodeGen/CalcSpillWeights.h" 27 #include "llvm/CodeGen/EdgeBundles.h" 28 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 29 #include "llvm/CodeGen/LiveRangeEdit.h" 30 #include "llvm/CodeGen/LiveRegMatrix.h" 31 #include "llvm/CodeGen/LiveStackAnalysis.h" 32 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" 33 #include "llvm/CodeGen/MachineDominators.h" 34 #include "llvm/CodeGen/MachineFunctionPass.h" 35 #include "llvm/CodeGen/MachineLoopInfo.h" 36 #include "llvm/CodeGen/MachineRegisterInfo.h" 37 #include "llvm/CodeGen/RegAllocRegistry.h" 38 #include "llvm/CodeGen/VirtRegMap.h" 39 #include "llvm/PassAnalysisSupport.h" 40 #include "llvm/Support/CommandLine.h" 41 #include "llvm/Support/Debug.h" 42 #include "llvm/Support/ErrorHandling.h" 43 #include "llvm/Support/Timer.h" 44 #include "llvm/Support/raw_ostream.h" 45 #include <queue> 46 47 using namespace llvm; 48 49 STATISTIC(NumGlobalSplits, "Number of split global live ranges"); 50 STATISTIC(NumLocalSplits, "Number of split local live ranges"); 51 STATISTIC(NumEvicted, "Number of interferences evicted"); 52 53 static cl::opt<SplitEditor::ComplementSpillMode> 54 SplitSpillMode("split-spill-mode", cl::Hidden, 55 cl::desc("Spill mode for splitting live ranges"), 56 cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"), 57 clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"), 58 clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"), 59 clEnumValEnd), 60 cl::init(SplitEditor::SM_Partition)); 61 62 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", 63 createGreedyRegisterAllocator); 64 65 namespace { 66 class RAGreedy : public MachineFunctionPass, 67 public RegAllocBase, 68 private LiveRangeEdit::Delegate { 69 70 // context 71 MachineFunction *MF; 72 73 // analyses 74 SlotIndexes *Indexes; 75 MachineBlockFrequencyInfo *MBFI; 76 MachineDominatorTree *DomTree; 77 MachineLoopInfo *Loops; 78 EdgeBundles *Bundles; 79 SpillPlacement *SpillPlacer; 80 LiveDebugVariables *DebugVars; 81 82 // state 83 OwningPtr<Spiller> SpillerInstance; 84 std::priority_queue<std::pair<unsigned, unsigned> > Queue; 85 unsigned NextCascade; 86 87 // Live ranges pass through a number of stages as we try to allocate them. 88 // Some of the stages may also create new live ranges: 89 // 90 // - Region splitting. 91 // - Per-block splitting. 92 // - Local splitting. 93 // - Spilling. 94 // 95 // Ranges produced by one of the stages skip the previous stages when they are 96 // dequeued. This improves performance because we can skip interference checks 97 // that are unlikely to give any results. 
It also guarantees that the live 98 // range splitting algorithm terminates, something that is otherwise hard to 99 // ensure. 100 enum LiveRangeStage { 101 /// Newly created live range that has never been queued. 102 RS_New, 103 104 /// Only attempt assignment and eviction. Then requeue as RS_Split. 105 RS_Assign, 106 107 /// Attempt live range splitting if assignment is impossible. 108 RS_Split, 109 110 /// Attempt more aggressive live range splitting that is guaranteed to make 111 /// progress. This is used for split products that may not be making 112 /// progress. 113 RS_Split2, 114 115 /// Live range will be spilled. No more splitting will be attempted. 116 RS_Spill, 117 118 /// There is nothing more we can do to this live range. Abort compilation 119 /// if it can't be assigned. 120 RS_Done 121 }; 122 123 #ifndef NDEBUG 124 static const char *const StageName[]; 125 #endif 126 127 // RegInfo - Keep additional information about each live range. 128 struct RegInfo { 129 LiveRangeStage Stage; 130 131 // Cascade - Eviction loop prevention. See canEvictInterference(). 132 unsigned Cascade; 133 134 RegInfo() : Stage(RS_New), Cascade(0) {} 135 }; 136 137 IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo; 138 139 LiveRangeStage getStage(const LiveInterval &VirtReg) const { 140 return ExtraRegInfo[VirtReg.reg].Stage; 141 } 142 143 void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) { 144 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 145 ExtraRegInfo[VirtReg.reg].Stage = Stage; 146 } 147 148 template<typename Iterator> 149 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) { 150 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 151 for (;Begin != End; ++Begin) { 152 unsigned Reg = *Begin; 153 if (ExtraRegInfo[Reg].Stage == RS_New) 154 ExtraRegInfo[Reg].Stage = NewStage; 155 } 156 } 157 158 /// Cost of evicting interference. 159 struct EvictionCost { 160 unsigned BrokenHints; ///< Total number of broken hints. 161 float MaxWeight; ///< Maximum spill weight evicted. 162 163 EvictionCost(): BrokenHints(0), MaxWeight(0) {} 164 165 bool isMax() const { return BrokenHints == ~0u; } 166 167 void setMax() { BrokenHints = ~0u; } 168 169 void setBrokenHints(unsigned NHints) { BrokenHints = NHints; } 170 171 bool operator<(const EvictionCost &O) const { 172 if (BrokenHints != O.BrokenHints) 173 return BrokenHints < O.BrokenHints; 174 return MaxWeight < O.MaxWeight; 175 } 176 }; 177 178 // splitting state. 179 OwningPtr<SplitAnalysis> SA; 180 OwningPtr<SplitEditor> SE; 181 182 /// Cached per-block interference maps 183 InterferenceCache IntfCache; 184 185 /// All basic blocks where the current register has uses. 186 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints; 187 188 /// Global live range splitting candidate info. 189 struct GlobalSplitCandidate { 190 // Register intended for assignment, or 0. 191 unsigned PhysReg; 192 193 // SplitKit interval index for this candidate. 194 unsigned IntvIdx; 195 196 // Interference for PhysReg. 197 InterferenceCache::Cursor Intf; 198 199 // Bundles where this candidate should be live. 200 BitVector LiveBundles; 201 SmallVector<unsigned, 8> ActiveBlocks; 202 203 void reset(InterferenceCache &Cache, unsigned Reg) { 204 PhysReg = Reg; 205 IntvIdx = 0; 206 Intf.setPhysReg(Cache, Reg); 207 LiveBundles.clear(); 208 ActiveBlocks.clear(); 209 } 210 211 // Set B[i] = C for every live bundle where B[i] was NoCand. 
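    // Returns the number of bundles that were newly assigned to C; bundles
    // already claimed by another candidate are left unchanged.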
212 unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) { 213 unsigned Count = 0; 214 for (int i = LiveBundles.find_first(); i >= 0; 215 i = LiveBundles.find_next(i)) 216 if (B[i] == NoCand) { 217 B[i] = C; 218 Count++; 219 } 220 return Count; 221 } 222 }; 223 224 /// Candidate info for each PhysReg in AllocationOrder. 225 /// This vector never shrinks, but grows to the size of the largest register 226 /// class. 227 SmallVector<GlobalSplitCandidate, 32> GlobalCand; 228 229 enum LLVM_ENUM_INT_TYPE(unsigned) { NoCand = ~0u }; 230 231 /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to 232 /// NoCand which indicates the stack interval. 233 SmallVector<unsigned, 32> BundleCand; 234 235 public: 236 RAGreedy(); 237 238 /// Return the pass name. 239 virtual const char* getPassName() const { 240 return "Greedy Register Allocator"; 241 } 242 243 /// RAGreedy analysis usage. 244 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 245 virtual void releaseMemory(); 246 virtual Spiller &spiller() { return *SpillerInstance; } 247 virtual void enqueue(LiveInterval *LI); 248 virtual LiveInterval *dequeue(); 249 virtual unsigned selectOrSplit(LiveInterval&, 250 SmallVectorImpl<unsigned>&); 251 252 /// Perform register allocation. 253 virtual bool runOnMachineFunction(MachineFunction &mf); 254 255 static char ID; 256 257 private: 258 bool LRE_CanEraseVirtReg(unsigned); 259 void LRE_WillShrinkVirtReg(unsigned); 260 void LRE_DidCloneVirtReg(unsigned, unsigned); 261 262 BlockFrequency calcSpillCost(); 263 bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&); 264 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>); 265 void growRegion(GlobalSplitCandidate &Cand); 266 BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&); 267 bool calcCompactRegion(GlobalSplitCandidate&); 268 void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>); 269 void calcGapWeights(unsigned, SmallVectorImpl<float>&); 270 unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg); 271 bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool); 272 bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&); 273 void evictInterference(LiveInterval&, unsigned, 274 SmallVectorImpl<unsigned>&); 275 276 unsigned tryAssign(LiveInterval&, AllocationOrder&, 277 SmallVectorImpl<unsigned>&); 278 unsigned tryEvict(LiveInterval&, AllocationOrder&, 279 SmallVectorImpl<unsigned>&, unsigned = ~0u); 280 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&, 281 SmallVectorImpl<unsigned>&); 282 unsigned tryBlockSplit(LiveInterval&, AllocationOrder&, 283 SmallVectorImpl<unsigned>&); 284 unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&, 285 SmallVectorImpl<unsigned>&); 286 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&, 287 SmallVectorImpl<unsigned>&); 288 unsigned trySplit(LiveInterval&, AllocationOrder&, 289 SmallVectorImpl<unsigned>&); 290 }; 291 } // end anonymous namespace 292 293 char RAGreedy::ID = 0; 294 295 #ifndef NDEBUG 296 const char *const RAGreedy::StageName[] = { 297 "RS_New", 298 "RS_Assign", 299 "RS_Split", 300 "RS_Split2", 301 "RS_Spill", 302 "RS_Done" 303 }; 304 #endif 305 306 // Hysteresis to use when comparing floats. 307 // This helps stabilize decisions based on float comparisons. 
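// A value of 0.98 builds in a margin of about 2%, so near-equal comparisons
// do not flip the decision back and forth.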
308 const float Hysteresis = 0.98f; 309 310 311 FunctionPass* llvm::createGreedyRegisterAllocator() { 312 return new RAGreedy(); 313 } 314 315 RAGreedy::RAGreedy(): MachineFunctionPass(ID) { 316 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry()); 317 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 318 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); 319 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 320 initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry()); 321 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 322 initializeLiveStacksPass(*PassRegistry::getPassRegistry()); 323 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry()); 324 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry()); 325 initializeVirtRegMapPass(*PassRegistry::getPassRegistry()); 326 initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry()); 327 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry()); 328 initializeSpillPlacementPass(*PassRegistry::getPassRegistry()); 329 } 330 331 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { 332 AU.setPreservesCFG(); 333 AU.addRequired<MachineBlockFrequencyInfo>(); 334 AU.addPreserved<MachineBlockFrequencyInfo>(); 335 AU.addRequired<AliasAnalysis>(); 336 AU.addPreserved<AliasAnalysis>(); 337 AU.addRequired<LiveIntervals>(); 338 AU.addPreserved<LiveIntervals>(); 339 AU.addRequired<SlotIndexes>(); 340 AU.addPreserved<SlotIndexes>(); 341 AU.addRequired<LiveDebugVariables>(); 342 AU.addPreserved<LiveDebugVariables>(); 343 AU.addRequired<LiveStacks>(); 344 AU.addPreserved<LiveStacks>(); 345 AU.addRequired<MachineDominatorTree>(); 346 AU.addPreserved<MachineDominatorTree>(); 347 AU.addRequired<MachineLoopInfo>(); 348 AU.addPreserved<MachineLoopInfo>(); 349 AU.addRequired<VirtRegMap>(); 350 AU.addPreserved<VirtRegMap>(); 351 AU.addRequired<LiveRegMatrix>(); 352 AU.addPreserved<LiveRegMatrix>(); 353 AU.addRequired<EdgeBundles>(); 354 AU.addRequired<SpillPlacement>(); 355 MachineFunctionPass::getAnalysisUsage(AU); 356 } 357 358 359 //===----------------------------------------------------------------------===// 360 // LiveRangeEdit delegate methods 361 //===----------------------------------------------------------------------===// 362 363 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) { 364 if (VRM->hasPhys(VirtReg)) { 365 Matrix->unassign(LIS->getInterval(VirtReg)); 366 return true; 367 } 368 // Unassigned virtreg is probably in the priority queue. 369 // RegAllocBase will erase it after dequeueing. 370 return false; 371 } 372 373 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) { 374 if (!VRM->hasPhys(VirtReg)) 375 return; 376 377 // Register is assigned, put it back on the queue for reassignment. 378 LiveInterval &LI = LIS->getInterval(VirtReg); 379 Matrix->unassign(LI); 380 enqueue(&LI); 381 } 382 383 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) { 384 // Cloning a register we haven't even heard about yet? Just ignore it. 385 if (!ExtraRegInfo.inBounds(Old)) 386 return; 387 388 // LRE may clone a virtual register because dead code elimination causes it to 389 // be split into connected components. The new components are much smaller 390 // than the original, so they should get a new chance at being assigned. 391 // The new ranges are given the same stage as the parent.
392 ExtraRegInfo[Old].Stage = RS_Assign; 393 ExtraRegInfo.grow(New); 394 ExtraRegInfo[New] = ExtraRegInfo[Old]; 395 } 396 397 void RAGreedy::releaseMemory() { 398 SpillerInstance.reset(0); 399 ExtraRegInfo.clear(); 400 GlobalCand.clear(); 401 } 402 403 void RAGreedy::enqueue(LiveInterval *LI) { 404 // Prioritize live ranges by size, assigning larger ranges first. 405 // The queue holds (size, reg) pairs. 406 const unsigned Size = LI->getSize(); 407 const unsigned Reg = LI->reg; 408 assert(TargetRegisterInfo::isVirtualRegister(Reg) && 409 "Can only enqueue virtual registers"); 410 unsigned Prio; 411 412 ExtraRegInfo.grow(Reg); 413 if (ExtraRegInfo[Reg].Stage == RS_New) 414 ExtraRegInfo[Reg].Stage = RS_Assign; 415 416 if (ExtraRegInfo[Reg].Stage == RS_Split) { 417 // Unsplit ranges that couldn't be allocated immediately are deferred until 418 // everything else has been allocated. 419 Prio = Size; 420 } else { 421 if (ExtraRegInfo[Reg].Stage == RS_Assign && !LI->empty() && 422 LIS->intervalIsInOneMBB(*LI)) { 423 // Allocate original local ranges in linear instruction order. Since they 424 // are singly defined, this produces optimal coloring in the absence of 425 // global interference and other constraints. 426 if (!TRI->reverseLocalAssignment()) 427 Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex()); 428 else { 429 // Allocating bottom up may allow many short LRGs to be assigned first 430 // to one of the cheap registers. This could be much faster for very 431 // large blocks on targets with many physical registers. 432 Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex()); 433 } 434 } 435 else { 436 // Allocate global and split ranges in long->short order. Long ranges that 437 // don't fit should be spilled (or split) ASAP so they don't create 438 // interference. Mark a bit to prioritize global above local ranges. 439 Prio = (1u << 29) + Size; 440 } 441 // Mark a higher bit to prioritize global and local above RS_Split. 442 Prio |= (1u << 31); 443 444 // Boost ranges that have a physical register hint. 445 if (VRM->hasKnownPreference(Reg)) 446 Prio |= (1u << 30); 447 } 448 // The virtual register number is a tie breaker for same-sized ranges. 449 // Give lower vreg numbers higher priority to assign them first. 450 Queue.push(std::make_pair(Prio, ~Reg)); 451 } 452 453 LiveInterval *RAGreedy::dequeue() { 454 if (Queue.empty()) 455 return 0; 456 LiveInterval *LI = &LIS->getInterval(~Queue.top().second); 457 Queue.pop(); 458 return LI; 459 } 460 461 462 //===----------------------------------------------------------------------===// 463 // Direct Assignment 464 //===----------------------------------------------------------------------===// 465 466 /// tryAssign - Try to assign VirtReg to an available register. 467 unsigned RAGreedy::tryAssign(LiveInterval &VirtReg, 468 AllocationOrder &Order, 469 SmallVectorImpl<unsigned> &NewVRegs) { 470 Order.rewind(); 471 unsigned PhysReg; 472 while ((PhysReg = Order.next())) 473 if (!Matrix->checkInterference(VirtReg, PhysReg)) 474 break; 475 if (!PhysReg || Order.isHint()) 476 return PhysReg; 477 478 // PhysReg is available, but there may be a better choice. 479 480 // If we missed a simple hint, try to cheaply evict interference from the 481 // preferred register. 
482 if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg)) 483 if (Order.isHint(Hint)) { 484 DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n'); 485 EvictionCost MaxCost; 486 MaxCost.setBrokenHints(1); 487 if (canEvictInterference(VirtReg, Hint, true, MaxCost)) { 488 evictInterference(VirtReg, Hint, NewVRegs); 489 return Hint; 490 } 491 } 492 493 // Try to evict interference from a cheaper alternative. 494 unsigned Cost = TRI->getCostPerUse(PhysReg); 495 496 // Most registers have 0 additional cost. 497 if (!Cost) 498 return PhysReg; 499 500 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost 501 << '\n'); 502 unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost); 503 return CheapReg ? CheapReg : PhysReg; 504 } 505 506 507 //===----------------------------------------------------------------------===// 508 // Interference eviction 509 //===----------------------------------------------------------------------===// 510 511 unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) { 512 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo); 513 unsigned PhysReg; 514 while ((PhysReg = Order.next())) { 515 if (PhysReg == PrevReg) 516 continue; 517 518 MCRegUnitIterator Units(PhysReg, TRI); 519 for (; Units.isValid(); ++Units) { 520 // Instantiate a "subquery", not to be confused with the Queries array. 521 LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]); 522 if (subQ.checkInterference()) 523 break; 524 } 525 // If no units have interference, break out with the current PhysReg. 526 if (!Units.isValid()) 527 break; 528 } 529 if (PhysReg) 530 DEBUG(dbgs() << "can reassign: " << VirtReg << " from " 531 << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI) 532 << '\n'); 533 return PhysReg; 534 } 535 536 /// shouldEvict - determine if A should evict the assigned live range B. The 537 /// eviction policy defined by this function together with the allocation order 538 /// defined by enqueue() decides which registers ultimately end up being split 539 /// and spilled. 540 /// 541 /// Cascade numbers are used to prevent infinite loops if this function is a 542 /// cyclic relation. 543 /// 544 /// @param A The live range to be assigned. 545 /// @param IsHint True when A is about to be assigned to its preferred 546 /// register. 547 /// @param B The live range to be evicted. 548 /// @param BreaksHint True when B is already assigned to its preferred register. 549 bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint, 550 LiveInterval &B, bool BreaksHint) { 551 bool CanSplit = getStage(B) < RS_Spill; 552 553 // Be fairly aggressive about following hints as long as the evictee can be 554 // split. 555 if (CanSplit && IsHint && !BreaksHint) 556 return true; 557 558 if (A.weight > B.weight) { 559 DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n'); 560 return true; 561 } 562 return false; 563 } 564 565 /// canEvictInterference - Return true if all interferences between VirtReg and 566 /// PhysReg can be evicted. 567 /// 568 /// @param VirtReg Live range that is about to be assigned. 569 /// @param PhysReg Desired register for assignment. 570 /// @param IsHint True when PhysReg is VirtReg's preferred register. 571 /// @param MaxCost Only look for cheaper candidates and update with new cost 572 /// when returning true. 573 /// @returns True when interference can be evicted cheaper than MaxCost.
574 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg, 575 bool IsHint, EvictionCost &MaxCost) { 576 // It is only possible to evict virtual register interference. 577 if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) 578 return false; 579 580 bool IsLocal = LIS->intervalIsInOneMBB(VirtReg); 581 582 // Find VirtReg's cascade number. This will be unassigned if VirtReg was never 583 // involved in an eviction before. If a cascade number was assigned, deny 584 // evicting anything with the same or a newer cascade number. This prevents 585 // infinite eviction loops. 586 // 587 // This works out so a register without a cascade number is allowed to evict 588 // anything, and it can be evicted by anything. 589 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade; 590 if (!Cascade) 591 Cascade = NextCascade; 592 593 EvictionCost Cost; 594 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 595 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); 596 // If there are 10 or more interferences, chances are one is heavier. 597 if (Q.collectInterferingVRegs(10) >= 10) 598 return false; 599 600 // Check if any interfering live range is heavier than MaxWeight. 601 for (unsigned i = Q.interferingVRegs().size(); i; --i) { 602 LiveInterval *Intf = Q.interferingVRegs()[i - 1]; 603 assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) && 604 "Only expecting virtual register interference from query"); 605 // Never evict spill products. They cannot split or spill. 606 if (getStage(*Intf) == RS_Done) 607 return false; 608 // Once a live range becomes small enough, it is urgent that we find a 609 // register for it. This is indicated by an infinite spill weight. These 610 // urgent live ranges get to evict almost anything. 611 // 612 // Also allow urgent evictions of unspillable ranges from a strictly 613 // larger allocation order. 614 bool Urgent = !VirtReg.isSpillable() && 615 (Intf->isSpillable() || 616 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) < 617 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg))); 618 // Only evict older cascades or live ranges without a cascade. 619 unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade; 620 if (Cascade <= IntfCascade) { 621 if (!Urgent) 622 return false; 623 // We permit breaking cascades for urgent evictions. It should be the 624 // last resort, though, so make it really expensive. 625 Cost.BrokenHints += 10; 626 } 627 // Would this break a satisfied hint? 628 bool BreaksHint = VRM->hasPreferredPhys(Intf->reg); 629 // Update eviction cost. 630 Cost.BrokenHints += BreaksHint; 631 Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight); 632 // Abort if this would be too expensive. 633 if (!(Cost < MaxCost)) 634 return false; 635 if (Urgent) 636 continue; 637 // Apply the eviction policy for non-urgent evictions. 638 if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint)) 639 return false; 640 // If !MaxCost.isMax(), then we're just looking for a cheap register. 641 // Evicting another local live range in this case could lead to suboptimal 642 // coloring. 643 if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) && 644 !canReassign(*Intf, PhysReg)) { 645 return false; 646 } 647 } 648 } 649 MaxCost = Cost; 650 return true; 651 } 652 653 /// evictInterference - Evict any interfering registers that prevent VirtReg 654 /// from being assigned to PhysReg. This assumes that canEvictInterference 655 /// returned true.
656 void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg, 657 SmallVectorImpl<unsigned> &NewVRegs) { 658 // Make sure that VirtReg has a cascade number, and assign that cascade 659 // number to every evicted register. These live ranges can then only be 660 // evicted by a newer cascade, preventing infinite loops. 661 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade; 662 if (!Cascade) 663 Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++; 664 665 DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI) 666 << " interference: Cascade " << Cascade << '\n'); 667 668 // Collect all interfering virtregs first. 669 SmallVector<LiveInterval*, 8> Intfs; 670 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 671 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); 672 assert(Q.seenAllInterferences() && "Didn't check all interferences."); 673 ArrayRef<LiveInterval*> IVR = Q.interferingVRegs(); 674 Intfs.append(IVR.begin(), IVR.end()); 675 } 676 677 // Evict them second. This will invalidate the queries. 678 for (unsigned i = 0, e = Intfs.size(); i != e; ++i) { 679 LiveInterval *Intf = Intfs[i]; 680 // The same VirtReg may be present in multiple RegUnits. Skip duplicates. 681 if (!VRM->hasPhys(Intf->reg)) 682 continue; 683 Matrix->unassign(*Intf); 684 assert((ExtraRegInfo[Intf->reg].Cascade < Cascade || 685 VirtReg.isSpillable() < Intf->isSpillable()) && 686 "Cannot decrease cascade number, illegal eviction"); 687 ExtraRegInfo[Intf->reg].Cascade = Cascade; 688 ++NumEvicted; 689 NewVRegs.push_back(Intf->reg); 690 } 691 } 692 693 /// tryEvict - Try to evict all interferences for a physreg. 694 /// @param VirtReg Currently unassigned virtual register. 695 /// @param Order Physregs to try. 696 /// @return Physreg to assign VirtReg, or 0. 697 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg, 698 AllocationOrder &Order, 699 SmallVectorImpl<unsigned> &NewVRegs, 700 unsigned CostPerUseLimit) { 701 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled); 702 703 // Keep track of the cheapest interference seen so far. 704 EvictionCost BestCost; 705 BestCost.setMax(); 706 unsigned BestPhys = 0; 707 unsigned OrderLimit = Order.getOrder().size(); 708 709 // When we are just looking for a reduced cost per use, don't break any 710 // hints, and only evict smaller spill weights. 711 if (CostPerUseLimit < ~0u) { 712 BestCost.BrokenHints = 0; 713 BestCost.MaxWeight = VirtReg.weight; 714 715 // Check if any registers in RC are below CostPerUseLimit. 716 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg); 717 unsigned MinCost = RegClassInfo.getMinCost(RC); 718 if (MinCost >= CostPerUseLimit) { 719 DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost 720 << ", no cheaper registers to be found.\n"); 721 return 0; 722 } 723 724 // It is normal for register classes to have a long tail of registers with 725 // the same cost. We don't need to look at them if they're too expensive. 726 if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) { 727 OrderLimit = RegClassInfo.getLastCostChange(RC); 728 DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n"); 729 } 730 } 731 732 Order.rewind(); 733 while (unsigned PhysReg = Order.next(OrderLimit)) { 734 if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit) 735 continue; 736 // The first use of a callee-saved register in a function has cost 1. 737 // Don't start using a CSR when the CostPerUseLimit is low.
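    // A limit of 1 means only zero-cost registers are acceptable, so touching a
    // previously unused callee-saved register is not worth it: it would force a
    // save and restore in the prologue and epilogue.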
738 if (CostPerUseLimit == 1) 739 if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg)) 740 if (!MRI->isPhysRegUsed(CSR)) { 741 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR " 742 << PrintReg(CSR, TRI) << '\n'); 743 continue; 744 } 745 746 if (!canEvictInterference(VirtReg, PhysReg, false, BestCost)) 747 continue; 748 749 // Best so far. 750 BestPhys = PhysReg; 751 752 // Stop if the hint can be used. 753 if (Order.isHint()) 754 break; 755 } 756 757 if (!BestPhys) 758 return 0; 759 760 evictInterference(VirtReg, BestPhys, NewVRegs); 761 return BestPhys; 762 } 763 764 765 //===----------------------------------------------------------------------===// 766 // Region Splitting 767 //===----------------------------------------------------------------------===// 768 769 /// addSplitConstraints - Fill out the SplitConstraints vector based on the 770 /// interference pattern in Physreg and its aliases. Add the constraints to 771 /// SpillPlacement and return the static cost of this split in Cost, assuming 772 /// that all preferences in SplitConstraints are met. 773 /// Return false if there are no bundles with positive bias. 774 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf, 775 BlockFrequency &Cost) { 776 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 777 778 // Reset interference dependent info. 779 SplitConstraints.resize(UseBlocks.size()); 780 BlockFrequency StaticCost = 0; 781 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 782 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 783 SpillPlacement::BlockConstraint &BC = SplitConstraints[i]; 784 785 BC.Number = BI.MBB->getNumber(); 786 Intf.moveToBlock(BC.Number); 787 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare; 788 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare; 789 BC.ChangesValue = BI.FirstDef.isValid(); 790 791 if (!Intf.hasInterference()) 792 continue; 793 794 // Number of spill code instructions to insert. 795 unsigned Ins = 0; 796 797 // Interference for the live-in value. 798 if (BI.LiveIn) { 799 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) 800 BC.Entry = SpillPlacement::MustSpill, ++Ins; 801 else if (Intf.first() < BI.FirstInstr) 802 BC.Entry = SpillPlacement::PrefSpill, ++Ins; 803 else if (Intf.first() < BI.LastInstr) 804 ++Ins; 805 } 806 807 // Interference for the live-out value. 808 if (BI.LiveOut) { 809 if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) 810 BC.Exit = SpillPlacement::MustSpill, ++Ins; 811 else if (Intf.last() > BI.LastInstr) 812 BC.Exit = SpillPlacement::PrefSpill, ++Ins; 813 else if (Intf.last() > BI.FirstInstr) 814 ++Ins; 815 } 816 817 // Accumulate the total frequency of inserted spill code. 818 while (Ins--) 819 StaticCost += SpillPlacer->getBlockFrequency(BC.Number); 820 } 821 Cost = StaticCost; 822 823 // Add constraints for use-blocks. Note that these are the only constraints 824 // that may add a positive bias, it is downhill from here. 825 SpillPlacer->addConstraints(SplitConstraints); 826 return SpillPlacer->scanActiveBundles(); 827 } 828 829 830 /// addThroughConstraints - Add constraints and links to SpillPlacer from the 831 /// live-through blocks in Blocks. 
832 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf, 833 ArrayRef<unsigned> Blocks) { 834 const unsigned GroupSize = 8; 835 SpillPlacement::BlockConstraint BCS[GroupSize]; 836 unsigned TBS[GroupSize]; 837 unsigned B = 0, T = 0; 838 839 for (unsigned i = 0; i != Blocks.size(); ++i) { 840 unsigned Number = Blocks[i]; 841 Intf.moveToBlock(Number); 842 843 if (!Intf.hasInterference()) { 844 assert(T < GroupSize && "Array overflow"); 845 TBS[T] = Number; 846 if (++T == GroupSize) { 847 SpillPlacer->addLinks(makeArrayRef(TBS, T)); 848 T = 0; 849 } 850 continue; 851 } 852 853 assert(B < GroupSize && "Array overflow"); 854 BCS[B].Number = Number; 855 856 // Interference for the live-in value. 857 if (Intf.first() <= Indexes->getMBBStartIdx(Number)) 858 BCS[B].Entry = SpillPlacement::MustSpill; 859 else 860 BCS[B].Entry = SpillPlacement::PrefSpill; 861 862 // Interference for the live-out value. 863 if (Intf.last() >= SA->getLastSplitPoint(Number)) 864 BCS[B].Exit = SpillPlacement::MustSpill; 865 else 866 BCS[B].Exit = SpillPlacement::PrefSpill; 867 868 if (++B == GroupSize) { 869 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); 870 SpillPlacer->addConstraints(Array); 871 B = 0; 872 } 873 } 874 875 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); 876 SpillPlacer->addConstraints(Array); 877 SpillPlacer->addLinks(makeArrayRef(TBS, T)); 878 } 879 880 void RAGreedy::growRegion(GlobalSplitCandidate &Cand) { 881 // Keep track of through blocks that have not been added to SpillPlacer. 882 BitVector Todo = SA->getThroughBlocks(); 883 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks; 884 unsigned AddedTo = 0; 885 #ifndef NDEBUG 886 unsigned Visited = 0; 887 #endif 888 889 for (;;) { 890 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive(); 891 // Find new through blocks in the periphery of PrefRegBundles. 892 for (int i = 0, e = NewBundles.size(); i != e; ++i) { 893 unsigned Bundle = NewBundles[i]; 894 // Look at all blocks connected to Bundle in the full graph. 895 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle); 896 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end(); 897 I != E; ++I) { 898 unsigned Block = *I; 899 if (!Todo.test(Block)) 900 continue; 901 Todo.reset(Block); 902 // This is a new through block. Add it to SpillPlacer later. 903 ActiveBlocks.push_back(Block); 904 #ifndef NDEBUG 905 ++Visited; 906 #endif 907 } 908 } 909 // Any new blocks to add? 910 if (ActiveBlocks.size() == AddedTo) 911 break; 912 913 // Compute through constraints from the interference, or assume that all 914 // through blocks prefer spilling when forming compact regions. 915 ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo); 916 if (Cand.PhysReg) 917 addThroughConstraints(Cand.Intf, NewBlocks); 918 else 919 // Provide a strong negative bias on through blocks to prevent unwanted 920 // liveness on loop backedges. 921 SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true); 922 AddedTo = ActiveBlocks.size(); 923 924 // Perhaps iterating can enable more bundles? 925 SpillPlacer->iterate(); 926 } 927 DEBUG(dbgs() << ", v=" << Visited); 928 } 929 930 /// calcCompactRegion - Compute the set of edge bundles that should be live 931 /// when splitting the current live range into compact regions. Compact 932 /// regions can be computed without looking at interference. They are the 933 /// regions formed by removing all the live-through blocks from the live range. 
934 /// 935 /// Returns false if the current live range is already compact, or if the 936 /// compact regions would form single block regions anyway. 937 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) { 938 // Without any through blocks, the live range is already compact. 939 if (!SA->getNumThroughBlocks()) 940 return false; 941 942 // Compact regions don't correspond to any physreg. 943 Cand.reset(IntfCache, 0); 944 945 DEBUG(dbgs() << "Compact region bundles"); 946 947 // Use the spill placer to determine the live bundles. GrowRegion pretends 948 // that all the through blocks have interference when PhysReg is unset. 949 SpillPlacer->prepare(Cand.LiveBundles); 950 951 // The static split cost will be zero since Cand.Intf reports no interference. 952 BlockFrequency Cost; 953 if (!addSplitConstraints(Cand.Intf, Cost)) { 954 DEBUG(dbgs() << ", none.\n"); 955 return false; 956 } 957 958 growRegion(Cand); 959 SpillPlacer->finish(); 960 961 if (!Cand.LiveBundles.any()) { 962 DEBUG(dbgs() << ", none.\n"); 963 return false; 964 } 965 966 DEBUG({ 967 for (int i = Cand.LiveBundles.find_first(); i>=0; 968 i = Cand.LiveBundles.find_next(i)) 969 dbgs() << " EB#" << i; 970 dbgs() << ".\n"; 971 }); 972 return true; 973 } 974 975 /// calcSpillCost - Compute how expensive it would be to split the live range in 976 /// SA around all use blocks instead of forming bundle regions. 977 BlockFrequency RAGreedy::calcSpillCost() { 978 BlockFrequency Cost = 0; 979 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 980 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 981 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 982 unsigned Number = BI.MBB->getNumber(); 983 // We normally only need one spill instruction - a load or a store. 984 Cost += SpillPlacer->getBlockFrequency(Number); 985 986 // Unless the value is redefined in the block. 987 if (BI.LiveIn && BI.LiveOut && BI.FirstDef) 988 Cost += SpillPlacer->getBlockFrequency(Number); 989 } 990 return Cost; 991 } 992 993 /// calcGlobalSplitCost - Return the global split cost of following the split 994 /// pattern in LiveBundles. This cost should be added to the local cost of the 995 /// interference pattern in SplitConstraints. 996 /// 997 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) { 998 BlockFrequency GlobalCost = 0; 999 const BitVector &LiveBundles = Cand.LiveBundles; 1000 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 1001 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 1002 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 1003 SpillPlacement::BlockConstraint &BC = SplitConstraints[i]; 1004 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)]; 1005 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)]; 1006 unsigned Ins = 0; 1007 1008 if (BI.LiveIn) 1009 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg); 1010 if (BI.LiveOut) 1011 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg); 1012 while (Ins--) 1013 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number); 1014 } 1015 1016 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) { 1017 unsigned Number = Cand.ActiveBlocks[i]; 1018 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; 1019 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; 1020 if (!RegIn && !RegOut) 1021 continue; 1022 if (RegIn && RegOut) { 1023 // We need double spill code if this block has interference. 
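      // The value stays in a register on entry and exit, but the interference
      // forces it to be stored and reloaded inside the block, so charge two
      // spill instructions at this block's frequency.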
1024 Cand.Intf.moveToBlock(Number); 1025 if (Cand.Intf.hasInterference()) { 1026 GlobalCost += SpillPlacer->getBlockFrequency(Number); 1027 GlobalCost += SpillPlacer->getBlockFrequency(Number); 1028 } 1029 continue; 1030 } 1031 // live-in / stack-out or stack-in live-out. 1032 GlobalCost += SpillPlacer->getBlockFrequency(Number); 1033 } 1034 return GlobalCost; 1035 } 1036 1037 /// splitAroundRegion - Split the current live range around the regions 1038 /// determined by BundleCand and GlobalCand. 1039 /// 1040 /// Before calling this function, GlobalCand and BundleCand must be initialized 1041 /// so each bundle is assigned to a valid candidate, or NoCand for the 1042 /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor 1043 /// objects must be initialized for the current live range, and intervals 1044 /// created for the used candidates. 1045 /// 1046 /// @param LREdit The LiveRangeEdit object handling the current split. 1047 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value 1048 /// must appear in this list. 1049 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit, 1050 ArrayRef<unsigned> UsedCands) { 1051 // These are the intervals created for new global ranges. We may create more 1052 // intervals for local ranges. 1053 const unsigned NumGlobalIntvs = LREdit.size(); 1054 DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n"); 1055 assert(NumGlobalIntvs && "No global intervals configured"); 1056 1057 // Isolate even single instructions when dealing with a proper sub-class. 1058 // That guarantees register class inflation for the stack interval because it 1059 // is all copies. 1060 unsigned Reg = SA->getParent().reg; 1061 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); 1062 1063 // First handle all the blocks with uses. 1064 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 1065 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 1066 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 1067 unsigned Number = BI.MBB->getNumber(); 1068 unsigned IntvIn = 0, IntvOut = 0; 1069 SlotIndex IntfIn, IntfOut; 1070 if (BI.LiveIn) { 1071 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)]; 1072 if (CandIn != NoCand) { 1073 GlobalSplitCandidate &Cand = GlobalCand[CandIn]; 1074 IntvIn = Cand.IntvIdx; 1075 Cand.Intf.moveToBlock(Number); 1076 IntfIn = Cand.Intf.first(); 1077 } 1078 } 1079 if (BI.LiveOut) { 1080 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)]; 1081 if (CandOut != NoCand) { 1082 GlobalSplitCandidate &Cand = GlobalCand[CandOut]; 1083 IntvOut = Cand.IntvIdx; 1084 Cand.Intf.moveToBlock(Number); 1085 IntfOut = Cand.Intf.last(); 1086 } 1087 } 1088 1089 // Create separate intervals for isolated blocks with multiple uses. 1090 if (!IntvIn && !IntvOut) { 1091 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); 1092 if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) 1093 SE->splitSingleBlock(BI); 1094 continue; 1095 } 1096 1097 if (IntvIn && IntvOut) 1098 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); 1099 else if (IntvIn) 1100 SE->splitRegInBlock(BI, IntvIn, IntfIn); 1101 else 1102 SE->splitRegOutBlock(BI, IntvOut, IntfOut); 1103 } 1104 1105 // Handle live-through blocks. The relevant live-through blocks are stored in 1106 // the ActiveBlocks list with each candidate. We need to filter out 1107 // duplicates. 
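  // A block may appear in the ActiveBlocks list of several candidates; Todo
  // ensures each live-through block is only split once.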
1108 BitVector Todo = SA->getThroughBlocks(); 1109 for (unsigned c = 0; c != UsedCands.size(); ++c) { 1110 ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks; 1111 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { 1112 unsigned Number = Blocks[i]; 1113 if (!Todo.test(Number)) 1114 continue; 1115 Todo.reset(Number); 1116 1117 unsigned IntvIn = 0, IntvOut = 0; 1118 SlotIndex IntfIn, IntfOut; 1119 1120 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)]; 1121 if (CandIn != NoCand) { 1122 GlobalSplitCandidate &Cand = GlobalCand[CandIn]; 1123 IntvIn = Cand.IntvIdx; 1124 Cand.Intf.moveToBlock(Number); 1125 IntfIn = Cand.Intf.first(); 1126 } 1127 1128 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)]; 1129 if (CandOut != NoCand) { 1130 GlobalSplitCandidate &Cand = GlobalCand[CandOut]; 1131 IntvOut = Cand.IntvIdx; 1132 Cand.Intf.moveToBlock(Number); 1133 IntfOut = Cand.Intf.last(); 1134 } 1135 if (!IntvIn && !IntvOut) 1136 continue; 1137 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); 1138 } 1139 } 1140 1141 ++NumGlobalSplits; 1142 1143 SmallVector<unsigned, 8> IntvMap; 1144 SE->finish(&IntvMap); 1145 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); 1146 1147 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1148 unsigned OrigBlocks = SA->getNumLiveBlocks(); 1149 1150 // Sort out the new intervals created by splitting. We get four kinds: 1151 // - Remainder intervals should not be split again. 1152 // - Candidate intervals can be assigned to Cand.PhysReg. 1153 // - Block-local splits are candidates for local splitting. 1154 // - DCE leftovers should go back on the queue. 1155 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { 1156 LiveInterval &Reg = LIS->getInterval(LREdit.get(i)); 1157 1158 // Ignore old intervals from DCE. 1159 if (getStage(Reg) != RS_New) 1160 continue; 1161 1162 // Remainder interval. Don't try splitting again, spill if it doesn't 1163 // allocate. 1164 if (IntvMap[i] == 0) { 1165 setStage(Reg, RS_Spill); 1166 continue; 1167 } 1168 1169 // Global intervals. Allow repeated splitting as long as the number of live 1170 // blocks is strictly decreasing. 1171 if (IntvMap[i] < NumGlobalIntvs) { 1172 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) { 1173 DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks 1174 << " blocks as original.\n"); 1175 // Don't allow repeated splitting as a safe guard against looping. 1176 setStage(Reg, RS_Split2); 1177 } 1178 continue; 1179 } 1180 1181 // Other intervals are treated as new. This includes local intervals created 1182 // for blocks with multiple uses, and anything created by DCE. 1183 } 1184 1185 if (VerifyEnabled) 1186 MF->verify(this, "After splitting live range around region"); 1187 } 1188 1189 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1190 SmallVectorImpl<unsigned> &NewVRegs) { 1191 unsigned NumCands = 0; 1192 unsigned BestCand = NoCand; 1193 BlockFrequency BestCost; 1194 SmallVector<unsigned, 8> UsedCands; 1195 1196 // Check if we can split this live range around a compact region. 1197 bool HasCompact = calcCompactRegion(GlobalCand.front()); 1198 if (HasCompact) { 1199 // Yes, keep GlobalCand[0] as the compact region candidate. 1200 NumCands = 1; 1201 BestCost = BlockFrequency::getMaxFrequency(); 1202 } else { 1203 // No benefit from the compact region, our fallback will be per-block 1204 // splitting. Make sure we find a solution that is cheaper than spilling. 
1205 BestCost = calcSpillCost(); 1206 DEBUG(dbgs() << "Cost of isolating all blocks = "; 1207 MBFI->printBlockFreq(dbgs(), BestCost) << '\n'); 1208 } 1209 1210 Order.rewind(); 1211 while (unsigned PhysReg = Order.next()) { 1212 // Discard bad candidates before we run out of interference cache cursors. 1213 // This will only affect register classes with a lot of registers (>32). 1214 if (NumCands == IntfCache.getMaxCursors()) { 1215 unsigned WorstCount = ~0u; 1216 unsigned Worst = 0; 1217 for (unsigned i = 0; i != NumCands; ++i) { 1218 if (i == BestCand || !GlobalCand[i].PhysReg) 1219 continue; 1220 unsigned Count = GlobalCand[i].LiveBundles.count(); 1221 if (Count < WorstCount) 1222 Worst = i, WorstCount = Count; 1223 } 1224 --NumCands; 1225 GlobalCand[Worst] = GlobalCand[NumCands]; 1226 if (BestCand == NumCands) 1227 BestCand = Worst; 1228 } 1229 1230 if (GlobalCand.size() <= NumCands) 1231 GlobalCand.resize(NumCands+1); 1232 GlobalSplitCandidate &Cand = GlobalCand[NumCands]; 1233 Cand.reset(IntfCache, PhysReg); 1234 1235 SpillPlacer->prepare(Cand.LiveBundles); 1236 BlockFrequency Cost; 1237 if (!addSplitConstraints(Cand.Intf, Cost)) { 1238 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n"); 1239 continue; 1240 } 1241 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = "; 1242 MBFI->printBlockFreq(dbgs(), Cost)); 1243 if (Cost >= BestCost) { 1244 DEBUG({ 1245 if (BestCand == NoCand) 1246 dbgs() << " worse than no bundles\n"; 1247 else 1248 dbgs() << " worse than " 1249 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n'; 1250 }); 1251 continue; 1252 } 1253 growRegion(Cand); 1254 1255 SpillPlacer->finish(); 1256 1257 // No live bundles, defer to splitSingleBlocks(). 1258 if (!Cand.LiveBundles.any()) { 1259 DEBUG(dbgs() << " no bundles.\n"); 1260 continue; 1261 } 1262 1263 Cost += calcGlobalSplitCost(Cand); 1264 DEBUG({ 1265 dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost) 1266 << " with bundles"; 1267 for (int i = Cand.LiveBundles.find_first(); i>=0; 1268 i = Cand.LiveBundles.find_next(i)) 1269 dbgs() << " EB#" << i; 1270 dbgs() << ".\n"; 1271 }); 1272 if (Cost < BestCost) { 1273 BestCand = NumCands; 1274 BestCost = Cost; 1275 } 1276 ++NumCands; 1277 } 1278 1279 // No solutions found, fall back to single block splitting. 1280 if (!HasCompact && BestCand == NoCand) 1281 return 0; 1282 1283 // Prepare split editor. 1284 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1285 SE->reset(LREdit, SplitSpillMode); 1286 1287 // Assign all edge bundles to the preferred candidate, or NoCand. 1288 BundleCand.assign(Bundles->getNumBundles(), NoCand); 1289 1290 // Assign bundles for the best candidate region. 1291 if (BestCand != NoCand) { 1292 GlobalSplitCandidate &Cand = GlobalCand[BestCand]; 1293 if (unsigned B = Cand.getBundles(BundleCand, BestCand)) { 1294 UsedCands.push_back(BestCand); 1295 Cand.IntvIdx = SE->openIntv(); 1296 DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in " 1297 << B << " bundles, intv " << Cand.IntvIdx << ".\n"); 1298 (void)B; 1299 } 1300 } 1301 1302 // Assign bundles for the compact region. 
1303 if (HasCompact) { 1304 GlobalSplitCandidate &Cand = GlobalCand.front(); 1305 assert(!Cand.PhysReg && "Compact region has no physreg"); 1306 if (unsigned B = Cand.getBundles(BundleCand, 0)) { 1307 UsedCands.push_back(0); 1308 Cand.IntvIdx = SE->openIntv(); 1309 DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv " 1310 << Cand.IntvIdx << ".\n"); 1311 (void)B; 1312 } 1313 } 1314 1315 splitAroundRegion(LREdit, UsedCands); 1316 return 0; 1317 } 1318 1319 1320 //===----------------------------------------------------------------------===// 1321 // Per-Block Splitting 1322 //===----------------------------------------------------------------------===// 1323 1324 /// tryBlockSplit - Split a global live range around every block with uses. This 1325 /// creates a lot of local live ranges, that will be split by tryLocalSplit if 1326 /// they don't allocate. 1327 unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1328 SmallVectorImpl<unsigned> &NewVRegs) { 1329 assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed"); 1330 unsigned Reg = VirtReg.reg; 1331 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); 1332 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1333 SE->reset(LREdit, SplitSpillMode); 1334 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 1335 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 1336 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 1337 if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) 1338 SE->splitSingleBlock(BI); 1339 } 1340 // No blocks were split. 1341 if (LREdit.empty()) 1342 return 0; 1343 1344 // We did split for some blocks. 1345 SmallVector<unsigned, 8> IntvMap; 1346 SE->finish(&IntvMap); 1347 1348 // Tell LiveDebugVariables about the new ranges. 1349 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); 1350 1351 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1352 1353 // Sort out the new intervals created by splitting. The remainder interval 1354 // goes straight to spilling, the new local ranges get to stay RS_New. 1355 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { 1356 LiveInterval &LI = LIS->getInterval(LREdit.get(i)); 1357 if (getStage(LI) == RS_New && IntvMap[i] == 0) 1358 setStage(LI, RS_Spill); 1359 } 1360 1361 if (VerifyEnabled) 1362 MF->verify(this, "After splitting live range around basic blocks"); 1363 return 0; 1364 } 1365 1366 1367 //===----------------------------------------------------------------------===// 1368 // Per-Instruction Splitting 1369 //===----------------------------------------------------------------------===// 1370 1371 /// tryInstructionSplit - Split a live range around individual instructions. 1372 /// This is normally not worthwhile since the spiller is doing essentially the 1373 /// same thing. However, when the live range is in a constrained register 1374 /// class, it may help to insert copies such that parts of the live range can 1375 /// be moved to a larger register class. 1376 /// 1377 /// This is similar to spilling to a larger register class. 1378 unsigned 1379 RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1380 SmallVectorImpl<unsigned> &NewVRegs) { 1381 // There is no point to this if there are no larger sub-classes. 1382 if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg))) 1383 return 0; 1384 1385 // Always enable split spill mode, since we're effectively spilling to a 1386 // register. 
1387 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1388 SE->reset(LREdit, SplitEditor::SM_Size); 1389 1390 ArrayRef<SlotIndex> Uses = SA->getUseSlots(); 1391 if (Uses.size() <= 1) 1392 return 0; 1393 1394 DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n"); 1395 1396 // Split around every non-copy instruction. 1397 for (unsigned i = 0; i != Uses.size(); ++i) { 1398 if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i])) 1399 if (MI->isFullCopy()) { 1400 DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI); 1401 continue; 1402 } 1403 SE->openIntv(); 1404 SlotIndex SegStart = SE->enterIntvBefore(Uses[i]); 1405 SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]); 1406 SE->useIntv(SegStart, SegStop); 1407 } 1408 1409 if (LREdit.empty()) { 1410 DEBUG(dbgs() << "All uses were copies.\n"); 1411 return 0; 1412 } 1413 1414 SmallVector<unsigned, 8> IntvMap; 1415 SE->finish(&IntvMap); 1416 DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS); 1417 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1418 1419 // Assign all new registers to RS_Spill. This was the last chance. 1420 setStage(LREdit.begin(), LREdit.end(), RS_Spill); 1421 return 0; 1422 } 1423 1424 1425 //===----------------------------------------------------------------------===// 1426 // Local Splitting 1427 //===----------------------------------------------------------------------===// 1428 1429 1430 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted 1431 /// in order to use PhysReg between two entries in SA->UseSlots. 1432 /// 1433 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1]. 1434 /// 1435 void RAGreedy::calcGapWeights(unsigned PhysReg, 1436 SmallVectorImpl<float> &GapWeight) { 1437 assert(SA->getUseBlocks().size() == 1 && "Not a local interval"); 1438 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front(); 1439 ArrayRef<SlotIndex> Uses = SA->getUseSlots(); 1440 const unsigned NumGaps = Uses.size()-1; 1441 1442 // Start and end points for the interference check. 1443 SlotIndex StartIdx = 1444 BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr; 1445 SlotIndex StopIdx = 1446 BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr; 1447 1448 GapWeight.assign(NumGaps, 0.0f); 1449 1450 // Add interference from each overlapping register. 1451 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 1452 if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units) 1453 .checkInterference()) 1454 continue; 1455 1456 // We know that VirtReg is a continuous interval from FirstInstr to 1457 // LastInstr, so we don't need InterferenceQuery. 1458 // 1459 // Interference that overlaps an instruction is counted in both gaps 1460 // surrounding the instruction. The exception is interference before 1461 // StartIdx and after StopIdx. 1462 // 1463 LiveIntervalUnion::SegmentIter IntI = 1464 Matrix->getLiveUnions()[*Units] .find(StartIdx); 1465 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) { 1466 // Skip the gaps before IntI. 1467 while (Uses[Gap+1].getBoundaryIndex() < IntI.start()) 1468 if (++Gap == NumGaps) 1469 break; 1470 if (Gap == NumGaps) 1471 break; 1472 1473 // Update the gaps covered by IntI. 
1474 const float weight = IntI.value()->weight; 1475 for (; Gap != NumGaps; ++Gap) { 1476 GapWeight[Gap] = std::max(GapWeight[Gap], weight); 1477 if (Uses[Gap+1].getBaseIndex() >= IntI.stop()) 1478 break; 1479 } 1480 if (Gap == NumGaps) 1481 break; 1482 } 1483 } 1484 1485 // Add fixed interference. 1486 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 1487 const LiveRange &LR = LIS->getRegUnit(*Units); 1488 LiveRange::const_iterator I = LR.find(StartIdx); 1489 LiveRange::const_iterator E = LR.end(); 1490 1491 // Same loop as above. Mark any overlapped gaps as HUGE_VALF. 1492 for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) { 1493 while (Uses[Gap+1].getBoundaryIndex() < I->start) 1494 if (++Gap == NumGaps) 1495 break; 1496 if (Gap == NumGaps) 1497 break; 1498 1499 for (; Gap != NumGaps; ++Gap) { 1500 GapWeight[Gap] = llvm::huge_valf; 1501 if (Uses[Gap+1].getBaseIndex() >= I->end) 1502 break; 1503 } 1504 if (Gap == NumGaps) 1505 break; 1506 } 1507 } 1508 } 1509 1510 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only 1511 /// basic block. 1512 /// 1513 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1514 SmallVectorImpl<unsigned> &NewVRegs) { 1515 assert(SA->getUseBlocks().size() == 1 && "Not a local interval"); 1516 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front(); 1517 1518 // Note that it is possible to have an interval that is live-in or live-out 1519 // while only covering a single block - A phi-def can use undef values from 1520 // predecessors, and the block could be a single-block loop. 1521 // We don't bother doing anything clever about such a case, we simply assume 1522 // that the interval is continuous from FirstInstr to LastInstr. We should 1523 // make sure that we don't do anything illegal to such an interval, though. 1524 1525 ArrayRef<SlotIndex> Uses = SA->getUseSlots(); 1526 if (Uses.size() <= 2) 1527 return 0; 1528 const unsigned NumGaps = Uses.size()-1; 1529 1530 DEBUG({ 1531 dbgs() << "tryLocalSplit: "; 1532 for (unsigned i = 0, e = Uses.size(); i != e; ++i) 1533 dbgs() << ' ' << Uses[i]; 1534 dbgs() << '\n'; 1535 }); 1536 1537 // If VirtReg is live across any register mask operands, compute a list of 1538 // gaps with register masks. 1539 SmallVector<unsigned, 8> RegMaskGaps; 1540 if (Matrix->checkRegMaskInterference(VirtReg)) { 1541 // Get regmask slots for the whole block. 1542 ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber()); 1543 DEBUG(dbgs() << RMS.size() << " regmasks in block:"); 1544 // Constrain to VirtReg's live range. 1545 unsigned ri = std::lower_bound(RMS.begin(), RMS.end(), 1546 Uses.front().getRegSlot()) - RMS.begin(); 1547 unsigned re = RMS.size(); 1548 for (unsigned i = 0; i != NumGaps && ri != re; ++i) { 1549 // Look for Uses[i] <= RMS <= Uses[i+1]. 1550 assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i])); 1551 if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri])) 1552 continue; 1553 // Skip a regmask on the same instruction as the last use. It doesn't 1554 // overlap the live range. 1555 if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps) 1556 break; 1557 DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]); 1558 RegMaskGaps.push_back(i); 1559 // Advance ri to the next gap. A regmask on one of the uses counts in 1560 // both gaps. 
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / MBFI->getEntryFreq());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = llvm::huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < llvm::huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
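        //
        // The frequency estimate is blockFreq scaled by the instruction count
        // of the new range (NewGaps + 1), and the size estimate is the
        // distance between the split points, padded by one instruction slot
        // for each end that remains live-in or live-out.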
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps; we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
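  // An interval confined to one basic block goes through use-by-use gap
  // splitting first; if that finds nothing profitable, fall back to splitting
  // around each individual non-copy instruction.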
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable, which means
  // that tryRegionSplit won't be making progress. This check should be
  // replaced with an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
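  // Assignment, eviction, and splitting have all failed, so rewrite the range
  // through a stack slot. Everything the spiller creates is marked RS_Done
  // below and will not be split or spilled again.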
1828 NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled); 1829 LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1830 spiller().spill(LRE); 1831 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done); 1832 1833 if (VerifyEnabled) 1834 MF->verify(this, "After spilling"); 1835 1836 // The live virtual register requesting allocation was spilled, so tell 1837 // the caller not to allocate anything during this round. 1838 return 0; 1839 } 1840 1841 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) { 1842 DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n" 1843 << "********** Function: " << mf.getName() << '\n'); 1844 1845 MF = &mf; 1846 if (VerifyEnabled) 1847 MF->verify(this, "Before greedy register allocator"); 1848 1849 RegAllocBase::init(getAnalysis<VirtRegMap>(), 1850 getAnalysis<LiveIntervals>(), 1851 getAnalysis<LiveRegMatrix>()); 1852 Indexes = &getAnalysis<SlotIndexes>(); 1853 MBFI = &getAnalysis<MachineBlockFrequencyInfo>(); 1854 DomTree = &getAnalysis<MachineDominatorTree>(); 1855 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM)); 1856 Loops = &getAnalysis<MachineLoopInfo>(); 1857 Bundles = &getAnalysis<EdgeBundles>(); 1858 SpillPlacer = &getAnalysis<SpillPlacement>(); 1859 DebugVars = &getAnalysis<LiveDebugVariables>(); 1860 1861 calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI); 1862 1863 DEBUG(LIS->dump()); 1864 1865 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops)); 1866 SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI)); 1867 ExtraRegInfo.clear(); 1868 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1869 NextCascade = 1; 1870 IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI); 1871 GlobalCand.resize(32); // This will grow as needed. 1872 1873 allocatePhysRegs(); 1874 releaseMemory(); 1875 return true; 1876 } 1877