1 //===-- RegAllocGreedy.cpp - greedy register allocator --------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file defines the RAGreedy function pass for register allocation in 11 // optimized builds. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "regalloc" 16 #include "llvm/CodeGen/Passes.h" 17 #include "AllocationOrder.h" 18 #include "InterferenceCache.h" 19 #include "LiveDebugVariables.h" 20 #include "RegAllocBase.h" 21 #include "SpillPlacement.h" 22 #include "Spiller.h" 23 #include "SplitKit.h" 24 #include "llvm/ADT/Statistic.h" 25 #include "llvm/Analysis/AliasAnalysis.h" 26 #include "llvm/CodeGen/CalcSpillWeights.h" 27 #include "llvm/CodeGen/EdgeBundles.h" 28 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 29 #include "llvm/CodeGen/LiveRangeEdit.h" 30 #include "llvm/CodeGen/LiveRegMatrix.h" 31 #include "llvm/CodeGen/LiveStackAnalysis.h" 32 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" 33 #include "llvm/CodeGen/MachineDominators.h" 34 #include "llvm/CodeGen/MachineFunctionPass.h" 35 #include "llvm/CodeGen/MachineLoopInfo.h" 36 #include "llvm/CodeGen/MachineRegisterInfo.h" 37 #include "llvm/CodeGen/RegAllocRegistry.h" 38 #include "llvm/CodeGen/VirtRegMap.h" 39 #include "llvm/PassAnalysisSupport.h" 40 #include "llvm/Support/CommandLine.h" 41 #include "llvm/Support/Debug.h" 42 #include "llvm/Support/ErrorHandling.h" 43 #include "llvm/Support/Timer.h" 44 #include "llvm/Support/raw_ostream.h" 45 #include <queue> 46 47 using namespace llvm; 48 49 STATISTIC(NumGlobalSplits, "Number of split global live ranges"); 50 STATISTIC(NumLocalSplits, "Number of split local live ranges"); 51 STATISTIC(NumEvicted, "Number of interferences evicted"); 52 53 static cl::opt<SplitEditor::ComplementSpillMode> 54 SplitSpillMode("split-spill-mode", cl::Hidden, 55 cl::desc("Spill mode for splitting live ranges"), 56 cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"), 57 clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"), 58 clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"), 59 clEnumValEnd), 60 cl::init(SplitEditor::SM_Partition)); 61 62 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", 63 createGreedyRegisterAllocator); 64 65 namespace { 66 class RAGreedy : public MachineFunctionPass, 67 public RegAllocBase, 68 private LiveRangeEdit::Delegate { 69 70 // context 71 MachineFunction *MF; 72 73 // analyses 74 SlotIndexes *Indexes; 75 MachineBlockFrequencyInfo *MBFI; 76 MachineDominatorTree *DomTree; 77 MachineLoopInfo *Loops; 78 EdgeBundles *Bundles; 79 SpillPlacement *SpillPlacer; 80 LiveDebugVariables *DebugVars; 81 82 // state 83 OwningPtr<Spiller> SpillerInstance; 84 std::priority_queue<std::pair<unsigned, unsigned> > Queue; 85 unsigned NextCascade; 86 87 // Live ranges pass through a number of stages as we try to allocate them. 88 // Some of the stages may also create new live ranges: 89 // 90 // - Region splitting. 91 // - Per-block splitting. 92 // - Local splitting. 93 // - Spilling. 94 // 95 // Ranges produced by one of the stages skip the previous stages when they are 96 // dequeued. This improves performance because we can skip interference checks 97 // that are unlikely to give any results. 
It also guarantees that the live 98 // range splitting algorithm terminates, something that is otherwise hard to 99 // ensure. 100 enum LiveRangeStage { 101 /// Newly created live range that has never been queued. 102 RS_New, 103 104 /// Only attempt assignment and eviction. Then requeue as RS_Split. 105 RS_Assign, 106 107 /// Attempt live range splitting if assignment is impossible. 108 RS_Split, 109 110 /// Attempt more aggressive live range splitting that is guaranteed to make 111 /// progress. This is used for split products that may not be making 112 /// progress. 113 RS_Split2, 114 115 /// Live range will be spilled. No more splitting will be attempted. 116 RS_Spill, 117 118 /// There is nothing more we can do to this live range. Abort compilation 119 /// if it can't be assigned. 120 RS_Done 121 }; 122 123 #ifndef NDEBUG 124 static const char *const StageName[]; 125 #endif 126 127 // RegInfo - Keep additional information about each live range. 128 struct RegInfo { 129 LiveRangeStage Stage; 130 131 // Cascade - Eviction loop prevention. See canEvictInterference(). 132 unsigned Cascade; 133 134 RegInfo() : Stage(RS_New), Cascade(0) {} 135 }; 136 137 IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo; 138 139 LiveRangeStage getStage(const LiveInterval &VirtReg) const { 140 return ExtraRegInfo[VirtReg.reg].Stage; 141 } 142 143 void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) { 144 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 145 ExtraRegInfo[VirtReg.reg].Stage = Stage; 146 } 147 148 template<typename Iterator> 149 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) { 150 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 151 for (;Begin != End; ++Begin) { 152 unsigned Reg = *Begin; 153 if (ExtraRegInfo[Reg].Stage == RS_New) 154 ExtraRegInfo[Reg].Stage = NewStage; 155 } 156 } 157 158 /// Cost of evicting interference. 159 struct EvictionCost { 160 unsigned BrokenHints; ///< Total number of broken hints. 161 float MaxWeight; ///< Maximum spill weight evicted. 162 163 EvictionCost(): BrokenHints(0), MaxWeight(0) {} 164 165 bool isMax() const { return BrokenHints == ~0u; } 166 167 void setMax() { BrokenHints = ~0u; } 168 169 void setBrokenHints(unsigned NHints) { BrokenHints = NHints; } 170 171 bool operator<(const EvictionCost &O) const { 172 if (BrokenHints != O.BrokenHints) 173 return BrokenHints < O.BrokenHints; 174 return MaxWeight < O.MaxWeight; 175 } 176 }; 177 178 // splitting state. 179 OwningPtr<SplitAnalysis> SA; 180 OwningPtr<SplitEditor> SE; 181 182 /// Cached per-block interference maps 183 InterferenceCache IntfCache; 184 185 /// All basic blocks where the current register has uses. 186 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints; 187 188 /// Global live range splitting candidate info. 189 struct GlobalSplitCandidate { 190 // Register intended for assignment, or 0. 191 unsigned PhysReg; 192 193 // SplitKit interval index for this candidate. 194 unsigned IntvIdx; 195 196 // Interference for PhysReg. 197 InterferenceCache::Cursor Intf; 198 199 // Bundles where this candidate should be live. 200 BitVector LiveBundles; 201 SmallVector<unsigned, 8> ActiveBlocks; 202 203 void reset(InterferenceCache &Cache, unsigned Reg) { 204 PhysReg = Reg; 205 IntvIdx = 0; 206 Intf.setPhysReg(Cache, Reg); 207 LiveBundles.clear(); 208 ActiveBlocks.clear(); 209 } 210 211 // Set B[i] = C for every live bundle where B[i] was NoCand. 
212 unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) { 213 unsigned Count = 0; 214 for (int i = LiveBundles.find_first(); i >= 0; 215 i = LiveBundles.find_next(i)) 216 if (B[i] == NoCand) { 217 B[i] = C; 218 Count++; 219 } 220 return Count; 221 } 222 }; 223 224 /// Candidate info for each PhysReg in AllocationOrder. 225 /// This vector never shrinks, but grows to the size of the largest register 226 /// class. 227 SmallVector<GlobalSplitCandidate, 32> GlobalCand; 228 229 enum LLVM_ENUM_INT_TYPE(unsigned) { NoCand = ~0u }; 230 231 /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to 232 /// NoCand which indicates the stack interval. 233 SmallVector<unsigned, 32> BundleCand; 234 235 public: 236 RAGreedy(); 237 238 /// Return the pass name. 239 virtual const char* getPassName() const { 240 return "Greedy Register Allocator"; 241 } 242 243 /// RAGreedy analysis usage. 244 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 245 virtual void releaseMemory(); 246 virtual Spiller &spiller() { return *SpillerInstance; } 247 virtual void enqueue(LiveInterval *LI); 248 virtual LiveInterval *dequeue(); 249 virtual unsigned selectOrSplit(LiveInterval&, 250 SmallVectorImpl<unsigned>&); 251 252 /// Perform register allocation. 253 virtual bool runOnMachineFunction(MachineFunction &mf); 254 255 static char ID; 256 257 private: 258 bool LRE_CanEraseVirtReg(unsigned); 259 void LRE_WillShrinkVirtReg(unsigned); 260 void LRE_DidCloneVirtReg(unsigned, unsigned); 261 262 BlockFrequency calcSpillCost(); 263 bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&); 264 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>); 265 void growRegion(GlobalSplitCandidate &Cand); 266 BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&); 267 bool calcCompactRegion(GlobalSplitCandidate&); 268 void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>); 269 void calcGapWeights(unsigned, SmallVectorImpl<float>&); 270 unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg); 271 bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool); 272 bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&); 273 void evictInterference(LiveInterval&, unsigned, 274 SmallVectorImpl<unsigned>&); 275 276 unsigned tryAssign(LiveInterval&, AllocationOrder&, 277 SmallVectorImpl<unsigned>&); 278 unsigned tryEvict(LiveInterval&, AllocationOrder&, 279 SmallVectorImpl<unsigned>&, unsigned = ~0u); 280 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&, 281 SmallVectorImpl<unsigned>&); 282 unsigned tryBlockSplit(LiveInterval&, AllocationOrder&, 283 SmallVectorImpl<unsigned>&); 284 unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&, 285 SmallVectorImpl<unsigned>&); 286 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&, 287 SmallVectorImpl<unsigned>&); 288 unsigned trySplit(LiveInterval&, AllocationOrder&, 289 SmallVectorImpl<unsigned>&); 290 }; 291 } // end anonymous namespace 292 293 char RAGreedy::ID = 0; 294 295 #ifndef NDEBUG 296 const char *const RAGreedy::StageName[] = { 297 "RS_New", 298 "RS_Assign", 299 "RS_Split", 300 "RS_Split2", 301 "RS_Spill", 302 "RS_Done" 303 }; 304 #endif 305 306 // Hysteresis to use when comparing floats. 307 // This helps stabilize decisions based on float comparisons. 
308 const float Hysteresis = 0.98f; 309 310 311 FunctionPass* llvm::createGreedyRegisterAllocator() { 312 return new RAGreedy(); 313 } 314 315 RAGreedy::RAGreedy(): MachineFunctionPass(ID) { 316 initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry()); 317 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 318 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); 319 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 320 initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry()); 321 initializeMachineSchedulerPass(*PassRegistry::getPassRegistry()); 322 initializeLiveStacksPass(*PassRegistry::getPassRegistry()); 323 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry()); 324 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry()); 325 initializeVirtRegMapPass(*PassRegistry::getPassRegistry()); 326 initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry()); 327 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry()); 328 initializeSpillPlacementPass(*PassRegistry::getPassRegistry()); 329 } 330 331 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { 332 AU.setPreservesCFG(); 333 AU.addRequired<MachineBlockFrequencyInfo>(); 334 AU.addPreserved<MachineBlockFrequencyInfo>(); 335 AU.addRequired<AliasAnalysis>(); 336 AU.addPreserved<AliasAnalysis>(); 337 AU.addRequired<LiveIntervals>(); 338 AU.addPreserved<LiveIntervals>(); 339 AU.addRequired<SlotIndexes>(); 340 AU.addPreserved<SlotIndexes>(); 341 AU.addRequired<LiveDebugVariables>(); 342 AU.addPreserved<LiveDebugVariables>(); 343 AU.addRequired<LiveStacks>(); 344 AU.addPreserved<LiveStacks>(); 345 AU.addRequired<MachineDominatorTree>(); 346 AU.addPreserved<MachineDominatorTree>(); 347 AU.addRequired<MachineLoopInfo>(); 348 AU.addPreserved<MachineLoopInfo>(); 349 AU.addRequired<VirtRegMap>(); 350 AU.addPreserved<VirtRegMap>(); 351 AU.addRequired<LiveRegMatrix>(); 352 AU.addPreserved<LiveRegMatrix>(); 353 AU.addRequired<EdgeBundles>(); 354 AU.addRequired<SpillPlacement>(); 355 MachineFunctionPass::getAnalysisUsage(AU); 356 } 357 358 359 //===----------------------------------------------------------------------===// 360 // LiveRangeEdit delegate methods 361 //===----------------------------------------------------------------------===// 362 363 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) { 364 if (VRM->hasPhys(VirtReg)) { 365 Matrix->unassign(LIS->getInterval(VirtReg)); 366 return true; 367 } 368 // Unassigned virtreg is probably in the priority queue. 369 // RegAllocBase will erase it after dequeueing. 370 return false; 371 } 372 373 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) { 374 if (!VRM->hasPhys(VirtReg)) 375 return; 376 377 // Register is assigned, put it back on the queue for reassignment. 378 LiveInterval &LI = LIS->getInterval(VirtReg); 379 Matrix->unassign(LI); 380 enqueue(&LI); 381 } 382 383 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) { 384 // Cloning a register we haven't even heard about yet? Just ignore it. 385 if (!ExtraRegInfo.inBounds(Old)) 386 return; 387 388 // LRE may clone a virtual register because dead code elimination causes it to 389 // be split into connected components. The new components are much smaller 390 // than the original, so they should get a new chance at being assigned. The 391 // parent is reset to RS_Assign and the clone is given the same stage as the parent.
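  // For example (hypothetical vreg numbers): if dead code elimination breaks
  // %vreg7, currently at RS_Split, into connected components, %vreg7 keeps one
  // component and a clone such as %vreg21 receives another; both restart at
  // RS_Assign and get a fresh attempt at a plain assignment before any further
  // splitting is considered.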
392 ExtraRegInfo[Old].Stage = RS_Assign; 393 ExtraRegInfo.grow(New); 394 ExtraRegInfo[New] = ExtraRegInfo[Old]; 395 } 396 397 void RAGreedy::releaseMemory() { 398 SpillerInstance.reset(0); 399 ExtraRegInfo.clear(); 400 GlobalCand.clear(); 401 } 402 403 void RAGreedy::enqueue(LiveInterval *LI) { 404 // Prioritize live ranges by size, assigning larger ranges first. 405 // The queue holds (size, reg) pairs. 406 const unsigned Size = LI->getSize(); 407 const unsigned Reg = LI->reg; 408 assert(TargetRegisterInfo::isVirtualRegister(Reg) && 409 "Can only enqueue virtual registers"); 410 unsigned Prio; 411 412 ExtraRegInfo.grow(Reg); 413 if (ExtraRegInfo[Reg].Stage == RS_New) 414 ExtraRegInfo[Reg].Stage = RS_Assign; 415 416 if (ExtraRegInfo[Reg].Stage == RS_Split) { 417 // Unsplit ranges that couldn't be allocated immediately are deferred until 418 // everything else has been allocated. 419 Prio = Size; 420 } else { 421 if (ExtraRegInfo[Reg].Stage == RS_Assign && !LI->empty() && 422 LIS->intervalIsInOneMBB(*LI)) { 423 // Allocate original local ranges in linear instruction order. Since they 424 // are singly defined, this produces optimal coloring in the absence of 425 // global interference and other constraints. 426 if (!TRI->reverseLocalAssignment()) 427 Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex()); 428 else { 429 // Allocating bottom up may allow many short LRGs to be assigned first 430 // to one of the cheap registers. This could be much faster for very 431 // large blocks on targets with many physical registers. 432 Prio = Indexes->getZeroIndex().getInstrDistance(LI->beginIndex()); 433 } 434 } 435 else { 436 // Allocate global and split ranges in long->short order. Long ranges that 437 // don't fit should be spilled (or split) ASAP so they don't create 438 // interference. Mark a bit to prioritize global above local ranges. 439 Prio = (1u << 29) + Size; 440 } 441 // Mark a higher bit to prioritize global and local above RS_Split. 442 Prio |= (1u << 31); 443 444 // Boost ranges that have a physical register hint. 445 if (VRM->hasKnownPreference(Reg)) 446 Prio |= (1u << 30); 447 } 448 // The virtual register number is a tie breaker for same-sized ranges. 449 // Give lower vreg numbers higher priority to assign them first. 450 Queue.push(std::make_pair(Prio, ~Reg)); 451 } 452 453 LiveInterval *RAGreedy::dequeue() { 454 if (Queue.empty()) 455 return 0; 456 LiveInterval *LI = &LIS->getInterval(~Queue.top().second); 457 Queue.pop(); 458 return LI; 459 } 460 461 462 //===----------------------------------------------------------------------===// 463 // Direct Assignment 464 //===----------------------------------------------------------------------===// 465 466 /// tryAssign - Try to assign VirtReg to an available register. 467 unsigned RAGreedy::tryAssign(LiveInterval &VirtReg, 468 AllocationOrder &Order, 469 SmallVectorImpl<unsigned> &NewVRegs) { 470 Order.rewind(); 471 unsigned PhysReg; 472 while ((PhysReg = Order.next())) 473 if (!Matrix->checkInterference(VirtReg, PhysReg)) 474 break; 475 if (!PhysReg || Order.isHint()) 476 return PhysReg; 477 478 // PhysReg is available, but there may be a better choice. 479 480 // If we missed a simple hint, try to cheaply evict interference from the 481 // preferred register. 
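  // For instance (hypothetical registers): if VirtReg is hinted to %EAX but
  // %EAX is blocked by a light, splittable range while some other register is
  // free, evicting the blocker lets the hint be honored, so the copy that gave
  // rise to the hint can usually be coalesced away.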
482 if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg)) 483 if (Order.isHint(Hint)) { 484 DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n'); 485 EvictionCost MaxCost; 486 MaxCost.setBrokenHints(1); 487 if (canEvictInterference(VirtReg, Hint, true, MaxCost)) { 488 evictInterference(VirtReg, Hint, NewVRegs); 489 return Hint; 490 } 491 } 492 493 // Try to evict interference from a cheaper alternative. 494 unsigned Cost = TRI->getCostPerUse(PhysReg); 495 496 // Most registers have 0 additional cost. 497 if (!Cost) 498 return PhysReg; 499 500 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost 501 << '\n'); 502 unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost); 503 return CheapReg ? CheapReg : PhysReg; 504 } 505 506 507 //===----------------------------------------------------------------------===// 508 // Interference eviction 509 //===----------------------------------------------------------------------===// 510 511 unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) { 512 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo); 513 unsigned PhysReg; 514 while ((PhysReg = Order.next())) { 515 if (PhysReg == PrevReg) 516 continue; 517 518 MCRegUnitIterator Units(PhysReg, TRI); 519 for (; Units.isValid(); ++Units) { 520 // Instantiate a "subquery", not to be confused with the Queries array. 521 LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]); 522 if (subQ.checkInterference()) 523 break; 524 } 525 // If no units have interference, break out with the current PhysReg. 526 if (!Units.isValid()) 527 break; 528 } 529 if (PhysReg) 530 DEBUG(dbgs() << "can reassign: " << VirtReg << " from " 531 << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI) 532 << '\n'); 533 return PhysReg; 534 } 535 536 /// shouldEvict - determine if A should evict the assigned live range B. The 537 /// eviction policy defined by this function together with the allocation order 538 /// defined by enqueue() decides which registers ultimately end up being split 539 /// and spilled. 540 /// 541 /// Cascade numbers are used to prevent infinite loops if this function is a 542 /// cyclic relation. 543 /// 544 /// @param A The live range to be assigned. 545 /// @param IsHint True when A is about to be assigned to its preferred 546 /// register. 547 /// @param B The live range to be evicted. 548 /// @param BreaksHint True when B is already assigned to its preferred register. 549 bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint, 550 LiveInterval &B, bool BreaksHint) { 551 bool CanSplit = getStage(B) < RS_Spill; 552 553 // Be fairly aggressive about following hints as long as the evictee can be 554 // split. 555 if (CanSplit && IsHint && !BreaksHint) 556 return true; 557 558 if (A.weight > B.weight) { 559 DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n'); 560 return true; 561 } 562 return false; 563 } 564 565 /// canEvictInterference - Return true if all interferences between VirtReg and 566 /// PhysReg can be evicted. 567 /// 568 /// @param VirtReg Live range that is about to be assigned. 569 /// @param PhysReg Desired register for assignment. 570 /// @param IsHint True when PhysReg is VirtReg's preferred register. 571 /// @param MaxCost Only look for cheaper candidates and update with new cost 572 /// when returning true. 573 /// @returns True when interference can be evicted cheaper than MaxCost.
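/// A minimal usage sketch (no new API; these are the functions defined in
/// this file), mirroring what tryEvict does below:
///
///   EvictionCost BestCost;
///   BestCost.setMax();
///   if (canEvictInterference(VirtReg, PhysReg, false, BestCost))
///     evictInterference(VirtReg, PhysReg, NewVRegs);
///
/// On success, MaxCost is tightened to the actual eviction cost, so a caller
/// scanning an AllocationOrder can keep the cheapest candidate seen so far.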
574 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg, 575 bool IsHint, EvictionCost &MaxCost) { 576 // It is only possible to evict virtual register interference. 577 if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg) 578 return false; 579 580 bool IsLocal = LIS->intervalIsInOneMBB(VirtReg); 581 582 // Find VirtReg's cascade number. This will be unassigned if VirtReg was never 583 // involved in an eviction before. If a cascade number was assigned, deny 584 // evicting anything with the same or a newer cascade number. This prevents 585 // infinite eviction loops. 586 // 587 // This works out so a register without a cascade number is allowed to evict 588 // anything, and it can be evicted by anything. 589 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade; 590 if (!Cascade) 591 Cascade = NextCascade; 592 593 EvictionCost Cost; 594 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 595 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); 596 // If there are 10 or more interferences, chances are one is heavier. 597 if (Q.collectInterferingVRegs(10) >= 10) 598 return false; 599 600 // Check if any interfering live range is heavier than MaxWeight. 601 for (unsigned i = Q.interferingVRegs().size(); i; --i) { 602 LiveInterval *Intf = Q.interferingVRegs()[i - 1]; 603 assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) && 604 "Only expecting virtual register interference from query"); 605 // Never evict spill products. They cannot be split or spilled. 606 if (getStage(*Intf) == RS_Done) 607 return false; 608 // Once a live range becomes small enough, it is urgent that we find a 609 // register for it. This is indicated by an infinite spill weight. These 610 // urgent live ranges get to evict almost anything. 611 // 612 // Also allow urgent evictions of unspillable ranges from a strictly 613 // larger allocation order. 614 bool Urgent = !VirtReg.isSpillable() && 615 (Intf->isSpillable() || 616 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) < 617 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg))); 618 // Only evict older cascades or live ranges without a cascade. 619 unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade; 620 if (Cascade <= IntfCascade) { 621 if (!Urgent) 622 return false; 623 // We permit breaking cascades for urgent evictions. It should be the 624 // last resort, though, so make it really expensive. 625 Cost.BrokenHints += 10; 626 } 627 // Would this break a satisfied hint? 628 bool BreaksHint = VRM->hasPreferredPhys(Intf->reg); 629 // Update eviction cost. 630 Cost.BrokenHints += BreaksHint; 631 Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight); 632 // Abort if this would be too expensive. 633 if (!(Cost < MaxCost)) 634 return false; 635 if (Urgent) 636 continue; 637 // Apply the eviction policy for non-urgent evictions. 638 if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint)) 639 return false; 640 // If !MaxCost.isMax(), then we're just looking for a cheap register. 641 // Evicting another local live range in this case could lead to suboptimal 642 // coloring. 643 if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) && 644 !canReassign(*Intf, PhysReg)) { 645 return false; 646 } 647 } 648 } 649 MaxCost = Cost; 650 return true; 651 } 652 653 /// evictInterference - Evict any interfering registers that prevent VirtReg 654 /// from being assigned to Physreg. This assumes that canEvictInterference 655 /// returned true.
656 void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg, 657 SmallVectorImpl<unsigned> &NewVRegs) { 658 // Make sure that VirtReg has a cascade number, and assign that cascade 659 // number to every evicted register. These live ranges can then only be 660 // evicted by a newer cascade, preventing infinite loops. 661 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade; 662 if (!Cascade) 663 Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++; 664 665 DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI) 666 << " interference: Cascade " << Cascade << '\n'); 667 668 // Collect all interfering virtregs first. 669 SmallVector<LiveInterval*, 8> Intfs; 670 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 671 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units); 672 assert(Q.seenAllInterferences() && "Didn't check all interferences."); 673 ArrayRef<LiveInterval*> IVR = Q.interferingVRegs(); 674 Intfs.append(IVR.begin(), IVR.end()); 675 } 676 677 // Evict them second. This will invalidate the queries. 678 for (unsigned i = 0, e = Intfs.size(); i != e; ++i) { 679 LiveInterval *Intf = Intfs[i]; 680 // The same VirtReg may be present in multiple RegUnits. Skip duplicates. 681 if (!VRM->hasPhys(Intf->reg)) 682 continue; 683 Matrix->unassign(*Intf); 684 assert((ExtraRegInfo[Intf->reg].Cascade < Cascade || 685 VirtReg.isSpillable() < Intf->isSpillable()) && 686 "Cannot decrease cascade number, illegal eviction"); 687 ExtraRegInfo[Intf->reg].Cascade = Cascade; 688 ++NumEvicted; 689 NewVRegs.push_back(Intf->reg); 690 } 691 } 692 693 /// tryEvict - Try to evict all interferences for a physreg. 694 /// @param VirtReg Currently unassigned virtual register. 695 /// @param Order Physregs to try. 696 /// @return Physreg to assign VirtReg, or 0. 697 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg, 698 AllocationOrder &Order, 699 SmallVectorImpl<unsigned> &NewVRegs, 700 unsigned CostPerUseLimit) { 701 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled); 702 703 // Keep track of the cheapest interference seen so far. 704 EvictionCost BestCost; 705 BestCost.setMax(); 706 unsigned BestPhys = 0; 707 unsigned OrderLimit = Order.getOrder().size(); 708 709 // When we are just looking for a reduced cost per use, don't break any 710 // hints, and only evict smaller spill weights. 711 if (CostPerUseLimit < ~0u) { 712 BestCost.BrokenHints = 0; 713 BestCost.MaxWeight = VirtReg.weight; 714 715 // Check if any registers in RC are below CostPerUseLimit. 716 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg); 717 unsigned MinCost = RegClassInfo.getMinCost(RC); 718 if (MinCost >= CostPerUseLimit) { 719 DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost 720 << ", no cheaper registers to be found.\n"); 721 return 0; 722 } 723 724 // It is normal for register classes to have a long tail of registers with 725 // the same cost. We don't need to look at them if they're too expensive. 726 if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) { 727 OrderLimit = RegClassInfo.getLastCostChange(RC); 728 DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n"); 729 } 730 } 731 732 Order.rewind(); 733 while (unsigned PhysReg = Order.next(OrderLimit)) { 734 if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit) 735 continue; 736 // The first use of a callee-saved register in a function has cost 1. 737 // Don't start using a CSR when the CostPerUseLimit is low.
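    // (Bringing a previously unused callee-saved register into play forces a
    // save and restore in the prologue and epilogue, which costs roughly as
    // much as the cost-1 register we are trying to improve on.)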
738 if (CostPerUseLimit == 1) 739 if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg)) 740 if (!MRI->isPhysRegUsed(CSR)) { 741 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR " 742 << PrintReg(CSR, TRI) << '\n'); 743 continue; 744 } 745 746 if (!canEvictInterference(VirtReg, PhysReg, false, BestCost)) 747 continue; 748 749 // Best so far. 750 BestPhys = PhysReg; 751 752 // Stop if the hint can be used. 753 if (Order.isHint()) 754 break; 755 } 756 757 if (!BestPhys) 758 return 0; 759 760 evictInterference(VirtReg, BestPhys, NewVRegs); 761 return BestPhys; 762 } 763 764 765 //===----------------------------------------------------------------------===// 766 // Region Splitting 767 //===----------------------------------------------------------------------===// 768 769 /// addSplitConstraints - Fill out the SplitConstraints vector based on the 770 /// interference pattern in Physreg and its aliases. Add the constraints to 771 /// SpillPlacement and return the static cost of this split in Cost, assuming 772 /// that all preferences in SplitConstraints are met. 773 /// Return false if there are no bundles with positive bias. 774 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf, 775 BlockFrequency &Cost) { 776 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 777 778 // Reset interference dependent info. 779 SplitConstraints.resize(UseBlocks.size()); 780 BlockFrequency StaticCost = 0; 781 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 782 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 783 SpillPlacement::BlockConstraint &BC = SplitConstraints[i]; 784 785 BC.Number = BI.MBB->getNumber(); 786 Intf.moveToBlock(BC.Number); 787 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare; 788 BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare; 789 BC.ChangesValue = BI.FirstDef.isValid(); 790 791 if (!Intf.hasInterference()) 792 continue; 793 794 // Number of spill code instructions to insert. 795 unsigned Ins = 0; 796 797 // Interference for the live-in value. 798 if (BI.LiveIn) { 799 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) 800 BC.Entry = SpillPlacement::MustSpill, ++Ins; 801 else if (Intf.first() < BI.FirstInstr) 802 BC.Entry = SpillPlacement::PrefSpill, ++Ins; 803 else if (Intf.first() < BI.LastInstr) 804 ++Ins; 805 } 806 807 // Interference for the live-out value. 808 if (BI.LiveOut) { 809 if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) 810 BC.Exit = SpillPlacement::MustSpill, ++Ins; 811 else if (Intf.last() > BI.LastInstr) 812 BC.Exit = SpillPlacement::PrefSpill, ++Ins; 813 else if (Intf.last() > BI.FirstInstr) 814 ++Ins; 815 } 816 817 // Accumulate the total frequency of inserted spill code. 818 while (Ins--) 819 StaticCost += SpillPlacer->getBlockFrequency(BC.Number); 820 } 821 Cost = StaticCost; 822 823 // Add constraints for use-blocks. Note that these are the only constraints 824 // that may add a positive bias, it is downhill from here. 825 SpillPlacer->addConstraints(SplitConstraints); 826 return SpillPlacer->scanActiveBundles(); 827 } 828 829 830 /// addThroughConstraints - Add constraints and links to SpillPlacer from the 831 /// live-through blocks in Blocks. 
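/// Blocks are flushed to SpillPlacer in groups of GroupSize (8) so that the
/// temporary constraint and link arrays used below stay small and fixed-size.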
832 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf, 833 ArrayRef<unsigned> Blocks) { 834 const unsigned GroupSize = 8; 835 SpillPlacement::BlockConstraint BCS[GroupSize]; 836 unsigned TBS[GroupSize]; 837 unsigned B = 0, T = 0; 838 839 for (unsigned i = 0; i != Blocks.size(); ++i) { 840 unsigned Number = Blocks[i]; 841 Intf.moveToBlock(Number); 842 843 if (!Intf.hasInterference()) { 844 assert(T < GroupSize && "Array overflow"); 845 TBS[T] = Number; 846 if (++T == GroupSize) { 847 SpillPlacer->addLinks(makeArrayRef(TBS, T)); 848 T = 0; 849 } 850 continue; 851 } 852 853 assert(B < GroupSize && "Array overflow"); 854 BCS[B].Number = Number; 855 856 // Interference for the live-in value. 857 if (Intf.first() <= Indexes->getMBBStartIdx(Number)) 858 BCS[B].Entry = SpillPlacement::MustSpill; 859 else 860 BCS[B].Entry = SpillPlacement::PrefSpill; 861 862 // Interference for the live-out value. 863 if (Intf.last() >= SA->getLastSplitPoint(Number)) 864 BCS[B].Exit = SpillPlacement::MustSpill; 865 else 866 BCS[B].Exit = SpillPlacement::PrefSpill; 867 868 if (++B == GroupSize) { 869 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); 870 SpillPlacer->addConstraints(Array); 871 B = 0; 872 } 873 } 874 875 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B); 876 SpillPlacer->addConstraints(Array); 877 SpillPlacer->addLinks(makeArrayRef(TBS, T)); 878 } 879 880 void RAGreedy::growRegion(GlobalSplitCandidate &Cand) { 881 // Keep track of through blocks that have not been added to SpillPlacer. 882 BitVector Todo = SA->getThroughBlocks(); 883 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks; 884 unsigned AddedTo = 0; 885 #ifndef NDEBUG 886 unsigned Visited = 0; 887 #endif 888 889 for (;;) { 890 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive(); 891 // Find new through blocks in the periphery of PrefRegBundles. 892 for (int i = 0, e = NewBundles.size(); i != e; ++i) { 893 unsigned Bundle = NewBundles[i]; 894 // Look at all blocks connected to Bundle in the full graph. 895 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle); 896 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end(); 897 I != E; ++I) { 898 unsigned Block = *I; 899 if (!Todo.test(Block)) 900 continue; 901 Todo.reset(Block); 902 // This is a new through block. Add it to SpillPlacer later. 903 ActiveBlocks.push_back(Block); 904 #ifndef NDEBUG 905 ++Visited; 906 #endif 907 } 908 } 909 // Any new blocks to add? 910 if (ActiveBlocks.size() == AddedTo) 911 break; 912 913 // Compute through constraints from the interference, or assume that all 914 // through blocks prefer spilling when forming compact regions. 915 ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo); 916 if (Cand.PhysReg) 917 addThroughConstraints(Cand.Intf, NewBlocks); 918 else 919 // Provide a strong negative bias on through blocks to prevent unwanted 920 // liveness on loop backedges. 921 SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true); 922 AddedTo = ActiveBlocks.size(); 923 924 // Perhaps iterating can enable more bundles? 925 SpillPlacer->iterate(); 926 } 927 DEBUG(dbgs() << ", v=" << Visited); 928 } 929 930 /// calcCompactRegion - Compute the set of edge bundles that should be live 931 /// when splitting the current live range into compact regions. Compact 932 /// regions can be computed without looking at interference. They are the 933 /// regions formed by removing all the live-through blocks from the live range. 
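/// For example, a value that is live across a loop it is never used in has
/// the loop's live-through blocks removed; those blocks become stack-bound
/// and only the blocks with real uses remain in the compact region.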
934 /// 935 /// Returns false if the current live range is already compact, or if the 936 /// compact regions would form single block regions anyway. 937 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) { 938 // Without any through blocks, the live range is already compact. 939 if (!SA->getNumThroughBlocks()) 940 return false; 941 942 // Compact regions don't correspond to any physreg. 943 Cand.reset(IntfCache, 0); 944 945 DEBUG(dbgs() << "Compact region bundles"); 946 947 // Use the spill placer to determine the live bundles. GrowRegion pretends 948 // that all the through blocks have interference when PhysReg is unset. 949 SpillPlacer->prepare(Cand.LiveBundles); 950 951 // The static split cost will be zero since Cand.Intf reports no interference. 952 BlockFrequency Cost; 953 if (!addSplitConstraints(Cand.Intf, Cost)) { 954 DEBUG(dbgs() << ", none.\n"); 955 return false; 956 } 957 958 growRegion(Cand); 959 SpillPlacer->finish(); 960 961 if (!Cand.LiveBundles.any()) { 962 DEBUG(dbgs() << ", none.\n"); 963 return false; 964 } 965 966 DEBUG({ 967 for (int i = Cand.LiveBundles.find_first(); i>=0; 968 i = Cand.LiveBundles.find_next(i)) 969 dbgs() << " EB#" << i; 970 dbgs() << ".\n"; 971 }); 972 return true; 973 } 974 975 /// calcSpillCost - Compute how expensive it would be to split the live range in 976 /// SA around all use blocks instead of forming bundle regions. 977 BlockFrequency RAGreedy::calcSpillCost() { 978 BlockFrequency Cost = 0; 979 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 980 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 981 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 982 unsigned Number = BI.MBB->getNumber(); 983 // We normally only need one spill instruction - a load or a store. 984 Cost += SpillPlacer->getBlockFrequency(Number); 985 986 // Unless the value is redefined in the block. 987 if (BI.LiveIn && BI.LiveOut && BI.FirstDef) 988 Cost += SpillPlacer->getBlockFrequency(Number); 989 } 990 return Cost; 991 } 992 993 /// calcGlobalSplitCost - Return the global split cost of following the split 994 /// pattern in LiveBundles. This cost should be added to the local cost of the 995 /// interference pattern in SplitConstraints. 996 /// 997 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) { 998 BlockFrequency GlobalCost = 0; 999 const BitVector &LiveBundles = Cand.LiveBundles; 1000 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 1001 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 1002 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 1003 SpillPlacement::BlockConstraint &BC = SplitConstraints[i]; 1004 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, 0)]; 1005 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)]; 1006 unsigned Ins = 0; 1007 1008 if (BI.LiveIn) 1009 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg); 1010 if (BI.LiveOut) 1011 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg); 1012 while (Ins--) 1013 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number); 1014 } 1015 1016 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) { 1017 unsigned Number = Cand.ActiveBlocks[i]; 1018 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)]; 1019 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)]; 1020 if (!RegIn && !RegOut) 1021 continue; 1022 if (RegIn && RegOut) { 1023 // We need double spill code if this block has interference. 
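      // (A spill before the interference and a reload after it, which is why
      // the block frequency is added twice below.)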
1024 Cand.Intf.moveToBlock(Number); 1025 if (Cand.Intf.hasInterference()) { 1026 GlobalCost += SpillPlacer->getBlockFrequency(Number); 1027 GlobalCost += SpillPlacer->getBlockFrequency(Number); 1028 } 1029 continue; 1030 } 1031 // live-in / stack-out or stack-in live-out. 1032 GlobalCost += SpillPlacer->getBlockFrequency(Number); 1033 } 1034 return GlobalCost; 1035 } 1036 1037 /// splitAroundRegion - Split the current live range around the regions 1038 /// determined by BundleCand and GlobalCand. 1039 /// 1040 /// Before calling this function, GlobalCand and BundleCand must be initialized 1041 /// so each bundle is assigned to a valid candidate, or NoCand for the 1042 /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor 1043 /// objects must be initialized for the current live range, and intervals 1044 /// created for the used candidates. 1045 /// 1046 /// @param LREdit The LiveRangeEdit object handling the current split. 1047 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value 1048 /// must appear in this list. 1049 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit, 1050 ArrayRef<unsigned> UsedCands) { 1051 // These are the intervals created for new global ranges. We may create more 1052 // intervals for local ranges. 1053 const unsigned NumGlobalIntvs = LREdit.size(); 1054 DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n"); 1055 assert(NumGlobalIntvs && "No global intervals configured"); 1056 1057 // Isolate even single instructions when dealing with a proper sub-class. 1058 // That guarantees register class inflation for the stack interval because it 1059 // is all copies. 1060 unsigned Reg = SA->getParent().reg; 1061 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); 1062 1063 // First handle all the blocks with uses. 1064 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 1065 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 1066 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 1067 unsigned Number = BI.MBB->getNumber(); 1068 unsigned IntvIn = 0, IntvOut = 0; 1069 SlotIndex IntfIn, IntfOut; 1070 if (BI.LiveIn) { 1071 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)]; 1072 if (CandIn != NoCand) { 1073 GlobalSplitCandidate &Cand = GlobalCand[CandIn]; 1074 IntvIn = Cand.IntvIdx; 1075 Cand.Intf.moveToBlock(Number); 1076 IntfIn = Cand.Intf.first(); 1077 } 1078 } 1079 if (BI.LiveOut) { 1080 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)]; 1081 if (CandOut != NoCand) { 1082 GlobalSplitCandidate &Cand = GlobalCand[CandOut]; 1083 IntvOut = Cand.IntvIdx; 1084 Cand.Intf.moveToBlock(Number); 1085 IntfOut = Cand.Intf.last(); 1086 } 1087 } 1088 1089 // Create separate intervals for isolated blocks with multiple uses. 1090 if (!IntvIn && !IntvOut) { 1091 DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n"); 1092 if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) 1093 SE->splitSingleBlock(BI); 1094 continue; 1095 } 1096 1097 if (IntvIn && IntvOut) 1098 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); 1099 else if (IntvIn) 1100 SE->splitRegInBlock(BI, IntvIn, IntfIn); 1101 else 1102 SE->splitRegOutBlock(BI, IntvOut, IntfOut); 1103 } 1104 1105 // Handle live-through blocks. The relevant live-through blocks are stored in 1106 // the ActiveBlocks list with each candidate. We need to filter out 1107 // duplicates. 
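  // The Todo bit vector starts with every live-through block set; a block is
  // cleared the first time it is handled, so a block shared by several
  // candidates is only split once.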
1108 BitVector Todo = SA->getThroughBlocks(); 1109 for (unsigned c = 0; c != UsedCands.size(); ++c) { 1110 ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks; 1111 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) { 1112 unsigned Number = Blocks[i]; 1113 if (!Todo.test(Number)) 1114 continue; 1115 Todo.reset(Number); 1116 1117 unsigned IntvIn = 0, IntvOut = 0; 1118 SlotIndex IntfIn, IntfOut; 1119 1120 unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)]; 1121 if (CandIn != NoCand) { 1122 GlobalSplitCandidate &Cand = GlobalCand[CandIn]; 1123 IntvIn = Cand.IntvIdx; 1124 Cand.Intf.moveToBlock(Number); 1125 IntfIn = Cand.Intf.first(); 1126 } 1127 1128 unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)]; 1129 if (CandOut != NoCand) { 1130 GlobalSplitCandidate &Cand = GlobalCand[CandOut]; 1131 IntvOut = Cand.IntvIdx; 1132 Cand.Intf.moveToBlock(Number); 1133 IntfOut = Cand.Intf.last(); 1134 } 1135 if (!IntvIn && !IntvOut) 1136 continue; 1137 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut); 1138 } 1139 } 1140 1141 ++NumGlobalSplits; 1142 1143 SmallVector<unsigned, 8> IntvMap; 1144 SE->finish(&IntvMap); 1145 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); 1146 1147 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1148 unsigned OrigBlocks = SA->getNumLiveBlocks(); 1149 1150 // Sort out the new intervals created by splitting. We get four kinds: 1151 // - Remainder intervals should not be split again. 1152 // - Candidate intervals can be assigned to Cand.PhysReg. 1153 // - Block-local splits are candidates for local splitting. 1154 // - DCE leftovers should go back on the queue. 1155 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { 1156 LiveInterval &Reg = LIS->getInterval(LREdit.get(i)); 1157 1158 // Ignore old intervals from DCE. 1159 if (getStage(Reg) != RS_New) 1160 continue; 1161 1162 // Remainder interval. Don't try splitting again, spill if it doesn't 1163 // allocate. 1164 if (IntvMap[i] == 0) { 1165 setStage(Reg, RS_Spill); 1166 continue; 1167 } 1168 1169 // Global intervals. Allow repeated splitting as long as the number of live 1170 // blocks is strictly decreasing. 1171 if (IntvMap[i] < NumGlobalIntvs) { 1172 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) { 1173 DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks 1174 << " blocks as original.\n"); 1175 // Don't allow repeated splitting as a safe guard against looping. 1176 setStage(Reg, RS_Split2); 1177 } 1178 continue; 1179 } 1180 1181 // Other intervals are treated as new. This includes local intervals created 1182 // for blocks with multiple uses, and anything created by DCE. 1183 } 1184 1185 if (VerifyEnabled) 1186 MF->verify(this, "After splitting live range around region"); 1187 } 1188 1189 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1190 SmallVectorImpl<unsigned> &NewVRegs) { 1191 unsigned NumCands = 0; 1192 unsigned BestCand = NoCand; 1193 BlockFrequency BestCost; 1194 SmallVector<unsigned, 8> UsedCands; 1195 1196 // Check if we can split this live range around a compact region. 1197 bool HasCompact = calcCompactRegion(GlobalCand.front()); 1198 if (HasCompact) { 1199 // Yes, keep GlobalCand[0] as the compact region candidate. 1200 NumCands = 1; 1201 BestCost = BlockFrequency::getMaxFrequency(); 1202 } else { 1203 // No benefit from the compact region, our fallback will be per-block 1204 // splitting. Make sure we find a solution that is cheaper than spilling. 
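    // calcSpillCost() is the cost of the per-block fallback, so a region split
    // candidate is only accepted below if it is strictly cheaper than that.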
1205 BestCost = calcSpillCost(); 1206 DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n'); 1207 } 1208 1209 Order.rewind(); 1210 while (unsigned PhysReg = Order.next()) { 1211 // Discard bad candidates before we run out of interference cache cursors. 1212 // This will only affect register classes with a lot of registers (>32). 1213 if (NumCands == IntfCache.getMaxCursors()) { 1214 unsigned WorstCount = ~0u; 1215 unsigned Worst = 0; 1216 for (unsigned i = 0; i != NumCands; ++i) { 1217 if (i == BestCand || !GlobalCand[i].PhysReg) 1218 continue; 1219 unsigned Count = GlobalCand[i].LiveBundles.count(); 1220 if (Count < WorstCount) 1221 Worst = i, WorstCount = Count; 1222 } 1223 --NumCands; 1224 GlobalCand[Worst] = GlobalCand[NumCands]; 1225 if (BestCand == NumCands) 1226 BestCand = Worst; 1227 } 1228 1229 if (GlobalCand.size() <= NumCands) 1230 GlobalCand.resize(NumCands+1); 1231 GlobalSplitCandidate &Cand = GlobalCand[NumCands]; 1232 Cand.reset(IntfCache, PhysReg); 1233 1234 SpillPlacer->prepare(Cand.LiveBundles); 1235 BlockFrequency Cost; 1236 if (!addSplitConstraints(Cand.Intf, Cost)) { 1237 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n"); 1238 continue; 1239 } 1240 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost); 1241 if (Cost >= BestCost) { 1242 DEBUG({ 1243 if (BestCand == NoCand) 1244 dbgs() << " worse than no bundles\n"; 1245 else 1246 dbgs() << " worse than " 1247 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n'; 1248 }); 1249 continue; 1250 } 1251 growRegion(Cand); 1252 1253 SpillPlacer->finish(); 1254 1255 // No live bundles, defer to splitSingleBlocks(). 1256 if (!Cand.LiveBundles.any()) { 1257 DEBUG(dbgs() << " no bundles.\n"); 1258 continue; 1259 } 1260 1261 Cost += calcGlobalSplitCost(Cand); 1262 DEBUG({ 1263 dbgs() << ", total = " << Cost << " with bundles"; 1264 for (int i = Cand.LiveBundles.find_first(); i>=0; 1265 i = Cand.LiveBundles.find_next(i)) 1266 dbgs() << " EB#" << i; 1267 dbgs() << ".\n"; 1268 }); 1269 if (Cost < BestCost) { 1270 BestCand = NumCands; 1271 BestCost = Cost; 1272 } 1273 ++NumCands; 1274 } 1275 1276 // No solutions found, fall back to single block splitting. 1277 if (!HasCompact && BestCand == NoCand) 1278 return 0; 1279 1280 // Prepare split editor. 1281 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1282 SE->reset(LREdit, SplitSpillMode); 1283 1284 // Assign all edge bundles to the preferred candidate, or NoCand. 1285 BundleCand.assign(Bundles->getNumBundles(), NoCand); 1286 1287 // Assign bundles for the best candidate region. 1288 if (BestCand != NoCand) { 1289 GlobalSplitCandidate &Cand = GlobalCand[BestCand]; 1290 if (unsigned B = Cand.getBundles(BundleCand, BestCand)) { 1291 UsedCands.push_back(BestCand); 1292 Cand.IntvIdx = SE->openIntv(); 1293 DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in " 1294 << B << " bundles, intv " << Cand.IntvIdx << ".\n"); 1295 (void)B; 1296 } 1297 } 1298 1299 // Assign bundles for the compact region. 
1300 if (HasCompact) { 1301 GlobalSplitCandidate &Cand = GlobalCand.front(); 1302 assert(!Cand.PhysReg && "Compact region has no physreg"); 1303 if (unsigned B = Cand.getBundles(BundleCand, 0)) { 1304 UsedCands.push_back(0); 1305 Cand.IntvIdx = SE->openIntv(); 1306 DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv " 1307 << Cand.IntvIdx << ".\n"); 1308 (void)B; 1309 } 1310 } 1311 1312 splitAroundRegion(LREdit, UsedCands); 1313 return 0; 1314 } 1315 1316 1317 //===----------------------------------------------------------------------===// 1318 // Per-Block Splitting 1319 //===----------------------------------------------------------------------===// 1320 1321 /// tryBlockSplit - Split a global live range around every block with uses. This 1322 /// creates a lot of local live ranges, that will be split by tryLocalSplit if 1323 /// they don't allocate. 1324 unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1325 SmallVectorImpl<unsigned> &NewVRegs) { 1326 assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed"); 1327 unsigned Reg = VirtReg.reg; 1328 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg)); 1329 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1330 SE->reset(LREdit, SplitSpillMode); 1331 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks(); 1332 for (unsigned i = 0; i != UseBlocks.size(); ++i) { 1333 const SplitAnalysis::BlockInfo &BI = UseBlocks[i]; 1334 if (SA->shouldSplitSingleBlock(BI, SingleInstrs)) 1335 SE->splitSingleBlock(BI); 1336 } 1337 // No blocks were split. 1338 if (LREdit.empty()) 1339 return 0; 1340 1341 // We did split for some blocks. 1342 SmallVector<unsigned, 8> IntvMap; 1343 SE->finish(&IntvMap); 1344 1345 // Tell LiveDebugVariables about the new ranges. 1346 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS); 1347 1348 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1349 1350 // Sort out the new intervals created by splitting. The remainder interval 1351 // goes straight to spilling, the new local ranges get to stay RS_New. 1352 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) { 1353 LiveInterval &LI = LIS->getInterval(LREdit.get(i)); 1354 if (getStage(LI) == RS_New && IntvMap[i] == 0) 1355 setStage(LI, RS_Spill); 1356 } 1357 1358 if (VerifyEnabled) 1359 MF->verify(this, "After splitting live range around basic blocks"); 1360 return 0; 1361 } 1362 1363 1364 //===----------------------------------------------------------------------===// 1365 // Per-Instruction Splitting 1366 //===----------------------------------------------------------------------===// 1367 1368 /// tryInstructionSplit - Split a live range around individual instructions. 1369 /// This is normally not worthwhile since the spiller is doing essentially the 1370 /// same thing. However, when the live range is in a constrained register 1371 /// class, it may help to insert copies such that parts of the live range can 1372 /// be moved to a larger register class. 1373 /// 1374 /// This is similar to spilling to a larger register class. 1375 unsigned 1376 RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1377 SmallVectorImpl<unsigned> &NewVRegs) { 1378 // There is no point to this if there are no larger sub-classes. 1379 if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg))) 1380 return 0; 1381 1382 // Always enable split spill mode, since we're effectively spilling to a 1383 // register. 
1384 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this); 1385 SE->reset(LREdit, SplitEditor::SM_Size); 1386 1387 ArrayRef<SlotIndex> Uses = SA->getUseSlots(); 1388 if (Uses.size() <= 1) 1389 return 0; 1390 1391 DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n"); 1392 1393 // Split around every non-copy instruction. 1394 for (unsigned i = 0; i != Uses.size(); ++i) { 1395 if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i])) 1396 if (MI->isFullCopy()) { 1397 DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI); 1398 continue; 1399 } 1400 SE->openIntv(); 1401 SlotIndex SegStart = SE->enterIntvBefore(Uses[i]); 1402 SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]); 1403 SE->useIntv(SegStart, SegStop); 1404 } 1405 1406 if (LREdit.empty()) { 1407 DEBUG(dbgs() << "All uses were copies.\n"); 1408 return 0; 1409 } 1410 1411 SmallVector<unsigned, 8> IntvMap; 1412 SE->finish(&IntvMap); 1413 DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS); 1414 ExtraRegInfo.resize(MRI->getNumVirtRegs()); 1415 1416 // Assign all new registers to RS_Spill. This was the last chance. 1417 setStage(LREdit.begin(), LREdit.end(), RS_Spill); 1418 return 0; 1419 } 1420 1421 1422 //===----------------------------------------------------------------------===// 1423 // Local Splitting 1424 //===----------------------------------------------------------------------===// 1425 1426 1427 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted 1428 /// in order to use PhysReg between two entries in SA->UseSlots. 1429 /// 1430 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1]. 1431 /// 1432 void RAGreedy::calcGapWeights(unsigned PhysReg, 1433 SmallVectorImpl<float> &GapWeight) { 1434 assert(SA->getUseBlocks().size() == 1 && "Not a local interval"); 1435 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front(); 1436 ArrayRef<SlotIndex> Uses = SA->getUseSlots(); 1437 const unsigned NumGaps = Uses.size()-1; 1438 1439 // Start and end points for the interference check. 1440 SlotIndex StartIdx = 1441 BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr; 1442 SlotIndex StopIdx = 1443 BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr; 1444 1445 GapWeight.assign(NumGaps, 0.0f); 1446 1447 // Add interference from each overlapping register. 1448 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 1449 if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units) 1450 .checkInterference()) 1451 continue; 1452 1453 // We know that VirtReg is a continuous interval from FirstInstr to 1454 // LastInstr, so we don't need InterferenceQuery. 1455 // 1456 // Interference that overlaps an instruction is counted in both gaps 1457 // surrounding the instruction. The exception is interference before 1458 // StartIdx and after StopIdx. 1459 // 1460 LiveIntervalUnion::SegmentIter IntI = 1461 Matrix->getLiveUnions()[*Units] .find(StartIdx); 1462 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) { 1463 // Skip the gaps before IntI. 1464 while (Uses[Gap+1].getBoundaryIndex() < IntI.start()) 1465 if (++Gap == NumGaps) 1466 break; 1467 if (Gap == NumGaps) 1468 break; 1469 1470 // Update the gaps covered by IntI. 
1471 const float weight = IntI.value()->weight; 1472 for (; Gap != NumGaps; ++Gap) { 1473 GapWeight[Gap] = std::max(GapWeight[Gap], weight); 1474 if (Uses[Gap+1].getBaseIndex() >= IntI.stop()) 1475 break; 1476 } 1477 if (Gap == NumGaps) 1478 break; 1479 } 1480 } 1481 1482 // Add fixed interference. 1483 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) { 1484 const LiveRange &LR = LIS->getRegUnit(*Units); 1485 LiveRange::const_iterator I = LR.find(StartIdx); 1486 LiveRange::const_iterator E = LR.end(); 1487 1488 // Same loop as above. Mark any overlapped gaps as HUGE_VALF. 1489 for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) { 1490 while (Uses[Gap+1].getBoundaryIndex() < I->start) 1491 if (++Gap == NumGaps) 1492 break; 1493 if (Gap == NumGaps) 1494 break; 1495 1496 for (; Gap != NumGaps; ++Gap) { 1497 GapWeight[Gap] = llvm::huge_valf; 1498 if (Uses[Gap+1].getBaseIndex() >= I->end) 1499 break; 1500 } 1501 if (Gap == NumGaps) 1502 break; 1503 } 1504 } 1505 } 1506 1507 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only 1508 /// basic block. 1509 /// 1510 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order, 1511 SmallVectorImpl<unsigned> &NewVRegs) { 1512 assert(SA->getUseBlocks().size() == 1 && "Not a local interval"); 1513 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front(); 1514 1515 // Note that it is possible to have an interval that is live-in or live-out 1516 // while only covering a single block - A phi-def can use undef values from 1517 // predecessors, and the block could be a single-block loop. 1518 // We don't bother doing anything clever about such a case, we simply assume 1519 // that the interval is continuous from FirstInstr to LastInstr. We should 1520 // make sure that we don't do anything illegal to such an interval, though. 1521 1522 ArrayRef<SlotIndex> Uses = SA->getUseSlots(); 1523 if (Uses.size() <= 2) 1524 return 0; 1525 const unsigned NumGaps = Uses.size()-1; 1526 1527 DEBUG({ 1528 dbgs() << "tryLocalSplit: "; 1529 for (unsigned i = 0, e = Uses.size(); i != e; ++i) 1530 dbgs() << ' ' << Uses[i]; 1531 dbgs() << '\n'; 1532 }); 1533 1534 // If VirtReg is live across any register mask operands, compute a list of 1535 // gaps with register masks. 1536 SmallVector<unsigned, 8> RegMaskGaps; 1537 if (Matrix->checkRegMaskInterference(VirtReg)) { 1538 // Get regmask slots for the whole block. 1539 ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber()); 1540 DEBUG(dbgs() << RMS.size() << " regmasks in block:"); 1541 // Constrain to VirtReg's live range. 1542 unsigned ri = std::lower_bound(RMS.begin(), RMS.end(), 1543 Uses.front().getRegSlot()) - RMS.begin(); 1544 unsigned re = RMS.size(); 1545 for (unsigned i = 0; i != NumGaps && ri != re; ++i) { 1546 // Look for Uses[i] <= RMS <= Uses[i+1]. 1547 assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i])); 1548 if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri])) 1549 continue; 1550 // Skip a regmask on the same instruction as the last use. It doesn't 1551 // overlap the live range. 1552 if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps) 1553 break; 1554 DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]); 1555 RegMaskGaps.push_back(i); 1556 // Advance ri to the next gap. A regmask on one of the uses counts in 1557 // both gaps. 
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require that progress be made for ranges with getStage() == RS_Split2.
  //    All the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / BlockFrequency::getEntryFrequency());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = llvm::huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < llvm::huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
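        //
        // Editor's sketch of the estimate computed below: the weight is the
        // block frequency scaled by roughly one use per new gap plus one
        // (NewGaps + 1), normalized by the estimated interval size in slots:
        // the distance from Uses[SplitBefore] to Uses[SplitAfter], plus one
        // extra InstrDist for each end that remains live-in or live-out.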
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps; we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
// Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return PhysReg when VirtReg may be assigned, and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
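  // They are first offered to tryLocalSplit; if that produces neither an
  // assignment nor new virtual registers, tryInstructionSplit is the last
  // resort before spilling (its products are staged RS_Spill above).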
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable, which means
  // that tryRegionSplit won't be making progress. This check should be
  // replaced with an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
// Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
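  // At this point assignment, eviction, and splitting have all been tried.
  // The ranges created by the spiller are marked RS_Done below; if such a
  // range still can't be assigned on a later round, the RS_Done check above
  // returns ~0u instead of trying to split or spill it again.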
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}