//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  OwningPtr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
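  // Cascade numbers are handed out in increasing order; a live range may
  // normally only evict interference with a strictly smaller cascade number.
  // See canEvictInterference().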
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they
  // are dequeued. This improves performance because we can skip interference
  // checks that are unlikely to give any results. It also guarantees that the
  // live range splitting algorithm terminates, something that is otherwise
  // hard to ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to
    /// make progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

#ifndef NDEBUG
  static const char *const StageName[];
#endif

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = *Begin;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(): BrokenHints(0), MaxWeight(0) {}

    bool isMax() const { return BrokenHints == ~0u; }

    void setMax() { BrokenHints = ~0u; }

    void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // splitting state.
  OwningPtr<SplitAnalysis> SA;
  OwningPtr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
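    // Returns the number of bundles that were newly assigned.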
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum LLVM_ENUM_INT_TYPE(unsigned) { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<unsigned>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<unsigned>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<unsigned>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<unsigned>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<unsigned>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<unsigned>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<unsigned>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
// LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // The clone gets the same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
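  // The priority is a single max-heap word: bit 31 is set for everything
  // except deferred RS_Split ranges, bit 30 boosts ranges with a known
  // register hint, bit 29 ranks global ranges above local ones, and the low
  // bits hold the range size or the instruction distance from the end of the
  // function.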
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    if (ExtraRegInfo[Reg].Stage == RS_Assign && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
    } else {
      // Allocate global and split ranges in long->short order. Long ranges
      // that don't fit should be spilled (or split) ASAP so they don't create
      // interference. Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
// Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<unsigned> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost;
      MaxCost.setBrokenHints(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
// Interference eviction
//===----------------------------------------------------------------------===//

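/// canReassign - Check if VirtReg could be assigned to a physical register
/// other than PrevReg without interference. Returns that register, or 0 when
/// none exists.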
unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
                 << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
                 << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  if (A.weight > B.weight) {
    DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
    return true;
  }
  return false;
}

/// canEvictInterference - Return true if all interferences between VirtReg
/// and PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to
      // suboptimal coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          !canReassign(*Intf, PhysReg)) {
        return false;
      }
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf->reg);
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost;
  BestCost.setMax();
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

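  // Scan the allocation order, remembering the candidate with the cheapest
  // eviction cost seen so far. Each success tightens BestCost, so later
  // candidates must be strictly cheaper to be kept.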
  Order.rewind();
  while (unsigned PhysReg = Order.nextWithDups(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
// Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
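  // Constraints and links are forwarded to SpillPlacer in fixed-size batches
  // of GroupSize entries; any partial batch left over is flushed after the
  // loop.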
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

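/// growRegion - Iteratively add live-through blocks to Cand's region while
/// SpillPlacer keeps reporting new bundles that prefer a register.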
void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range
/// in SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
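  // Wherever the placement solution disagrees with a block's preferred
  // constraint at its entry or exit, one extra instruction is needed there,
  // weighted by the block frequency.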
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
      }
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

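  // Finish the split; IntvMap records, for each register created by LREdit,
  // which of the editor's intervals it came from (0 is the remainder).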
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = LIS->getInterval(LREdit.get(i));

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<unsigned> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  BlockFrequency BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

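  // Grow a split region for every physreg in the allocation order and keep
  // the candidate with the cheapest combined static and global cost.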
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

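  // The new ranges are returned in NewVRegs; returning 0 sends them through
  // the priority queue again rather than assigning VirtReg directly.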
  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
// Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses.
/// This creates a lot of local live ranges that will be split by tryLocalSplit
/// if they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = LIS->getInterval(LREdit.get(i));
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
// Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<unsigned> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}


//===----------------------------------------------------------------------===//
// Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveRange &LR = LIS->getRegUnit(*Units);
    LiveRange::const_iterator I = LR.find(StartIdx);
    LiveRange::const_iterator E = LR.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = llvm::huge_valf;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / BlockFrequency::getEntryFrequency());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = llvm::huge_valf;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < llvm::huge_valf) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
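        //
        // Worked estimate (illustrative numbers): a live-in, live-out split
        // spanning uses SplitBefore..SplitBefore+2 gives
        // NewGaps = 1 + 2 + 1 = 4, i.e. NewGaps + 1 = 5 register operations
        // (the three original instructions plus the two split copies), and
        // the size is the use span padded by one InstrDist per open end.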
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<unsigned> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
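  // (Illustrative summary of the dispatch below: single-block intervals try
  // tryLocalSplit and fall back to per-instruction splitting; multi-block
  // intervals try region splitting first, unless already at RS_Split2, and
  // then per-block splitting.)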
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<unsigned> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(VirtReg.reg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
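  // (Illustrative note: the inline spiller replaces VirtReg's uses with stack
  // loads and stores; the short-lived registers it creates are staged RS_Done
  // below, so they must be directly assignable, and a failure to assign them
  // aborts compilation rather than triggering another split/spill round.)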
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  calculateSpillWeightsAndHints(*LIS, mf, *Loops, *MBFI);

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}