//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "LiveRangeEdit.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "RegisterCoalescer.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopRanges.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  LiveStacks *LS;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  MachineLoopRanges *LoopRanges;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they
  // are dequeued. This improves performance because we can skip interference
  // checks that are unlikely to give any results. It also guarantees that the
  // live range splitting algorithm terminates, something that is otherwise
  // hard to ensure.
  enum LiveRangeStage {
    RS_New,    ///< Never seen before.
    RS_First,  ///< First time in the queue.
    RS_Second, ///< Second time in the queue.
    RS_Global, ///< Produced by global splitting.
    RS_Local,  ///< Produced by local splitting.
    RS_Spill   ///< Produced by spilling.
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    unsigned PhysReg;
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(unsigned Reg) {
      PhysReg = Reg;
      LiveBundles.clear();
      ActiveBlocks.clear();
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  void LRE_WillEraseInstruction(MachineInstr*);
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand, InterferenceCache::Cursor);
  float calcGlobalSplitCost(GlobalSplitCandidate&, InterferenceCache::Cursor);
  void splitAroundRegion(LiveInterval&, GlobalSplitCandidate&,
                         SmallVectorImpl<LiveInterval*>&);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
  "RS_New",
  "RS_First",
  "RS_Second",
  "RS_Global",
  "RS_Local",
  "RS_Spill"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
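// For example, once a candidate is accepted, a later candidate must be more
// than 2% better before it is allowed to replace it, so small rounding
// differences cannot flip a decision back and forth.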
const float Hysteresis = 0.98f;


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  if (StrongPHIElim)
    AU.addRequiredID(StrongPHIEliminationID);
  AU.addRequiredTransitive<RegisterCoalescer>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineLoopRanges>();
  AU.addPreserved<MachineLoopRanges>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) {
  // LRE itself will remove from SlotIndexes and parent basic block.
  VRM->RemoveMachineInstrFromMaps(MI);
}

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. Ensure that the new register gets
  // the same stage as the parent.
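  // Without this, the clone would restart at RS_New and could be split or
  // spilled again, weakening the termination guarantee described above.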
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (priority, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_First;

  if (ExtraRegInfo[Reg].Stage == RS_Second)
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated. Long ranges are allocated last so
    // they are split against realistic interference.
    Prio = (1u << 31) - Size;
  else {
    // Everything else is allocated in long->short order. Long ranges that
    // don't fit should be spilled ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) <= RS_Second;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was
  // never involved in an eviction before. If a cascade number was assigned,
  // deny evicting anything with the same or a newer cascade number. This
  // prevents infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Spill)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      bool Urgent = !VirtReg.isSpillable() && Intf->isSpillable();
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
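      // Taking a register away from its preferred physical register counts as
      // one extra broken hint in the eviction cost computed below.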
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param VirtReg Currently unassigned virtual register.
/// @param Order   Physregs to try.
/// @return        Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
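    // The hint is the best possible candidate, so there is no point in
    // scanning the rest of the allocation order.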
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastUse)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstUse)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
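    // Interference that reaches the block entry forces a spill there, while
    // interference that only starts inside the block merely makes spilling
    // preferable.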
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
}

void RAGreedy::growRegion(GlobalSplitCandidate &Cand,
                          InterferenceCache::Cursor Intf) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;
    addThroughConstraints(Intf,
                          ArrayRef<unsigned>(ActiveBlocks).slice(AddedTo));
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  const LiveInterval &LI = SA->getParent();
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut) {
      SlotIndex Start, Stop;
      tie(Start, Stop) = Indexes->getMBBRange(Number);
      LiveInterval::const_iterator I = LI.find(Start);
      assert(I != LI.end() && "Expected live-in value");
      // Is there a different live-out value? If so, we need an extra spill
      // instruction.
      if (I->end < Stop)
        Cost += SpillPlacer->getBlockFrequency(Number);
    }
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
                                    InterferenceCache::Cursor Intf) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Intf.moveToBlock(Number);
      if (Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in / live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed
/// by SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg,
                                 GlobalSplitCandidate &Cand,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  const BitVector &LiveBundles = Cand.LiveBundles;

  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(Cand.PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor Intf(IntfCache, Cand.PhysReg);
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  const unsigned MainIntv = SE->openIntv();

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    bool RegIn = BI.LiveIn &&
                 LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = BI.LiveOut &&
                  LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Create separate intervals for isolated blocks with multiple uses.
    //
    //     |---o---o---|    Enter and leave on the stack.
    //     ____-----____    Create local interval for uses.
    //
    //     |   o---o---|    Defined in block, leave on stack.
    //         -----____    Create local interval for uses.
    //
    //     |---o---x   |    Enter on stack, killed in block.
    //     ____-----        Create local interval for uses.
    //
    if (!RegIn && !RegOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (!BI.isOneInstr()) {
        SE->splitSingleBlock(BI);
        SE->selectIntv(MainIntv);
      }
      continue;
    }

    SlotIndex Start, Stop;
    tie(Start, Stop) = Indexes->getMBBRange(BI.MBB);
    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
                 << (BI.LiveIn ? (RegIn ? " => " : " -> ") : " ")
                 << "BB#" << BI.MBB->getNumber()
                 << (BI.LiveOut ? (RegOut ? " => " : " -> ") : " ")
                 << " EB#" << Bundles->getBundle(BI.MBB->getNumber(), 1)
                 << " [" << Start << ';'
                 << SA->getLastSplitPoint(BI.MBB->getNumber()) << '-' << Stop
                 << ") uses [" << BI.FirstUse << ';' << BI.LastUse
                 << ") intf [" << Intf.first() << ';' << Intf.last() << ')');

    // The interference interval should either be invalid or overlap MBB.
    assert((!Intf.hasInterference() || Intf.first() < Stop)
           && "Bad interference");
    assert((!Intf.hasInterference() || Intf.last() > Start)
           && "Bad interference");

    // We are now ready to decide where to split in the current block. There
    // are many variables guiding the decision:
    //
    // - RegIn / RegOut: The global splitting algorithm's decisions for our
    //   ingoing and outgoing bundles.
    //
    // - BI.LiveIn / BI.LiveOut: Is the live range live-in and/or live-out
    //   from this block.
    //
    // - Intf.hasInterference(): Is there interference in this block.
    //
    // - Intf.first() / Intf.last(): The range of interference.
    //
    // The live range should be split such that MainIntv is live-in when RegIn
    // is set, and live-out when RegOut is set. MainIntv should never overlap
    // the interference, and the stack interval should never have more than one
    // use per block.

    // No splits can be inserted after LastSplitPoint, overlap instead.
    SlotIndex LastSplitPoint = Stop;
    if (BI.LiveOut)
      LastSplitPoint = SA->getLastSplitPoint(BI.MBB->getNumber());

    // At this point, we know that either RegIn or RegOut is set. We dealt with
    // the all-stack case above.

    // Blocks without interference are relatively easy.
    if (!Intf.hasInterference()) {
      DEBUG(dbgs() << ", no interference.\n");
      SE->selectIntv(MainIntv);
      // The easiest case has MainIntv live through.
      //
      //     |---o---o---|    Live-in, live-out.
      //     =============    Use MainIntv everywhere.
      //
      SlotIndex From = Start, To = Stop;

      // Block entry. Reload before the first use if MainIntv is not live-in.
      //
      //     |---o--    Enter on stack.
      //     ____===    Reload before first use.
      //
      //     |   o--    Defined in block.
      //         ===    Use MainIntv from def.
      //
      if (!RegIn)
        From = SE->enterIntvBefore(BI.FirstUse);

      // Block exit. Handle cases where MainIntv is not live-out.
      if (!BI.LiveOut)
        //
        //     --x   |    Killed in block.
        //     ===        Use MainIntv up to kill.
        //
        To = SE->leaveIntvAfter(BI.LastUse);
      else if (!RegOut) {
        //
        //     --o---|    Live-out on stack.
        //     ===____    Use MainIntv up to last use, switch to stack.
        //
        //     -----o|    Live-out on stack, last use after last split point.
        //     ======     Extend MainIntv to last use, overlapping.
        //         \____  Copy to stack interval before last split point.
        //
        if (BI.LastUse < LastSplitPoint)
          To = SE->leaveIntvAfter(BI.LastUse);
        else {
          // The last use is after the last split point, it is probably an
          // indirect branch.
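          // No instructions can be inserted after the last split point, so the
          // copy to the stack interval must be placed before it, and MainIntv
          // is extended to overlap the final use.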
          To = SE->leaveIntvBefore(LastSplitPoint);
          // Run a double interval from the split to the last use. This makes
          // it possible to spill the complement without affecting the indirect
          // branch.
          SE->overlapIntv(To, BI.LastUse);
        }
      }

      // Paint in MainIntv liveness for this block.
      SE->useIntv(From, To);
      continue;
    }

    // We are now looking at a block with interference, and we know that either
    // RegIn or RegOut is set.
    assert(Intf.hasInterference() && (RegIn || RegOut) && "Bad invariant");

    // If the live range is not live through the block, it is possible that the
    // interference doesn't even overlap. Deal with those cases first. Since
    // no copy instructions are required, we can tolerate interference starting
    // or ending at the same instruction that kills or defines our live range.

    // Live-in, killed before interference.
    //
    //               ~~~    Interference after kill.
    //     |---o---x   |    Killed in block.
    //     =========        Use MainIntv everywhere.
    //
    if (RegIn && !BI.LiveOut && BI.LastUse <= Intf.first()) {
      DEBUG(dbgs() << ", live-in, killed before interference.\n");
      SE->selectIntv(MainIntv);
      SlotIndex To = SE->leaveIntvAfter(BI.LastUse);
      SE->useIntv(Start, To);
      continue;
    }

    // Live-out, defined after interference.
    //
    //     ~~~              Interference before def.
    //     |   o---o---|    Defined in block.
    //         =========    Use MainIntv everywhere.
    //
    if (RegOut && !BI.LiveIn && BI.FirstUse >= Intf.last()) {
      DEBUG(dbgs() << ", live-out, defined after interference.\n");
      SE->selectIntv(MainIntv);
      SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
      SE->useIntv(From, Stop);
      continue;
    }

    // The interference is now known to overlap the live range, but it may
    // still be easy to avoid if all the interference is on one side of the
    // uses, and we enter or leave on the stack.

    // Live-out on stack, interference after last use.
    //
    //               ~~~    Interference after last use.
    //     |---o---o---|    Live-out on stack.
    //     =========____    Leave MainIntv after last use.
    //
    //                 ~    Interference after last use.
    //     |---o---o--o|    Live-out on stack, late last use.
    //     ============     Copy to stack after LSP, overlap MainIntv.
    //            \_____    Stack interval is live-out.
    //
    if (!RegOut && Intf.first() > BI.LastUse.getBoundaryIndex()) {
      assert(RegIn && "Stack-in, stack-out should already be handled");
      if (BI.LastUse < LastSplitPoint) {
        DEBUG(dbgs() << ", live-in, stack-out, interference after last use.\n");
        SE->selectIntv(MainIntv);
        SlotIndex To = SE->leaveIntvAfter(BI.LastUse);
        assert(To <= Intf.first() && "Expected to avoid interference");
        SE->useIntv(Start, To);
      } else {
        DEBUG(dbgs() << ", live-in, stack-out, avoid last split point\n");
        SE->selectIntv(MainIntv);
        SlotIndex To = SE->leaveIntvBefore(LastSplitPoint);
        assert(To <= Intf.first() && "Expected to avoid interference");
        SE->overlapIntv(To, BI.LastUse);
        SE->useIntv(Start, To);
      }
      continue;
    }

    // Live-in on stack, interference before first use.
    //
    //     ~~~              Interference before first use.
    //     |---o---o---|    Live-in on stack.
    //     ____=========    Enter MainIntv before first use.
    //
    if (!RegIn && Intf.last() < BI.FirstUse.getBaseIndex()) {
      assert(RegOut && "Stack-in, stack-out should already be handled");
      DEBUG(dbgs() << ", stack-in, interference before first use.\n");
      SE->selectIntv(MainIntv);
      SlotIndex From = SE->enterIntvBefore(BI.FirstUse);
      assert(From >= Intf.last() && "Expected to avoid interference");
      SE->useIntv(From, Stop);
      continue;
    }

    // The interference is overlapping somewhere we wanted to use MainIntv.
    // That means we need to create a local interval that can be allocated a
    // different register.
    unsigned LocalIntv = SE->openIntv();
    DEBUG(dbgs() << ", creating local interval " << LocalIntv << ".\n");

    // We may be creating copies directly between MainIntv and LocalIntv,
    // bypassing the stack interval. When we do that, we should never use the
    // leaveIntv* methods as they define values in the stack interval. By
    // starting from the end of the block and working our way backwards, we can
    // get by with only enterIntv* methods.
    //
    // When selecting split points, we generally try to maximize the stack
    // interval as long as it contains no uses, maximize the main interval as
    // long as it doesn't overlap interference, and minimize the local interval
    // that we don't know how to allocate yet.

    // Handle the block exit, set Pos to the first handled slot.
    SlotIndex Pos = BI.LastUse;
    if (RegOut) {
      assert(Intf.last() < LastSplitPoint && "Cannot be live-out in register");
      // Create a snippet of MainIntv that is live-out.
      //
      //     ~~~        Interference overlapping uses.
      //     --o---|    Live-out in MainIntv.
      //     ----===    Switch from LocalIntv to MainIntv after interference.
      //
      SE->selectIntv(MainIntv);
      Pos = SE->enterIntvAfter(Intf.last());
      assert(Pos >= Intf.last() && "Expected to avoid interference");
      SE->useIntv(Pos, Stop);
      SE->selectIntv(LocalIntv);
    } else if (BI.LiveOut) {
      if (BI.LastUse < LastSplitPoint) {
        // Live-out on the stack.
        //
        //     ~~~        Interference overlapping uses.
        //     --o---|    Live-out on stack.
        //     ---____    Switch from LocalIntv to stack after last use.
        //
        Pos = SE->leaveIntvAfter(BI.LastUse);
      } else {
        // Live-out on the stack, last use after last split point.
        //
        //     ~~~        Interference overlapping uses.
        //     --o--o|    Live-out on stack, late use.
        //     ------     Copy to stack before LSP, overlap LocalIntv.
        //         \__
        //
        Pos = SE->leaveIntvBefore(LastSplitPoint);
        // We need to overlap LocalIntv so it can reach LastUse.
        SE->overlapIntv(Pos, BI.LastUse);
      }
    }

    // When not live-out, leave Pos at LastUse. We have handled everything from
    // Pos to Stop. Find the starting point for LocalIntv.
    assert(SE->currentIntv() == LocalIntv && "Expecting local interval");

    if (RegIn) {
      assert(Start < Intf.first() && "Cannot be live-in with interference");
      // Live-in in MainIntv, only use LocalIntv for interference.
      //
      //     ~~~        Interference overlapping uses.
      //     |---o--    Live-in in MainIntv.
      //     ====---    Switch to LocalIntv before interference.
      //
      SlotIndex Switch = SE->enterIntvBefore(std::min(Pos, Intf.first()));
      assert(Switch <= Intf.first() && "Expected to avoid interference");
      SE->useIntv(Switch, Pos);
      SE->selectIntv(MainIntv);
      SE->useIntv(Start, Switch);
    } else {
      // Live-in on stack, enter LocalIntv before first use.
      //
      //     ~~~        Interference overlapping uses.
      //     |---o--    Live-in on stack.
      //     ____---    Reload to LocalIntv before interference.
      //
      // Defined in block.
      //
      //     ~~~        Interference overlapping uses.
      //     |   o--    Defined in block.
      //         ---    Begin LocalIntv at first use.
      //
      SlotIndex Switch = SE->enterIntvBefore(std::min(Pos, BI.FirstUse));
      SE->useIntv(Switch, Pos);
    }
  }

  // Handle live-through blocks.
  SE->selectIntv(MainIntv);
  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    DEBUG(dbgs() << "Live through BB#" << Number << '\n');
    if (RegIn && RegOut) {
      Intf.moveToBlock(Number);
      if (!Intf.hasInterference()) {
        SE->useIntv(Indexes->getMBBStartIdx(Number),
                    Indexes->getMBBEndIdx(Number));
        continue;
      }
    }
    MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
    if (RegIn)
      SE->leaveIntvAtTop(*MBB);
    if (RegOut)
      SE->enterIntvAtEnd(*MBB);
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Global);
      continue;
    }

    // Main interval. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] == MainIntv) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Global);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals
    // created for blocks with multiple uses, and anything created by DCE.
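    // They keep the RS_New stage and compete with the full set of allocation
    // strategies when they are dequeued.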
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  float BestCost = Hysteresis * calcSpillCost();
  DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  const unsigned NoCand = ~0u;
  unsigned BestCand = NoCand;

  Order.rewind();
  for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
    if (GlobalCand.size() <= Cand)
      GlobalCand.resize(Cand+1);
    GlobalCand[Cand].reset(PhysReg);

    SpillPlacer->prepare(GlobalCand[Cand].LiveBundles);
    float Cost;
    InterferenceCache::Cursor Intf(IntfCache, PhysReg);
    if (!addSplitConstraints(Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(GlobalCand[Cand], Intf);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!GlobalCand[Cand].LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(GlobalCand[Cand], Intf);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = GlobalCand[Cand].LiveBundles.find_first(); i>=0;
           i = GlobalCand[Cand].LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = Cand;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
  }

  if (BestCand == NoCand)
    return 0;

  splitAroundRegion(VirtReg, GlobalCand[BestCand], NewVRegs);
  return 0;
}


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstUse to LastUse. We should make
  // sure that we don't do anything illegal to such an interval, though.

  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << SA->UseSlots[i];
    dbgs() << '\n';
  });

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Local. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() >= RS_Local. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Local,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Local;

  // Best split candidate.
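  // BestBefore/BestAfter bracket the uses that stay in the new interval;
  // BestBefore == NumGaps is a sentinel meaning no split has been found yet.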
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
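  // BestBefore still holds the NumGaps sentinel when the loop above never
  // recorded a profitable, legal split.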
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Local so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Local);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return PhysReg when VirtReg may be assigned, and/or new live ranges in
/// NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  if (getStage(VirtReg) >= RS_Global)
    return 0;

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced
  // with an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks.
  unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Then isolate blocks with multiple uses.
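  // Each block with multiple uses gets its own local live range; the new
  // ranges are marked RS_Global below so they are not region split again.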
  SplitAnalysis::BlockPtrSet Blocks;
  if (SA->getMultiUseBlocks(Blocks)) {
    LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
    SE->reset(LREdit);
    SE->splitSingleBlocks(Blocks);
    setStage(NewVRegs.begin(), NewVRegs.end(), RS_Global);
    if (VerifyEnabled)
      MF->verify(this, "After splitting live range around basic blocks");
  }

  // Don't assign any physregs.
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Second ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Second)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage == RS_First) {
    setStage(VirtReg, RS_Second);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Spill || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  LoopRanges = &getAnalysis<MachineLoopRanges>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  DebugVars->emitDebugValues(VRM);

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  return true;
}