//===- RegAllocFast.cpp - A fast register allocator for debug code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This register allocator allocates registers to a basic block at a
/// time, attempting to keep values in registers and reusing registers as
/// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "regalloc"

STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads, "Number of loads added");
STATISTIC(NumCoalesced, "Number of copies coalesced");

static RegisterRegAlloc
  fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator);

namespace {

class RegAllocFast : public MachineFunctionPass {
public:
  static char ID;

  RegAllocFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1) {}

private:
  MachineFrameInfo *MFI;
  MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  RegisterClassInfo RegClassInfo;

  /// Basic block currently being allocated.
  MachineBasicBlock *MBB;

  /// Maps virtual regs to the frame index where these values are spilled.
  IndexedMap<int, VirtReg2IndexFunctor> StackSlotForVirtReg;

  /// Everything we know about a live virtual register.
  struct LiveReg {
    MachineInstr *LastUse = nullptr; ///< Last instr to use reg.
    Register VirtReg;                ///< Virtual register number.
    MCPhysReg PhysReg = 0;           ///< Currently held here.
    unsigned short LastOpNum = 0;    ///< OpNum on LastUse.
    bool Dirty = false;              ///< Register needs spill.

    explicit LiveReg(Register VirtReg) : VirtReg(VirtReg) {}

    unsigned getSparseSetIndex() const {
      return Register::virtReg2Index(VirtReg);
    }
  };

  using LiveRegMap = SparseSet<LiveReg>;
  /// This map contains entries for each virtual register that is currently
  /// available in a physical register.
  LiveRegMap LiveVirtRegs;

  DenseMap<unsigned, SmallVector<MachineInstr *, 2>> LiveDbgValueMap;

  /// Has a bit set for every virtual register for which it was determined
  /// that it is alive across blocks.
  BitVector MayLiveAcrossBlocks;

  /// State of a register unit.
  enum RegUnitState {
    /// A free register is not currently in use and can be allocated
    /// immediately without checking aliases.
    regFree,

    /// A reserved register has been assigned explicitly (e.g., setting up a
    /// call parameter), and it remains reserved until it is used.
    regReserved

    /// A register state may also be a virtual register number, indicating
    /// that the physical register is currently allocated to a virtual
    /// register. In that case, LiveVirtRegs contains the inverse mapping.
  };

  /// Maps each physical register to a RegUnitState enum or virtual register.
  std::vector<unsigned> RegUnitStates;

  SmallVector<Register, 16> VirtDead;
  SmallVector<MachineInstr *, 32> Coalesced;

  using RegUnitSet = SparseSet<uint16_t, identity<uint16_t>>;
  /// Set of register units that are used in the current instruction, and so
  /// cannot be allocated.
  RegUnitSet UsedInInstr;

  void setPhysRegState(MCPhysReg PhysReg, unsigned NewState);

  /// Mark a physreg as used in this instruction.
  void markRegUsedInInstr(MCPhysReg PhysReg) {
    for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
      UsedInInstr.insert(*Units);
  }
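
  // Illustrative sketch (target-dependent register names, for intuition
  // only): on a target where $ax is composed of the register units of $al
  // and $ah, marking $ax used in the current instruction inserts both units,
  // so a later query for the overlapping $al sees the conflict even though
  // $al itself was never mentioned:
  //
  //   markRegUsedInInstr($ax);  // inserts units {al, ah} into UsedInInstr
  //   isRegUsedInInstr($al);    // true: unit al is in the set
  //   isRegUsedInInstr($bl);    // false: unit bl was never inserted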

  /// Check if a physreg or any of its aliases are used in this instruction.
  bool isRegUsedInInstr(MCPhysReg PhysReg) const {
    for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
      if (UsedInInstr.count(*Units))
        return true;
    return false;
  }

  enum : unsigned {
    spillClean = 50,
    spillDirty = 100,
    spillPrefBonus = 20,
    spillImpossible = ~0u
  };

public:
  StringRef getPassName() const override { return "Fast Register Allocator"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoPHIs);
  }

  MachineFunctionProperties getSetProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  bool runOnMachineFunction(MachineFunction &MF) override;

  void allocateBasicBlock(MachineBasicBlock &MBB);
  void allocateInstruction(MachineInstr &MI);
  void handleDebugValue(MachineInstr &MI);
  void handleThroughOperands(MachineInstr &MI,
                             SmallVectorImpl<Register> &VirtDead);
  bool isLastUseOfLocalReg(const MachineOperand &MO) const;

  void addKillFlag(const LiveReg &LRI);
  bool verifyRegStateMapping(const LiveReg &LR) const;
  void killVirtReg(LiveReg &LR);
  void killVirtReg(Register VirtReg);
  void spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR);
  void spillVirtReg(MachineBasicBlock::iterator MI, Register VirtReg);

  void usePhysReg(MachineOperand &MO);
  void definePhysReg(MachineBasicBlock::iterator MI, MCPhysReg PhysReg,
                     unsigned NewState);
  unsigned calcSpillCost(MCPhysReg PhysReg) const;
  void assignVirtToPhysReg(LiveReg &, MCPhysReg PhysReg);

  LiveRegMap::iterator findLiveVirtReg(Register VirtReg) {
    return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
  }

  LiveRegMap::const_iterator findLiveVirtReg(Register VirtReg) const {
    return LiveVirtRegs.find(Register::virtReg2Index(VirtReg));
  }

  void allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint);
  void allocVirtRegUndef(MachineOperand &MO);
  MCPhysReg defineVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
                          Register Hint);
  LiveReg &reloadVirtReg(MachineInstr &MI, unsigned OpNum, Register VirtReg,
                         Register Hint);
  void spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut);
  bool setPhysReg(MachineInstr &MI, MachineOperand &MO, MCPhysReg PhysReg);

  Register traceCopies(Register VirtReg) const;
  Register traceCopyChain(Register Reg) const;

  int getStackSpaceFor(Register VirtReg);
  void spill(MachineBasicBlock::iterator Before, Register VirtReg,
             MCPhysReg AssignedReg, bool Kill);
  void reload(MachineBasicBlock::iterator Before, Register VirtReg,
              MCPhysReg PhysReg);

  bool mayLiveOut(Register VirtReg);
  bool mayLiveIn(Register VirtReg);

  void dumpState() const;
};

} // end anonymous namespace

char RegAllocFast::ID = 0;

INITIALIZE_PASS(RegAllocFast, "regallocfast", "Fast Register Allocator", false,
                false)

void RegAllocFast::setPhysRegState(MCPhysReg PhysReg, unsigned NewState) {
  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI)
    RegUnitStates[*UI] = NewState;
}
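
// A sketch of the RegUnitStates encoding used throughout this file: the
// enumerators regFree (0) and regReserved (1) can never collide with a
// virtual register number because virtual registers have the high bit set
// (see Register::isVirtualRegister), so any other stored value is read back
// directly as the owning virtual register:
//
//   switch (unsigned VirtReg = RegUnitStates[Unit]) {
//   case regFree:     break;         // unit is unassigned
//   case regReserved: break;         // unit is blocked, e.g. by a live-in
//   default:          use(VirtReg);  // unit belongs to this virtual register
//   }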

/// This allocates space for the specified virtual register to be held on the
/// stack.
int RegAllocFast::getStackSpaceFor(Register VirtReg) {
  // Find the location Reg would belong...
  int SS = StackSlotForVirtReg[VirtReg];
  // Already has space allocated?
  if (SS != -1)
    return SS;

  // Allocate a new stack object for this spill location...
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  unsigned Size = TRI->getSpillSize(RC);
  Align Alignment = TRI->getSpillAlign(RC);
  int FrameIdx = MFI->CreateSpillStackObject(Size, Alignment);

  // Assign the slot.
  StackSlotForVirtReg[VirtReg] = FrameIdx;
  return FrameIdx;
}

/// Returns false if \p VirtReg is known to not live out of the current block.
bool RegAllocFast::mayLiveOut(Register VirtReg) {
  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg))) {
    // Cannot be live-out if there are no successors.
    return !MBB->succ_empty();
  }

  // If this block loops back to itself, it would be necessary to check whether
  // the use comes after the def.
  if (MBB->isSuccessor(MBB)) {
    MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
    return true;
  }

  // See if the first \p Limit uses of the register are all in the current
  // block.
  static const unsigned Limit = 8;
  unsigned C = 0;
  for (const MachineInstr &UseInst : MRI->reg_nodbg_instructions(VirtReg)) {
    if (UseInst.getParent() != MBB || ++C >= Limit) {
      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
      // Cannot be live-out if there are no successors.
      return !MBB->succ_empty();
    }
  }

  return false;
}

/// Returns false if \p VirtReg is known to not be live into the current block.
bool RegAllocFast::mayLiveIn(Register VirtReg) {
  if (MayLiveAcrossBlocks.test(Register::virtReg2Index(VirtReg)))
    return !MBB->pred_empty();

  // See if the first \p Limit defs of the register are all in the current
  // block.
  static const unsigned Limit = 8;
  unsigned C = 0;
  for (const MachineInstr &DefInst : MRI->def_instructions(VirtReg)) {
    if (DefInst.getParent() != MBB || ++C >= Limit) {
      MayLiveAcrossBlocks.set(Register::virtReg2Index(VirtReg));
      return !MBB->pred_empty();
    }
  }

  return false;
}
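
// Worked example of the Limit heuristic above (hypothetical MIR with made-up
// opcodes): with Limit = 8, a virtual register whose few uses all sit in the
// current block is proven block-local and need not be spilled at block end:
//
//   bb.1:
//     %v = ADD %a, %b     ; def in bb.1
//     STORE %v, %ptr      ; use 1, same block
//     %w = MUL %v, %v     ; uses 2 and 3, same block => mayLiveOut == false
//
// As soon as a use in another block (or more than Limit operands) is seen,
// the register is conservatively marked in MayLiveAcrossBlocks and stays
// marked for the rest of the function.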

/// Insert spill instruction for \p AssignedReg before \p Before. Update
/// DBG_VALUEs with \p VirtReg operands with the stack slot.
void RegAllocFast::spill(MachineBasicBlock::iterator Before, Register VirtReg,
                         MCPhysReg AssignedReg, bool Kill) {
  LLVM_DEBUG(dbgs() << "Spilling " << printReg(VirtReg, TRI)
                    << " in " << printReg(AssignedReg, TRI));
  int FI = getStackSpaceFor(VirtReg);
  LLVM_DEBUG(dbgs() << " to stack slot #" << FI << '\n');

  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  TII->storeRegToStackSlot(*MBB, Before, AssignedReg, Kill, FI, &RC, TRI);
  ++NumStores;

  // If this register is used by DBG_VALUE then insert new DBG_VALUE to
  // identify the spilled location as the place to find the corresponding
  // variable's value.
  SmallVectorImpl<MachineInstr *> &LRIDbgValues = LiveDbgValueMap[VirtReg];
  for (MachineInstr *DBG : LRIDbgValues) {
    MachineInstr *NewDV = buildDbgValueForSpill(*MBB, Before, *DBG, FI);
    assert(NewDV->getParent() == MBB && "dangling parent pointer");
    (void)NewDV;
    LLVM_DEBUG(dbgs() << "Inserting debug info due to spill:\n" << *NewDV);
  }
  // Now that this register is spilled, there should not be any DBG_VALUE
  // pointing to this register because they all point to the spilled value
  // now.
  LRIDbgValues.clear();
}

/// Insert reload instruction for \p PhysReg before \p Before.
void RegAllocFast::reload(MachineBasicBlock::iterator Before, Register VirtReg,
                          MCPhysReg PhysReg) {
  LLVM_DEBUG(dbgs() << "Reloading " << printReg(VirtReg, TRI) << " into "
                    << printReg(PhysReg, TRI) << '\n');
  int FI = getStackSpaceFor(VirtReg);
  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  TII->loadRegFromStackSlot(*MBB, Before, PhysReg, FI, &RC, TRI);
  ++NumLoads;
}

/// Return true if MO is the only remaining reference to its virtual register,
/// and it is guaranteed to be a block-local register.
bool RegAllocFast::isLastUseOfLocalReg(const MachineOperand &MO) const {
  // If the register has ever been spilled or reloaded, we conservatively
  // assume it is a global register used in multiple blocks.
  if (StackSlotForVirtReg[MO.getReg()] != -1)
    return false;

  // Check that the use/def chain has exactly one operand - MO.
  MachineRegisterInfo::reg_nodbg_iterator I = MRI->reg_nodbg_begin(MO.getReg());
  if (&*I != &MO)
    return false;
  return ++I == MRI->reg_nodbg_end();
}

/// Set kill flags on last use of a virtual register.
void RegAllocFast::addKillFlag(const LiveReg &LR) {
  if (!LR.LastUse) return;
  MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum);
  if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) {
    if (MO.getReg() == LR.PhysReg)
      MO.setIsKill();
    // else, don't do anything: we are probably redefining a subreg of this
    // register, and given that we don't track which lanes are actually dead,
    // we cannot insert a kill flag here. Otherwise we may end up in a
    // situation like this:
    // ... = (MO) physreg:sub1, implicit killed physreg
    // ... <== Here we would allow later passes to reuse physreg:sub1
    //         which is potentially wrong.
    // LR:sub0 = ...
    // ... = LR.sub1 <== This is going to use physreg:sub1
  }
}

bool RegAllocFast::verifyRegStateMapping(const LiveReg &LR) const {
  for (MCRegUnitIterator UI(LR.PhysReg, TRI); UI.isValid(); ++UI) {
    if (RegUnitStates[*UI] != LR.VirtReg)
      return false;
  }

  return true;
}

/// Mark virtreg as no longer available.
void RegAllocFast::killVirtReg(LiveReg &LR) {
  assert(verifyRegStateMapping(LR) && "Broken RegState mapping");
  addKillFlag(LR);
  MCPhysReg PhysReg = LR.PhysReg;
  setPhysRegState(PhysReg, regFree);
  LR.PhysReg = 0;
}

/// Mark virtreg as no longer available.
void RegAllocFast::killVirtReg(Register VirtReg) {
  assert(Register::isVirtualRegister(VirtReg) &&
         "killVirtReg needs a virtual register");
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg)
    killVirtReg(*LRI);
}
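
// Spill and reload bracket a value's trip through memory. For a dirty %v
// assigned to $r0 with stack slot %stack.2, the spill() and reload() helpers
// above emit, schematically (actual store/load opcodes are target-specific):
//
//   ST $r0, %stack.2      ; inserted by spill() at the spill point
//   ...
//   $r0 = LD %stack.2     ; inserted by reload() before the next use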

/// This method spills the value specified by VirtReg into the corresponding
/// stack slot if needed.
void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI,
                                Register VirtReg) {
  assert(Register::isVirtualRegister(VirtReg) &&
         "Spilling a physical register is illegal!");
  LiveRegMap::iterator LRI = findLiveVirtReg(VirtReg);
  assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
         "Spilling unmapped virtual register");
  spillVirtReg(MI, *LRI);
}

/// Do the actual work of spilling.
void RegAllocFast::spillVirtReg(MachineBasicBlock::iterator MI, LiveReg &LR) {
  assert(verifyRegStateMapping(LR) && "Broken RegState mapping");

  MCPhysReg PhysReg = LR.PhysReg;

  if (LR.Dirty) {
    // If this physreg is used by the instruction, we want to kill it on the
    // instruction, not on the spill.
    bool SpillKill = MachineBasicBlock::iterator(LR.LastUse) != MI;
    LR.Dirty = false;

    spill(MI, LR.VirtReg, PhysReg, SpillKill);

    if (SpillKill)
      LR.LastUse = nullptr; // Don't kill register again
  }
  killVirtReg(LR);
}

/// Spill all dirty virtregs without killing them.
void RegAllocFast::spillAll(MachineBasicBlock::iterator MI, bool OnlyLiveOut) {
  if (LiveVirtRegs.empty())
    return;
  // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order
  // of spilling here is deterministic, if arbitrary.
  for (LiveReg &LR : LiveVirtRegs) {
    if (!LR.PhysReg)
      continue;
    if (OnlyLiveOut && !mayLiveOut(LR.VirtReg))
      continue;
    spillVirtReg(MI, LR);
  }
  LiveVirtRegs.clear();
}

/// Handle the direct use of a physical register. Check that the register is
/// not used by a virtreg. Kill the physreg, marking it free. This may add
/// implicit kills to MO->getParent() and invalidate MO.
void RegAllocFast::usePhysReg(MachineOperand &MO) {
  // Ignore undef uses.
  if (MO.isUndef())
    return;

  Register PhysReg = MO.getReg();
  assert(PhysReg.isPhysical() && "Bad usePhysReg operand");

  markRegUsedInInstr(PhysReg);

  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
    switch (RegUnitStates[*UI]) {
    case regReserved:
      RegUnitStates[*UI] = regFree;
      LLVM_FALLTHROUGH;
    case regFree:
      break;
    default:
      llvm_unreachable("Unexpected reg unit state");
    }
  }

  // All aliases are disabled, bring register into working set.
  setPhysRegState(PhysReg, regFree);
  MO.setIsKill();
}

/// Mark PhysReg as reserved or free after spilling any virtregs. This is very
/// similar to defineVirtReg except the physreg is reserved instead of
/// allocated.
void RegAllocFast::definePhysReg(MachineBasicBlock::iterator MI,
                                 MCPhysReg PhysReg, unsigned NewState) {
  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
    switch (unsigned VirtReg = RegUnitStates[*UI]) {
    default:
      spillVirtReg(MI, VirtReg);
      break;
    case regFree:
    case regReserved:
      break;
    }
  }

  markRegUsedInInstr(PhysReg);
  setPhysRegState(PhysReg, NewState);
}
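
// Worked example of the cost model evaluated below: evicting a clean value
// (one whose stack slot is already up to date) costs spillClean = 50,
// evicting a dirty value costs spillDirty = 100, and a register named by a
// hint gets spillPrefBonus = 20 subtracted from its cost in allocVirtReg.
// So a hinted register holding a clean value scores 50 - 20 = 30 and beats
// an unhinted clean candidate at 50, while any genuinely free register
// (cost 0) is taken immediately.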

/// Return the cost of spilling and clearing out PhysReg and its aliases so
/// that it is free for allocation. Returns 0 when PhysReg is free or disabled
/// with all aliases disabled - it can be allocated directly.
/// \returns spillImpossible when PhysReg or an alias can't be spilled.
unsigned RegAllocFast::calcSpillCost(MCPhysReg PhysReg) const {
  if (isRegUsedInInstr(PhysReg)) {
    LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI)
                      << " is already used in instr.\n");
    return spillImpossible;
  }

  for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
    switch (unsigned VirtReg = RegUnitStates[*UI]) {
    case regFree:
      break;
    case regReserved:
      LLVM_DEBUG(dbgs() << printReg(VirtReg, TRI) << " corresponding "
                        << printReg(PhysReg, TRI) << " is reserved already.\n");
      return spillImpossible;
    default: {
      LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
      assert(LRI != LiveVirtRegs.end() && LRI->PhysReg &&
             "Missing VirtReg entry");
      return LRI->Dirty ? spillDirty : spillClean;
    }
    }
  }
  return 0;
}

/// This method updates local state so that we know that PhysReg is the
/// proper container for VirtReg now. The physical register must not be used
/// for anything else when this is called.
void RegAllocFast::assignVirtToPhysReg(LiveReg &LR, MCPhysReg PhysReg) {
  Register VirtReg = LR.VirtReg;
  LLVM_DEBUG(dbgs() << "Assigning " << printReg(VirtReg, TRI) << " to "
                    << printReg(PhysReg, TRI) << '\n');
  assert(LR.PhysReg == 0 && "Already assigned a physreg");
  assert(PhysReg != 0 && "Trying to assign no register");
  LR.PhysReg = PhysReg;
  setPhysRegState(PhysReg, VirtReg);
}

static bool isCoalescable(const MachineInstr &MI) {
  return MI.isFullCopy();
}

Register RegAllocFast::traceCopyChain(Register Reg) const {
  static const unsigned ChainLengthLimit = 3;
  unsigned C = 0;
  do {
    if (Reg.isPhysical())
      return Reg;
    assert(Reg.isVirtual());

    MachineInstr *VRegDef = MRI->getUniqueVRegDef(Reg);
    if (!VRegDef || !isCoalescable(*VRegDef))
      return 0;
    Reg = VRegDef->getOperand(1).getReg();
  } while (++C <= ChainLengthLimit);
  return 0;
}

/// Check if any of \p VirtReg's definitions is a copy. If it is, follow the
/// chain of copies to check whether we reach a physical register we can
/// coalesce with.
Register RegAllocFast::traceCopies(Register VirtReg) const {
  static const unsigned DefLimit = 3;
  unsigned C = 0;
  for (const MachineInstr &MI : MRI->def_instructions(VirtReg)) {
    if (isCoalescable(MI)) {
      Register Reg = MI.getOperand(1).getReg();
      Reg = traceCopyChain(Reg);
      if (Reg.isValid())
        return Reg;
    }

    if (++C >= DefLimit)
      break;
  }
  return Register();
}
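
// Example of the copy tracing above (hypothetical MIR): when asked for a
// hint for %2, traceCopies walks back through at most ChainLengthLimit full
// copies until it reaches a physical register:
//
//   %0 = COPY $r0
//   %1 = COPY %0
//   %2 = COPY %1
//
// traceCopyChain follows %1 -> %0 -> $r0 and returns $r0, so allocVirtReg
// prefers $r0 for %2; when the intermediate copies get the same treatment,
// the COPYs become identity copies that allocateBasicBlock later erases.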

/// Allocates a physical register for VirtReg.
void RegAllocFast::allocVirtReg(MachineInstr &MI, LiveReg &LR, Register Hint0) {
  const Register VirtReg = LR.VirtReg;

  assert(Register::isVirtualRegister(VirtReg) &&
         "Can only allocate virtual registers");

  const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
  LLVM_DEBUG(dbgs() << "Search register for " << printReg(VirtReg)
                    << " in class " << TRI->getRegClassName(&RC)
                    << " with hint " << printReg(Hint0, TRI) << '\n');

  // Take hint when possible.
  if (Hint0.isPhysical() && MRI->isAllocatable(Hint0) &&
      RC.contains(Hint0)) {
    // Ignore the hint if we would have to spill a dirty register.
    unsigned Cost = calcSpillCost(Hint0);
    if (Cost < spillDirty) {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
                        << '\n');
      if (Cost)
        definePhysReg(MI, Hint0, regFree);
      assignVirtToPhysReg(LR, Hint0);
      return;
    } else {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 1: " << printReg(Hint0, TRI)
                        << " occupied\n");
    }
  } else {
    Hint0 = Register();
  }

  // Try other hint.
  Register Hint1 = traceCopies(VirtReg);
  if (Hint1.isPhysical() && MRI->isAllocatable(Hint1) &&
      RC.contains(Hint1) && !isRegUsedInInstr(Hint1)) {
    // Ignore the hint if we would have to spill a dirty register.
    unsigned Cost = calcSpillCost(Hint1);
    if (Cost < spillDirty) {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
                        << '\n');
      if (Cost)
        definePhysReg(MI, Hint1, regFree);
      assignVirtToPhysReg(LR, Hint1);
      return;
    } else {
      LLVM_DEBUG(dbgs() << "\tPreferred Register 0: " << printReg(Hint1, TRI)
                        << " occupied\n");
    }
  } else {
    Hint1 = Register();
  }

  MCPhysReg BestReg = 0;
  unsigned BestCost = spillImpossible;
  ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
  for (MCPhysReg PhysReg : AllocationOrder) {
    LLVM_DEBUG(dbgs() << "\tRegister: " << printReg(PhysReg, TRI) << ' ');
    unsigned Cost = calcSpillCost(PhysReg);
    LLVM_DEBUG(dbgs() << "Cost: " << Cost << " BestCost: " << BestCost << '\n');
    // Immediately take a register with cost 0.
    if (Cost == 0) {
      assignVirtToPhysReg(LR, PhysReg);
      return;
    }

    if (PhysReg == Hint1 || PhysReg == Hint0)
      Cost -= spillPrefBonus;

    if (Cost < BestCost) {
      BestReg = PhysReg;
      BestCost = Cost;
    }
  }

  if (!BestReg) {
    // Nothing we can do: Report an error and keep going with an invalid
    // allocation.
    if (MI.isInlineAsm())
      MI.emitError("inline assembly requires more registers than available");
    else
      MI.emitError("ran out of registers during register allocation");
    definePhysReg(MI, *AllocationOrder.begin(), regFree);
    assignVirtToPhysReg(LR, *AllocationOrder.begin());
    return;
  }

  definePhysReg(MI, BestReg, regFree);
  assignVirtToPhysReg(LR, BestReg);
}

void RegAllocFast::allocVirtRegUndef(MachineOperand &MO) {
  assert(MO.isUndef() && "expected undef use");
  Register VirtReg = MO.getReg();
  assert(Register::isVirtualRegister(VirtReg) && "Expected virtreg");

  LiveRegMap::const_iterator LRI = findLiveVirtReg(VirtReg);
  MCPhysReg PhysReg;
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
    PhysReg = LRI->PhysReg;
  } else {
    const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
    ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
    assert(!AllocationOrder.empty() && "Allocation order must not be empty");
    PhysReg = AllocationOrder[0];
  }

  unsigned SubRegIdx = MO.getSubReg();
  if (SubRegIdx != 0) {
    PhysReg = TRI->getSubReg(PhysReg, SubRegIdx);
    MO.setSubReg(0);
  }
  MO.setReg(PhysReg);
  MO.setIsRenamable(true);
}
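
// Note on undef uses: allocVirtRegUndef above never reloads or spills. An
// undef use reads no defined value, so it may legitimately name a register
// that currently holds something else. For the ` = OP undef %X, %X` case
// described in allocateInstruction, the defined use of %X is assigned first,
// and the undef operand then reuses whatever register %X landed in.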

/// Allocates a register for VirtReg and marks it as dirty.
MCPhysReg RegAllocFast::defineVirtReg(MachineInstr &MI, unsigned OpNum,
                                      Register VirtReg, Register Hint) {
  assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  if (!LRI->PhysReg) {
    // If there is no hint, peek at the only use of this register.
    if ((!Hint || !Hint.isPhysical()) &&
        MRI->hasOneNonDBGUse(VirtReg)) {
      const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);
      // It's a copy, use the destination register as a hint.
      if (UseMI.isCopyLike())
        Hint = UseMI.getOperand(0).getReg();
    }
    allocVirtReg(MI, *LRI, Hint);
  } else if (LRI->LastUse) {
    // Redefining a live register - kill at the last use, unless it is this
    // instruction defining VirtReg multiple times.
    if (LRI->LastUse != &MI || LRI->LastUse->getOperand(LRI->LastOpNum).isUse())
      addKillFlag(*LRI);
  }
  assert(LRI->PhysReg && "Register not assigned");
  LRI->LastUse = &MI;
  LRI->LastOpNum = OpNum;
  LRI->Dirty = true;
  markRegUsedInInstr(LRI->PhysReg);
  return LRI->PhysReg;
}

/// Make sure VirtReg is available in a physreg and return it.
RegAllocFast::LiveReg &RegAllocFast::reloadVirtReg(MachineInstr &MI,
                                                   unsigned OpNum,
                                                   Register VirtReg,
                                                   Register Hint) {
  assert(Register::isVirtualRegister(VirtReg) && "Not a virtual register");
  LiveRegMap::iterator LRI;
  bool New;
  std::tie(LRI, New) = LiveVirtRegs.insert(LiveReg(VirtReg));
  MachineOperand &MO = MI.getOperand(OpNum);
  if (!LRI->PhysReg) {
    allocVirtReg(MI, *LRI, Hint);
    reload(MI, VirtReg, LRI->PhysReg);
  } else if (LRI->Dirty) {
    if (isLastUseOfLocalReg(MO)) {
      LLVM_DEBUG(dbgs() << "Killing last use: " << MO << '\n');
      if (MO.isUse())
        MO.setIsKill();
      else
        MO.setIsDead();
    } else if (MO.isKill()) {
      LLVM_DEBUG(dbgs() << "Clearing dubious kill: " << MO << '\n');
      MO.setIsKill(false);
    } else if (MO.isDead()) {
      LLVM_DEBUG(dbgs() << "Clearing dubious dead: " << MO << '\n');
      MO.setIsDead(false);
    }
  } else if (MO.isKill()) {
    // We must remove kill flags from uses of reloaded registers because the
    // register would be killed immediately, and there might be a second use:
    //   %foo = OR killed %x, %x
    // This would cause a second reload of %x into a different register.
    LLVM_DEBUG(dbgs() << "Clearing clean kill: " << MO << '\n');
    MO.setIsKill(false);
  } else if (MO.isDead()) {
    LLVM_DEBUG(dbgs() << "Clearing clean dead: " << MO << '\n');
    MO.setIsDead(false);
  }
  assert(LRI->PhysReg && "Register not assigned");
  LRI->LastUse = &MI;
  LRI->LastOpNum = OpNum;
  markRegUsedInInstr(LRI->PhysReg);
  return *LRI;
}
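
// Example of the subregister rewrite performed by setPhysReg below
// (hypothetical MIR and subregister index): once %0 is assigned the 64-bit
// register $x3, an operand reading its 32-bit half is rewritten to the
// matching physical sub-register and the subregister index is dropped:
//
//   %1 = UXTW %0.sub_32      -->      $w4 = UXTW $w3
//
// where $w3 == TRI->getSubReg($x3, sub_32). A kill flag on such an operand
// is promoted to a kill of the whole $x3 via addRegisterKilled().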

/// Changes operand OpNum in MI to refer to PhysReg, considering subregs. This
/// may invalidate any operand pointers. Return true if the operand kills its
/// register.
bool RegAllocFast::setPhysReg(MachineInstr &MI, MachineOperand &MO,
                              MCPhysReg PhysReg) {
  bool Dead = MO.isDead();
  if (!MO.getSubReg()) {
    MO.setReg(PhysReg);
    MO.setIsRenamable(true);
    return MO.isKill() || Dead;
  }

  // Handle subregister index.
  MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : Register());
  MO.setIsRenamable(true);
  MO.setSubReg(0);

  // A kill flag implies killing the full register. Add corresponding super
  // register kill.
  if (MO.isKill()) {
    MI.addRegisterKilled(PhysReg, TRI, true);
    return true;
  }

  // A <def,read-undef> of a sub-register requires an implicit def of the full
  // register.
  if (MO.isDef() && MO.isUndef())
    MI.addRegisterDefined(PhysReg, TRI);

  return Dead;
}

// Handles special instruction operands like early clobbers and tied ops when
// there are additional physreg defines.
void RegAllocFast::handleThroughOperands(MachineInstr &MI,
                                         SmallVectorImpl<Register> &VirtDead) {
  LLVM_DEBUG(dbgs() << "Scanning for through registers:");
  SmallSet<Register, 8> ThroughRegs;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;
    if (MO.isEarlyClobber() || (MO.isUse() && MO.isTied()) ||
        (MO.getSubReg() && MI.readsVirtualRegister(Reg))) {
      if (ThroughRegs.insert(Reg).second)
        LLVM_DEBUG(dbgs() << ' ' << printReg(Reg));
    }
  }

  // If any physreg defines collide with preallocated through registers,
  // we must spill and reallocate.
  LLVM_DEBUG(dbgs() << "\nChecking for physdef collisions.\n");
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef()) continue;
    Register Reg = MO.getReg();
    if (!Reg || !Reg.isPhysical())
      continue;
    markRegUsedInInstr(Reg);

    for (MCRegUnitIterator UI(Reg, TRI); UI.isValid(); ++UI) {
      if (!ThroughRegs.count(RegUnitStates[*UI]))
        continue;

      // Need to spill any aliasing registers.
      for (MCRegUnitRootIterator RI(*UI, TRI); RI.isValid(); ++RI) {
        for (MCSuperRegIterator SI(*RI, TRI, true); SI.isValid(); ++SI) {
          definePhysReg(MI, *SI, regFree);
        }
      }
    }
  }

  SmallVector<Register, 8> PartialDefs;
  LLVM_DEBUG(dbgs() << "Allocating tied uses.\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;
    if (MO.isUse()) {
      if (!MO.isTied()) continue;
      LLVM_DEBUG(dbgs() << "Operand " << I << "(" << MO
                        << ") is tied to operand " << MI.findTiedOperandIdx(I)
                        << ".\n");
      LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
      MCPhysReg PhysReg = LR.PhysReg;
      setPhysReg(MI, MO, PhysReg);
      // Note: we don't update the def operand yet. That would cause the normal
      // def-scan to attempt spilling.
    } else if (MO.getSubReg() && MI.readsVirtualRegister(Reg)) {
      LLVM_DEBUG(dbgs() << "Partial redefine: " << MO << '\n');
      // Reload the register, but don't assign to the operand just yet.
      // That would confuse the later phys-def processing pass.
      LiveReg &LR = reloadVirtReg(MI, I, Reg, 0);
      PartialDefs.push_back(LR.PhysReg);
    }
  }

  LLVM_DEBUG(dbgs() << "Allocating early clobbers.\n");
  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (!Register::isVirtualRegister(Reg))
      continue;
    if (!MO.isEarlyClobber())
      continue;
    // Note: defineVirtReg may invalidate MO.
    MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, 0);
    if (setPhysReg(MI, MI.getOperand(I), PhysReg))
      VirtDead.push_back(Reg);
  }

  // Restore UsedInInstr to a state usable for allocating normal virtual uses.
  UsedInInstr.clear();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue;
    Register Reg = MO.getReg();
    if (!Reg || !Reg.isPhysical())
      continue;
    LLVM_DEBUG(dbgs() << "\tSetting " << printReg(Reg, TRI)
                      << " as used in instr\n");
    markRegUsedInInstr(Reg);
  }

  // Also mark PartialDefs as used to avoid reallocation.
  for (Register PartialDef : PartialDefs)
    markRegUsedInInstr(PartialDef);
}
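
// Schematic example of a "through" register (hypothetical MIR; by this point
// the two-address pass has tied use and def to one vreg):
//
//   %0 = ADDS %0(tied), %1, implicit-def $flags
//
// %0 is live both into and out of the instruction, so it must not share a
// register with anything the instruction defines. handleThroughOperands
// reloads and assigns %0 up front and marks $flags as used-in-instr, and the
// normal def-scan in allocateInstruction then keeps %0 where it is instead
// of treating the def as a fresh allocation.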

#ifndef NDEBUG

void RegAllocFast::dumpState() const {
  for (unsigned Unit = 1, UnitE = TRI->getNumRegUnits(); Unit != UnitE;
       ++Unit) {
    switch (unsigned VirtReg = RegUnitStates[Unit]) {
    case regFree:
      break;
    case regReserved:
      dbgs() << " " << printRegUnit(Unit, TRI) << "[P]";
      break;
    default: {
      dbgs() << ' ' << printRegUnit(Unit, TRI) << '=' << printReg(VirtReg);
      LiveRegMap::const_iterator I = findLiveVirtReg(VirtReg);
      assert(I != LiveVirtRegs.end() && "have LiveVirtRegs entry");
      if (I->Dirty)
        dbgs() << "[D]";
      assert(TRI->hasRegUnit(I->PhysReg, Unit) && "inverse mapping present");
      break;
    }
    }
  }
  dbgs() << '\n';
  // Check that LiveVirtRegs is the inverse.
  for (const LiveReg &LR : LiveVirtRegs) {
    Register VirtReg = LR.VirtReg;
    assert(VirtReg.isVirtual() && "Bad map key");
    MCPhysReg PhysReg = LR.PhysReg;
    if (PhysReg != 0) {
      assert(Register::isPhysicalRegister(PhysReg) &&
             "mapped to physreg");
      for (MCRegUnitIterator UI(PhysReg, TRI); UI.isValid(); ++UI) {
        assert(RegUnitStates[*UI] == VirtReg && "inverse map valid");
      }
    }
  }
}
#endif

void RegAllocFast::allocateInstruction(MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  // If this is a copy, we may be able to coalesce.
  Register CopySrcReg;
  Register CopyDstReg;
  unsigned CopySrcSub = 0;
  unsigned CopyDstSub = 0;
  if (MI.isCopy()) {
    CopyDstReg = MI.getOperand(0).getReg();
    CopySrcReg = MI.getOperand(1).getReg();
    CopyDstSub = MI.getOperand(0).getSubReg();
    CopySrcSub = MI.getOperand(1).getSubReg();
  }

  // Track registers used by instruction.
  UsedInInstr.clear();

  // First scan.
  // Mark physreg uses and early clobbers as used.
  // Find the end of the virtreg operands.
  unsigned VirtOpEnd = 0;
  bool hasTiedOps = false;
  bool hasEarlyClobbers = false;
  bool hasPartialRedefs = false;
  bool hasPhysDefs = false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    // Make sure MRI knows about registers clobbered by regmasks.
    if (MO.isRegMask()) {
      MRI->addPhysRegsUsedFromRegMask(MO.getRegMask());
      continue;
    }
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (!Reg) continue;
    if (Register::isVirtualRegister(Reg)) {
      VirtOpEnd = i+1;
      if (MO.isUse()) {
        hasTiedOps = hasTiedOps ||
            MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1;
      } else {
        if (MO.isEarlyClobber())
          hasEarlyClobbers = true;
        if (MO.getSubReg() && MI.readsVirtualRegister(Reg))
          hasPartialRedefs = true;
      }
      continue;
    }
    if (!MRI->isAllocatable(Reg)) continue;
    if (MO.isUse()) {
      usePhysReg(MO);
    } else if (MO.isEarlyClobber()) {
      definePhysReg(MI, Reg,
                    (MO.isImplicit() || MO.isDead()) ? regFree : regReserved);
      hasEarlyClobbers = true;
    } else
      hasPhysDefs = true;
  }

  // The instruction may have virtual register operands that must be allocated
  // the same register at use-time and def-time: early clobbers and tied
  // operands. If there are also physical defs, these registers must avoid
  // both physical defs and uses, making them more constrained than normal
  // operands.
  // Similarly, if there are multiple defs and tied operands, we must make
  // sure the same register is allocated to uses and defs.
  // We didn't detect inline asm tied operands above, so just make this extra
  // pass for all inline asm.
  if (MI.isInlineAsm() || hasEarlyClobbers || hasPartialRedefs ||
      (hasTiedOps && (hasPhysDefs || MCID.getNumDefs() > 1))) {
    handleThroughOperands(MI, VirtDead);
    // Don't attempt coalescing when we have funny stuff going on.
    CopyDstReg = Register();
    // Pretend we have early clobbers so the use operands get marked below.
    // This is not necessary for the common case of a single tied use.
    hasEarlyClobbers = true;
  }

  // Second scan.
  // Allocate virtreg uses.
  bool HasUndefUse = false;
  for (unsigned I = 0; I != VirtOpEnd; ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;
    if (MO.isUse()) {
      if (MO.isUndef()) {
        HasUndefUse = true;
        // There is no need to allocate a register for an undef use.
        continue;
      }

      // Populate MayLiveAcrossBlocks in case the use block is allocated before
      // the def block (removing the vreg uses).
      mayLiveIn(Reg);

      LiveReg &LR = reloadVirtReg(MI, I, Reg, CopyDstReg);
      MCPhysReg PhysReg = LR.PhysReg;
      CopySrcReg = (CopySrcReg == Reg || CopySrcReg == PhysReg) ? PhysReg : 0;
      if (setPhysReg(MI, MO, PhysReg))
        killVirtReg(LR);
    }
  }

  // Allocate undef operands. This is a separate step because in a situation
  // like ` = OP undef %X, %X` both operands need the same register assigned,
  // so we should perform the normal assignment first.
  if (HasUndefUse) {
    for (MachineOperand &MO : MI.uses()) {
      if (!MO.isReg() || !MO.isUse())
        continue;
      Register Reg = MO.getReg();
      if (!Reg.isVirtual())
        continue;

      assert(MO.isUndef() && "Should only have undef virtreg uses left");
      allocVirtRegUndef(MO);
    }
  }

  // Track registers defined by instruction - early clobbers and tied uses at
  // this point.
  UsedInInstr.clear();
  if (hasEarlyClobbers) {
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg()) continue;
      Register Reg = MO.getReg();
      if (!Reg || !Reg.isPhysical())
        continue;
      // Look for physreg defs and tied uses.
      if (!MO.isDef() && !MO.isTied()) continue;
      markRegUsedInInstr(Reg);
    }
  }

  unsigned DefOpEnd = MI.getNumOperands();
  if (MI.isCall()) {
    // Spill all virtregs before a call. This serves one purpose: If an
    // exception is thrown, the landing pad is going to expect to find
    // registers in their spill slots.
    // Note: although it is appealing to just consider all definitions
    // as call-clobbered, this is not correct because some of those
    // definitions may be used later on, and we do not want to reuse
    // those for virtual registers in between.
    LLVM_DEBUG(dbgs() << " Spilling remaining registers before call.\n");
    spillAll(MI, /*OnlyLiveOut*/ false);
  }

  // Third scan.
  // Mark all physreg defs as used before allocating virtreg defs.
  for (unsigned I = 0; I != DefOpEnd; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
      continue;
    Register Reg = MO.getReg();

    if (!Reg || !Reg.isPhysical() || !MRI->isAllocatable(Reg))
      continue;
    definePhysReg(MI, Reg, MO.isDead() ? regFree : regReserved);
  }

  // Fourth scan.
  // Allocate defs and collect dead defs.
  for (unsigned I = 0; I != DefOpEnd; ++I) {
    const MachineOperand &MO = MI.getOperand(I);
    if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber())
      continue;
    Register Reg = MO.getReg();

    // We have already dealt with phys regs in the previous scan.
    if (Reg.isPhysical())
      continue;
    MCPhysReg PhysReg = defineVirtReg(MI, I, Reg, CopySrcReg);
    if (setPhysReg(MI, MI.getOperand(I), PhysReg)) {
      VirtDead.push_back(Reg);
      CopyDstReg = Register(); // cancel coalescing
    } else
      CopyDstReg = (CopyDstReg == Reg || CopyDstReg == PhysReg) ? PhysReg : 0;
  }

  // Kill dead defs after the scan to ensure that multiple defs of the same
  // register are allocated identically. We didn't need to do this for uses
  // because we are creating our own kill flags, and they are always at the
  // last use.
  for (Register VirtReg : VirtDead)
    killVirtReg(VirtReg);
  VirtDead.clear();

  LLVM_DEBUG(dbgs() << "<< " << MI);
  if (CopyDstReg && CopyDstReg == CopySrcReg && CopyDstSub == CopySrcSub) {
    LLVM_DEBUG(dbgs() << "Mark identity copy for removal\n");
    Coalesced.push_back(&MI);
  }
}
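
// The coalescing bookkeeping above reduces to one pattern: if, after
// allocation, a COPY's source and destination ended up in the same physical
// register with matching subregister indices, the instruction has become an
// identity copy such as `$r0 = COPY $r0`. It is only queued in Coalesced
// here; allocateBasicBlock erases it once the block is finished, because
// LiveVirtRegs may still refer to the instruction.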

void RegAllocFast::handleDebugValue(MachineInstr &MI) {
  MachineOperand &MO = MI.getOperand(0);

  // Ignore DBG_VALUEs that aren't based on virtual registers. These are
  // mostly constants and frame indices.
  if (!MO.isReg())
    return;
  Register Reg = MO.getReg();
  if (!Register::isVirtualRegister(Reg))
    return;

  // See if this virtual register has already been allocated to a physical
  // register or spilled to a stack slot.
  LiveRegMap::iterator LRI = findLiveVirtReg(Reg);
  if (LRI != LiveVirtRegs.end() && LRI->PhysReg) {
    setPhysReg(MI, MO, LRI->PhysReg);
  } else {
    int SS = StackSlotForVirtReg[Reg];
    if (SS != -1) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      updateDbgValueForSpill(MI, SS);
      LLVM_DEBUG(dbgs() << "Modifying debug info due to spill:\t" << MI);
      return;
    }

    // We can't allocate a physreg for a DebugValue, sorry!
    LLVM_DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE");
    MO.setReg(Register());
  }

  // If Reg hasn't been spilled, put this DBG_VALUE in LiveDbgValueMap so
  // that future spills of Reg will have DBG_VALUEs.
  LiveDbgValueMap[Reg].push_back(&MI);
}

void RegAllocFast::allocateBasicBlock(MachineBasicBlock &MBB) {
  this->MBB = &MBB;
  LLVM_DEBUG(dbgs() << "\nAllocating " << MBB);

  RegUnitStates.assign(TRI->getNumRegUnits(), regFree);
  assert(LiveVirtRegs.empty() && "Mapping not cleared from last block?");

  MachineBasicBlock::iterator MII = MBB.begin();

  // Add live-in registers as live.
  for (const MachineBasicBlock::RegisterMaskPair &LI : MBB.liveins())
    if (MRI->isAllocatable(LI.PhysReg))
      definePhysReg(MII, LI.PhysReg, regReserved);

  VirtDead.clear();
  Coalesced.clear();

  // Sequentially allocate each instruction in the MBB.
  for (MachineInstr &MI : MBB) {
    LLVM_DEBUG(
      dbgs() << "\n>> " << MI << "Regs:";
      dumpState()
    );

    // Special handling for debug values. Note that they are not allowed to
    // affect codegen of the other instructions in any way.
    if (MI.isDebugValue()) {
      handleDebugValue(MI);
      continue;
    }

    allocateInstruction(MI);
  }

  // Spill all physical registers holding virtual registers now.
  LLVM_DEBUG(dbgs() << "Spilling live registers at end of block.\n");
  spillAll(MBB.getFirstTerminator(), /*OnlyLiveOut*/ true);

  // Erase all the coalesced copies. We are delaying it until now because
  // LiveVirtRegs might refer to the instrs.
  for (MachineInstr *MI : Coalesced)
    MBB.erase(MI);
  NumCoalesced += Coalesced.size();

  LLVM_DEBUG(MBB.dump());
}

bool RegAllocFast::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n"
                    << "********** Function: " << MF.getName() << '\n');
  MRI = &MF.getRegInfo();
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TRI = STI.getRegisterInfo();
  TII = STI.getInstrInfo();
  MFI = &MF.getFrameInfo();
  MRI->freezeReservedRegs(MF);
  RegClassInfo.runOnMachineFunction(MF);
  UsedInInstr.clear();
  UsedInInstr.setUniverse(TRI->getNumRegUnits());

  // Initialize the virtual->physical register map to have a 'null'
  // mapping for all virtual registers.
  unsigned NumVirtRegs = MRI->getNumVirtRegs();
  StackSlotForVirtReg.resize(NumVirtRegs);
  LiveVirtRegs.setUniverse(NumVirtRegs);
  MayLiveAcrossBlocks.clear();
  MayLiveAcrossBlocks.resize(NumVirtRegs);

  // Loop over all of the basic blocks, eliminating virtual register
  // references.
  for (MachineBasicBlock &MBB : MF)
    allocateBasicBlock(MBB);

  // All machine operands and other references to virtual registers have been
  // replaced. Remove the virtual registers.
  MRI->clearVirtRegs();

  StackSlotForVirtReg.clear();
  LiveDbgValueMap.clear();
  return true;
}

FunctionPass *llvm::createFastRegisterAllocator() {
  return new RegAllocFast();
}