1 //===-------- InlineSpiller.cpp - Insert spills and restores inline -------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // The inline spiller modifies the machine function directly instead of 11 // inserting spills and restores in VirtRegMap. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #define DEBUG_TYPE "regalloc" 16 #include "Spiller.h" 17 #include "LiveRangeEdit.h" 18 #include "VirtRegMap.h" 19 #include "llvm/Analysis/AliasAnalysis.h" 20 #include "llvm/CodeGen/LiveIntervalAnalysis.h" 21 #include "llvm/CodeGen/LiveStackAnalysis.h" 22 #include "llvm/CodeGen/MachineFrameInfo.h" 23 #include "llvm/CodeGen/MachineFunction.h" 24 #include "llvm/CodeGen/MachineRegisterInfo.h" 25 #include "llvm/Target/TargetMachine.h" 26 #include "llvm/Target/TargetInstrInfo.h" 27 #include "llvm/Support/CommandLine.h" 28 #include "llvm/Support/Debug.h" 29 #include "llvm/Support/raw_ostream.h" 30 31 using namespace llvm; 32 33 static cl::opt<bool> 34 VerifySpills("verify-spills", cl::desc("Verify after each spill/split")); 35 36 namespace { 37 class InlineSpiller : public Spiller { 38 MachineFunctionPass &pass_; 39 MachineFunction &mf_; 40 LiveIntervals &lis_; 41 LiveStacks &lss_; 42 AliasAnalysis *aa_; 43 VirtRegMap &vrm_; 44 MachineFrameInfo &mfi_; 45 MachineRegisterInfo &mri_; 46 const TargetInstrInfo &tii_; 47 const TargetRegisterInfo &tri_; 48 const BitVector reserved_; 49 50 // Variables that are valid during spill(), but used by multiple methods. 51 LiveRangeEdit *edit_; 52 const TargetRegisterClass *rc_; 53 int stackSlot_; 54 55 // Values that failed to remat at some point. 
56 SmallPtrSet<VNInfo*, 8> usedValues_; 57 58 ~InlineSpiller() {} 59 60 public: 61 InlineSpiller(MachineFunctionPass &pass, 62 MachineFunction &mf, 63 VirtRegMap &vrm) 64 : pass_(pass), 65 mf_(mf), 66 lis_(pass.getAnalysis<LiveIntervals>()), 67 lss_(pass.getAnalysis<LiveStacks>()), 68 aa_(&pass.getAnalysis<AliasAnalysis>()), 69 vrm_(vrm), 70 mfi_(*mf.getFrameInfo()), 71 mri_(mf.getRegInfo()), 72 tii_(*mf.getTarget().getInstrInfo()), 73 tri_(*mf.getTarget().getRegisterInfo()), 74 reserved_(tri_.getReservedRegs(mf_)) {} 75 76 void spill(LiveInterval *li, 77 SmallVectorImpl<LiveInterval*> &newIntervals, 78 const SmallVectorImpl<LiveInterval*> &spillIs); 79 80 void spill(LiveRangeEdit &); 81 82 private: 83 bool reMaterializeFor(MachineBasicBlock::iterator MI); 84 void reMaterializeAll(); 85 86 bool coalesceStackAccess(MachineInstr *MI); 87 bool foldMemoryOperand(MachineBasicBlock::iterator MI, 88 const SmallVectorImpl<unsigned> &Ops, 89 MachineInstr *LoadMI = 0); 90 void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI); 91 void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI); 92 }; 93 } 94 95 namespace llvm { 96 Spiller *createInlineSpiller(MachineFunctionPass &pass, 97 MachineFunction &mf, 98 VirtRegMap &vrm) { 99 if (VerifySpills) 100 mf.verify(&pass, "When creating inline spiller"); 101 return new InlineSpiller(pass, mf, vrm); 102 } 103 } 104 105 /// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI instead of 106 /// reloading it. 
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  // No value is live at the use index, so the operand reads nothing: flag
  // every matching use operand <undef> instead of inserting a reload.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  // Check whether the defining value can be rematerialized at this use.
  // If not, remember the value so reMaterializeAll() keeps its def alive.
  LiveRangeEdit::Remat RM(OrigVNI);
  if (!edit_->canRematerializeAt(RM, UseIdx, false, lis_)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // A tied use would force the remat register to also be the def, and a
      // sub-register def would leave part of the register undefined; give up
      // on either and record the value as still used.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Before rematerializing into a register for a single instruction, try to
  // fold a load into the instruction. That avoids allocating a new register.
  if (RM.OrigMI->getDesc().canFoldAsLoad() &&
      foldMemoryOperand(MI, Ops, RM.OrigMI)) {
    edit_->markRematerialized(RM.ParentVNI);
    return true;
  }

  // Allocate a new register for the remat. The new interval covers only the
  // remat def and this single use, so it must never be spilled again.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat: " << DefIdx << '\n');

  // Replace operands: rewrite every use of the old register to the freshly
  // rematerialized one, killing it at this instruction.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t " << UseIdx << '\t' << *MI);

  // Give the new interval a single live range from the remat def to this use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, aa_))
    return;

  // usedValues_ collects values that still have a non-remattable use; it is
  // consulted below (and by the debug-value cleanup) to decide which defs
  // must stay.
  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  // skipInstruction() advances the iterator past MI up front, so
  // reMaterializeFor is free to fold or rewrite MI without invalidating RI.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Keep the def if the value escapes through a PHI, was never
    // rematerialized, or still has a use that could not be rematted.
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    // Clear the def index to mark the value number as dead.
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    // The described value is no longer available here; drop the DBG_VALUE.
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  // Ask the target whether MI is a simple load from, or store to, a stack
  // slot; either query fills in FI and returns the register involved.
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != edit_->getReg() || FI != stackSlot_)
    return false;

  // The access is redundant with the spill slot we are creating: delete it.
  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// @param MI Instruction using or defining the current register.
/// @param Ops Operand indices from readsWritesVirtualRegister().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      MachineInstr *LoadMI) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  // Fold either the rematerialized load (LoadMI) or the spill slot itself.
  MachineInstr *FoldMI =
    LoadMI ? tii_.foldMemoryOperand(MI, FoldOps, LoadMI)
           : tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  // The folded instruction replaces MI at the same slot index.
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  if (!LoadMI)
    vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // The load is inserted immediately before MI, so stepping the iterator
  // back lands on the new load instruction.
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload: " << LoadIdx << '\t' << *MI);
  // NewLI is live from the load's def until the original instruction.
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();

  // Get the defined value. It could be an early clobber so keep the def index.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
  assert(VNI && VNI->def.getDefIndex() == Idx && "Inconsistent VNInfo");
  Idx = VNI->def;

  // Insert the store after MI (++MI gives the following position), then step
  // back so MI points at the newly inserted store.
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // NewLI is live from the original def until the store consumes it.
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

/// spill - Convenience entry point: build a LiveRangeEdit for li and spill
/// it, optionally verifying the function afterwards.
void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          const SmallVectorImpl<LiveInterval*> &spillIs) {
  LiveRangeEdit edit(*li, newIntervals, spillIs);
  spill(edit);
  if (VerifySpills)
    mf_.verify(&pass_, "After inline spill");
}

/// spill - Spill the register described by edit: rematerialize what we can,
/// then assign a stack slot and rewrite every remaining use/def with folded
/// memory operands or explicit reload/spill instructions.
void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!TargetRegisterInfo::isStackSlot(edit.getReg())
         && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = vrm_.assignVirt2StackSlot(edit_->getReg());

  // Update LiveStacks now that we are committed to spilling.
  // The stack interval gets a single value covering the whole live range of
  // the register being spilled.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  assert(stacklvr.empty() && "Just created stack slot not empty");
  stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));

  // Iterate over instructions using register.
  // skipInstruction() pre-advances RI, so the loop body may erase or replace
  // MI without invalidating the iterator.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      // NOTE(review): assumes the DBG_VALUE operand layout is
      // (reg, offset-imm, metadata) — operand 1 is the offset and operand 2
      // the variable MDNode.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                              Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        // Replace MI with the frame-index DBG_VALUE at the same position.
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        // Target cannot describe the slot; better no info than wrong info.
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        // Tied uses keep their value live into the def; don't mark killed.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        // A non-dead def must be stored back to the stack slot below.
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}