1 //===-- LiveRangeShrink.cpp - Move instructions to shrink live range ------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 ///===---------------------------------------------------------------------===// 9 /// 10 /// \file 11 /// This pass moves instructions close to the definition of its operands to 12 /// shrink live range of the def instruction. The code motion is limited within 13 /// the basic block. The moved instruction should have 1 def, and more than one 14 /// uses, all of which are the only use of the def. 15 /// 16 ///===---------------------------------------------------------------------===// 17 #include "llvm/ADT/Statistic.h" 18 #include "llvm/CodeGen/MachineFunctionPass.h" 19 #include "llvm/CodeGen/MachineRegisterInfo.h" 20 #include "llvm/CodeGen/Passes.h" 21 #include "llvm/Support/Debug.h" 22 23 #define DEBUG_TYPE "lrshrink" 24 25 STATISTIC(NumInstrsHoistedToShrinkLiveRange, 26 "Number of insructions hoisted to shrink live range."); 27 28 using namespace llvm; 29 30 namespace { 31 class LiveRangeShrink : public MachineFunctionPass { 32 public: 33 static char ID; 34 35 LiveRangeShrink() : MachineFunctionPass(ID) { 36 initializeLiveRangeShrinkPass(*PassRegistry::getPassRegistry()); 37 } 38 39 void getAnalysisUsage(AnalysisUsage &AU) const override { 40 AU.setPreservesCFG(); 41 MachineFunctionPass::getAnalysisUsage(AU); 42 } 43 44 StringRef getPassName() const override { return "Live Range Shrink"; } 45 46 bool runOnMachineFunction(MachineFunction &MF) override; 47 }; 48 } // End anonymous namespace. 
char LiveRangeShrink::ID = 0;
char &llvm::LiveRangeShrinkID = LiveRangeShrink::ID;

INITIALIZE_PASS(LiveRangeShrink, "lrshrink", "Live Range Shrink Pass", false,
                false)
namespace {
// Maps each instruction in a basic block to its "dominating order" number.
// Orders are assigned in block order by BuildInstOrderMap, but after an
// instruction is hoisted its entry is rewritten to the order of the insertion
// point, so several instructions may share one order value.
typedef DenseMap<MachineInstr *, unsigned> InstOrderMap;

/// Returns \p New if it's dominated by \p Old, otherwise return \p Old.
/// \p M maintains a map from instruction to its dominating order that satisfies
/// M[A] > M[B] guarantees that A is dominated by B.
/// If \p New is not in \p M, return \p Old. Otherwise if \p Old is null, return
/// \p New.
MachineInstr *FindDominatedInstruction(MachineInstr &New, MachineInstr *Old,
                                       const InstOrderMap &M) {
  auto NewIter = M.find(&New);
  if (NewIter == M.end())
    return Old;
  if (Old == nullptr)
    return &New;
  unsigned OrderOld = M.find(Old)->second;
  unsigned OrderNew = NewIter->second;
  // Distinct orders decide dominance directly: the larger order is dominated.
  if (OrderOld != OrderNew)
    return OrderOld < OrderNew ? &New : Old;
  // OrderOld == OrderNew, we need to iterate down from Old to see if it
  // can reach New, if yes, New is dominated by Old.
  // NOTE(review): this scan assumes the run of equal-order instructions is
  // always followed by an instruction present in M; if Old's run reached the
  // end of the block, getNextNode() would return null and M.find(I) would
  // dereference an end iterator — confirm the invariant holds for callers.
  for (MachineInstr *I = Old->getNextNode(); M.find(I)->second == OrderNew;
       I = I->getNextNode())
    if (I == &New)
      return &New;
  return Old;
}

/// Builds Instruction to its dominating order number map \p M by traversing
/// from instruction \p Start.
/// Earlier entries (before \p Start) are discarded; orders restart at zero.
void BuildInstOrderMap(MachineBasicBlock::iterator Start, InstOrderMap &M) {
  M.clear();
  unsigned i = 0;
  for (MachineInstr &I : make_range(Start, Start->getParent()->end()))
    M[&I] = i++;
}
} // end anonymous namespace

// Scans every basic block and, for each movable instruction with one virtual
// def and multiple sole-use virtual operands, hoists it to just after the
// latest-dominating definition of its operands. Returns false: block-local
// reordering does not change the CFG or any reported analysis result.
bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  MachineRegisterInfo &MRI = MF.getRegInfo();

  DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');

  InstOrderMap IOM;
  // Map from register to instruction order (value of IOM) where the
  // register is used last. When moving instructions up, we need to
  // make sure all its defs (including dead def) will not cross its
  // last use when moving up.
  DenseMap<unsigned, std::pair<unsigned, MachineInstr *>> UseMap;

  for (MachineBasicBlock &MBB : MF) {
    if (MBB.empty())
      continue;
    // SawStore feeds isSafeToMove: once a store is seen, loads after it are
    // no longer movable. Reset per block (and after side-effect barriers).
    bool SawStore = false;
    BuildInstOrderMap(MBB.begin(), IOM);
    UseMap.clear();

    for (MachineBasicBlock::iterator Next = MBB.begin(); Next != MBB.end();) {
      MachineInstr &MI = *Next;
      // Advance Next first so splicing MI (and its trailing DBG_VALUEs)
      // below cannot invalidate the iteration.
      ++Next;
      if (MI.isPHI() || MI.isDebugValue())
        continue;
      if (MI.mayStore())
        SawStore = true;

      unsigned CurrentOrder = IOM[&MI];
      // Barrier is the order of the last use of any register MI dead-defs;
      // MI must not be hoisted above that use.
      unsigned Barrier = 0;
      MachineInstr *BarrierMI = nullptr;
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg() || MO.isDebug())
          continue;
        if (MO.isUse())
          UseMap[MO.getReg()] = std::make_pair(CurrentOrder, &MI);
        else if (MO.isDead() && UseMap.count(MO.getReg()))
          // Barrier is the last instruction where MO get used. MI should not
          // be moved above Barrier.
          if (Barrier < UseMap[MO.getReg()].first) {
            Barrier = UseMap[MO.getReg()].first;
            BarrierMI = UseMap[MO.getReg()].second;
          }
      }

      if (!MI.isSafeToMove(nullptr, SawStore)) {
        // If MI has side effects, it should become a barrier for code motion.
        // IOM is rebuild from the next instruction to prevent later
        // instructions from being moved before this MI.
        if (MI.hasUnmodeledSideEffects() && Next != MBB.end()) {
          BuildInstOrderMap(Next, IOM);
          SawStore = false;
        }
        continue;
      }

      const MachineOperand *DefMO = nullptr;
      // Insert is the instruction MI will be placed after: the latest
      // (most-dominated) def among MI's use operands. Null means "don't move".
      MachineInstr *Insert = nullptr;

      // Number of live-ranges that will be shortened. We do not count
      // live-ranges that are defined by a COPY as it could be coalesced later.
      unsigned NumEligibleUse = 0;

      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isReg() || MO.isDead() || MO.isDebug())
          continue;
        unsigned Reg = MO.getReg();
        // Do not move the instruction if it def/uses a physical register,
        // unless it is a constant physical register or a noreg.
        if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
          if (!Reg || MRI.isConstantPhysReg(Reg))
            continue;
          Insert = nullptr;
          break;
        }
        if (MO.isDef()) {
          // Do not move if there is more than one def.
          if (DefMO) {
            Insert = nullptr;
            break;
          }
          DefMO = &MO;
        } else if (MRI.hasOneNonDBGUse(Reg) && MRI.hasOneDef(Reg) && DefMO &&
                   MRI.getRegClass(DefMO->getReg()) ==
                       MRI.getRegClass(MO.getReg())) {
          // The heuristic does not handle different register classes yet
          // (registers of different sizes, looser/tighter constraints). This
          // is because it needs more accurate model to handle register
          // pressure correctly.
          MachineInstr &DefInstr = *MRI.def_instr_begin(Reg);
          if (!DefInstr.isCopy())
            NumEligibleUse++;
          Insert = FindDominatedInstruction(DefInstr, Insert, IOM);
        } else {
          // Any use that is not the sole use of a single-def vreg disqualifies
          // MI: moving it could stretch that operand's live range instead.
          Insert = nullptr;
          break;
        }
      }

      // If Barrier equals IOM[I], traverse forward to find if BarrierMI is
      // after Insert, if yes, then we should not hoist.
      for (MachineInstr *I = Insert; I && IOM[I] == Barrier;
           I = I->getNextNode())
        if (I == BarrierMI) {
          Insert = nullptr;
          break;
        }
      // Move the instruction when # of shrunk live range > 1.
      if (DefMO && Insert && NumEligibleUse > 1 && Barrier <= IOM[Insert]) {
        MachineBasicBlock::iterator I = std::next(Insert->getIterator());
        // Skip all the PHI and debug instructions.
        while (I != MBB.end() && (I->isPHI() || I->isDebugValue()))
          I = std::next(I);
        // Already immediately after the insertion point: nothing to do.
        if (I == MI.getIterator())
          continue;

        // Update the dominator order to be the same as the insertion point.
        // We do this to maintain a non-decreasing order without need to update
        // all instruction orders after the insertion point.
        unsigned NewOrder = IOM[&*I];
        IOM[&MI] = NewOrder;
        NumInstrsHoistedToShrinkLiveRange++;

        // Find MI's debug value following MI.
        // DBG_VALUEs referring to MI's def travel with it so debug info stays
        // attached; Next is advanced past them as well.
        MachineBasicBlock::iterator EndIter = std::next(MI.getIterator());
        if (MI.getOperand(0).isReg())
          for (; EndIter != MBB.end() && EndIter->isDebugValue() &&
                 EndIter->getOperand(0).isReg() &&
                 EndIter->getOperand(0).getReg() == MI.getOperand(0).getReg();
               ++EndIter, ++Next)
            IOM[&*EndIter] = NewOrder;
        // Move [MI, EndIter) to just before I (i.e. right after Insert,
        // past any PHIs/debug values).
        MBB.splice(I, &MBB, MI.getIterator(), EndIter);
      }
    }
  }
  return false;
}