//===- CalcSpillWeights.cpp -----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <set>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "calcspillweights"

void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
                                         MachineFunction &MF,
                                         VirtRegMap *VRM,
                                         const MachineLoopInfo &MLI,
                                         const MachineBlockFrequencyInfo &MBFI,
                                         VirtRegAuxInfo::NormalizingFn norm) {
  LLVM_DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
                    << "********** Function: " << MF.getName() << '\n');

  MachineRegisterInfo &MRI = MF.getRegInfo();
  VirtRegAuxInfo VRAI(MF, LIS, VRM, MLI, MBFI, norm);
  for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    VRAI.calculateSpillWeightAndHint(LIS.getInterval(Reg));
  }
}

// Return the preferred allocation register for reg, given a COPY instruction.
static unsigned copyHint(const MachineInstr *mi, unsigned reg,
                         const TargetRegisterInfo &tri,
                         const MachineRegisterInfo &mri) {
  unsigned sub, hreg, hsub;
  if (mi->getOperand(0).getReg() == reg) {
    sub = mi->getOperand(0).getSubReg();
    hreg = mi->getOperand(1).getReg();
    hsub = mi->getOperand(1).getSubReg();
  } else {
    sub = mi->getOperand(1).getSubReg();
    hreg = mi->getOperand(0).getReg();
    hsub = mi->getOperand(0).getSubReg();
  }

  if (!hreg)
    return 0;

  if (TargetRegisterInfo::isVirtualRegister(hreg))
    return sub == hsub ? hreg : 0;

  const TargetRegisterClass *rc = mri.getRegClass(reg);
  unsigned CopiedPReg = (hsub ? tri.getSubReg(hreg, hsub) : hreg);
  if (rc->contains(CopiedPReg))
    return CopiedPReg;

  // Check if reg:sub matches so that a super register could be hinted.
  if (sub)
    return tri.getMatchingSuperReg(CopiedPReg, sub, rc);

  return 0;
}

// Check if all values in LI are rematerializable.
static bool isRematerializable(const LiveInterval &LI,
                               const LiveIntervals &LIS,
                               VirtRegMap *VRM,
                               const TargetInstrInfo &TII) {
  unsigned Reg = LI.reg;
  unsigned Original = VRM ? VRM->getOriginal(Reg) : 0;
  for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
       I != E; ++I) {
    const VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    if (VNI->isPHIDef())
      return false;

    MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
    assert(MI && "Dead valno in interval");

    // Trace copies introduced by live range splitting. The inline
    // spiller can rematerialize through these copies, so the spill
    // weight must reflect this.
    if (VRM) {
      while (MI->isFullCopy()) {
        // The copy destination must match the interval register.
        if (MI->getOperand(0).getReg() != Reg)
          return false;

        // Get the source register.
        Reg = MI->getOperand(1).getReg();

        // If the original (pre-splitting) registers match, this
        // copy came from a split.
        if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
            VRM->getOriginal(Reg) != Original)
          return false;

        // Follow the copy live-in value.
        const LiveInterval &SrcLI = LIS.getInterval(Reg);
        LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
        VNI = SrcQ.valueIn();
        assert(VNI && "Copy from non-existing value");
        if (VNI->isPHIDef())
          return false;
        MI = LIS.getInstructionFromIndex(VNI->def);
        assert(MI && "Dead valno in interval");
      }
    }

    if (!TII.isTriviallyReMaterializable(*MI, LIS.getAliasAnalysis()))
      return false;
  }
  return true;
}

void VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
  float weight = weightCalcHelper(li);
  // Check if unspillable.
  if (weight < 0)
    return;
  li.weight = weight;
}

float VirtRegAuxInfo::futureWeight(LiveInterval &li, SlotIndex start,
                                   SlotIndex end) {
  return weightCalcHelper(li, &start, &end);
}

float VirtRegAuxInfo::weightCalcHelper(LiveInterval &li, SlotIndex *start,
                                       SlotIndex *end) {
  MachineRegisterInfo &mri = MF.getRegInfo();
  const TargetRegisterInfo &tri = *MF.getSubtarget().getRegisterInfo();
  MachineBasicBlock *mbb = nullptr;
  MachineLoop *loop = nullptr;
  bool isExiting = false;
  float totalWeight = 0;
  unsigned numInstr = 0; // Number of instructions using li.
  SmallPtrSet<MachineInstr*, 8> visited;

  std::pair<unsigned, unsigned> TargetHint = mri.getRegAllocationHint(li.reg);

  // Don't recompute spill weight for an unspillable register.
  bool Spillable = li.isSpillable();

  bool localSplitArtifact = start && end;

  // Do not update future local split artifacts.
  bool updateLI = !localSplitArtifact;

  if (localSplitArtifact) {
    MachineBasicBlock *localMBB = LIS.getMBBFromIndex(*end);
    assert(localMBB == LIS.getMBBFromIndex(*start) &&
           "start and end are expected to be in the same basic block");

    // A local split artifact will have 2 additional copy instructions, and
    // they will be in the same BB.
    //   localLI = COPY other
    //   ...
    //   other   = COPY localLI
    totalWeight += LiveIntervals::getSpillWeight(true, false, &MBFI, localMBB);
    totalWeight += LiveIntervals::getSpillWeight(false, true, &MBFI, localMBB);

    numInstr += 2;
  }

  // CopyHint is a sortable hint derived from a COPY instruction.
  struct CopyHint {
    unsigned Reg;
    float Weight;
    bool IsPhys;
    CopyHint(unsigned R, float W, bool P) :
      Reg(R), Weight(W), IsPhys(P) {}
    bool operator<(const CopyHint &rhs) const {
      // Always prefer any physreg hint.
      if (IsPhys != rhs.IsPhys)
        return (IsPhys && !rhs.IsPhys);
      if (Weight != rhs.Weight)
        return (Weight > rhs.Weight);
      return Reg < rhs.Reg; // Tie-breaker.
    }
  };
  std::set<CopyHint> CopyHints;

  for (MachineRegisterInfo::reg_instr_iterator
       I = mri.reg_instr_begin(li.reg), E = mri.reg_instr_end();
       I != E; ) {
    MachineInstr *mi = &*(I++);

    // For local split artifacts, we are interested only in instructions
    // between the expected start and end of the range.
    SlotIndex si = LIS.getInstructionIndex(*mi);
    if (localSplitArtifact && ((si < *start) || (si > *end)))
      continue;

    numInstr++;
    if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugInstr())
      continue;
    if (!visited.insert(mi).second)
      continue;

    float weight = 1.0f;
    if (Spillable) {
      // Get loop info for mi.
      if (mi->getParent() != mbb) {
        mbb = mi->getParent();
        loop = Loops.getLoopFor(mbb);
        isExiting = loop ? loop->isLoopExiting(mbb) : false;
      }

      // Calculate instr weight.
      bool reads, writes;
      std::tie(reads, writes) = mi->readsWritesVirtualRegister(li.reg);
      weight = LiveIntervals::getSpillWeight(writes, reads, &MBFI, *mi);

      // Give extra weight to what looks like a loop induction variable update.
      if (writes && isExiting && LIS.isLiveOutOfMBB(li, mbb))
        weight *= 3;

      totalWeight += weight;
    }

    // Get allocation hints from copies.
    if (!mi->isCopy())
      continue;
    unsigned hint = copyHint(mi, li.reg, tri, mri);
    if (!hint)
      continue;
    // Force hweight onto the stack so that x86 doesn't add hidden precision,
    // making the comparison incorrectly pass (i.e., 1 > 1 == true??).
    //
    // FIXME: we probably shouldn't use floats at all.
    volatile float hweight = Hint[hint] += weight;
    if (TargetRegisterInfo::isVirtualRegister(hint) || mri.isAllocatable(hint))
      CopyHints.insert(CopyHint(hint, hweight, tri.isPhysicalRegister(hint)));
  }

  Hint.clear();

  // Pass all the sorted copy hints to mri.
  if (updateLI && CopyHints.size()) {
    // Remove a generic hint if previously added by target.
    if (TargetHint.first == 0 && TargetHint.second)
      mri.clearSimpleHint(li.reg);

    std::set<unsigned> HintedRegs;
    for (auto &Hint : CopyHints) {
      if (!HintedRegs.insert(Hint.Reg).second ||
          (TargetHint.first != 0 && Hint.Reg == TargetHint.second))
        // Don't add the same reg twice or the target-type hint again.
        continue;
      mri.addRegAllocationHint(li.reg, Hint.Reg);
    }

    // Weakly boost the spill weight of hinted registers.
    totalWeight *= 1.01F;
  }

  // If the live interval was already unspillable, leave it that way.
  if (!Spillable)
    return -1.0;

  // Mark li as unspillable if all live ranges are tiny and the interval
  // is not live at any reg mask. If the interval is live at a reg mask,
  // spilling may be required.
  if (updateLI && li.isZeroLength(LIS.getSlotIndexes()) &&
      !li.isLiveAtIndexes(LIS.getRegMaskSlots())) {
    li.markNotSpillable();
    return -1.0;
  }

  // If all of the definitions of the interval are re-materializable,
  // it is a preferred candidate for spilling.
  // FIXME: this gets much more complicated once we support non-trivial
  // re-materialization.
  if (isRematerializable(li, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
    totalWeight *= 0.5F;

  if (localSplitArtifact)
    return normalize(totalWeight, start->distance(*end), numInstr);
  return normalize(totalWeight, li.getSize(), numInstr);
}
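
//===----------------------------------------------------------------------===//
// Usage note (illustrative sketch, not part of the original file).
//
// A register allocation pass typically recomputes spill weights and hints
// once per function before assigning registers. The pass boilerplate and
// member names (LIS, VRM, Loops, MBFI) below are assumed for illustration:
//
//   void RAExamplePass::runOnMachineFunction(MachineFunction &MF) {
//     // ... acquire LiveIntervals, VirtRegMap, MachineLoopInfo,
//     //     MachineBlockFrequencyInfo ...
//     calculateSpillWeightsAndHints(*LIS, MF, VRM, *Loops, *MBFI);
//     // ... proceed with allocation ...
//   }
//
// The normalize() call at the end of weightCalcHelper() defaults to the
// normalizing function declared in CalcSpillWeights.h, which (roughly)
// divides the accumulated use/def frequency by the interval size plus a
// constant so that tiny intervals do not get disproportionate weights:
//
//   weight = UseDefFreq / (Size + 25 * SlotIndex::InstrDist);
//===----------------------------------------------------------------------===//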