//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machine instrs combined");

static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions than this threshold."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted instrs"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;
  MachineBlockFrequencyInfo *MBFI;
  ProfileSummaryInfo *PSI;
  RegisterClassInfo RegClassInfo;
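
  /// Interface to the target's scheduling model; used to query operand and
  /// instruction latencies when comparing the original and the alternative
  /// code sequences.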
  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize, bool OptForSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool reduceRegisterPressure(MachineInstr &Root, MachineBasicBlock *MBB,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              MachineCombinerPattern Pattern);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
}

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

/// Return the machine instruction that defines the virtual register used by
/// \p MO, or nullptr if \p MO is not a virtual register operand, has no
/// unique definition, or is defined by a PHI.
MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}

/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map, the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register, not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency. The latency of
  // NewRoot is the maximum, over its register defs, of the latency to the
  // first recorded user of the def when that user is a dependence within the
  // trace, and the plain instruction latency of NewRoot otherwise.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth,            // The data dependency chain must be improved.
  MustReduceRegisterPressure, // The register pressure must be reduced.
  Default                     // The critical path must not be lengthened.
};
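
/// Map a combiner pattern to the objective the combiner should achieve when
/// evaluating that pattern (see CombinerObjective above).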
static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
  case MachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case MachineCombinerPattern::REASSOC_XMM_AMM_BMM:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

/// Estimate the latency of the new and original instruction sequences by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}

bool MachineCombiner::reduceRegisterPressure(
    MachineInstr &Root, MachineBasicBlock *MBB,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineCombinerPattern Pattern) {
  // FIXME: For now we don't do any check for the register pressure patterns;
  // we treat them as always profitable. We can do better if we make the
  // RegPressureTracker class aware of the TIE attribute. Then we can get an
  // accurate comparison of the register pressure with DelInstrs or InsInstrs.
  return true;
}

/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. The definition of "improve" may be restricted by specifying that the
/// new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t and it does it\n"
                   : dbgs() << "\t but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);
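
  // Compare the cycle count of the new sequence (its depth plus the latency of
  // NewRoot) against the cycle count of the original sequence, crediting the
  // slack available on Root only when it is known to be accurate.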
  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount;);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t It IMPROVES PathLen because"
                 : dbgs() << "\n\t It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}

/// Helper routine to convert instructions to their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}

/// True when the new instructions do not increase resource length.
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.

  //ArrayRef<const MachineBasicBlock *> MBBarr(MBB);
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n";);
  LLVM_DEBUG(
      ResLenAfterCombine <=
              ResLenBeforeCombine + TII->getExtendResourceLenLimit()
          ? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <=
         ResLenBeforeCombine + TII->getExtendResourceLenLimit();
}

/// \returns true when the new instruction sequence should be generated
/// regardless of whether it lengthens the critical path.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize,
                                   bool OptForSize) {
  if (OptForSize && (NewSize < OldSize))
    return true;
  // Without a scheduling model or itineraries there is nothing to evaluate the
  // new sequence against, so substitute unconditionally.
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}

/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param TII is target instruction info, used to call target hook
/// \param Pattern is used to call target hook finalizeInsInstrs
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
///                          otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     const TargetInstrInfo *TII,
                                     MachineCombinerPattern Pattern,
                                     bool IncrementalUpdate) {
  // If we want to fix up some placeholder for some target, do it now.
  // We need this because in genAlternativeCodeSequence, we have not yet decided
  // whether the better sequence is InsInstrs or DelInstrs, so we do not want to
  // generate any side effects in the function. For example, constant pool entry
  // creation has to be delayed until InsInstrs has been selected as the better
  // pattern; otherwise the constant pool entry created for InsInstrs would not
  // be deleted even if InsInstrs turned out not to be the better pattern.
  TII->finalizeInsInstrs(MI, Pattern, InsInstrs);

  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}

/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  bool OptForSize = OptSize || llvm::shouldOptimizeForSize(MBB, PSI, MBFI);

  bool DoRegPressureReduce =
      TII->shouldReduceRegisterPressure(MBB, &RegClassInfo);

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |    /
    //       ADD/SUB      =>        MADD/MSUB
    //       (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns, DoRegPressureReduce))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (!NewInstCount)
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate && LastUpdate != BlockIter) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      if (DoRegPressureReduce &&
          getCombinerObjective(P) ==
              CombinerObjective::MustReduceRegisterPressure) {
        if (MBB->size() > inc_threshold) {
          // Use incremental depth updates for basic blocks above threshold.
          IncrementalUpdate = true;
          LastUpdate = BlockIter;
        }
        if (reduceRegisterPressure(MI, MBB, InsInstrs, DelInstrs, P)) {
          // Replace DelInstrs with InsInstrs.
          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, TII, P, IncrementalUpdate);
          Changed |= true;

          // Go back to previous instruction as it may have an ILP
          // reassociation opportunity.
          BlockIter--;
          break;
        }
      }

      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways ||
          doSubstitute(NewInstCount, OldInstCount, OptForSize)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, TII, P, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, TII, P, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Cleanup instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->DeleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  MBFI = (PSI && PSI->hasProfileSummary()) ?
         &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI() :
         nullptr;
  MinInstr = nullptr;
  OptSize = MF.getFunction().hasOptSize();
  RegClassInfo.runOnMachineFunction(MF);

  LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << " Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}