//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
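//
// For illustration, assuming p is loop-invariant and nothing else in the
// loop may alias it, promotion rewrites (roughly):
//
//   for (i = 0; i < n; ++i)
//     *p = *p + a[i];
//
// into:
//
//   tmp = *p;
//   for (i = 0; i < n; ++i)
//     tmp = tmp + a[i];
//   *p = tmp;  // store sunk to after the loop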
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

namespace llvm {
class BlockFrequencyInfo;
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue.
// This flag applies only when LICM uses MemorySSA instead of the
// AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop,
                                  bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop, Instruction &I,
                                             SinkAndHoistLICMFlags &Flags);
static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                              MemoryUse &MU);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
static SmallVector<SmallSetVector<Value *, 8>, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
    BlockFrequencyInfo *BFI =
        hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                       : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent()),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent()),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LICM requires MemorySSA (loop-mssa)");

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << ">";
}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LNICM requires MemorySSA (loop-mssa)");

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LNICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << "<";
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << ">";
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap,
                           bool LicmAllowSpeculation) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                            LicmAllowSpeculation);
}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
                                                   MemorySSA *MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop *L, MemorySSA *MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  assert(((L != nullptr) == (MSSA != nullptr)) &&
         "Unexpected values for SinkAndHoistLICMFlags");
  if (!MSSA)
    return;

  unsigned AccessCapCount = 0;
  for (auto *BB : L->getBlocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
    bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
  MSSA->ensureOptimizedUses();

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines;
  // however, there is currently no general solution for this. Similar issues
  // could also potentially happen in other passes where instructions are being
  // moved across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, L, MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |= LoopNestMode
                   ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI,
                                           DT, BFI, TLI, TTI, L, &MSSAU,
                                           &SafetyInfo, Flags, ORE)
                   : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI,
                                TLI, TTI, L, &MSSAU, &SafetyInfo, Flags, ORE);
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
                           &MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop.
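      // E.g. with a loop-invariant pointer %q, promoting the load %p = *%q to
      // a register makes the address %p loop-invariant as well, which can
      // expose the accesses through %p for promotion on a later iteration.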
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (const SmallSetVector<Value *, 8> &PointerMustAliases :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, TLI, L, &MSSAU, &SafetyInfo, ORE, LicmAllowSpeculation);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, BlockFrequencyInfo *BFI,
                      TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                      Loop *CurLoop, MemorySSAUpdater *MSSAU,
                      ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
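      // Note: before I is erased below (whether deleted as dead or sunk), the
      // iterator II is advanced past it so the backwards walk over the block
      // stays valid.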
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all of the users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFreeInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                SafetyInfo, TTI, FreeInLoop, LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/nullptr, MSSAU, true,
                             &Flags, ORE)) {
        if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(
    DomTreeNode *N, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    SinkAndHoistLICMFlags &Flags, OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI,
                          TTI, L, MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be hoisted
  // to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
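    // For illustration, in a diamond such as
    //   BI: br i1 %c, label %T, label %F
    //   T:  br label %BB
    //   F:  br label %BB
    //   BB: %phi = phi [ %a, %T ], [ %b, %F ]
    // the hoistable branch BI covers both predecessors T and F of BB, so the
    // phi is hoistable once the control flow is duplicated.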
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, BlockFrequencyInfo *BFI,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
                          << '\n');
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/ nullptr, MSSAU,
                             true, &Flags, ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator(), AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
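      // E.g. with a loop-invariant divisor %d and the arcp fast-math flag:
      //   loop:      %q = fdiv arcp double %x, %d
      // becomes, once the reciprocal is hoisted:
      //   preheader: %r = fdiv arcp double 1.0, %d
      //   loop:      %q = fmul arcp double %x, %r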
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so, make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }
  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
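  // For reference, an invariant.start marking looks roughly like:
  //   %token = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p)
  // The memory is invariant from this point until a matching
  // llvm.invariant.end (which would use %token); hence the use_empty() check
  // on the intrinsic below.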
  for (auto *U : Addr->users()) {
    // Avoid traversing when the load operand has a high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
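/// MemoryPhis are ignored; seeing a MemoryUseOrDef for any other instruction,
/// or a second one for I itself, makes this false.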
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // The convergent attribute marks operations that involve inter-thread
    // communication, whose results are implicitly affected by the enclosing
    // control flow. It is not safe to hoist or sink such operations
    // across control flow.
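    // (Typical examples are GPU barriers and cross-lane shuffle intrinsics,
    // whose behavior depends on the set of threads executing them together.)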
    if (CI->isConvergent())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
      // Widenable conditions don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AAResults::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (AAResults::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->args())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load/store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap or no "quota" to
      // check clobber, then give up as we're not walking a list that long.
      if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected unordered load");
                return false;
              }
              // Any call, while it may not be clobbering SI, may be a use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI. Check CI's attributes and arguments; the number of
                // such checks performed is limited above by NoOfMemAccTooLarge.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }
      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->incrementClobberingCalls();
      // If there are no clobbering Defs in the loop, store is safe to hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is free in the loop.
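/// E.g. a GEP whose only in-loop users are loads/stores in its own block
/// usually folds into their addressing modes and costs nothing, so sinking it
/// out of the loop would not shrink the loop body.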
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
        TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
           TargetTransformInfo::TCC_Free;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop,
                                  bool LoopNestMode) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;

      if (LoopNestMode) {
        while (isa<PHINode>(UI) && UI->hasOneUser() &&
               UI->getNumOperands() == 1) {
          if (!CurLoop->contains(UI))
            break;
          UI = cast<Instruction>(UI->user_back());
        }
      }
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
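    // Schematically, a clone placed into a funclet-colored exit block must
    // name that block's EH pad in a "funclet" bundle (illustrative IR, not
    // taken from a real test):
    //   %v.le = call i32 @f() [ "funclet"(token %ehpad) ]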
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
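  // For instance, if the clone uses %x defined inside the loop, the loop
  // below creates (sketch):
  //   %x.lcssa = phi <ty> [ %x, <each predecessor of the exit block> ]
  // at the head of the exit block and points the clone's operand at it.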
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa", &ExitBlock.front());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater *MSSAU) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
                         MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetValue(&I);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exist it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping such a corner case, we can make updating
  // BlockColors after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (BasicBlock *BBPred : predecessors(BB)) {
    if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
        isa<CallBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form where each predecessor of each exit block
  // should be contained within the loop. For example, this will convert the
  // loop below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-block with BlockColors in
      // canSplitPredecessors(), we can simply assign the predecessor's color
      // to the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting
        // the reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");

  // Iterate over users to be ready for actual sinking. Replace users via
  // unreachable blocks with undef and make all user PHIs trivially
  // replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end();
       UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  // First check if I is worth sinking for all uses. Sink only when it is
  // worthwhile across all uses.
  SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");

    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr);
    Changed = true;
  }
  return Changed;
}

/// When an instruction is found to use only loop-invariant operands and it
/// is safe to hoist, this function is called to do the dirty work.
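/// Hoisting moves I into the given Dest block (typically the loop
/// preheader), updates MemorySSA and ScalarEvolution if available, and drops
/// metadata and attributes that may only hold under conditions inside the
/// loop.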
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
                    << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  // Similarly, if I is a call and it is not guaranteed to execute in the loop,
  // then moving to the preheader means we should strip attributes on the call
  // that can cause UB since we may be hoisting above conditions that allowed
  // inferring those attributes. They may not be valid at the preheader.
  if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from
      // burning time in isGuaranteedToExecute if we don't actually have
      // anything to drop. It is a compile time optimization, not required for
      // correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUndefImplyingAttrsAndUnknownMetadata();

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);

  I.updateLocationAfterHoist();

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// if it is known not to trap when moved to the preheader, or if it is a
/// trapping instruction that is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    bool AllowSpeculation) {
  if (AllowSpeculation && isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}

namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
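  // Pointers known to must-alias SomePtr; loads and stores through any of
  // them are rewritten by this promoter.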
  const SmallSetVector<Value *, 8> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  MemorySSAUpdater *MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  Align Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;
  bool CanInsertStoresInExitBlocks;

  // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
  // (if legal) if doing so would add an out-of-loop use to an instruction
  // defined in-loop.
  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
      return V;

    Instruction *I = cast<Instruction>(V);
    // We need to create an LCSSA PHI node for the incoming value and
    // store that.
    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                  I->getName() + ".lcssa", &BB->front());
    for (BasicBlock *Pred : PredCache.get(BB))
      PN->addIncoming(I, Pred);
    return PN;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               const SmallSetVector<Value *, 8> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               MemorySSAUpdater *MSSAU, LoopInfo &li, DebugLoc dl,
               Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
               ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
        PredCache(PIC), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
        Alignment(Alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo),
        CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void insertStoresInLoopExitBlocks() {
    // Insert stores in the loop exit blocks. Each exit block gets a store of
    // the live-out value that feeds it. Since we've already told the SSA
    // updater about the defs in the loop and the preheader definition, it is
    // all set and we can start using it.
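    // Schematically, each rewritten exit ends up as (illustrative IR):
    //   exit.i:
    //     %lcssa = phi <ty> [ %liveout, ... ] ; only if LCSSA is required
    //     store <ty> %lcssa, ptr %SomePtr     ; at LoopInsertPts[i]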
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(Alignment);
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);

      MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
      MemoryAccess *NewMemAcc;
      if (!MSSAInsertPoint) {
        NewMemAcc = MSSAU->createMemoryAccessInBB(
            NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
      } else {
        NewMemAcc =
            MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
      }
      MSSAInsertPts[i] = NewMemAcc;
      MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
      // FIXME: true for safety, false may still be correct.
    }
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    if (CanInsertStoresInExitBlocks)
      insertStoresInLoopExitBlocks();
  }

  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    MSSAU->removeMemoryAccess(I);
  }

  bool shouldDelete(Instruction *I) const override {
    if (isa<StoreInst>(I))
      return CanInsertStoresInExitBlocks;
    return true;
  }
};

bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
                                 DominatorTree *DT) {
  // We can perform the captured-before check against any instruction in the
  // loop header, as the loop header is reachable from any instruction inside
  // the loop.
  // TODO: ReturnCaptures=true shouldn't be necessary here.
  return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
                                     /* StoreCaptures */ true,
                                     L->getHeader()->getTerminator(), DT);
}

/// Return true if we can prove that a caller cannot inspect the object if an
/// unwind occurs inside the loop.
bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
                                DominatorTree *DT) {
  bool RequiresNoCaptureBeforeUnwind;
  if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
    return false;

  return !RequiresNoCaptureBeforeUnwind ||
         isNotCapturedBeforeOrInLoop(Object, L, DT);
}

} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, bool AllowSpeculation) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop. In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop).
  // We can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  //    requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  //    block is reached, the original dynamic path would have taken us through
  //    the store, so inserting a store into the exit block is safe. Note that
  //    this is different from the store being guaranteed to execute. For
  //    instance, if an exception is thrown on the first iteration of the loop,
  //    the original store is never executed, but the exit blocks are not
  //    executed either.

  bool DereferenceableInPH = false;
  bool SafeToInsertStore = false;
  bool FoundLoadToPromote = false;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  Align Alignment;
  // Keep track of which types of access we see.
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  bool IsKnownThreadLocalObject = false;
  if (SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge. We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = getUnderlyingObject(SomePtr);
    if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
      return false;
    // Subtlety: Alloca's aren't visible to callers, but *are* potentially
    // visible to other threads if captured and used during their lifetimes.
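    // For example, an alloca whose address escaped to another thread before
    // the loop could still be read concurrently, so only non-alloca objects
    // (e.g. uncaptured noalias calls) are treated as thread-local below.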
    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
  }

  // Check that all accesses to pointers in the alias set use the same type.
  // We cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  Type *AccessTy = nullptr;
  for (Value *ASIV : PointerMustAliases) {
    for (Use &U : ASIV->uses()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U.getUser());
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // the location.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();
        FoundLoadToPromote = true;

        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
        // to execute does as well. Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(
                  *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
                  Preheader->getTerminator(), AllowSpeculation)) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are
        // satisfied. We may want to check if a store is guaranteed to execute
        // even if we already know that promotion is safe, since it may have
        // higher alignment than any other guaranteed stores, in which case we
        // can raise the alignment on the promoted store.
        Align InstAlignment = Store->getAlign();

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
            DereferenceableInPH = true;
            SafeToInsertStore = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (!SafeToInsertStore)
          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
            return DT->dominates(Store->getParent(), Exit);
          });

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
        }
      } else
        return false; // Not a load or store.
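
      // e.g. promotion is rejected when the same location is accessed as
      // different types inside the loop (illustrative IR):
      //   %v = load i32, ptr %p
      //   store i64 %x, ptr %p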
      if (!AccessTy)
        AccessTy = getLoadStoreType(UI);
      else if (AccessTy != getLoadStoreType(UI))
        return false;

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        AATags = UI->getAAMetadata();
      } else if (AATags) {
        AATags = AATags.merge(UI->getAAMetadata());
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail. We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result. We can't downgrade since that
  // would violate the memory model. Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it. We're only guaranteed to be able to lower naturally aligned
  // atomics.
  if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH)
    return false;

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is thread-local. If it is, then we can insert
  // stores along paths which originally didn't have them without violating the
  // memory model.
  if (!SafeToInsertStore) {
    if (IsKnownThreadLocalObject)
      SafeToInsertStore = true;
    else {
      Value *Object = getUnderlyingObject(SomePtr);
      SafeToInsertStore =
          (isNoAliasCall(Object) || isa<AllocaInst>(Object)) &&
          isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
    }
  }

  // If we've still failed to prove we can sink the store, hoist the load
  // only, if possible.
  if (!SafeToInsertStore && !FoundLoadToPromote)
    // If we cannot hoist the load either, give up.
    return false;

  // Let's do the promotion!
  if (SafeToInsertStore)
    LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
                      << '\n');
  else
    LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
                      << '\n');

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });
  ++NumPromoted;

  // Look at all the loop uses, and try to merge their locations.
  std::vector<const DILocation *> LoopUsesLocs;
  for (auto U : LoopUses)
    LoopUsesLocs.push_back(U->getDebugLoc().get());
  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, MSSAInsertPts, PIC, MSSAU, *LI, DL,
                        Alignment, SawUnorderedAtomic, AATags, *SafetyInfo,
                        SafeToInsertStore);

  // Set up the preheader to have a definition of the value. It is the
  // live-out value from the preheader that uses in the loop will use.
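  // Schematically (illustrative IR):
  //   preheader:
  //     %p.promoted = load <ty>, ptr %p
  // The loop then operates on the SSA value, and stores inserted in the exit
  // blocks (when legal) write the final value back to memory.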
  LoadInst *PreheaderLoad = new LoadInst(
      AccessTy, SomePtr, SomePtr->getName() + ".promoted",
      Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DebugLoc());
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
      PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
  MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
  MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);

  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);

  return true;
}

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn) {
  for (const BasicBlock *BB : L->blocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &Access : *Accesses)
        if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
          Fn(MUD->getMemoryInst());
}

static SmallVector<SmallSetVector<Value *, 8>, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
  AliasSetTracker AST(*AA);

  auto IsPotentiallyPromotable = [L](const Instruction *I) {
    if (const auto *SI = dyn_cast<StoreInst>(I))
      return L->isLoopInvariant(SI->getPointerOperand());
    if (const auto *LI = dyn_cast<LoadInst>(I))
      return L->isLoopInvariant(LI->getPointerOperand());
    return false;
  };

  // Populate AST with potentially promotable accesses.
  SmallPtrSet<Value *, 16> AttemptingPromotion;
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (IsPotentiallyPromotable(I)) {
      AttemptingPromotion.insert(I);
      AST.add(I);
    }
  });

  // We're only interested in must-alias sets that contain a mod.
  SmallVector<const AliasSet *, 8> Sets;
  for (AliasSet &AS : AST)
    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
      Sets.push_back(&AS);

  if (Sets.empty())
    return {}; // Nothing to promote...

  // Discard any sets for which there is an aliasing non-promotable access.
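  // e.g. a call that may read or write the location, or a volatile or
  // ordered access, is not itself promotable, and any set it aliases cannot
  // be promoted safely.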
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (AttemptingPromotion.contains(I))
      return;

    llvm::erase_if(Sets, [&](const AliasSet *AS) {
      return AS->aliasesUnknownInst(I, *AA);
    });
  });

  SmallVector<SmallSetVector<Value *, 8>, 0> Result;
  for (const AliasSet *Set : Sets) {
    SmallSetVector<Value *, 8> PointerMustAliases;
    for (const auto &ASI : *Set)
      PointerMustAliases.insert(ASI.getValue());
    Result.push_back(std::move(PointerMustAliases));
  }

  return Result;
}

static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA) {
  return CurAST->getAliasSetFor(MemLoc).isMod();
}

bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                      Loop *CurLoop, Instruction &I,
                                      SinkAndHoistLICMFlags &Flags) {
  // For hoisting, use the walker to determine safety.
  if (!Flags.getIsSink()) {
    MemoryAccess *Source;
    // See declaration of SetLicmMssaOptCap for usage details.
    if (Flags.tooManyClobberingCalls())
      Source = MU->getDefiningAccess();
    else {
      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
      Flags.incrementClobberingCalls();
    }
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock());
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  //   for (i ... )
  //     load a[i]   ; MemoryUse, defined by liveOnEntry
  //     store a[i]  ; 1 = MemoryDef(2), with 2 = MemoryPhi for the loop
  //     i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However sinking the load outside the loop, below the store, is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.tooManyMemoryAccesses())
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
      return true;
  // When sinking, the source block may not be part of the loop so check it.
  if (!CurLoop->contains(&I))
    return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);

  return false;
}

bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                       MemoryUse &MU) {
  if (const auto *Accesses = MSSA.getBlockDefs(&BB))
    for (const auto &MA : *Accesses)
      if (const auto *MD = dyn_cast<MemoryDef>(&MA))
        if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
          return true;
  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}